xref: /freebsd/sys/dev/ocs_fc/ocs_hw.c (revision 5def4c47d4bd90b209b9b4a4ba9faec15846d8fd)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33 
34 /**
35  * @file
36  * Defines and implements the Hardware Abstraction Layer (HW).
37  * All interaction with the hardware is performed through the HW, which abstracts
38  * the details of the underlying SLI-4 implementation.
39  */
40 
41 /**
42  * @defgroup devInitShutdown Device Initialization and Shutdown
43  * @defgroup domain Domain Functions
44  * @defgroup port Port Functions
45  * @defgroup node Remote Node Functions
46  * @defgroup io IO Functions
47  * @defgroup interrupt Interrupt handling
48  * @defgroup os OS Required Functions
49  */
50 
51 #include "ocs.h"
52 #include "ocs_os.h"
53 #include "ocs_hw.h"
54 #include "ocs_hw_queues.h"
55 
56 #define OCS_HW_MQ_DEPTH	128
57 #define OCS_HW_READ_FCF_SIZE	4096
58 #define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS	256
59 #define OCS_HW_WQ_TIMER_PERIOD_MS	500
60 
61 /* values used for setting the auto xfer rdy parameters */
62 #define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT		0 /* 512 bytes */
63 #define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT	TRUE
64 #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT	FALSE
65 #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT	0
66 #define OCS_HW_REQUE_XRI_REGTAG			65534
67 /* max command and response buffer lengths -- arbitrary at the moment */
68 #define OCS_HW_DMTF_CLP_CMD_MAX	256
69 #define OCS_HW_DMTF_CLP_RSP_MAX	256
70 
71 /* HW global data */
72 ocs_hw_global_t hw_global;
73 
74 static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
75 static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
76 static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
77 static int32_t ocs_hw_cb_link(void *, void *);
78 static int32_t ocs_hw_cb_fip(void *, void *);
79 static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
80 static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
81 static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
82 static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
83 static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
84 static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
85 static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
86 static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
87 static int32_t ocs_hw_flush(ocs_hw_t *);
88 static int32_t ocs_hw_command_cancel(ocs_hw_t *);
89 static int32_t ocs_hw_io_cancel(ocs_hw_t *);
90 static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
91 static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
92 static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
93 static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
94 static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void  *);
95 static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void  *);
96 static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void  *);
97 static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void  *);
98 static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg);
99 static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg);
100 static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
101 typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
102 static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
103 static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
104 
105 static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
106 static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
107 static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
108 static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
109 static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
110 static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
111 static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
112 static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
113 static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
114 static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
115 static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
116 static void ocs_hw_io_free_internal(void *arg);
117 static void ocs_hw_io_free_port_owned(void *arg);
118 static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
119 static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
120 static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
121 static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
122 static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
123 static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);
124 
125 /* HW domain database operations */
126 static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
127 static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);
128 
129 /* Port state machine */
130 static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
131 static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
132 static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
133 static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
134 static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
135 
136 /* Domain state machine */
137 static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
138 static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
139 static void * __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
140 static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
141 static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
142 static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
143 static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
144 static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
145 
146 /* BZ 161832 */
147 static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);
148 
149 /* WQE timeouts */
150 static void target_wqe_timer_cb(void *arg);
151 static void shutdown_target_wqe_timer(ocs_hw_t *hw);
152 
153 static inline void
154 ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
155 {
156 	if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
157 		/*
158 		 * Active WQE list currently only used for
159 		 * target WQE timeouts.
160 		 */
161 		ocs_lock(&hw->io_lock);
162 			ocs_list_add_tail(&hw->io_timed_wqe, io);
163 			io->submit_ticks = ocs_get_os_ticks();
164 		ocs_unlock(&hw->io_lock);
165 	}
166 }
167 
168 static inline void
169 ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
170 {
171 	if (hw->config.emulate_tgt_wqe_timeout) {
172 		/*
173 		 * If target wqe timeouts are enabled,
174 		 * remove from active wqe list.
175 		 */
176 		ocs_lock(&hw->io_lock);
177 			if (ocs_list_on_list(&io->wqe_link)) {
178 				ocs_list_remove(&hw->io_timed_wqe, io);
179 			}
180 		ocs_unlock(&hw->io_lock);
181 	}
182 }
183 
184 static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type)
185 {
186 	switch (io_type) {
187 	case OCS_HW_IO_INITIATOR_READ:
188 	case OCS_HW_IO_INITIATOR_WRITE:
189 	case OCS_HW_IO_INITIATOR_NODATA:
190 	case OCS_HW_FC_CT:
191 	case OCS_HW_ELS_REQ:
192 		return 1;
193 	default:
194 		return 0;
195 	}
196 }
197 
198 static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
199 {
200 	/* if exchange not active, nothing to abort */
201 	if (!xb) {
202 		return FALSE;
203 	}
204 	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
205 		switch (ext) {
206 		/* exceptions where abort is not needed */
207 		case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */
208 		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */
209 			return FALSE;
210 		default:
211 			break;
212 		}
213 	}
214 	return TRUE;
215 }
216 
217 /**
218  * @brief Determine the number of chutes on the device.
219  *
220  * @par Description
221  * Some devices require queue resources allocated per protocol processor
222  * (chute). This function returns the number of chutes on this device.
223  *
224  * @param hw Hardware context allocated by the caller.
225  *
226  * @return Returns the number of chutes on the device for protocol.
227  */
228 static uint32_t
229 ocs_hw_get_num_chutes(ocs_hw_t *hw)
230 {
231 	uint32_t num_chutes = 1;
232 
233 	if (sli_get_is_dual_ulp_capable(&hw->sli) &&
234 	    sli_get_is_ulp_enabled(&hw->sli, 0) &&
235 	    sli_get_is_ulp_enabled(&hw->sli, 1)) {
236 		num_chutes = 2;
237 	}
238 	return num_chutes;
239 }
240 
241 static ocs_hw_rtn_e
242 ocs_hw_link_event_init(ocs_hw_t *hw)
243 {
244 	ocs_hw_assert(hw);
245 
246 	hw->link.status = SLI_LINK_STATUS_MAX;
247 	hw->link.topology = SLI_LINK_TOPO_NONE;
248 	hw->link.medium = SLI_LINK_MEDIUM_MAX;
249 	hw->link.speed = 0;
250 	hw->link.loop_map = NULL;
251 	hw->link.fc_id = UINT32_MAX;
252 
253 	return OCS_HW_RTN_SUCCESS;
254 }
255 
256 /**
257  * @ingroup devInitShutdown
258  * @brief If this is physical port 0, then read the max dump size.
259  *
260  * @par Description
261  * Queries the FW for the maximum dump size
262  *
263  * @param hw Hardware context allocated by the caller.
264  *
265  * @return Returns 0 on success, or a non-zero value on failure.
266  */
267 static ocs_hw_rtn_e
268 ocs_hw_read_max_dump_size(ocs_hw_t *hw)
269 {
270 	uint8_t	buf[SLI4_BMBX_SIZE];
271 	uint8_t bus, dev, func;
272 	int 	rc;
273 
274 	/* lancer only */
275 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
276 		ocs_log_debug(hw->os, "Function only supported for I/F type 2\n");
277 		return OCS_HW_RTN_ERROR;
278 	}
279 
280 	/*
281 	 * Make sure the FW is new enough to support this command. If the FW
282 	 * is too old, the FW will UE.
283 	 */
284 	if (hw->workaround.disable_dump_loc) {
285 		ocs_log_test(hw->os, "FW version is too old for this feature\n");
286 		return OCS_HW_RTN_ERROR;
287 	}
288 
289 	/* attempt to detemine the dump size for function 0 only. */
290 	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
291 	if (func == 0) {
292 		if (sli_cmd_common_set_dump_location(&hw->sli, buf,
293 							SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
294 			sli4_res_common_set_dump_location_t *rsp =
295 				(sli4_res_common_set_dump_location_t *)
296 				(buf + offsetof(sli4_cmd_sli_config_t,
297 						payload.embed));
298 
299 			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
300 			if (rc != OCS_HW_RTN_SUCCESS) {
301 				ocs_log_test(hw->os, "set dump location command failed\n");
302 				return rc;
303 			} else {
304 				hw->dump_size = rsp->buffer_length;
305 				ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);
306 			}
307 		}
308 	}
309 	return OCS_HW_RTN_SUCCESS;
310 }
311 
312 /**
313  * @ingroup devInitShutdown
314  * @brief Set up the Hardware Abstraction Layer module.
315  *
316  * @par Description
317  * Calls set up to configure the hardware.
318  *
319  * @param hw Hardware context allocated by the caller.
320  * @param os Device abstraction.
321  * @param port_type Protocol type of port, such as FC and NIC.
322  *
323  * @todo Why is port_type a parameter?
324  *
325  * @return Returns 0 on success, or a non-zero value on failure.
326  */
327 ocs_hw_rtn_e
328 ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
329 {
330 	uint32_t i;
331 	char prop_buf[32];
332 
333 	if (hw == NULL) {
334 		ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
335 		return OCS_HW_RTN_ERROR;
336 	}
337 
338 	if (hw->hw_setup_called) {
339 		/* Setup run-time workarounds.
340 		 * Call for each setup, to allow for hw_war_version
341 		 */
342 		ocs_hw_workaround_setup(hw);
343 		return OCS_HW_RTN_SUCCESS;
344 	}
345 
346 	/*
347 	 * ocs_hw_init() relies on NULL pointers indicating that a structure
348 	 * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
349 	 * free/realloc that memory
350 	 */
351 	ocs_memset(hw, 0, sizeof(ocs_hw_t));
352 
353 	hw->hw_setup_called = TRUE;
354 
355 	hw->os = os;
356 
357 	ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
358 	ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
359 	ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
360 	hw->cmd_head_count = 0;
361 
362 	ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
363 	ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));
364 
365 	ocs_atomic_init(&hw->io_alloc_failed_count, 0);
366 
367 	hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
368 	hw->config.dif_seed = 0;
369 	hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
370 	hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
371 	hw->config.auto_xfer_rdy_app_tag_valid =  OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
372 	hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;
373 
374 	if (sli_setup(&hw->sli, hw->os, port_type)) {
375 		ocs_log_err(hw->os, "SLI setup failed\n");
376 		return OCS_HW_RTN_ERROR;
377 	}
378 
379 	ocs_memset(hw->domains, 0, sizeof(hw->domains));
380 
381 	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
382 
383 	ocs_hw_link_event_init(hw);
384 
385 	sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
386 	sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);
387 
388 	/*
389 	 * Set all the queue sizes to the maximum allowed. These values may
390 	 * be changes later by the adjust and workaround functions.
391 	 */
392 	for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
393 		hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);
394 	}
395 
396 	/*
397 	 * The RQ assignment for RQ pair mode.
398 	 */
399 	hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
400 	hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
401 	if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
402 		hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);
403 	}
404 
405 	/* by default, enable initiator-only auto-ABTS emulation */
406 	hw->config.i_only_aab = TRUE;
407 
408 	/* Setup run-time workarounds */
409 	ocs_hw_workaround_setup(hw);
410 
411 	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
412 	if (hw->workaround.override_fcfi) {
413 		hw->first_domain_idx = -1;
414 	}
415 
416 	/* Must be done after the workaround setup */
417 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
418 		(void)ocs_hw_read_max_dump_size(hw);
419 	}
420 
421 	/* calculate the number of WQs required. */
422 	ocs_hw_adjust_wqs(hw);
423 
424 	/* Set the default dif mode */
425 	if (! sli_is_dif_inline_capable(&hw->sli)) {
426 		ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
427 		hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;
428 	}
429 	/* Workaround: BZ 161832 */
430 	if (hw->workaround.use_dif_sec_xri) {
431 		ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
432 	}
433 
434 	/*
435 	 * Figure out the starting and max ULP to spread the WQs across the
436 	 * ULPs.
437 	 */
438 	if (sli_get_is_dual_ulp_capable(&hw->sli)) {
439 		if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
440 		    sli_get_is_ulp_enabled(&hw->sli, 1)) {
441 			hw->ulp_start = 0;
442 			hw->ulp_max   = 1;
443 		} else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
444 			hw->ulp_start = 0;
445 			hw->ulp_max   = 0;
446 		} else {
447 			hw->ulp_start = 1;
448 			hw->ulp_max   = 1;
449 		}
450 	} else {
451 		if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
452 			hw->ulp_start = 0;
453 			hw->ulp_max   = 0;
454 		} else {
455 			hw->ulp_start = 1;
456 			hw->ulp_max   = 1;
457 		}
458 	}
459 	ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
460 		hw->ulp_start, hw->ulp_max);
461 	hw->config.queue_topology = hw_global.queue_topology_string;
462 
463 	hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);
464 
465 	hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
466 	hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
467 	hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
468 	hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
469 	hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];
470 
471 	/* Verify qtop configuration against driver supported configuration */
472 	if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
473 		ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
474 				OCE_HW_MAX_NUM_MRQ_PAIRS);
475 		return OCS_HW_RTN_ERROR;
476 	}
477 
478 	if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
479 		ocs_log_crit(hw->os, "Max supported EQs = %d\n",
480 				OCS_HW_MAX_NUM_EQ);
481 		return OCS_HW_RTN_ERROR;
482 	}
483 
484 	if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
485 		ocs_log_crit(hw->os, "Max supported CQs = %d\n",
486 				OCS_HW_MAX_NUM_CQ);
487 		return OCS_HW_RTN_ERROR;
488 	}
489 
490 	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
491 		ocs_log_crit(hw->os, "Max supported WQs = %d\n",
492 				OCS_HW_MAX_NUM_WQ);
493 		return OCS_HW_RTN_ERROR;
494 	}
495 
496 	if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
497 		ocs_log_crit(hw->os, "Max supported MQs = %d\n",
498 				OCS_HW_MAX_NUM_MQ);
499 		return OCS_HW_RTN_ERROR;
500 	}
501 
502 	return OCS_HW_RTN_SUCCESS;
503 }
504 
505 /**
506  * @ingroup devInitShutdown
507  * @brief Allocate memory structures to prepare for the device operation.
508  *
509  * @par Description
510  * Allocates memory structures needed by the device and prepares the device
511  * for operation.
512  * @n @n @b Note: This function may be called more than once (for example, at
513  * initialization and then after a reset), but the size of the internal resources
514  * may not be changed without tearing down the HW (ocs_hw_teardown()).
515  *
516  * @param hw Hardware context allocated by the caller.
517  *
518  * @return Returns 0 on success, or a non-zero value on failure.
519  */
520 ocs_hw_rtn_e
521 ocs_hw_init(ocs_hw_t *hw)
522 {
523 	ocs_hw_rtn_e	rc;
524 	uint32_t	i = 0;
525 	uint8_t		buf[SLI4_BMBX_SIZE];
526 	uint32_t	max_rpi;
527 	int		rem_count;
528 	int	        written_size = 0;
529 	uint32_t	count;
530 	char		prop_buf[32];
531 	uint32_t ramdisc_blocksize = 512;
532 	uint32_t q_count = 0;
533 	/*
534 	 * Make sure the command lists are empty. If this is start-of-day,
535 	 * they'll be empty since they were just initialized in ocs_hw_setup.
536 	 * If we've just gone through a reset, the command and command pending
537 	 * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
538 	 */
539 	ocs_lock(&hw->cmd_lock);
540 		if (!ocs_list_empty(&hw->cmd_head)) {
541 			ocs_log_test(hw->os, "command found on cmd list\n");
542 			ocs_unlock(&hw->cmd_lock);
543 			return OCS_HW_RTN_ERROR;
544 		}
545 		if (!ocs_list_empty(&hw->cmd_pending)) {
546 			ocs_log_test(hw->os, "command found on pending list\n");
547 			ocs_unlock(&hw->cmd_lock);
548 			return OCS_HW_RTN_ERROR;
549 		}
550 	ocs_unlock(&hw->cmd_lock);
551 
552 	/* Free RQ buffers if prevously allocated */
553 	ocs_hw_rx_free(hw);
554 
555 	/*
556 	 * The IO queues must be initialized here for the reset case. The
557 	 * ocs_hw_init_io() function will re-add the IOs to the free list.
558 	 * The cmd_head list should be OK since we free all entries in
559 	 * ocs_hw_command_cancel() that is called in the ocs_hw_reset().
560 	 */
561 
562 	/* If we are in this function due to a reset, there may be stale items
563 	 * on lists that need to be removed.  Clean them up.
564 	 */
565 	rem_count=0;
566 	if (ocs_list_valid(&hw->io_wait_free)) {
567 		while ((!ocs_list_empty(&hw->io_wait_free))) {
568 			rem_count++;
569 			ocs_list_remove_head(&hw->io_wait_free);
570 		}
571 		if (rem_count > 0) {
572 			ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);
573 		}
574 	}
575 	rem_count=0;
576 	if (ocs_list_valid(&hw->io_inuse)) {
577 		while ((!ocs_list_empty(&hw->io_inuse))) {
578 			rem_count++;
579 			ocs_list_remove_head(&hw->io_inuse);
580 		}
581 		if (rem_count > 0) {
582 			ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);
583 		}
584 	}
585 	rem_count=0;
586 	if (ocs_list_valid(&hw->io_free)) {
587 		while ((!ocs_list_empty(&hw->io_free))) {
588 			rem_count++;
589 			ocs_list_remove_head(&hw->io_free);
590 		}
591 		if (rem_count > 0) {
592 			ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);
593 		}
594 	}
595 	if (ocs_list_valid(&hw->io_port_owned)) {
596 		while ((!ocs_list_empty(&hw->io_port_owned))) {
597 			ocs_list_remove_head(&hw->io_port_owned);
598 		}
599 	}
600 	ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
601 	ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
602 	ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
603 	ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
604 	ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
605 	ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);
606 
607 	/* If MRQ not required, Make sure we dont request feature. */
608 	if (hw->config.n_rq == 1) {
609 		hw->sli.config.features.flag.mrqp = FALSE;
610 	}
611 
612 	if (sli_init(&hw->sli)) {
613 		ocs_log_err(hw->os, "SLI failed to initialize\n");
614 		return OCS_HW_RTN_ERROR;
615 	}
616 
617 	/*
618 	 * Enable the auto xfer rdy feature if requested.
619 	 */
620 	hw->auto_xfer_rdy_enabled = FALSE;
621 	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
622 	    hw->config.auto_xfer_rdy_size > 0) {
623 		if (hw->config.esoc){
624 			if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
625 				ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
626 			}
627 			written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
628 		} else {
629 			written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);
630 		}
631 		if (written_size) {
632 			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
633 			if (rc != OCS_HW_RTN_SUCCESS) {
634 				ocs_log_err(hw->os, "config auto xfer rdy failed\n");
635 				return rc;
636 			}
637 		}
638 		hw->auto_xfer_rdy_enabled = TRUE;
639 
640 		if (hw->config.auto_xfer_rdy_t10_enable) {
641 			rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
642 			if (rc != OCS_HW_RTN_SUCCESS) {
643 				ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");
644 				return rc;
645 			}
646 		}
647 	}
648 
649 	if(hw->sliport_healthcheck) {
650 		rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
651 		if (rc != OCS_HW_RTN_SUCCESS) {
652 			ocs_log_err(hw->os, "Enabling Sliport Health check failed \n");
653 			return rc;
654 		}
655 	}
656 
657 	/*
658 	 * Set FDT transfer hint, only works on Lancer
659 	 */
660 	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
661 		/*
662 		 * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on
663 		 * devices with legacy firmware that do not support OCS_HW_FDT_XFER_HINT feature.
664 		 */
665 		ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
666 	}
667 
668 	/*
669 	 * Verify that we have not exceeded any queue sizes
670 	 */
671 	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
672 					OCS_HW_MAX_NUM_EQ);
673 	if (hw->config.n_eq > q_count) {
674 		ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
675 			    hw->config.n_eq, q_count);
676 		return OCS_HW_RTN_ERROR;
677 	}
678 
679 	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
680 					OCS_HW_MAX_NUM_CQ);
681 	if (hw->config.n_cq > q_count) {
682 		ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
683 			    hw->config.n_cq, q_count);
684 		return OCS_HW_RTN_ERROR;
685 	}
686 
687 	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
688 					OCS_HW_MAX_NUM_MQ);
689 	if (hw->config.n_mq > q_count) {
690 		ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
691 			    hw->config.n_mq, q_count);
692 		return OCS_HW_RTN_ERROR;
693 	}
694 
695 	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
696 					OCS_HW_MAX_NUM_RQ);
697 	if (hw->config.n_rq > q_count) {
698 		ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
699 			    hw->config.n_rq, q_count);
700 		return OCS_HW_RTN_ERROR;
701 	}
702 
703 	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
704 					OCS_HW_MAX_NUM_WQ);
705 	if (hw->config.n_wq > q_count) {
706 		ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
707 			    hw->config.n_wq, q_count);
708 		return OCS_HW_RTN_ERROR;
709 	}
710 
711 	/* zero the hashes */
712 	ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
713 	ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
714 			OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);
715 
716 	ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
717 	ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
718 			OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);
719 
720 	ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
721 	ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
722 			OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);
723 
724 	rc = ocs_hw_init_queues(hw, hw->qtop);
725 	if (rc != OCS_HW_RTN_SUCCESS) {
726 		return rc;
727 	}
728 
729 	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
730 	i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
731 	if (i) {
732 		ocs_dma_t payload_memory;
733 
734 		rc = OCS_HW_RTN_ERROR;
735 
736 		if (hw->rnode_mem.size) {
737 			ocs_dma_free(hw->os, &hw->rnode_mem);
738 		}
739 
740 		if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
741 			ocs_log_err(hw->os, "remote node memory allocation fail\n");
742 			return OCS_HW_RTN_NO_MEMORY;
743 		}
744 
745 		payload_memory.size = 0;
746 		if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
747 					&hw->rnode_mem, UINT16_MAX, &payload_memory)) {
748 			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
749 
750 			if (payload_memory.size != 0) {
751 				/* The command was non-embedded - need to free the dma buffer */
752 				ocs_dma_free(hw->os, &payload_memory);
753 			}
754 		}
755 
756 		if (rc != OCS_HW_RTN_SUCCESS) {
757 			ocs_log_err(hw->os, "header template registration failed\n");
758 			return rc;
759 		}
760 	}
761 
762 	/* Allocate and post RQ buffers */
763 	rc = ocs_hw_rx_allocate(hw);
764 	if (rc) {
765 		ocs_log_err(hw->os, "rx_allocate failed\n");
766 		return rc;
767 	}
768 
769 	/* Populate hw->seq_free_list */
770 	if (hw->seq_pool == NULL) {
771 		uint32_t count = 0;
772 		uint32_t i;
773 
774 		/* Sum up the total number of RQ entries, to use to allocate the sequence object pool */
775 		for (i = 0; i < hw->hw_rq_count; i++) {
776 			count += hw->hw_rq[i]->entry_count;
777 		}
778 
779 		hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
780 		if (hw->seq_pool == NULL) {
781 			ocs_log_err(hw->os, "malloc seq_pool failed\n");
782 			return OCS_HW_RTN_NO_MEMORY;
783 		}
784 	}
785 
786 	if(ocs_hw_rx_post(hw)) {
787 		ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");
788 	}
789 
790 	/* Allocate rpi_ref if not previously allocated */
791 	if (hw->rpi_ref == NULL) {
792 		hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
793 					  OCS_M_ZERO | OCS_M_NOWAIT);
794 		if (hw->rpi_ref == NULL) {
795 			ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
796 			return OCS_HW_RTN_NO_MEMORY;
797 		}
798 	}
799 
800 	for (i = 0; i < max_rpi; i ++) {
801 		ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
802 		ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
803 	}
804 
805 	ocs_memset(hw->domains, 0, sizeof(hw->domains));
806 
807 	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
808 	if (hw->workaround.override_fcfi) {
809 		hw->first_domain_idx = -1;
810 	}
811 
812 	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
813 
814 	/* Register a FCFI to allow unsolicited frames to be routed to the driver */
815 	if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
816 		if (hw->hw_mrq_count) {
817 			ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");
818 
819 			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
820 			if (rc != OCS_HW_RTN_SUCCESS) {
821 				ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");
822 				return rc;
823 			}
824 
825 			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
826 			if (rc != OCS_HW_RTN_SUCCESS) {
827 				ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");
828 				return rc;
829 			}
830 		} else {
831 			sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
832 
833 			ocs_log_debug(hw->os, "using REG_FCFI standard\n");
834 
835 			/* Set the filter match/mask values from hw's filter_def values */
836 			for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
837 				rq_cfg[i].rq_id = 0xffff;
838 				rq_cfg[i].r_ctl_mask =	(uint8_t)  hw->config.filter_def[i];
839 				rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
840 				rq_cfg[i].type_mask =	(uint8_t) (hw->config.filter_def[i] >> 16);
841 				rq_cfg[i].type_match =	(uint8_t) (hw->config.filter_def[i] >> 24);
842 			}
843 
844 			/*
845 			 * Update the rq_id's of the FCF configuration (don't update more than the number
846 			 * of rq_cfg elements)
847 			 */
848 			for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
849 				hw_rq_t *rq = hw->hw_rq[i];
850 				uint32_t j;
851 				for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
852 					uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
853 					if (mask & (1U << j)) {
854 						rq_cfg[j].rq_id = rq->hdr->id;
855 						ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
856 							j, hw->config.filter_def[j], i, rq->hdr->id);
857 					}
858 				}
859 			}
860 
861 			rc = OCS_HW_RTN_ERROR;
862 
863 			if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
864 				rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
865 			}
866 
867 			if (rc != OCS_HW_RTN_SUCCESS) {
868 				ocs_log_err(hw->os, "FCFI registration failed\n");
869 				return rc;
870 			}
871 			hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;
872 		}
873 	}
874 
875 	/*
876 	 * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
877 	 * thus the pool allocation size of 64k)
878 	 */
879 	rc = ocs_hw_reqtag_init(hw);
880 	if (rc) {
881 		ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);
882 		return rc;
883 	}
884 
885 	rc = ocs_hw_setup_io(hw);
886 	if (rc) {
887 		ocs_log_err(hw->os, "IO allocation failure\n");
888 		return rc;
889 	}
890 
891 	rc = ocs_hw_init_io(hw);
892 	if (rc) {
893 		ocs_log_err(hw->os, "IO initialization failure\n");
894 		return rc;
895 	}
896 
897 	ocs_queue_history_init(hw->os, &hw->q_hist);
898 
899 	/* get hw link config; polling, so callback will be called immediately */
900 	hw->linkcfg = OCS_HW_LINKCFG_NA;
901 	ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);
902 
903 	/* if lancer ethernet, ethernet ports need to be enabled */
904 	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
905 	    (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
906 		if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
907 			/* log warning but continue */
908 			ocs_log_err(hw->os, "Failed to set ethernet license\n");
909 		}
910 	}
911 
912 	/* Set the DIF seed - only for lancer right now */
913 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) &&
914 	    ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) {
915 		ocs_log_err(hw->os, "Failed to set DIF seed value\n");
916 		return rc;
917 	}
918 
919 	/* Set the DIF mode - skyhawk only */
920 	if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
921 	    sli_get_dif_capable(&hw->sli)) {
922 		rc = ocs_hw_set_dif_mode(hw);
923 		if (rc != OCS_HW_RTN_SUCCESS) {
924 			ocs_log_err(hw->os, "Failed to set DIF mode value\n");
925 			return rc;
926 		}
927 	}
928 
929 	/*
930 	 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries
931 	 */
932 	for (i = 0; i < hw->eq_count; i++) {
933 		sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);
934 	}
935 
936 	/*
937 	 * Initialize RQ hash
938 	 */
939 	for (i = 0; i < hw->rq_count; i++) {
940 		ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
941 	}
942 
943 	/*
944 	 * Initialize WQ hash
945 	 */
946 	for (i = 0; i < hw->wq_count; i++) {
947 		ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
948 	}
949 
950 	/*
951 	 * Arming the CQ allows (e.g.) MQ completions to write CQ entries
952 	 */
953 	for (i = 0; i < hw->cq_count; i++) {
954 		ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
955 		sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
956 	}
957 
958 	/* record the fact that the queues are functional */
959 	hw->state = OCS_HW_STATE_ACTIVE;
960 
961 	/* Note: Must be after the IOs are setup and the state is active*/
962 	if (ocs_hw_rqpair_init(hw)) {
963 		ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");
964 	}
965 
966 	/* finally kick off periodic timer to check for timed out target WQEs */
967 	if (hw->config.emulate_tgt_wqe_timeout) {
968 		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
969 				OCS_HW_WQ_TIMER_PERIOD_MS);
970 	}
971 
972 	/*
973 	 * Allocate a HW IOs for send frame.  Allocate one for each Class 1 WQ, or if there
974 	 * are none of those, allocate one for WQ[0]
975 	 */
976 	if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
977 		for (i = 0; i < count; i++) {
978 			hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
979 			wq->send_frame_io = ocs_hw_io_alloc(hw);
980 			if (wq->send_frame_io == NULL) {
981 				ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
982 			}
983 		}
984 	} else {
985 		hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
986 		if (hw->hw_wq[0]->send_frame_io == NULL) {
987 			ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
988 		}
989 	}
990 
991 	/* Initialize send frame frame sequence id */
992 	ocs_atomic_init(&hw->send_frame_seq_id, 0);
993 
994 	/* Initialize watchdog timer if enabled by user */
995 	hw->expiration_logged = 0;
996 	if(hw->watchdog_timeout) {
997 		if((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
998 			ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n");
999 		}else if(!ocs_hw_config_watchdog_timer(hw)) {
1000 			ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds \n", hw->watchdog_timeout);
1001 		}
1002 	}
1003 
1004 	if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
1005 	   ocs_log_err(hw->os, "domain node memory allocation fail\n");
1006 	   return OCS_HW_RTN_NO_MEMORY;
1007 	}
1008 
1009 	if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
1010 	   ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
1011 	   return OCS_HW_RTN_NO_MEMORY;
1012 	}
1013 
1014 	if ((0 == hw->loop_map.size) &&	ocs_dma_alloc(hw->os, &hw->loop_map,
1015 				SLI4_MIN_LOOP_MAP_BYTES, 4)) {
1016 		ocs_log_err(hw->os, "Loop dma alloc failed size:%d \n", hw->loop_map.size);
1017 	}
1018 
1019 	return OCS_HW_RTN_SUCCESS;
1020 }
1021 
1022 /**
1023  * @brief Configure Multi-RQ
1024  *
1025  * @param hw	Hardware context allocated by the caller.
1026  * @param mode	1 to set MRQ filters and 0 to set FCFI index
1027  * @param vlanid    valid in mode 0
1028  * @param fcf_index valid in mode 0
1029  *
1030  * @return Returns 0 on success, or a non-zero value on failure.
1031  */
1032 static int32_t
1033 ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
1034 {
1035 	uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
1036 	hw_rq_t *rq;
1037 	sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
1038 	uint32_t i, j;
1039 	sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
1040 	int32_t rc;
1041 
1042 	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1043 		goto issue_cmd;
1044 	}
1045 
1046 	/* Set the filter match/mask values from hw's filter_def values */
1047 	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
1048 		rq_filter[i].rq_id = 0xffff;
1049 		rq_filter[i].r_ctl_mask  = (uint8_t)  hw->config.filter_def[i];
1050 		rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
1051 		rq_filter[i].type_mask   = (uint8_t) (hw->config.filter_def[i] >> 16);
1052 		rq_filter[i].type_match  = (uint8_t) (hw->config.filter_def[i] >> 24);
1053 	}
1054 
1055 	/* Accumulate counts for each filter type used, build rq_ids[] list */
1056 	for (i = 0; i < hw->hw_rq_count; i++) {
1057 		rq = hw->hw_rq[i];
1058 		for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
1059 			if (rq->filter_mask & (1U << j)) {
1060 				if (rq_filter[j].rq_id != 0xffff) {
1061 					/* Already used. Bailout ifts not RQset case */
1062 					if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
1063 						ocs_log_err(hw->os, "Wrong queue topology.\n");
1064 						return OCS_HW_RTN_ERROR;
1065 					}
1066 					continue;
1067 				}
1068 
1069 				if (rq->is_mrq) {
1070 					rq_filter[j].rq_id = rq->base_mrq_id;
1071 					mrq_bitmask |= (1U << j);
1072 				} else {
1073 					rq_filter[j].rq_id = rq->hdr->id;
1074 				}
1075 			}
1076 		}
1077 	}
1078 
1079 issue_cmd:
1080 	/* Invoke REG_FCFI_MRQ */
1081 	rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
1082 				 buf,					/* buf */
1083 				 SLI4_BMBX_SIZE,			/* size */
1084 				 mode,					/* mode 1 */
1085 				 fcf_index,				/* fcf_index */
1086 				 vlanid,				/* vlan_id */
1087 				 hw->config.rq_selection_policy,	/* RQ selection policy*/
1088 				 mrq_bitmask,				/* MRQ bitmask */
1089 				 hw->hw_mrq_count,			/* num_mrqs */
1090 				 rq_filter);				/* RQ filter */
1091 	if (rc == 0) {
1092 		ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
1093 		return OCS_HW_RTN_ERROR;
1094 	}
1095 
1096 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
1097 
1098 	rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;
1099 
1100 	if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
1101 		ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
1102 			    rsp->hdr.command, rsp->hdr.status);
1103 		return OCS_HW_RTN_ERROR;
1104 	}
1105 
1106 	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1107 		hw->fcf_indicator = rsp->fcfi;
1108 	}
1109 	return 0;
1110 }
1111 
1112 /**
1113  * @brief Callback function for getting linkcfg during HW initialization.
1114  *
1115  * @param status Status of the linkcfg get operation.
1116  * @param value Link configuration enum to which the link configuration is set.
1117  * @param arg Callback argument (ocs_hw_t *).
1118  *
1119  * @return None.
1120  */
1121 static void
1122 ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
1123 {
1124 	ocs_hw_t *hw = (ocs_hw_t *)arg;
1125 	if (status == 0) {
1126 		hw->linkcfg = (ocs_hw_linkcfg_e)value;
1127 	} else {
1128 		hw->linkcfg = OCS_HW_LINKCFG_NA;
1129 	}
1130 	ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
1131 }
1132 
1133 /**
1134  * @ingroup devInitShutdown
1135  * @brief Tear down the Hardware Abstraction Layer module.
1136  *
1137  * @par Description
1138  * Frees memory structures needed by the device, and shuts down the device. Does
1139  * not free the HW context memory (which is done by the caller).
1140  *
1141  * @param hw Hardware context allocated by the caller.
1142  *
1143  * @return Returns 0 on success, or a non-zero value on failure.
1144  */
1145 ocs_hw_rtn_e
1146 ocs_hw_teardown(ocs_hw_t *hw)
1147 {
1148 	uint32_t	i = 0;
1149 	uint32_t	iters = 10;/*XXX*/
1150 	uint32_t	max_rpi;
1151 	uint32_t destroy_queues;
1152 	uint32_t free_memory;
1153 
1154 	if (!hw) {
1155 		ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
1156 		return OCS_HW_RTN_ERROR;
1157 	}
1158 
1159 	destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
1160 	free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);
1161 
1162 	/* shutdown target wqe timer */
1163 	shutdown_target_wqe_timer(hw);
1164 
1165 	/* Cancel watchdog timer if enabled */
1166 	if(hw->watchdog_timeout) {
1167 		hw->watchdog_timeout = 0;
1168 		ocs_hw_config_watchdog_timer(hw);
1169 	}
1170 
1171 	/* Cancel Sliport Healthcheck */
1172 	if(hw->sliport_healthcheck) {
1173 		hw->sliport_healthcheck = 0;
1174 		ocs_hw_config_sli_port_health_check(hw, 0, 0);
1175 	}
1176 
1177 	if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
1178 		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
1179 
1180 		ocs_hw_flush(hw);
1181 
1182 		/* If there are outstanding commands, wait for them to complete */
1183 		while (!ocs_list_empty(&hw->cmd_head) && iters) {
1184 			ocs_udelay(10000);
1185 			ocs_hw_flush(hw);
1186 			iters--;
1187 		}
1188 
1189 		if (ocs_list_empty(&hw->cmd_head)) {
1190 			ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
1191 		} else {
1192 			ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
1193 		}
1194 
1195 		/* Cancel any remaining commands */
1196 		ocs_hw_command_cancel(hw);
1197 	} else {
1198 		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
1199 	}
1200 
1201 	ocs_lock_free(&hw->cmd_lock);
1202 
1203 	/* Free unregistered RPI if workaround is in force */
1204 	if (hw->workaround.use_unregistered_rpi) {
1205 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
1206 	}
1207 
1208 	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
1209 	if (hw->rpi_ref) {
1210 		for (i = 0; i < max_rpi; i++) {
1211 			if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
1212 				ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
1213 						i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
1214 			}
1215 		}
1216 		ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
1217 		hw->rpi_ref = NULL;
1218 	}
1219 
1220 	ocs_dma_free(hw->os, &hw->rnode_mem);
1221 
1222 	if (hw->io) {
1223 		for (i = 0; i < hw->config.n_io; i++) {
1224 			if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
1225 			    (hw->io[i]->sgl->virt != NULL)) {
1226 				if(hw->io[i]->is_port_owned) {
1227 					ocs_lock_free(&hw->io[i]->axr_lock);
1228 				}
1229 				ocs_dma_free(hw->os, hw->io[i]->sgl);
1230 			}
1231 			ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
1232 			hw->io[i] = NULL;
1233 		}
1234 		ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
1235 		hw->wqe_buffs = NULL;
1236 		ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
1237 		hw->io = NULL;
1238 	}
1239 
1240 	ocs_dma_free(hw->os, &hw->xfer_rdy);
1241 	ocs_dma_free(hw->os, &hw->dump_sges);
1242 	ocs_dma_free(hw->os, &hw->loop_map);
1243 
1244 	ocs_lock_free(&hw->io_lock);
1245 	ocs_lock_free(&hw->io_abort_lock);
1246 
1247 	for (i = 0; i < hw->wq_count; i++) {
1248 		sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
1249 	}
1250 
1251 	for (i = 0; i < hw->rq_count; i++) {
1252 		sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
1253 	}
1254 
1255 	for (i = 0; i < hw->mq_count; i++) {
1256 		sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
1257 	}
1258 
1259 	for (i = 0; i < hw->cq_count; i++) {
1260 		sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
1261 	}
1262 
1263 	for (i = 0; i < hw->eq_count; i++) {
1264 		sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
1265 	}
1266 
1267 	ocs_hw_qtop_free(hw->qtop);
1268 
1269 	/* Free rq buffers */
1270 	ocs_hw_rx_free(hw);
1271 
1272 	hw_queue_teardown(hw);
1273 
1274 	ocs_hw_rqpair_teardown(hw);
1275 
1276 	if (sli_teardown(&hw->sli)) {
1277 		ocs_log_err(hw->os, "SLI teardown failed\n");
1278 	}
1279 
1280 	ocs_queue_history_free(&hw->q_hist);
1281 
1282 	/* record the fact that the queues are non-functional */
1283 	hw->state = OCS_HW_STATE_UNINITIALIZED;
1284 
1285 	/* free sequence free pool */
1286 	ocs_array_free(hw->seq_pool);
1287 	hw->seq_pool = NULL;
1288 
1289 	/* free hw_wq_callback pool */
1290 	ocs_pool_free(hw->wq_reqtag_pool);
1291 
1292 	ocs_dma_free(hw->os, &hw->domain_dmem);
1293 	ocs_dma_free(hw->os, &hw->fcf_dmem);
1294 	/* Mark HW setup as not having been called */
1295 	hw->hw_setup_called = FALSE;
1296 
1297 	return OCS_HW_RTN_SUCCESS;
1298 }
1299 
1300 ocs_hw_rtn_e
1301 ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
1302 {
1303 	uint32_t	i;
1304 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1305 	uint32_t	iters;
1306 	ocs_hw_state_e prev_state = hw->state;
1307 
1308 	if (hw->state != OCS_HW_STATE_ACTIVE) {
1309 		ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
1310 	}
1311 
1312 	hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;
1313 
1314 	/* shutdown target wqe timer */
1315 	shutdown_target_wqe_timer(hw);
1316 
1317 	ocs_hw_flush(hw);
1318 
1319 	/*
1320 	 * If an mailbox command requiring a DMA is outstanding (i.e. SFP/DDM),
1321 	 * then the FW will UE when the reset is issued. So attempt to complete
1322 	 * all mailbox commands.
1323 	 */
1324 	iters = 10;
1325 	while (!ocs_list_empty(&hw->cmd_head) && iters) {
1326 		ocs_udelay(10000);
1327 		ocs_hw_flush(hw);
1328 		iters--;
1329 	}
1330 
1331 	if (ocs_list_empty(&hw->cmd_head)) {
1332 		ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
1333 	} else {
1334 		ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
1335 	}
1336 
1337 	/* Reset the chip */
1338 	switch(reset) {
1339 	case OCS_HW_RESET_FUNCTION:
1340 		ocs_log_debug(hw->os, "issuing function level reset\n");
1341 		if (sli_reset(&hw->sli)) {
1342 			ocs_log_err(hw->os, "sli_reset failed\n");
1343 			rc = OCS_HW_RTN_ERROR;
1344 		}
1345 		break;
1346 	case OCS_HW_RESET_FIRMWARE:
1347 		ocs_log_debug(hw->os, "issuing firmware reset\n");
1348 		if (sli_fw_reset(&hw->sli)) {
1349 			ocs_log_err(hw->os, "sli_soft_reset failed\n");
1350 			rc = OCS_HW_RTN_ERROR;
1351 		}
1352 		/*
1353 		 * Because the FW reset leaves the FW in a non-running state,
1354 		 * follow that with a regular reset.
1355 		 */
1356 		ocs_log_debug(hw->os, "issuing function level reset\n");
1357 		if (sli_reset(&hw->sli)) {
1358 			ocs_log_err(hw->os, "sli_reset failed\n");
1359 			rc = OCS_HW_RTN_ERROR;
1360 		}
1361 		break;
1362 	default:
1363 		ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
1364 		hw->state = prev_state;
1365 		return OCS_HW_RTN_ERROR;
1366 	}
1367 
1368 	/* Not safe to walk command/io lists unless they've been initialized */
1369 	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
1370 		ocs_hw_command_cancel(hw);
1371 
1372 		/* Clean up the inuse list, the free list and the wait free list */
1373 		ocs_hw_io_cancel(hw);
1374 
1375 		ocs_memset(hw->domains, 0, sizeof(hw->domains));
1376 		ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
1377 
1378 		ocs_hw_link_event_init(hw);
1379 
1380 		ocs_lock(&hw->io_lock);
1381 			/* The io lists should be empty, but remove any that didn't get cleaned up. */
1382 			while (!ocs_list_empty(&hw->io_timed_wqe)) {
1383 				ocs_list_remove_head(&hw->io_timed_wqe);
1384 			}
1385 			/* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */
1386 
1387 			while (!ocs_list_empty(&hw->io_free)) {
1388 				ocs_list_remove_head(&hw->io_free);
1389 			}
1390 			while (!ocs_list_empty(&hw->io_wait_free)) {
1391 				ocs_list_remove_head(&hw->io_wait_free);
1392 			}
1393 
1394 			/* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
1395 			ocs_hw_reqtag_reset(hw);
1396 
1397 		ocs_unlock(&hw->io_lock);
1398 	}
1399 
1400 	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
1401 		for (i = 0; i < hw->wq_count; i++) {
1402 			sli_queue_reset(&hw->sli, &hw->wq[i]);
1403 		}
1404 
1405 		for (i = 0; i < hw->rq_count; i++) {
1406 			sli_queue_reset(&hw->sli, &hw->rq[i]);
1407 		}
1408 
1409 		for (i = 0; i < hw->hw_rq_count; i++) {
1410 			hw_rq_t *rq = hw->hw_rq[i];
1411 			if (rq->rq_tracker != NULL) {
1412 				uint32_t j;
1413 
1414 				for (j = 0; j < rq->entry_count; j++) {
1415 					rq->rq_tracker[j] = NULL;
1416 				}
1417 			}
1418 		}
1419 
1420 		for (i = 0; i < hw->mq_count; i++) {
1421 			sli_queue_reset(&hw->sli, &hw->mq[i]);
1422 		}
1423 
1424 		for (i = 0; i < hw->cq_count; i++) {
1425 			sli_queue_reset(&hw->sli, &hw->cq[i]);
1426 		}
1427 
1428 		for (i = 0; i < hw->eq_count; i++) {
1429 			sli_queue_reset(&hw->sli, &hw->eq[i]);
1430 		}
1431 
1432 		/* Free rq buffers */
1433 		ocs_hw_rx_free(hw);
1434 
1435 		/* Teardown the HW queue topology */
1436 		hw_queue_teardown(hw);
1437 	} else {
1438 		/* Free rq buffers */
1439 		ocs_hw_rx_free(hw);
1440 	}
1441 
1442 	/*
1443 	 * Re-apply the run-time workarounds after clearing the SLI config
1444 	 * fields in sli_reset.
1445 	 */
1446 	ocs_hw_workaround_setup(hw);
1447 	hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
1448 
1449 	return rc;
1450 }
1451 
1452 int32_t
1453 ocs_hw_get_num_eq(ocs_hw_t *hw)
1454 {
1455 	return hw->eq_count;
1456 }
1457 
1458 static int32_t
1459 ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
1460 {
1461 	/* The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf
1462 	* No further explanation is given in the document.
1463 	* */
1464 	return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
1465 		sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
1466 }
1467 
1468 ocs_hw_rtn_e
1469 ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
1470 {
1471 	ocs_hw_rtn_e		rc = OCS_HW_RTN_SUCCESS;
1472 	int32_t			tmp;
1473 
1474 	if (!value) {
1475 		return OCS_HW_RTN_ERROR;
1476 	}
1477 
1478 	*value = 0;
1479 
1480 	switch (prop) {
1481 	case OCS_HW_N_IO:
1482 		*value = hw->config.n_io;
1483 		break;
1484 	case OCS_HW_N_SGL:
1485 		*value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED);
1486 		break;
1487 	case OCS_HW_MAX_IO:
1488 		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
1489 		break;
1490 	case OCS_HW_MAX_NODES:
1491 		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
1492 		break;
1493 	case OCS_HW_MAX_RQ_ENTRIES:
1494 		*value = hw->num_qentries[SLI_QTYPE_RQ];
1495 		break;
1496 	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1497 		*value = hw->config.rq_default_buffer_size;
1498 		break;
1499 	case OCS_HW_AUTO_XFER_RDY_CAPABLE:
1500 		*value = sli_get_auto_xfer_rdy_capable(&hw->sli);
1501 		break;
1502 	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1503 		*value = hw->config.auto_xfer_rdy_xri_cnt;
1504 		break;
1505 	case OCS_HW_AUTO_XFER_RDY_SIZE:
1506 		*value = hw->config.auto_xfer_rdy_size;
1507 		break;
1508 	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1509 		switch (hw->config.auto_xfer_rdy_blk_size_chip) {
1510 		case 0:
1511 			*value = 512;
1512 			break;
1513 		case 1:
1514 			*value = 1024;
1515 			break;
1516 		case 2:
1517 			*value = 2048;
1518 			break;
1519 		case 3:
1520 			*value = 4096;
1521 			break;
1522 		case 4:
1523 			*value = 520;
1524 			break;
1525 		default:
1526 			*value = 0;
1527 			rc = OCS_HW_RTN_ERROR;
1528 			break;
1529 		}
1530 		break;
1531 	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1532 		*value = hw->config.auto_xfer_rdy_t10_enable;
1533 		break;
1534 	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1535 		*value = hw->config.auto_xfer_rdy_p_type;
1536 		break;
1537 	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1538 		*value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
1539 		break;
1540 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1541 		*value = hw->config.auto_xfer_rdy_app_tag_valid;
1542 		break;
1543 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1544 		*value = hw->config.auto_xfer_rdy_app_tag_value;
1545 		break;
1546 	case OCS_HW_MAX_SGE:
1547 		*value = sli_get_max_sge(&hw->sli);
1548 		break;
1549 	case OCS_HW_MAX_SGL:
1550 		*value = sli_get_max_sgl(&hw->sli);
1551 		break;
1552 	case OCS_HW_TOPOLOGY:
1553 		/*
1554 		 * Infer link.status based on link.speed.
1555 		 * Report OCS_HW_TOPOLOGY_NONE if the link is down.
1556 		 */
1557 		if (hw->link.speed == 0) {
1558 			*value = OCS_HW_TOPOLOGY_NONE;
1559 			break;
1560 		}
1561 		switch (hw->link.topology) {
1562 		case SLI_LINK_TOPO_NPORT:
1563 			*value = OCS_HW_TOPOLOGY_NPORT;
1564 			break;
1565 		case SLI_LINK_TOPO_LOOP:
1566 			*value = OCS_HW_TOPOLOGY_LOOP;
1567 			break;
1568 		case SLI_LINK_TOPO_NONE:
1569 			*value = OCS_HW_TOPOLOGY_NONE;
1570 			break;
1571 		default:
1572 			ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
1573 			rc = OCS_HW_RTN_ERROR;
1574 			break;
1575 		}
1576 		break;
1577 	case OCS_HW_CONFIG_TOPOLOGY:
1578 		*value = hw->config.topology;
1579 		break;
1580 	case OCS_HW_LINK_SPEED:
1581 		*value = hw->link.speed;
1582 		break;
1583 	case OCS_HW_LINK_CONFIG_SPEED:
1584 		switch (hw->config.speed) {
1585 		case FC_LINK_SPEED_10G:
1586 			*value = 10000;
1587 			break;
1588 		case FC_LINK_SPEED_AUTO_16_8_4:
1589 			*value = 0;
1590 			break;
1591 		case FC_LINK_SPEED_2G:
1592 			*value = 2000;
1593 			break;
1594 		case FC_LINK_SPEED_4G:
1595 			*value = 4000;
1596 			break;
1597 		case FC_LINK_SPEED_8G:
1598 			*value = 8000;
1599 			break;
1600 		case FC_LINK_SPEED_16G:
1601 			*value = 16000;
1602 			break;
1603 		case FC_LINK_SPEED_32G:
1604 			*value = 32000;
1605 			break;
1606 		default:
1607 			ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
1608 			rc = OCS_HW_RTN_ERROR;
1609 			break;
1610 		}
1611 		break;
1612 	case OCS_HW_IF_TYPE:
1613 		*value = sli_get_if_type(&hw->sli);
1614 		break;
1615 	case OCS_HW_SLI_REV:
1616 		*value = sli_get_sli_rev(&hw->sli);
1617 		break;
1618 	case OCS_HW_SLI_FAMILY:
1619 		*value = sli_get_sli_family(&hw->sli);
1620 		break;
1621 	case OCS_HW_DIF_CAPABLE:
1622 		*value = sli_get_dif_capable(&hw->sli);
1623 		break;
1624 	case OCS_HW_DIF_SEED:
1625 		*value = hw->config.dif_seed;
1626 		break;
1627 	case OCS_HW_DIF_MODE:
1628 		*value = hw->config.dif_mode;
1629 		break;
1630 	case OCS_HW_DIF_MULTI_SEPARATE:
1631 		/* Lancer supports multiple DIF separates */
1632 		if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
1633 			*value = TRUE;
1634 		} else {
1635 			*value = FALSE;
1636 		}
1637 		break;
1638 	case OCS_HW_DUMP_MAX_SIZE:
1639 		*value = hw->dump_size;
1640 		break;
1641 	case OCS_HW_DUMP_READY:
1642 		*value = sli_dump_is_ready(&hw->sli);
1643 		break;
1644 	case OCS_HW_DUMP_PRESENT:
1645 		*value = sli_dump_is_present(&hw->sli);
1646 		break;
1647 	case OCS_HW_RESET_REQUIRED:
1648 		tmp = sli_reset_required(&hw->sli);
1649 		if(tmp < 0) {
1650 			rc = OCS_HW_RTN_ERROR;
1651 		} else {
1652 			*value = tmp;
1653 		}
1654 		break;
1655 	case OCS_HW_FW_ERROR:
1656 		*value = sli_fw_error_status(&hw->sli);
1657 		break;
1658 	case OCS_HW_FW_READY:
1659 		*value = sli_fw_ready(&hw->sli);
1660 		break;
1661 	case OCS_HW_FW_TIMED_OUT:
1662 		*value = ocs_hw_get_fw_timed_out(hw);
1663 		break;
1664 	case OCS_HW_HIGH_LOGIN_MODE:
1665 		*value = sli_get_hlm_capable(&hw->sli);
1666 		break;
1667 	case OCS_HW_PREREGISTER_SGL:
1668 		*value = sli_get_sgl_preregister_required(&hw->sli);
1669 		break;
1670 	case OCS_HW_HW_REV1:
1671 		*value = sli_get_hw_revision(&hw->sli, 0);
1672 		break;
1673 	case OCS_HW_HW_REV2:
1674 		*value = sli_get_hw_revision(&hw->sli, 1);
1675 		break;
1676 	case OCS_HW_HW_REV3:
1677 		*value = sli_get_hw_revision(&hw->sli, 2);
1678 		break;
1679 	case OCS_HW_LINKCFG:
1680 		*value = hw->linkcfg;
1681 		break;
1682 	case OCS_HW_ETH_LICENSE:
1683 		*value = hw->eth_license;
1684 		break;
1685 	case OCS_HW_LINK_MODULE_TYPE:
1686 		*value = sli_get_link_module_type(&hw->sli);
1687 		break;
1688 	case OCS_HW_NUM_CHUTES:
1689 		*value = ocs_hw_get_num_chutes(hw);
1690 		break;
1691 	case OCS_HW_DISABLE_AR_TGT_DIF:
1692 		*value = hw->workaround.disable_ar_tgt_dif;
1693 		break;
1694 	case OCS_HW_EMULATE_I_ONLY_AAB:
1695 		*value = hw->config.i_only_aab;
1696 		break;
1697 	case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
1698 		*value = hw->config.emulate_tgt_wqe_timeout;
1699 		break;
1700 	case OCS_HW_VPD_LEN:
1701 		*value = sli_get_vpd_len(&hw->sli);
1702 		break;
1703 	case OCS_HW_SGL_CHAINING_CAPABLE:
1704 		*value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
1705 		break;
1706 	case OCS_HW_SGL_CHAINING_ALLOWED:
1707 		/*
1708 		 * SGL Chaining is allowed in the following cases:
1709 		 *   1. Lancer with host SGL Lists
1710 		 *   2. Skyhawk with pre-registered SGL Lists
1711 		 */
1712 		*value = FALSE;
1713 		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1714 		    !sli_get_sgl_preregister(&hw->sli) &&
1715 		    SLI4_IF_TYPE_LANCER_FC_ETH  == sli_get_if_type(&hw->sli)) {
1716 			*value = TRUE;
1717 		}
1718 
1719 		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1720 		    sli_get_sgl_preregister(&hw->sli) &&
1721 		    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
1722 			(SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
1723 			*value = TRUE;
1724 		}
1725 		break;
1726 	case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
1727 		/* Only lancer supports host allocated SGL Chaining buffers. */
1728 		*value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1729 			  (SLI4_IF_TYPE_LANCER_FC_ETH  == sli_get_if_type(&hw->sli)));
1730 		break;
1731 	case OCS_HW_SEND_FRAME_CAPABLE:
1732 		if (hw->workaround.ignore_send_frame) {
1733 			*value = 0;
1734 		} else {
1735 			/* Only lancer is capable */
1736 			*value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
1737 		}
1738 		break;
1739 	case OCS_HW_RQ_SELECTION_POLICY:
1740 		*value = hw->config.rq_selection_policy;
1741 		break;
1742 	case OCS_HW_RR_QUANTA:
1743 		*value = hw->config.rr_quanta;
1744 		break;
1745 	case OCS_HW_MAX_VPORTS:
1746 		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
1747 		break;
1748 	default:
1749 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1750 		rc = OCS_HW_RTN_ERROR;
1751 	}
1752 
1753 	return rc;
1754 }
1755 
1756 void *
1757 ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
1758 {
1759 	void	*rc = NULL;
1760 
1761 	switch (prop) {
1762 	case OCS_HW_WWN_NODE:
1763 		rc = sli_get_wwn_node(&hw->sli);
1764 		break;
1765 	case OCS_HW_WWN_PORT:
1766 		rc = sli_get_wwn_port(&hw->sli);
1767 		break;
1768 	case OCS_HW_VPD:
1769 		/* make sure VPD length is non-zero */
1770 		if (sli_get_vpd_len(&hw->sli)) {
1771 			rc = sli_get_vpd(&hw->sli);
1772 		}
1773 		break;
1774 	case OCS_HW_FW_REV:
1775 		rc = sli_get_fw_name(&hw->sli, 0);
1776 		break;
1777 	case OCS_HW_FW_REV2:
1778 		rc = sli_get_fw_name(&hw->sli, 1);
1779 		break;
1780 	case OCS_HW_IPL:
1781 		rc = sli_get_ipl_name(&hw->sli);
1782 		break;
1783 	case OCS_HW_PORTNUM:
1784 		rc = sli_get_portnum(&hw->sli);
1785 		break;
1786 	case OCS_HW_BIOS_VERSION_STRING:
1787 		rc = sli_get_bios_version_string(&hw->sli);
1788 		break;
1789 	default:
1790 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1791 	}
1792 
1793 	return rc;
1794 }
1795 
1796 ocs_hw_rtn_e
1797 ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value)
1798 {
1799 	ocs_hw_rtn_e		rc = OCS_HW_RTN_SUCCESS;
1800 
1801 	switch (prop) {
1802 	case OCS_HW_N_IO:
1803 		if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) ||
1804 		    value == 0) {
1805 			ocs_log_test(hw->os, "IO value out of range %d vs %d\n",
1806 					value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI));
1807 			rc = OCS_HW_RTN_ERROR;
1808 		} else {
1809 			hw->config.n_io = value;
1810 		}
1811 		break;
1812 	case OCS_HW_N_SGL:
1813 		value += SLI4_SGE_MAX_RESERVED;
1814 		if (value > sli_get_max_sgl(&hw->sli)) {
1815 			ocs_log_test(hw->os, "SGL value out of range %d vs %d\n",
1816 					value, sli_get_max_sgl(&hw->sli));
1817 			rc = OCS_HW_RTN_ERROR;
1818 		} else {
1819 			hw->config.n_sgl = value;
1820 		}
1821 		break;
1822 	case OCS_HW_TOPOLOGY:
1823 		if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) &&
1824 				(value != OCS_HW_TOPOLOGY_AUTO)) {
1825 			ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n",
1826 					value, sli_get_medium(&hw->sli));
1827 			rc = OCS_HW_RTN_ERROR;
1828 			break;
1829 		}
1830 
1831 		switch (value) {
1832 		case OCS_HW_TOPOLOGY_AUTO:
1833 			if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
1834 				sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC);
1835 			} else {
1836 				sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE);
1837 			}
1838 			break;
1839 		case OCS_HW_TOPOLOGY_NPORT:
1840 			sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA);
1841 			break;
1842 		case OCS_HW_TOPOLOGY_LOOP:
1843 			sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL);
1844 			break;
1845 		default:
1846 			ocs_log_test(hw->os, "unsupported topology %#x\n", value);
1847 			rc = OCS_HW_RTN_ERROR;
1848 		}
1849 		hw->config.topology = value;
1850 		break;
1851 	case OCS_HW_LINK_SPEED:
1852 		if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) {
1853 			switch (value) {
1854 			case 0: 	/* Auto-speed negotiation */
1855 			case 10000:	/* FCoE speed */
1856 				hw->config.speed = FC_LINK_SPEED_10G;
1857 				break;
1858 			default:
1859 				ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n",
1860 						value, sli_get_medium(&hw->sli));
1861 				rc = OCS_HW_RTN_ERROR;
1862 			}
1863 			break;
1864 		}
1865 
1866 		switch (value) {
1867 		case 0:		/* Auto-speed negotiation */
1868 			hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
1869 			break;
1870 		case 2000:	/* FC speeds */
1871 			hw->config.speed = FC_LINK_SPEED_2G;
1872 			break;
1873 		case 4000:
1874 			hw->config.speed = FC_LINK_SPEED_4G;
1875 			break;
1876 		case 8000:
1877 			hw->config.speed = FC_LINK_SPEED_8G;
1878 			break;
1879 		case 16000:
1880 			hw->config.speed = FC_LINK_SPEED_16G;
1881 			break;
1882 		case 32000:
1883 			hw->config.speed = FC_LINK_SPEED_32G;
1884 			break;
1885 		default:
1886 			ocs_log_test(hw->os, "unsupported speed %d\n", value);
1887 			rc = OCS_HW_RTN_ERROR;
1888 		}
1889 		break;
1890 	case OCS_HW_DIF_SEED:
1891 		/* Set the DIF seed - only for lancer right now */
1892 		if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
1893 			ocs_log_test(hw->os, "DIF seed not supported for this device\n");
1894 			rc = OCS_HW_RTN_ERROR;
1895 		} else {
1896 			hw->config.dif_seed = value;
1897 		}
1898 		break;
1899 	case OCS_HW_DIF_MODE:
1900 		switch (value) {
1901 		case OCS_HW_DIF_MODE_INLINE:
1902 			/*
1903 			 *  Make sure we support inline DIF.
1904 			 *
1905 			 * Note: Having both bits clear means that we have old
1906 			 *	FW that doesn't set the bits.
1907 			 */
1908 			if (sli_is_dif_inline_capable(&hw->sli)) {
1909 				hw->config.dif_mode = value;
1910 			} else {
1911 				ocs_log_test(hw->os, "chip does not support DIF inline\n");
1912 				rc = OCS_HW_RTN_ERROR;
1913 			}
1914 			break;
1915 		case OCS_HW_DIF_MODE_SEPARATE:
1916 			/* Make sure we support DIF separates. */
1917 			if (sli_is_dif_separate_capable(&hw->sli)) {
1918 				hw->config.dif_mode = value;
1919 			} else {
1920 				ocs_log_test(hw->os, "chip does not support DIF separate\n");
1921 				rc = OCS_HW_RTN_ERROR;
1922 			}
1923 		}
1924 		break;
1925 	case OCS_HW_RQ_PROCESS_LIMIT: {
1926 		hw_rq_t *rq;
1927 		uint32_t i;
1928 
1929 		/* For each hw_rq object, set its parent CQ limit value */
1930 		for (i = 0; i < hw->hw_rq_count; i++) {
1931 			rq = hw->hw_rq[i];
1932 			hw->cq[rq->cq->instance].proc_limit = value;
1933 		}
1934 		break;
1935 	}
1936 	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1937 		hw->config.rq_default_buffer_size = value;
1938 		break;
1939 	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1940 		hw->config.auto_xfer_rdy_xri_cnt = value;
1941 		break;
1942 	case OCS_HW_AUTO_XFER_RDY_SIZE:
1943 		hw->config.auto_xfer_rdy_size = value;
1944 		break;
1945 	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1946 		switch (value) {
1947 		case 512:
1948 			hw->config.auto_xfer_rdy_blk_size_chip = 0;
1949 			break;
1950 		case 1024:
1951 			hw->config.auto_xfer_rdy_blk_size_chip = 1;
1952 			break;
1953 		case 2048:
1954 			hw->config.auto_xfer_rdy_blk_size_chip = 2;
1955 			break;
1956 		case 4096:
1957 			hw->config.auto_xfer_rdy_blk_size_chip = 3;
1958 			break;
1959 		case 520:
1960 			hw->config.auto_xfer_rdy_blk_size_chip = 4;
1961 			break;
1962 		default:
1963 			ocs_log_err(hw->os, "Invalid block size %d\n",
1964 				    value);
1965 			rc = OCS_HW_RTN_ERROR;
1966 		}
1967 		break;
1968 	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1969 		hw->config.auto_xfer_rdy_t10_enable = value;
1970 		break;
1971 	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1972 		hw->config.auto_xfer_rdy_p_type = value;
1973 		break;
1974 	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1975 		hw->config.auto_xfer_rdy_ref_tag_is_lba = value;
1976 		break;
1977 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1978 		hw->config.auto_xfer_rdy_app_tag_valid = value;
1979 		break;
1980 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1981 		hw->config.auto_xfer_rdy_app_tag_value = value;
1982 		break;
1983 	case OCS_ESOC:
1984 		hw->config.esoc = value;
1985 		break;
1986 	case OCS_HW_HIGH_LOGIN_MODE:
1987 		rc = sli_set_hlm(&hw->sli, value);
1988 		break;
1989 	case OCS_HW_PREREGISTER_SGL:
1990 		rc = sli_set_sgl_preregister(&hw->sli, value);
1991 		break;
1992 	case OCS_HW_ETH_LICENSE:
1993 		hw->eth_license = value;
1994 		break;
1995 	case OCS_HW_EMULATE_I_ONLY_AAB:
1996 		hw->config.i_only_aab = value;
1997 		break;
1998 	case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
1999 		hw->config.emulate_tgt_wqe_timeout = value;
2000 		break;
2001 	case OCS_HW_BOUNCE:
2002 		hw->config.bounce = value;
2003 		break;
2004 	case OCS_HW_RQ_SELECTION_POLICY:
2005 		hw->config.rq_selection_policy = value;
2006 		break;
2007 	case OCS_HW_RR_QUANTA:
2008 		hw->config.rr_quanta = value;
2009 		break;
2010 	default:
2011 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2012 		rc = OCS_HW_RTN_ERROR;
2013 	}
2014 
2015 	return rc;
2016 }
2017 
2018 ocs_hw_rtn_e
2019 ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value)
2020 {
2021 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2022 
2023 	switch (prop) {
2024 	case OCS_HW_WAR_VERSION:
2025 		hw->hw_war_version = value;
2026 		break;
2027 	case OCS_HW_FILTER_DEF: {
2028 		char *p = value;
2029 		uint32_t idx = 0;
2030 
2031 		for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) {
2032 			hw->config.filter_def[idx] = 0;
2033 		}
2034 
2035 		for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) {
2036 			hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0);
2037 			p = ocs_strchr(p, ',');
2038 			if (p != NULL) {
2039 				p++;
2040 			}
2041 		}
2042 
2043 		break;
2044 	}
2045 	default:
2046 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2047 		rc = OCS_HW_RTN_ERROR;
2048 		break;
2049 	}
2050 	return rc;
2051 }
2052 /**
2053  * @ingroup interrupt
2054  * @brief Check for the events associated with the interrupt vector.
2055  *
2056  * @param hw Hardware context.
2057  * @param vector Zero-based interrupt vector number.
2058  *
2059  * @return Returns 0 on success, or a non-zero value on failure.
2060  */
2061 int32_t
2062 ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector)
2063 {
2064 	int32_t rc = 0;
2065 
2066 	if (!hw) {
2067 		ocs_log_err(NULL, "HW context NULL?!?\n");
2068 		return -1;
2069 	}
2070 
2071 	if (vector > hw->eq_count) {
2072 		ocs_log_err(hw->os, "vector %d. max %d\n",
2073 				vector, hw->eq_count);
2074 		return -1;
2075 	}
2076 
2077 	/*
2078 	 * The caller should disable interrupts if they wish to prevent us
2079 	 * from processing during a shutdown. The following states are defined:
2080 	 *   OCS_HW_STATE_UNINITIALIZED - No queues allocated
2081 	 *   OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2082 	 *                                    queues are cleared.
2083 	 *   OCS_HW_STATE_ACTIVE - Chip and queues are operational
2084 	 *   OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2085 	 *   OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2086 	 *                                        completions.
2087 	 */
2088 	if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
2089 		rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]);
2090 
2091 		/* Re-arm queue if there are no entries */
2092 		if (rc != 0) {
2093 			sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE);
2094 		}
2095 	}
2096 	return rc;
2097 }
2098 
2099 void
2100 ocs_hw_unsol_process_bounce(void *arg)
2101 {
2102 	ocs_hw_sequence_t *seq = arg;
2103 	ocs_hw_t *hw = seq->hw;
2104 
2105 	ocs_hw_assert(hw != NULL);
2106 	ocs_hw_assert(hw->callback.unsolicited != NULL);
2107 
2108 	hw->callback.unsolicited(hw->args.unsolicited, seq);
2109 }
2110 
2111 int32_t
2112 ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec)
2113 {
2114 	hw_eq_t *eq;
2115 	int32_t rc = 0;
2116 
2117 	CPUTRACE("");
2118 
2119 	/*
2120 	 * The caller should disable interrupts if they wish to prevent us
2121 	 * from processing during a shutdown. The following states are defined:
2122 	 *   OCS_HW_STATE_UNINITIALIZED - No queues allocated
2123 	 *   OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2124 	 *                                    queues are cleared.
2125 	 *   OCS_HW_STATE_ACTIVE - Chip and queues are operational
2126 	 *   OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2127 	 *   OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2128 	 *                                        completions.
2129 	 */
2130 	if (hw->state == OCS_HW_STATE_UNINITIALIZED) {
2131 		return 0;
2132 	}
2133 
2134 	/* Get pointer to hw_eq_t */
2135 	eq = hw->hw_eq[vector];
2136 
2137 	OCS_STAT(eq->use_count++);
2138 
2139 	rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec);
2140 
2141 	return rc;
2142 }
2143 
2144 /**
2145  * @ingroup interrupt
2146  * @brief Process events associated with an EQ.
2147  *
2148  * @par Description
2149  * Loop termination:
2150  * @n @n Without a mechanism to terminate the completion processing loop, it
2151  * is possible under some workload conditions for the loop to never terminate
2152  * (or at least take longer than the OS is happy to have an interrupt handler
2153  * or kernel thread context hold a CPU without yielding).
2154  * @n @n The approach taken here is to periodically check how much time
2155  * we have been in this
2156  * processing loop, and if we exceed a predetermined time (multiple seconds), the
2157  * loop is terminated, and ocs_hw_process() returns.
2158  *
2159  * @param hw Hardware context.
2160  * @param eq Pointer to HW EQ object.
2161  * @param max_isr_time_msec Maximum time in msec to stay in this function.
2162  *
2163  * @return Returns 0 on success, or a non-zero value on failure.
2164  */
2165 int32_t
2166 ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
2167 {
2168 	uint8_t		eqe[sizeof(sli4_eqe_t)] = { 0 };
2169 	uint32_t	done = FALSE;
2170 	uint32_t	tcheck_count;
2171 	time_t		tstart;
2172 	time_t		telapsed;
2173 
2174 	tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2175 	tstart = ocs_msectime();
2176 
2177 	CPUTRACE("");
2178 
2179 	while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
2180 		uint16_t	cq_id = 0;
2181 		int32_t		rc;
2182 
2183 		rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2184 		if (unlikely(rc)) {
2185 			if (rc > 0) {
2186 				uint32_t i;
2187 
2188 				/*
2189 				 * Received a sentinel EQE indicating the EQ is full.
2190 				 * Process all CQs
2191 				 */
2192 				for (i = 0; i < hw->cq_count; i++) {
2193 					ocs_hw_cq_process(hw, hw->hw_cq[i]);
2194 				}
2195 				continue;
2196 			} else {
2197 				return rc;
2198 			}
2199 		} else {
2200 			int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
2201 			if (likely(index >= 0)) {
2202 				ocs_hw_cq_process(hw, hw->hw_cq[index]);
2203 			} else {
2204 				ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
2205 			}
2206 		}
2207 
2208 		if (eq->queue->n_posted > (eq->queue->posted_limit)) {
2209 			sli_queue_arm(&hw->sli, eq->queue, FALSE);
2210 		}
2211 
2212 		if (tcheck_count && (--tcheck_count == 0)) {
2213 			tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2214 			telapsed = ocs_msectime() - tstart;
2215 			if (telapsed >= max_isr_time_msec) {
2216 				done = TRUE;
2217 			}
2218 		}
2219 	}
2220 	sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);
2221 
2222 	return 0;
2223 }
2224 
2225 /**
2226  * @brief Submit queued (pending) mbx commands.
2227  *
2228  * @par Description
2229  * Submit queued mailbox commands.
2230  * --- Assumes that hw->cmd_lock is held ---
2231  *
2232  * @param hw Hardware context.
2233  *
2234  * @return Returns 0 on success, or a negative error code value on failure.
2235  */
2236 static int32_t
2237 ocs_hw_cmd_submit_pending(ocs_hw_t *hw)
2238 {
2239 	ocs_command_ctx_t *ctx;
2240 	int32_t rc = 0;
2241 
2242 	/* Assumes lock held */
2243 
2244 	/* Only submit MQE if there's room */
2245 	while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) {
2246 		ctx = ocs_list_remove_head(&hw->cmd_pending);
2247 		if (ctx == NULL) {
2248 			break;
2249 		}
2250 		ocs_list_add_tail(&hw->cmd_head, ctx);
2251 		hw->cmd_head_count++;
2252 		if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) {
2253 			ocs_log_test(hw->os, "sli_queue_write failed: %d\n", rc);
2254 			rc = -1;
2255 			break;
2256 		}
2257 	}
2258 	return rc;
2259 }
2260 
2261 /**
2262  * @ingroup io
2263  * @brief Issue a SLI command.
2264  *
2265  * @par Description
2266  * Send a mailbox command to the hardware, and either wait for a completion
2267  * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT).
2268  *
2269  * @param hw Hardware context.
2270  * @param cmd Buffer containing a formatted command and results.
2271  * @param opts Command options:
2272  *  - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion.
2273  *  - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback.
2274  * @param cb Function callback used for asynchronous mode. May be NULL.
2275  * @n Prototype is <tt>(*cb)(void *arg, uint8_t *cmd)</tt>.
2276  * @n @n @b Note: If the
2277  * callback function pointer is NULL, the results of the command are silently
2278  * discarded, allowing this pointer to exist solely on the stack.
2279  * @param arg Argument passed to an asynchronous callback.
2280  *
2281  * @return Returns 0 on success, or a non-zero value on failure.
2282  */
2283 ocs_hw_rtn_e
2284 ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg)
2285 {
2286 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2287 
2288 	/*
2289 	 * If the chip is in an error state (UE'd) then reject this mailbox
2290 	 *  command.
2291 	 */
2292 	if (sli_fw_error_status(&hw->sli) > 0) {
2293 		uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1);
2294 		uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2);
2295 		if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) {
2296 			hw->expiration_logged = 1;
2297 			ocs_log_crit(hw->os,"Emulex: Heartbeat expired after %d seconds\n",
2298 					hw->watchdog_timeout);
2299 		}
2300 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2301 		ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
2302 			sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS),
2303 			err1, err2);
2304 
2305 		return OCS_HW_RTN_ERROR;
2306 	}
2307 
2308 	if (OCS_CMD_POLL == opts) {
2309 		ocs_lock(&hw->cmd_lock);
2310 		if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) {
2311 			/*
2312 			 * Can't issue Boot-strap mailbox command with other
2313 			 * mail-queue commands pending as this interaction is
2314 			 * undefined
2315 			 */
2316 			rc = OCS_HW_RTN_ERROR;
2317 		} else {
2318 			void *bmbx = hw->sli.bmbx.virt;
2319 
2320 			ocs_memset(bmbx, 0, SLI4_BMBX_SIZE);
2321 			ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
2322 
2323 			if (sli_bmbx_command(&hw->sli) == 0) {
2324 				rc = OCS_HW_RTN_SUCCESS;
2325 				ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
2326 			}
2327 		}
2328 		ocs_unlock(&hw->cmd_lock);
2329 	} else if (OCS_CMD_NOWAIT == opts) {
2330 		ocs_command_ctx_t	*ctx = NULL;
2331 
2332 		ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT);
2333 		if (!ctx) {
2334 			ocs_log_err(hw->os, "can't allocate command context\n");
2335 			return OCS_HW_RTN_NO_RESOURCES;
2336 		}
2337 
2338 		if (hw->state != OCS_HW_STATE_ACTIVE) {
2339 			ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state);
2340 			ocs_free(hw->os, ctx, sizeof(*ctx));
2341 			return OCS_HW_RTN_ERROR;
2342 		}
2343 
2344 		if (cb) {
2345 			ctx->cb = cb;
2346 			ctx->arg = arg;
2347 		}
2348 		ctx->buf = cmd;
2349 		ctx->ctx = hw;
2350 
2351 		ocs_lock(&hw->cmd_lock);
2352 
2353 			/* Add to pending list */
2354 			ocs_list_add_tail(&hw->cmd_pending, ctx);
2355 
2356 			/* Submit as much of the pending list as we can */
2357 			if (ocs_hw_cmd_submit_pending(hw) == 0) {
2358 				rc = OCS_HW_RTN_SUCCESS;
2359 			}
2360 
2361 		ocs_unlock(&hw->cmd_lock);
2362 	}
2363 
2364 	return rc;
2365 }
2366 
2367 /**
2368  * @ingroup devInitShutdown
2369  * @brief Register a callback for the given event.
2370  *
2371  * @param hw Hardware context.
2372  * @param which Event of interest.
2373  * @param func Function to call when the event occurs.
2374  * @param arg Argument passed to the callback function.
2375  *
2376  * @return Returns 0 on success, or a non-zero value on failure.
2377  */
2378 ocs_hw_rtn_e
2379 ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg)
2380 {
2381 
2382 	if (!hw || !func || (which >= OCS_HW_CB_MAX)) {
2383 		ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n",
2384 			    hw, which, func);
2385 		return OCS_HW_RTN_ERROR;
2386 	}
2387 
2388 	switch (which) {
2389 	case OCS_HW_CB_DOMAIN:
2390 		hw->callback.domain = func;
2391 		hw->args.domain = arg;
2392 		break;
2393 	case OCS_HW_CB_PORT:
2394 		hw->callback.port = func;
2395 		hw->args.port = arg;
2396 		break;
2397 	case OCS_HW_CB_UNSOLICITED:
2398 		hw->callback.unsolicited = func;
2399 		hw->args.unsolicited = arg;
2400 		break;
2401 	case OCS_HW_CB_REMOTE_NODE:
2402 		hw->callback.rnode = func;
2403 		hw->args.rnode = arg;
2404 		break;
2405 	case OCS_HW_CB_BOUNCE:
2406 		hw->callback.bounce = func;
2407 		hw->args.bounce = arg;
2408 		break;
2409 	default:
2410 		ocs_log_test(hw->os, "unknown callback %#x\n", which);
2411 		return OCS_HW_RTN_ERROR;
2412 	}
2413 
2414 	return OCS_HW_RTN_SUCCESS;
2415 }
2416 
2417 /**
2418  * @ingroup port
2419  * @brief Allocate a port object.
2420  *
2421  * @par Description
2422  * This function allocates a VPI object for the port and stores it in the
2423  * indicator field of the port object.
2424  *
2425  * @param hw Hardware context.
2426  * @param sport SLI port object used to connect to the domain.
2427  * @param domain Domain object associated with this port (may be NULL).
2428  * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
2429  *
2430  * @return Returns 0 on success, or a non-zero value on failure.
2431  */
2432 ocs_hw_rtn_e
2433 ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain,
2434 		uint8_t *wwpn)
2435 {
2436 	uint8_t	*cmd = NULL;
2437 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2438 	uint32_t index;
2439 
2440 	sport->indicator = UINT32_MAX;
2441 	sport->hw = hw;
2442 	sport->ctx.app = sport;
2443 	sport->sm_free_req_pending = 0;
2444 
2445 	/*
2446 	 * Check if the chip is in an error state (UE'd) before proceeding.
2447 	 */
2448 	if (sli_fw_error_status(&hw->sli) > 0) {
2449 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2450 		return OCS_HW_RTN_ERROR;
2451 	}
2452 
2453 	if (wwpn) {
2454 		ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
2455 	}
2456 
2457 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) {
2458 		ocs_log_err(hw->os, "FCOE_VPI allocation failure\n");
2459 		return OCS_HW_RTN_ERROR;
2460 	}
2461 
2462 	if (domain != NULL) {
2463 		ocs_sm_function_t	next = NULL;
2464 
2465 		cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2466 		if (!cmd) {
2467 			ocs_log_err(hw->os, "command memory allocation failed\n");
2468 			rc = OCS_HW_RTN_NO_MEMORY;
2469 			goto ocs_hw_port_alloc_out;
2470 		}
2471 
2472 		/* If the WWPN is NULL, fetch the default WWPN and WWNN before
2473 		 * initializing the VPI
2474 		 */
2475 		if (!wwpn) {
2476 			next = __ocs_hw_port_alloc_read_sparm64;
2477 		} else {
2478 			next = __ocs_hw_port_alloc_init_vpi;
2479 		}
2480 
2481 		ocs_sm_transition(&sport->ctx, next, cmd);
2482 	} else if (!wwpn) {
2483 		/* This is the convention for the HW, not SLI */
2484 		ocs_log_test(hw->os, "need WWN for physical port\n");
2485 		rc = OCS_HW_RTN_ERROR;
2486 	} else {
2487 		/* domain NULL and wwpn non-NULL */
2488 		ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL);
2489 	}
2490 
2491 ocs_hw_port_alloc_out:
2492 	if (rc != OCS_HW_RTN_SUCCESS) {
2493 		ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2494 
2495 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
2496 	}
2497 
2498 	return rc;
2499 }
2500 
2501 /**
2502  * @ingroup port
2503  * @brief Attach a physical/virtual SLI port to a domain.
2504  *
2505  * @par Description
2506  * This function registers a previously-allocated VPI with the
2507  * device.
2508  *
2509  * @param hw Hardware context.
2510  * @param sport Pointer to the SLI port object.
2511  * @param fc_id Fibre Channel ID to associate with this port.
2512  *
2513  * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
2514  */
2515 ocs_hw_rtn_e
2516 ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id)
2517 {
2518 	uint8_t	*buf = NULL;
2519 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2520 
2521 	if (!hw || !sport) {
2522 		ocs_log_err(hw ? hw->os : NULL,
2523 			"bad parameter(s) hw=%p sport=%p\n", hw,
2524 			sport);
2525 		return OCS_HW_RTN_ERROR;
2526 	}
2527 
2528 	/*
2529 	 * Check if the chip is in an error state (UE'd) before proceeding.
2530 	 */
2531 	if (sli_fw_error_status(&hw->sli) > 0) {
2532 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2533 		return OCS_HW_RTN_ERROR;
2534 	}
2535 
2536 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2537 	if (!buf) {
2538 		ocs_log_err(hw->os, "no buffer for command\n");
2539 		return OCS_HW_RTN_NO_MEMORY;
2540 	}
2541 
2542 	sport->fc_id = fc_id;
2543 	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf);
2544 	return rc;
2545 }
2546 
2547 /**
2548  * @brief Called when the port control command completes.
2549  *
2550  * @par Description
2551  * We only need to free the mailbox command buffer.
2552  *
2553  * @param hw Hardware context.
2554  * @param status Status field from the mbox completion.
2555  * @param mqe Mailbox response structure.
2556  * @param arg Pointer to a callback function that signals the caller that the command is done.
2557  *
2558  * @return Returns 0.
2559  */
2560 static int32_t
2561 ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
2562 {
2563 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2564 	return 0;
2565 }
2566 
2567 /**
2568  * @ingroup port
2569  * @brief Control a port (initialize, shutdown, or set link configuration).
2570  *
2571  * @par Description
2572  * This function controls a port depending on the @c ctrl parameter:
2573  * - @b OCS_HW_PORT_INIT -
2574  * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
2575  * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
2576  * .
2577  * - @b OCS_HW_PORT_SHUTDOWN -
2578  * Issues the DOWN_LINK command for the specified port.
2579  * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
2580  * .
2581  * - @b OCS_HW_PORT_SET_LINK_CONFIG -
2582  * Sets the link configuration.
2583  *
2584  * @param hw Hardware context.
2585  * @param ctrl Specifies the operation:
2586  * - OCS_HW_PORT_INIT
2587  * - OCS_HW_PORT_SHUTDOWN
2588  * - OCS_HW_PORT_SET_LINK_CONFIG
2589  *
2590  * @param value Operation-specific value.
2591  * - OCS_HW_PORT_INIT - Selective reset AL_PA
2592  * - OCS_HW_PORT_SHUTDOWN - N/A
2593  * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
2594  *
2595  * @param cb Callback function to invoke the following operation.
2596  * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2597  * are handled by the OCS_HW_CB_DOMAIN callbacks).
2598  * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
2599  * completes.
2600  *
2601  * @param arg Callback argument invoked after the command completes.
2602  * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2603  * are handled by the OCS_HW_CB_DOMAIN callbacks).
2604  * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
2605  * completes.
2606  *
2607  * @return Returns 0 on success, or a non-zero value on failure.
2608  */
2609 ocs_hw_rtn_e
2610 ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
2611 {
2612 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2613 
2614 	switch (ctrl) {
2615 	case OCS_HW_PORT_INIT:
2616 	{
2617 		uint8_t	*init_link;
2618 		uint32_t speed = 0;
2619 		uint8_t reset_alpa = 0;
2620 
2621 		if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
2622 			uint8_t	*cfg_link;
2623 
2624 			cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2625 			if (cfg_link == NULL) {
2626 				ocs_log_err(hw->os, "no buffer for command\n");
2627 				return OCS_HW_RTN_NO_MEMORY;
2628 			}
2629 
2630 			if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
2631 				rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
2632 							ocs_hw_cb_port_control, NULL);
2633 			}
2634 
2635 			if (rc != OCS_HW_RTN_SUCCESS) {
2636 				ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
2637 				ocs_log_err(hw->os, "CONFIG_LINK failed\n");
2638 				break;
2639 			}
2640 			speed = hw->config.speed;
2641 			reset_alpa = (uint8_t)(value & 0xff);
2642 		} else {
2643 			speed = FC_LINK_SPEED_10G;
2644 		}
2645 
2646 		/*
2647 		 * Bring link up, unless FW version is not supported
2648 		 */
2649 		if (hw->workaround.fw_version_too_low) {
2650 			if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
2651 				ocs_log_err(hw->os, "Cannot bring up link.  Please update firmware to %s or later (current version is %s)\n",
2652 					OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0));
2653 			} else {
2654 				ocs_log_err(hw->os, "Cannot bring up link.  Please update firmware to %s or later (current version is %s)\n",
2655 					OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0));
2656 			}
2657 
2658 			return OCS_HW_RTN_ERROR;
2659 		}
2660 
2661 		rc = OCS_HW_RTN_ERROR;
2662 
2663 		/* Allocate a new buffer for the init_link command */
2664 		init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2665 		if (init_link == NULL) {
2666 			ocs_log_err(hw->os, "no buffer for command\n");
2667 			return OCS_HW_RTN_NO_MEMORY;
2668 		}
2669 
2670 		if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) {
2671 			rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT,
2672 						ocs_hw_cb_port_control, NULL);
2673 		}
2674 		/* Free buffer on error, since no callback is coming */
2675 		if (rc != OCS_HW_RTN_SUCCESS) {
2676 			ocs_free(hw->os, init_link, SLI4_BMBX_SIZE);
2677 			ocs_log_err(hw->os, "INIT_LINK failed\n");
2678 		}
2679 		break;
2680 	}
2681 	case OCS_HW_PORT_SHUTDOWN:
2682 	{
2683 		uint8_t	*down_link;
2684 
2685 		down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2686 		if (down_link == NULL) {
2687 			ocs_log_err(hw->os, "no buffer for command\n");
2688 			return OCS_HW_RTN_NO_MEMORY;
2689 		}
2690 		if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) {
2691 			rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT,
2692 						ocs_hw_cb_port_control, NULL);
2693 		}
2694 		/* Free buffer on error, since no callback is coming */
2695 		if (rc != OCS_HW_RTN_SUCCESS) {
2696 			ocs_free(hw->os, down_link, SLI4_BMBX_SIZE);
2697 			ocs_log_err(hw->os, "DOWN_LINK failed\n");
2698 		}
2699 		break;
2700 	}
2701 	case OCS_HW_PORT_SET_LINK_CONFIG:
2702 		rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg);
2703 		break;
2704 	default:
2705 		ocs_log_test(hw->os, "unhandled control %#x\n", ctrl);
2706 		break;
2707 	}
2708 
2709 	return rc;
2710 }
2711 
2712 /**
2713  * @ingroup port
2714  * @brief Free port resources.
2715  *
2716  * @par Description
2717  * Issue the UNREG_VPI command to free the assigned VPI context.
2718  *
2719  * @param hw Hardware context.
2720  * @param sport SLI port object used to connect to the domain.
2721  *
2722  * @return Returns 0 on success, or a non-zero value on failure.
2723  */
2724 ocs_hw_rtn_e
2725 ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport)
2726 {
2727 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
2728 
2729 	if (!hw || !sport) {
2730 		ocs_log_err(hw ? hw->os : NULL,
2731 			"bad parameter(s) hw=%p sport=%p\n", hw,
2732 			sport);
2733 		return OCS_HW_RTN_ERROR;
2734 	}
2735 
2736 	/*
2737 	 * Check if the chip is in an error state (UE'd) before proceeding.
2738 	 */
2739 	if (sli_fw_error_status(&hw->sli) > 0) {
2740 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2741 		return OCS_HW_RTN_ERROR;
2742 	}
2743 
2744 	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
2745 	return rc;
2746 }
2747 
2748 /**
2749  * @ingroup domain
2750  * @brief Allocate a fabric domain object.
2751  *
2752  * @par Description
2753  * This function starts a series of commands needed to connect to the domain, including
2754  *   - REG_FCFI
2755  *   - INIT_VFI
2756  *   - READ_SPARMS
2757  *   .
2758  * @b Note: Not all SLI interface types use all of the above commands.
2759  * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK
2760  * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event.
2761  *
2762  * @param hw Hardware context.
2763  * @param domain Pointer to the domain object.
2764  * @param fcf FCF index.
2765  * @param vlan VLAN ID.
2766  *
2767  * @return Returns 0 on success, or a non-zero value on failure.
2768  */
2769 ocs_hw_rtn_e
2770 ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan)
2771 {
2772 	uint8_t		*cmd = NULL;
2773 	uint32_t	index;
2774 
2775 	if (!hw || !domain || !domain->sport) {
2776 		ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n",
2777 				hw, domain, domain ? domain->sport : NULL);
2778 		return OCS_HW_RTN_ERROR;
2779 	}
2780 
2781 	/*
2782 	 * Check if the chip is in an error state (UE'd) before proceeding.
2783 	 */
2784 	if (sli_fw_error_status(&hw->sli) > 0) {
2785 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2786 		return OCS_HW_RTN_ERROR;
2787 	}
2788 
2789 	cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2790 	if (!cmd) {
2791 		ocs_log_err(hw->os, "command memory allocation failed\n");
2792 		return OCS_HW_RTN_NO_MEMORY;
2793 	}
2794 
2795 	domain->dma = hw->domain_dmem;
2796 
2797 	domain->hw = hw;
2798 	domain->sm.app = domain;
2799 	domain->fcf = fcf;
2800 	domain->fcf_indicator = UINT32_MAX;
2801 	domain->vlan_id = vlan;
2802 	domain->indicator = UINT32_MAX;
2803 
2804 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) {
2805 		ocs_log_err(hw->os, "FCOE_VFI allocation failure\n");
2806 
2807 		ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2808 
2809 		return OCS_HW_RTN_ERROR;
2810 	}
2811 
2812 	ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd);
2813 	return OCS_HW_RTN_SUCCESS;
2814 }
2815 
2816 /**
2817  * @ingroup domain
2818  * @brief Attach a SLI port to a domain.
2819  *
2820  * @param hw Hardware context.
2821  * @param domain Pointer to the domain object.
2822  * @param fc_id Fibre Channel ID to associate with this port.
2823  *
2824  * @return Returns 0 on success, or a non-zero value on failure.
2825  */
2826 ocs_hw_rtn_e
2827 ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id)
2828 {
2829 	uint8_t	*buf = NULL;
2830 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2831 
2832 	if (!hw || !domain) {
2833 		ocs_log_err(hw ? hw->os : NULL,
2834 			"bad parameter(s) hw=%p domain=%p\n",
2835 			hw, domain);
2836 		return OCS_HW_RTN_ERROR;
2837 	}
2838 
2839 	/*
2840 	 * Check if the chip is in an error state (UE'd) before proceeding.
2841 	 */
2842 	if (sli_fw_error_status(&hw->sli) > 0) {
2843 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2844 		return OCS_HW_RTN_ERROR;
2845 	}
2846 
2847 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2848 	if (!buf) {
2849 		ocs_log_err(hw->os, "no buffer for command\n");
2850 		return OCS_HW_RTN_NO_MEMORY;
2851 	}
2852 
2853 	domain->sport->fc_id = fc_id;
2854 	ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf);
2855 	return rc;
2856 }
2857 
2858 /**
2859  * @ingroup domain
2860  * @brief Free a fabric domain object.
2861  *
2862  * @par Description
2863  * Free both the driver and SLI port resources associated with the domain.
2864  *
2865  * @param hw Hardware context.
2866  * @param domain Pointer to the domain object.
2867  *
2868  * @return Returns 0 on success, or a non-zero value on failure.
2869  */
2870 ocs_hw_rtn_e
2871 ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain)
2872 {
2873 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
2874 
2875 	if (!hw || !domain) {
2876 		ocs_log_err(hw ? hw->os : NULL,
2877 			"bad parameter(s) hw=%p domain=%p\n",
2878 			hw, domain);
2879 		return OCS_HW_RTN_ERROR;
2880 	}
2881 
2882 	/*
2883 	 * Check if the chip is in an error state (UE'd) before proceeding.
2884 	 */
2885 	if (sli_fw_error_status(&hw->sli) > 0) {
2886 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2887 		return OCS_HW_RTN_ERROR;
2888 	}
2889 
2890 	ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL);
2891 	return rc;
2892 }
2893 
2894 /**
2895  * @ingroup domain
2896  * @brief Free a fabric domain object.
2897  *
2898  * @par Description
2899  * Free the driver resources associated with the domain. The difference between
2900  * this call and ocs_hw_domain_free() is that this call assumes resources no longer
2901  * exist on the SLI port, due to a reset or after some error conditions.
2902  *
2903  * @param hw Hardware context.
2904  * @param domain Pointer to the domain object.
2905  *
2906  * @return Returns 0 on success, or a non-zero value on failure.
2907  */
2908 ocs_hw_rtn_e
2909 ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain)
2910 {
2911 	if (!hw || !domain) {
2912 		ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain);
2913 		return OCS_HW_RTN_ERROR;
2914 	}
2915 
2916 	sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
2917 
2918 	return OCS_HW_RTN_SUCCESS;
2919 }
2920 
2921 /**
2922  * @ingroup node
2923  * @brief Allocate a remote node object.
2924  *
2925  * @param hw Hardware context.
2926  * @param rnode Allocated remote node object to initialize.
2927  * @param fc_addr FC address of the remote node.
2928  * @param sport SLI port used to connect to remote node.
2929  *
2930  * @return Returns 0 on success, or a non-zero value on failure.
2931  */
2932 ocs_hw_rtn_e
2933 ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr,
2934 		ocs_sli_port_t *sport)
2935 {
2936 	/* Check for invalid indicator */
2937 	if (UINT32_MAX != rnode->indicator) {
2938 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n",
2939 				fc_addr, rnode->indicator);
2940 		return OCS_HW_RTN_ERROR;
2941 	}
2942 
2943 	/*
2944 	 * Check if the chip is in an error state (UE'd) before proceeding.
2945 	 */
2946 	if (sli_fw_error_status(&hw->sli) > 0) {
2947 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2948 		return OCS_HW_RTN_ERROR;
2949 	}
2950 
2951 	/* NULL SLI port indicates an unallocated remote node */
2952 	rnode->sport = NULL;
2953 
2954 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) {
2955 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
2956 				fc_addr);
2957 		return OCS_HW_RTN_ERROR;
2958 	}
2959 
2960 	rnode->fc_id = fc_addr;
2961 	rnode->sport = sport;
2962 
2963 	return OCS_HW_RTN_SUCCESS;
2964 }
2965 
2966 /**
2967  * @ingroup node
2968  * @brief Update a remote node object with the remote port's service parameters.
2969  *
2970  * @param hw Hardware context.
2971  * @param rnode Allocated remote node object to initialize.
2972  * @param sparms DMA buffer containing the remote port's service parameters.
2973  *
2974  * @return Returns 0 on success, or a non-zero value on failure.
2975  */
2976 ocs_hw_rtn_e
2977 ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms)
2978 {
2979 	ocs_hw_rtn_e	rc = OCS_HW_RTN_ERROR;
2980 	uint8_t		*buf = NULL;
2981 	uint32_t	count = 0;
2982 
2983 	if (!hw || !rnode || !sparms) {
2984 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n",
2985 			    hw, rnode, sparms);
2986 		return OCS_HW_RTN_ERROR;
2987 	}
2988 
2989 	/*
2990 	 * Check if the chip is in an error state (UE'd) before proceeding.
2991 	 */
2992 	if (sli_fw_error_status(&hw->sli) > 0) {
2993 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2994 		return OCS_HW_RTN_ERROR;
2995 	}
2996 
2997 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2998 	if (!buf) {
2999 		ocs_log_err(hw->os, "no buffer for command\n");
3000 		return OCS_HW_RTN_NO_MEMORY;
3001 	}
3002 
3003 	/*
3004 	 * If the attach count is non-zero, this RPI has already been registered.
3005 	 * Otherwise, register the RPI
3006 	 */
3007 	if (rnode->index == UINT32_MAX) {
3008 		ocs_log_err(NULL, "bad parameter rnode->index invalid\n");
3009 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3010 		return OCS_HW_RTN_ERROR;
3011 	}
3012 	count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3013 	if (count) {
3014 		/*
3015 		 * Can't attach multiple FC_ID's to a node unless High Login
3016 		 * Mode is enabled
3017 		 */
3018 		if (sli_get_hlm(&hw->sli) == FALSE) {
3019 			ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n",
3020 					sli_get_hlm(&hw->sli), count);
3021 			rc = OCS_HW_RTN_SUCCESS;
3022 		} else {
3023 			rnode->node_group = TRUE;
3024 			rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached);
3025 			rc = rnode->attached  ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS;
3026 		}
3027 	} else {
3028 		rnode->node_group = FALSE;
3029 
3030 		ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt);
3031 		if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id,
3032 					rnode->indicator, rnode->sport->indicator,
3033 					sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) {
3034 			rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
3035 					ocs_hw_cb_node_attach, rnode);
3036 		}
3037 	}
3038 
3039 	if (count || rc) {
3040 		if (rc < OCS_HW_RTN_SUCCESS) {
3041 			ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3042 			ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI");
3043 		}
3044 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3045 	}
3046 
3047 	return rc;
3048 }
3049 
3050 /**
3051  * @ingroup node
3052  * @brief Free a remote node resource.
3053  *
3054  * @param hw Hardware context.
3055  * @param rnode Remote node object to free.
3056  *
3057  * @return Returns 0 on success, or a non-zero value on failure.
3058  */
3059 ocs_hw_rtn_e
3060 ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3061 {
3062 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
3063 
3064 	if (!hw || !rnode) {
3065 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3066 			    hw, rnode);
3067 		return OCS_HW_RTN_ERROR;
3068 	}
3069 
3070 	if (rnode->sport) {
3071 		if (!rnode->attached) {
3072 			if (rnode->indicator != UINT32_MAX) {
3073 				if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3074 					ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n",
3075 						    rnode->indicator, rnode->fc_id);
3076 					rc = OCS_HW_RTN_ERROR;
3077 				} else {
3078 					rnode->node_group = FALSE;
3079 					rnode->indicator = UINT32_MAX;
3080 					rnode->index = UINT32_MAX;
3081 					rnode->free_group = FALSE;
3082 				}
3083 			}
3084 		} else {
3085 			ocs_log_err(hw->os, "Error: rnode is still attached\n");
3086 			rc = OCS_HW_RTN_ERROR;
3087 		}
3088 	}
3089 
3090 	return rc;
3091 }
3092 
3093 /**
3094  * @ingroup node
3095  * @brief Free a remote node object.
3096  *
3097  * @param hw Hardware context.
3098  * @param rnode Remote node object to free.
3099  *
3100  * @return Returns 0 on success, or a non-zero value on failure.
3101  */
3102 ocs_hw_rtn_e
3103 ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3104 {
3105 	uint8_t	*buf = NULL;
3106 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS_SYNC;
3107 	uint32_t	index = UINT32_MAX;
3108 
3109 	if (!hw || !rnode) {
3110 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3111 			    hw, rnode);
3112 		return OCS_HW_RTN_ERROR;
3113 	}
3114 
3115 	/*
3116 	 * Check if the chip is in an error state (UE'd) before proceeding.
3117 	 */
3118 	if (sli_fw_error_status(&hw->sli) > 0) {
3119 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3120 		return OCS_HW_RTN_ERROR;
3121 	}
3122 
3123 	index = rnode->index;
3124 
3125 	if (rnode->sport) {
3126 		uint32_t	count = 0;
3127 		uint32_t	fc_id;
3128 
3129 		if (!rnode->attached) {
3130 			return OCS_HW_RTN_SUCCESS_SYNC;
3131 		}
3132 
3133 		buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3134 		if (!buf) {
3135 			ocs_log_err(hw->os, "no buffer for command\n");
3136 			return OCS_HW_RTN_NO_MEMORY;
3137 		}
3138 
3139 		count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1);
3140 
3141 		if (count <= 1) {
3142 			/* There are no other references to this RPI
3143 			 * so unregister it and free the resource. */
3144 			fc_id = UINT32_MAX;
3145 			rnode->node_group = FALSE;
3146 			rnode->free_group = TRUE;
3147 		} else {
3148 			if (sli_get_hlm(&hw->sli) == FALSE) {
3149 				ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n",
3150 						count);
3151 			}
3152 			fc_id = rnode->fc_id & 0x00ffffff;
3153 		}
3154 
3155 		rc = OCS_HW_RTN_ERROR;
3156 
3157 		if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator,
3158 					SLI_RSRC_FCOE_RPI, fc_id)) {
3159 			rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode);
3160 		}
3161 
3162 		if (rc != OCS_HW_RTN_SUCCESS) {
3163 			ocs_log_err(hw->os, "UNREG_RPI failed\n");
3164 			ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3165 			rc = OCS_HW_RTN_ERROR;
3166 		}
3167 	}
3168 
3169 	return rc;
3170 }
3171 
3172 /**
3173  * @ingroup node
3174  * @brief Free all remote node objects.
3175  *
3176  * @param hw Hardware context.
3177  *
3178  * @return Returns 0 on success, or a non-zero value on failure.
3179  */
3180 ocs_hw_rtn_e
3181 ocs_hw_node_free_all(ocs_hw_t *hw)
3182 {
3183 	uint8_t	*buf = NULL;
3184 	ocs_hw_rtn_e	rc = OCS_HW_RTN_ERROR;
3185 
3186 	if (!hw) {
3187 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
3188 		return OCS_HW_RTN_ERROR;
3189 	}
3190 
3191 	/*
3192 	 * Check if the chip is in an error state (UE'd) before proceeding.
3193 	 */
3194 	if (sli_fw_error_status(&hw->sli) > 0) {
3195 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3196 		return OCS_HW_RTN_ERROR;
3197 	}
3198 
3199 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3200 	if (!buf) {
3201 		ocs_log_err(hw->os, "no buffer for command\n");
3202 		return OCS_HW_RTN_NO_MEMORY;
3203 	}
3204 
3205 	if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff,
3206 				SLI_RSRC_FCOE_FCFI, UINT32_MAX)) {
3207 		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all,
3208 				NULL);
3209 	}
3210 
3211 	if (rc != OCS_HW_RTN_SUCCESS) {
3212 		ocs_log_err(hw->os, "UNREG_RPI failed\n");
3213 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3214 		rc = OCS_HW_RTN_ERROR;
3215 	}
3216 
3217 	return rc;
3218 }
3219 
3220 ocs_hw_rtn_e
3221 ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3222 {
3223 
3224 	if (!hw || !ngroup) {
3225 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3226 				hw, ngroup);
3227 		return OCS_HW_RTN_ERROR;
3228 	}
3229 
3230 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator,
3231 				&ngroup->index)) {
3232 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
3233 				ngroup->indicator);
3234 		return OCS_HW_RTN_ERROR;
3235 	}
3236 
3237 	return OCS_HW_RTN_SUCCESS;
3238 }
3239 
3240 ocs_hw_rtn_e
3241 ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode)
3242 {
3243 
3244 	if (!hw || !ngroup || !rnode) {
3245 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n",
3246 			    hw, ngroup, rnode);
3247 		return OCS_HW_RTN_ERROR;
3248 	}
3249 
3250 	if (rnode->attached) {
3251 		ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n",
3252 			    rnode->indicator, rnode->fc_id);
3253 		return OCS_HW_RTN_ERROR;
3254 	}
3255 
3256 	if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3257 		ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3258 				rnode->indicator);
3259 		return OCS_HW_RTN_ERROR;
3260 	}
3261 
3262 	rnode->indicator = ngroup->indicator;
3263 	rnode->index = ngroup->index;
3264 
3265 	return OCS_HW_RTN_SUCCESS;
3266 }
3267 
3268 ocs_hw_rtn_e
3269 ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3270 {
3271 	int	ref;
3272 
3273 	if (!hw || !ngroup) {
3274 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3275 				hw, ngroup);
3276 		return OCS_HW_RTN_ERROR;
3277 	}
3278 
3279 	ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count);
3280 	if (ref) {
3281 		/* Hmmm, the reference count is non-zero */
3282 		ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n",
3283 				ref, ngroup->indicator);
3284 
3285 		if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) {
3286 			ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3287 				    ngroup->indicator);
3288 			return OCS_HW_RTN_ERROR;
3289 		}
3290 
3291 		ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0);
3292 	}
3293 
3294 	ngroup->indicator = UINT32_MAX;
3295 	ngroup->index = UINT32_MAX;
3296 
3297 	return OCS_HW_RTN_SUCCESS;
3298 }
3299 
3300 /**
3301  * @brief Initialize IO fields on each free call.
3302  *
3303  * @n @b Note: This is done on each free call (as opposed to each
3304  * alloc call) because port-owned XRIs are not
3305  * allocated with ocs_hw_io_alloc() but are freed with this
3306  * function.
3307  *
3308  * @param io Pointer to HW IO.
3309  */
3310 static inline void
3311 ocs_hw_init_free_io(ocs_hw_io_t *io)
3312 {
3313 	/*
3314 	 * Set io->done to NULL, to avoid any callbacks, should
3315 	 * a completion be received for one of these IOs
3316 	 */
3317 	io->done = NULL;
3318 	io->abort_done = NULL;
3319 	io->status_saved = 0;
3320 	io->abort_in_progress = FALSE;
3321 	io->port_owned_abort_count = 0;
3322 	io->rnode = NULL;
3323 	io->type = 0xFFFF;
3324 	io->wq = NULL;
3325 	io->ul_io = NULL;
3326 	io->tgt_wqe_timeout = 0;
3327 }
3328 
3329 /**
3330  * @ingroup io
3331  * @brief Lockless allocate a HW IO object.
3332  *
3333  * @par Description
3334  * Assume that hw->ocs_lock is held. This function is only used if
3335  * use_dif_sec_xri workaround is being used.
3336  *
3337  * @param hw Hardware context.
3338  *
3339  * @return Returns a pointer to an object on success, or NULL on failure.
3340  */
3341 static inline ocs_hw_io_t *
3342 _ocs_hw_io_alloc(ocs_hw_t *hw)
3343 {
3344 	ocs_hw_io_t	*io = NULL;
3345 
3346 	if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
3347 		ocs_list_add_tail(&hw->io_inuse, io);
3348 		io->state = OCS_HW_IO_STATE_INUSE;
3349 		io->quarantine = FALSE;
3350 		io->quarantine_first_phase = TRUE;
3351 		io->abort_reqtag = UINT32_MAX;
3352 		ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
3353 	} else {
3354 		ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
3355 	}
3356 
3357 	return io;
3358 }
3359 /**
3360  * @ingroup io
3361  * @brief Allocate a HW IO object.
3362  *
3363  * @par Description
3364  * @n @b Note: This function applies to non-port owned XRIs
3365  * only.
3366  *
3367  * @param hw Hardware context.
3368  *
3369  * @return Returns a pointer to an object on success, or NULL on failure.
3370  */
3371 ocs_hw_io_t *
3372 ocs_hw_io_alloc(ocs_hw_t *hw)
3373 {
3374 	ocs_hw_io_t	*io = NULL;
3375 
3376 	ocs_lock(&hw->io_lock);
3377 		io = _ocs_hw_io_alloc(hw);
3378 	ocs_unlock(&hw->io_lock);
3379 
3380 	return io;
3381 }
3382 
3383 /**
3384  * @ingroup io
3385  * @brief Allocate/Activate a port owned HW IO object.
3386  *
3387  * @par Description
3388  * This function is called by the transport layer when an XRI is
3389  * allocated by the SLI-Port. This will "activate" the HW IO
3390  * associated with the XRI received from the SLI-Port to mirror
3391  * the state of the XRI.
3392  * @n @n @b Note: This function applies to port owned XRIs only.
3393  *
3394  * @param hw Hardware context.
3395  * @param io Pointer HW IO to activate/allocate.
3396  *
3397  * @return Returns a pointer to an object on success, or NULL on failure.
3398  */
3399 ocs_hw_io_t *
3400 ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
3401 {
3402 	if (ocs_ref_read_count(&io->ref) > 0) {
3403 		ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
3404 		return NULL;
3405 	}
3406 
3407 	if (io->wq != NULL) {
3408 		ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
3409 		return NULL;
3410 	}
3411 
3412 	ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3413 	io->xbusy = TRUE;
3414 
3415 	return io;
3416 }
3417 
3418 /**
3419  * @ingroup io
3420  * @brief When an IO is freed, depending on the exchange busy flag, and other
3421  * workarounds, move it to the correct list.
3422  *
3423  * @par Description
3424  * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
3425  * from the busy or wait_free list.
3426  *
3427  * @param hw Hardware context.
3428  * @param io Pointer to the IO object to move.
3429  */
3430 static void
3431 ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io)
3432 {
3433 	if (io->xbusy) {
3434 		/* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */
3435 		ocs_list_add_tail(&hw->io_wait_free, io);
3436 		io->state = OCS_HW_IO_STATE_WAIT_FREE;
3437 	} else {
3438 		/* IO not busy, add to free list */
3439 		ocs_list_add_tail(&hw->io_free, io);
3440 		io->state = OCS_HW_IO_STATE_FREE;
3441 	}
3442 
3443 	/* BZ 161832 workaround */
3444 	if (hw->workaround.use_dif_sec_xri) {
3445 		ocs_hw_check_sec_hio_list(hw);
3446 	}
3447 }
3448 
3449 /**
3450  * @ingroup io
3451  * @brief Free a HW IO object. Perform cleanup common to
3452  * port and host-owned IOs.
3453  *
3454  * @param hw Hardware context.
3455  * @param io Pointer to the HW IO object.
3456  */
3457 static inline void
3458 ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io)
3459 {
3460 	/* initialize IO fields */
3461 	ocs_hw_init_free_io(io);
3462 
3463 	/* Restore default SGL */
3464 	ocs_hw_io_restore_sgl(hw, io);
3465 }
3466 
3467 /**
3468  * @ingroup io
3469  * @brief Free a HW IO object associated with a port-owned XRI.
3470  *
3471  * @param arg Pointer to the HW IO object.
3472  */
3473 static void
3474 ocs_hw_io_free_port_owned(void *arg)
3475 {
3476 	ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3477 	ocs_hw_t *hw = io->hw;
3478 
3479 	/*
3480 	 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs
3481 	 * waiting for buffers.
3482 	 */
3483 	if (io->auto_xfer_rdy_dnrx) {
3484 		ocs_lock(&hw->io_lock);
3485 			/* take a reference count because we still own the IO until the buffer is posted */
3486 			ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3487 			ocs_list_add_tail(&hw->io_port_dnrx, io);
3488 		ocs_unlock(&hw->io_lock);
3489 	}
3490 
3491 	/* perform common cleanup */
3492 	ocs_hw_io_free_common(hw, io);
3493 }
3494 
3495 /**
3496  * @ingroup io
3497  * @brief Free a previously-allocated HW IO object. Called when
3498  * IO refcount goes to zero (host-owned IOs only).
3499  *
3500  * @param arg Pointer to the HW IO object.
3501  */
3502 static void
3503 ocs_hw_io_free_internal(void *arg)
3504 {
3505 	ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3506 	ocs_hw_t *hw = io->hw;
3507 
3508 	/* perform common cleanup */
3509 	ocs_hw_io_free_common(hw, io);
3510 
3511 	ocs_lock(&hw->io_lock);
3512 		/* remove from in-use list */
3513 		ocs_list_remove(&hw->io_inuse, io);
3514 		ocs_hw_io_free_move_correct_list(hw, io);
3515 	ocs_unlock(&hw->io_lock);
3516 }
3517 
3518 /**
3519  * @ingroup io
3520  * @brief Free a previously-allocated HW IO object.
3521  *
3522  * @par Description
3523  * @n @b Note: This function applies to port and host owned XRIs.
3524  *
3525  * @param hw Hardware context.
3526  * @param io Pointer to the HW IO object.
3527  *
3528  * @return Returns a non-zero value if HW IO was freed, 0 if references
3529  * on the IO still exist, or a negative value if an error occurred.
3530  */
3531 int32_t
3532 ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io)
3533 {
3534 	/* just put refcount */
3535 	if (ocs_ref_read_count(&io->ref) <= 0) {
3536 		ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
3537 			    io->indicator, io->reqtag);
3538 		return -1;
3539 	}
3540 
3541 	return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */
3542 }
3543 
3544 /**
3545  * @ingroup io
3546  * @brief Check if given HW IO is in-use
3547  *
3548  * @par Description
3549  * This function returns TRUE if the given HW IO has been
3550  * allocated and is in-use, and FALSE otherwise. It applies to
3551  * port and host owned XRIs.
3552  *
3553  * @param hw Hardware context.
3554  * @param io Pointer to the HW IO object.
3555  *
3556  * @return TRUE if an IO is in use, or FALSE otherwise.
3557  */
3558 uint8_t
3559 ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
3560 {
3561 	return (ocs_ref_read_count(&io->ref) > 0);
3562 }
3563 
3564 /**
3565  * @brief Write a HW IO to a work queue.
3566  *
3567  * @par Description
3568  * A HW IO is written to a work queue.
3569  *
3570  * @param wq Pointer to work queue.
3571  * @param wqe Pointer to WQ entry.
3572  *
3573  * @n @b Note: Assumes the SLI-4 queue lock is held.
3574  *
3575  * @return Returns 0 on success, or a negative error code value on failure.
3576  */
3577 static int32_t
3578 _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3579 {
3580 	int32_t rc;
3581 	int32_t queue_rc;
3582 
3583 	/* Every so often, set the wqec bit to generate comsummed completions */
3584 	if (wq->wqec_count) {
3585 		wq->wqec_count--;
3586 	}
3587 	if (wq->wqec_count == 0) {
3588 		sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf;
3589 		genwqe->wqec = 1;
3590 		wq->wqec_count = wq->wqec_set_count;
3591 	}
3592 
3593 	/* Decrement WQ free count */
3594 	wq->free_count--;
3595 
3596 	queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
3597 
3598 	if (queue_rc < 0) {
3599 		rc = -1;
3600 	} else {
3601 		rc = 0;
3602 		ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
3603 	}
3604 
3605 	return rc;
3606 }
3607 
3608 /**
3609  * @brief Write a HW IO to a work queue.
3610  *
3611  * @par Description
3612  * A HW IO is written to a work queue.
3613  *
3614  * @param wq Pointer to work queue.
3615  * @param wqe Pointer to WQE entry.
3616  *
3617  * @n @b Note: Takes the SLI-4 queue lock.
3618  *
3619  * @return Returns 0 on success, or a negative error code value on failure.
3620  */
3621 int32_t
3622 hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3623 {
3624 	int32_t rc = 0;
3625 
3626 	sli_queue_lock(wq->queue);
3627 		if ( ! ocs_list_empty(&wq->pending_list)) {
3628 			ocs_list_add_tail(&wq->pending_list, wqe);
3629 			OCS_STAT(wq->wq_pending_count++;)
3630 			while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3631 				rc = _hw_wq_write(wq, wqe);
3632 				if (rc < 0) {
3633 					break;
3634 				}
3635 				if (wqe->abort_wqe_submit_needed) {
3636 					wqe->abort_wqe_submit_needed = 0;
3637 					sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3638 							wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT );
3639 					ocs_list_add_tail(&wq->pending_list, wqe);
3640 					OCS_STAT(wq->wq_pending_count++;)
3641 				}
3642 			}
3643 		} else {
3644 			if (wq->free_count > 0) {
3645 				rc = _hw_wq_write(wq, wqe);
3646 			} else {
3647 				ocs_list_add_tail(&wq->pending_list, wqe);
3648 				OCS_STAT(wq->wq_pending_count++;)
3649 			}
3650 		}
3651 
3652 	sli_queue_unlock(wq->queue);
3653 
3654 	return rc;
3655 
3656 }
3657 
3658 /**
3659  * @brief Update free count and submit any pending HW IOs
3660  *
3661  * @par Description
3662  * The WQ free count is updated, and any pending HW IOs are submitted that
3663  * will fit in the queue.
3664  *
3665  * @param wq Pointer to work queue.
3666  * @param update_free_count Value added to WQs free count.
3667  *
3668  * @return None.
3669  */
3670 static void
3671 hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count)
3672 {
3673 	ocs_hw_wqe_t *wqe;
3674 
3675 	sli_queue_lock(wq->queue);
3676 
3677 		/* Update free count with value passed in */
3678 		wq->free_count += update_free_count;
3679 
3680 		while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3681 			_hw_wq_write(wq, wqe);
3682 
3683 			if (wqe->abort_wqe_submit_needed) {
3684 				wqe->abort_wqe_submit_needed = 0;
3685 				sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3686 						wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
3687 				ocs_list_add_tail(&wq->pending_list, wqe);
3688 				OCS_STAT(wq->wq_pending_count++;)
3689 			}
3690 		}
3691 
3692 	sli_queue_unlock(wq->queue);
3693 }
3694 
3695 /**
3696  * @brief Check to see if there are any BZ 161832 workaround waiting IOs
3697  *
3698  * @par Description
3699  * Checks hw->sec_hio_wait_list, if an IO is waiting for a HW IO, then try
3700  * to allocate a secondary HW io, and dispatch it.
3701  *
3702  * @n @b Note: hw->io_lock MUST be taken when called.
3703  *
3704  * @param hw pointer to HW object
3705  *
3706  * @return none
3707  */
3708 static void
3709 ocs_hw_check_sec_hio_list(ocs_hw_t *hw)
3710 {
3711 	ocs_hw_io_t *io;
3712 	ocs_hw_io_t *sec_io;
3713 	int rc = 0;
3714 
3715 	while (!ocs_list_empty(&hw->sec_hio_wait_list)) {
3716 		uint16_t flags;
3717 
3718 		sec_io = _ocs_hw_io_alloc(hw);
3719 		if (sec_io == NULL) {
3720 			break;
3721 		}
3722 
3723 		io = ocs_list_remove_head(&hw->sec_hio_wait_list);
3724 		ocs_list_add_tail(&hw->io_inuse, io);
3725 		io->state = OCS_HW_IO_STATE_INUSE;
3726 		io->sec_hio = sec_io;
3727 
3728 		/* mark secondary XRI for second and subsequent data phase as quarantine */
3729 		if (io->xbusy) {
3730 			sec_io->quarantine = TRUE;
3731 		}
3732 
3733 		flags = io->sec_iparam.fcp_tgt.flags;
3734 		if (io->xbusy) {
3735 			flags |= SLI4_IO_CONTINUATION;
3736 		} else {
3737 			flags &= ~SLI4_IO_CONTINUATION;
3738 		}
3739 
3740 		io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout;
3741 
3742 		/* Complete (continue) TRECV IO */
3743 		if (io->xbusy) {
3744 			if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3745 				io->first_data_sge,
3746 				io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator,
3747 				io->reqtag, SLI4_CQ_DEFAULT,
3748 				io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3749 				flags,
3750 				io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3751 					ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3752 					break;
3753 			}
3754 		} else {
3755 			if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3756 				io->first_data_sge,
3757 				io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator,
3758 				io->reqtag, SLI4_CQ_DEFAULT,
3759 				io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3760 				flags,
3761 				io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
3762 				io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3763 					ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3764 					break;
3765 			}
3766 		}
3767 
3768 		if (io->wq == NULL) {
3769 			io->wq = ocs_hw_queue_next_wq(hw, io);
3770 			ocs_hw_assert(io->wq != NULL);
3771 		}
3772 		io->xbusy = TRUE;
3773 
3774 		/*
3775 		 * Add IO to active io wqe list before submitting, in case the
3776 		 * wcqe processing preempts this thread.
3777 		 */
3778 		ocs_hw_add_io_timed_wqe(hw, io);
3779 		rc = hw_wq_write(io->wq, &io->wqe);
3780 		if (rc >= 0) {
3781 			/* non-negative return is success */
3782 			rc = 0;
3783 		} else {
3784 			/* failed to write wqe, remove from active wqe list */
3785 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3786 			io->xbusy = FALSE;
3787 			ocs_hw_remove_io_timed_wqe(hw, io);
3788 		}
3789 	}
3790 }
3791 
3792 /**
3793  * @ingroup io
3794  * @brief Send a Single Request/Response Sequence (SRRS).
3795  *
3796  * @par Description
3797  * This routine supports communication sequences consisting of a single
3798  * request and single response between two endpoints. Examples include:
3799  *  - Sending an ELS request.
3800  *  - Sending an ELS response - To send an ELS reponse, the caller must provide
3801  * the OX_ID from the received request.
3802  *  - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
3803  * the caller must provide the R_CTL, TYPE, and DF_CTL
3804  * values to place in the FC frame header.
3805  *  .
3806  * @n @b Note: The caller is expected to provide both send and receive
3807  * buffers for requests. In the case of sending a response, no receive buffer
3808  * is necessary and the caller may pass in a NULL pointer.
3809  *
3810  * @param hw Hardware context.
3811  * @param type Type of sequence (ELS request/response, FC-CT).
3812  * @param io Previously-allocated HW IO object.
3813  * @param send DMA memory holding data to send (for example, ELS request, BLS response).
3814  * @param len Length, in bytes, of data to send.
3815  * @param receive Optional DMA memory to hold a response.
3816  * @param rnode Destination of data (that is, a remote node).
3817  * @param iparam IO parameters (ELS response and FC-CT).
3818  * @param cb Function call upon completion of sending the data (may be NULL).
3819  * @param arg Argument to pass to IO completion function.
3820  *
3821  * @return Returns 0 on success, or a non-zero on failure.
3822  */
3823 ocs_hw_rtn_e
3824 ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
3825 		  ocs_dma_t *send, uint32_t len, ocs_dma_t *receive,
3826 		  ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam,
3827 		  ocs_hw_srrs_cb_t cb, void *arg)
3828 {
3829 	sli4_sge_t	*sge = NULL;
3830 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
3831 	uint16_t	local_flags = 0;
3832 
3833 	if (!hw || !io || !rnode || !iparam) {
3834 		ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n",
3835 			    hw, io, send, receive, rnode, iparam);
3836 		return OCS_HW_RTN_ERROR;
3837 	}
3838 
3839 	if (hw->state != OCS_HW_STATE_ACTIVE) {
3840 		ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state);
3841 		return OCS_HW_RTN_ERROR;
3842 	}
3843 
3844 	if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
3845 		/* We must set the XC bit for port owned XRIs */
3846 		local_flags |= SLI4_IO_CONTINUATION;
3847 	}
3848 	io->rnode = rnode;
3849 	io->type  = type;
3850 	io->done = cb;
3851 	io->arg  = arg;
3852 
3853 	sge = io->sgl->virt;
3854 
3855 	/* clear both SGE */
3856 	ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
3857 
3858 	if (send) {
3859 		sge[0].buffer_address_high = ocs_addr32_hi(send->phys);
3860 		sge[0].buffer_address_low  = ocs_addr32_lo(send->phys);
3861 		sge[0].sge_type = SLI4_SGE_TYPE_DATA;
3862 		sge[0].buffer_length = len;
3863 	}
3864 
3865 	if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) {
3866 		sge[1].buffer_address_high = ocs_addr32_hi(receive->phys);
3867 		sge[1].buffer_address_low  = ocs_addr32_lo(receive->phys);
3868 		sge[1].sge_type = SLI4_SGE_TYPE_DATA;
3869 		sge[1].buffer_length = receive->size;
3870 		sge[1].last = TRUE;
3871 	} else {
3872 		sge[0].last = TRUE;
3873 	}
3874 
3875 	switch (type) {
3876 	case OCS_HW_ELS_REQ:
3877 		if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl,
3878 							*((uint8_t *)(send->virt)), /* req_type */
3879 							len, receive->size,
3880 							iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) {
3881 			ocs_log_err(hw->os, "REQ WQE error\n");
3882 			rc = OCS_HW_RTN_ERROR;
3883 		}
3884 		break;
3885 	case OCS_HW_ELS_RSP:
3886 		if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3887 					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3888 					   iparam->els.ox_id,
3889 							rnode, local_flags, UINT32_MAX)) {
3890 			ocs_log_err(hw->os, "RSP WQE error\n");
3891 			rc = OCS_HW_RTN_ERROR;
3892 		}
3893 		break;
3894 	case OCS_HW_ELS_RSP_SID:
3895 		if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3896 					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3897 					   iparam->els_sid.ox_id,
3898 							rnode, local_flags, iparam->els_sid.s_id)) {
3899 			ocs_log_err(hw->os, "RSP (SID) WQE error\n");
3900 			rc = OCS_HW_RTN_ERROR;
3901 		}
3902 		break;
3903 	case OCS_HW_FC_CT:
3904 		if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3905 					  receive->size, iparam->fc_ct.timeout, io->indicator,
3906 					  io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl,
3907 					  iparam->fc_ct.type, iparam->fc_ct.df_ctl)) {
3908 			ocs_log_err(hw->os, "GEN WQE error\n");
3909 			rc = OCS_HW_RTN_ERROR;
3910 		}
3911 		break;
3912 	case OCS_HW_FC_CT_RSP:
3913 		if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3914 					  iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator,
3915 					  io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl,
3916 					  iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) {
3917 			ocs_log_err(hw->os, "XMIT SEQ WQE error\n");
3918 			rc = OCS_HW_RTN_ERROR;
3919 		}
3920 		break;
3921 	case OCS_HW_BLS_ACC:
3922 	case OCS_HW_BLS_RJT:
3923 	{
3924 		sli_bls_payload_t	bls;
3925 
3926 		if (OCS_HW_BLS_ACC == type) {
3927 			bls.type = SLI_BLS_ACC;
3928 			ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc));
3929 		} else {
3930 			bls.type = SLI_BLS_RJT;
3931 			ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt));
3932 		}
3933 
3934 		bls.ox_id = iparam->bls.ox_id;
3935 		bls.rx_id = iparam->bls.rx_id;
3936 
3937 		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3938 					   io->indicator, io->reqtag,
3939 					   SLI4_CQ_DEFAULT,
3940 					   rnode, UINT32_MAX)) {
3941 			ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
3942 			rc = OCS_HW_RTN_ERROR;
3943 		}
3944 		break;
3945 	}
3946 	case OCS_HW_BLS_ACC_SID:
3947 	{
3948 		sli_bls_payload_t	bls;
3949 
3950 		bls.type = SLI_BLS_ACC;
3951 		ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc));
3952 
3953 		bls.ox_id = iparam->bls_sid.ox_id;
3954 		bls.rx_id = iparam->bls_sid.rx_id;
3955 
3956 		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3957 					   io->indicator, io->reqtag,
3958 					   SLI4_CQ_DEFAULT,
3959 					   rnode, iparam->bls_sid.s_id)) {
3960 			ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
3961 			rc = OCS_HW_RTN_ERROR;
3962 		}
3963 		break;
3964 	}
3965 	case OCS_HW_BCAST:
3966 		if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3967 					iparam->bcast.timeout, io->indicator, io->reqtag,
3968 					SLI4_CQ_DEFAULT, rnode,
3969 					iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) {
3970 			ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n");
3971 			rc = OCS_HW_RTN_ERROR;
3972 		}
3973 		break;
3974 	default:
3975 		ocs_log_err(hw->os, "bad SRRS type %#x\n", type);
3976 		rc = OCS_HW_RTN_ERROR;
3977 	}
3978 
3979 	if (OCS_HW_RTN_SUCCESS == rc) {
3980 		if (io->wq == NULL) {
3981 			io->wq = ocs_hw_queue_next_wq(hw, io);
3982 			ocs_hw_assert(io->wq != NULL);
3983 		}
3984 		io->xbusy = TRUE;
3985 
3986 		/*
3987 		 * Add IO to active io wqe list before submitting, in case the
3988 		 * wcqe processing preempts this thread.
3989 		 */
3990 		OCS_STAT(io->wq->use_count++);
3991 		ocs_hw_add_io_timed_wqe(hw, io);
3992 		rc = hw_wq_write(io->wq, &io->wqe);
3993 		if (rc >= 0) {
3994 			/* non-negative return is success */
3995 			rc = 0;
3996 		} else {
3997 			/* failed to write wqe, remove from active wqe list */
3998 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3999 			io->xbusy = FALSE;
4000 			ocs_hw_remove_io_timed_wqe(hw, io);
4001 		}
4002 	}
4003 
4004 	return rc;
4005 }
4006 
4007 /**
4008  * @ingroup io
4009  * @brief Send a read, write, or response IO.
4010  *
4011  * @par Description
4012  * This routine supports sending a higher-level IO (for example, FCP) between two endpoints
4013  * as a target or initiator. Examples include:
4014  *  - Sending read data and good response (target).
4015  *  - Sending a response (target with no data or after receiving write data).
4016  *  .
4017  * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
4018  * calling this routine, the data should be loaded using ocs_hw_io_add_sge().
4019  *
4020  * @param hw Hardware context.
4021  * @param type Type of IO (target read, target response, and so on).
4022  * @param io Previously-allocated HW IO object.
4023  * @param len Length, in bytes, of data to send.
4024  * @param iparam IO parameters.
4025  * @param rnode Destination of data (that is, a remote node).
4026  * @param cb Function call upon completion of sending data (may be NULL).
4027  * @param arg Argument to pass to IO completion function.
4028  *
4029  * @return Returns 0 on success, or a non-zero value on failure.
4030  *
4031  * @todo
4032  *  - Support specifiying relative offset.
4033  *  - Use a WQ other than 0.
4034  */
4035 ocs_hw_rtn_e
4036 ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
4037 		uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
4038 		void *cb, void *arg)
4039 {
4040 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
4041 	uint32_t	rpi;
4042 	uint8_t		send_wqe = TRUE;
4043 
4044 	CPUTRACE("");
4045 
4046 	if (!hw || !io || !rnode || !iparam) {
4047 		ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
4048 			    hw, io, iparam, rnode);
4049 		return OCS_HW_RTN_ERROR;
4050 	}
4051 
4052 	if (hw->state != OCS_HW_STATE_ACTIVE) {
4053 		ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
4054 		return OCS_HW_RTN_ERROR;
4055 	}
4056 
4057 	rpi = rnode->indicator;
4058 
4059 	if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
4060 		rpi = hw->workaround.unregistered_rid;
4061 		ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
4062 	}
4063 
4064 	/*
4065 	 * Save state needed during later stages
4066 	 */
4067 	io->rnode = rnode;
4068 	io->type  = type;
4069 	io->done  = cb;
4070 	io->arg   = arg;
4071 
4072 	/*
4073 	 * Format the work queue entry used to send the IO
4074 	 */
4075 	switch (type) {
4076 	case OCS_HW_IO_INITIATOR_READ:
4077 		/*
4078 		 * If use_dif_quarantine workaround is in effect, and dif_separates then mark the
4079 		 * initiator read IO for quarantine
4080 		 */
4081 		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4082 		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4083 			io->quarantine = TRUE;
4084 		}
4085 
4086 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4087 				iparam->fcp_ini.rsp);
4088 
4089 		if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
4090 					io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
4091 					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4092 					iparam->fcp_ini.timeout)) {
4093 			ocs_log_err(hw->os, "IREAD WQE error\n");
4094 			rc = OCS_HW_RTN_ERROR;
4095 		}
4096 		break;
4097 	case OCS_HW_IO_INITIATOR_WRITE:
4098 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4099 				iparam->fcp_ini.rsp);
4100 
4101 		if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4102 					 len, iparam->fcp_ini.first_burst,
4103 					 io->indicator, io->reqtag,
4104 					SLI4_CQ_DEFAULT, rpi, rnode,
4105 					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4106 					iparam->fcp_ini.timeout)) {
4107 			ocs_log_err(hw->os, "IWRITE WQE error\n");
4108 			rc = OCS_HW_RTN_ERROR;
4109 		}
4110 		break;
4111 	case OCS_HW_IO_INITIATOR_NODATA:
4112 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4113 				iparam->fcp_ini.rsp);
4114 
4115 		if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
4116 					io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
4117 					rpi, rnode, iparam->fcp_ini.timeout)) {
4118 			ocs_log_err(hw->os, "ICMND WQE error\n");
4119 			rc = OCS_HW_RTN_ERROR;
4120 		}
4121 		break;
4122 	case OCS_HW_IO_TARGET_WRITE: {
4123 		uint16_t flags = iparam->fcp_tgt.flags;
4124 		fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;
4125 
4126 		/*
4127 		 * Fill in the XFER_RDY for IF_TYPE 0 devices
4128 		 */
4129 		*((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
4130 		*((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
4131 		*((uint32_t *)xfer->rsvd) = 0;
4132 
4133 		if (io->xbusy) {
4134 			flags |= SLI4_IO_CONTINUATION;
4135 		} else {
4136 			flags &= ~SLI4_IO_CONTINUATION;
4137 		}
4138 
4139 		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4140 
4141 		/*
4142 		 * If use_dif_quarantine workaround is in effect, and this is a DIF enabled IO
4143 		 * then mark the target write IO for quarantine
4144 		 */
4145 		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4146 		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4147 			io->quarantine = TRUE;
4148 		}
4149 
4150 		/*
4151 		 * BZ 161832 Workaround:
4152 		 * Check for use_dif_sec_xri workaround.  Note, even though the first dataphase
4153 		 * doesn't really need a secondary XRI, we allocate one anyway, as this avoids the
4154 		 * potential for deadlock where all XRI's are allocated as primaries to IOs that
4155 		 * are on hw->sec_hio_wait_list.   If this secondary XRI is not for the first
4156 		 * data phase, it is marked for quarantine.
4157 		 */
4158 		if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4159 			/*
4160 			 * If we have allocated a chained SGL for skyhawk, then
4161 			 * we can re-use this for the sec_hio.
4162 			 */
4163 			if (io->ovfl_io != NULL) {
4164 				io->sec_hio = io->ovfl_io;
4165 				io->sec_hio->quarantine = TRUE;
4166 			} else {
4167 				io->sec_hio = ocs_hw_io_alloc(hw);
4168 			}
4169 			if (io->sec_hio == NULL) {
4170 				/* Failed to allocate, so save full request context and put
4171 				 * this IO on the wait list
4172 				 */
4173 				io->sec_iparam = *iparam;
4174 				io->sec_len = len;
4175 				ocs_lock(&hw->io_lock);
4176 					ocs_list_remove(&hw->io_inuse,  io);
4177 					ocs_list_add_tail(&hw->sec_hio_wait_list, io);
4178 					io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO;
4179 					hw->sec_hio_wait_count++;
4180 				ocs_unlock(&hw->io_lock);
4181 				send_wqe = FALSE;
4182 				/* Done */
4183 				break;
4184 			}
4185 			/* We quarantine the secondary IO if this is the second or subsequent data phase */
4186 			if (io->xbusy) {
4187 				io->sec_hio->quarantine = TRUE;
4188 			}
4189 		}
4190 
4191 		/*
4192 		 * If not the first data phase, and io->sec_hio has been allocated, then issue
4193 		 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE
4194 		 */
4195 		if (io->xbusy && (io->sec_hio != NULL)) {
4196 			if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4197 						   iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator,
4198 						   io->reqtag, SLI4_CQ_DEFAULT,
4199 						   iparam->fcp_tgt.ox_id, rpi, rnode,
4200 						   flags,
4201 						   iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4202 						   iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4203 				ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4204 				rc = OCS_HW_RTN_ERROR;
4205 			}
4206 		} else {
4207 			if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4208 						   iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4209 						   SLI4_CQ_DEFAULT,
4210 						   iparam->fcp_tgt.ox_id, rpi, rnode,
4211 						   flags,
4212 						   iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4213 						   iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4214 				ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4215 				rc = OCS_HW_RTN_ERROR;
4216 			}
4217 		}
4218 		break;
4219 	}
4220 	case OCS_HW_IO_TARGET_READ: {
4221 		uint16_t flags = iparam->fcp_tgt.flags;
4222 
4223 		if (io->xbusy) {
4224 			flags |= SLI4_IO_CONTINUATION;
4225 		} else {
4226 			flags &= ~SLI4_IO_CONTINUATION;
4227 		}
4228 
4229 		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4230 		if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4231 					iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4232 					SLI4_CQ_DEFAULT,
4233 					iparam->fcp_tgt.ox_id, rpi, rnode,
4234 					flags,
4235 					iparam->fcp_tgt.dif_oper,
4236 					iparam->fcp_tgt.blk_size,
4237 					iparam->fcp_tgt.cs_ctl,
4238 					iparam->fcp_tgt.app_id)) {
4239 			ocs_log_err(hw->os, "TSEND WQE error\n");
4240 			rc = OCS_HW_RTN_ERROR;
4241 		} else if (hw->workaround.retain_tsend_io_length) {
4242 			io->length = len;
4243 		}
4244 		break;
4245 	}
4246 	case OCS_HW_IO_TARGET_RSP: {
4247 		uint16_t flags = iparam->fcp_tgt.flags;
4248 
4249 		if (io->xbusy) {
4250 			flags |= SLI4_IO_CONTINUATION;
4251 		} else {
4252 			flags &= ~SLI4_IO_CONTINUATION;
4253 		}
4254 
4255 		/* post a new auto xfer ready buffer */
4256 		if (hw->auto_xfer_rdy_enabled && io->is_port_owned) {
4257 			if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) {
4258 				flags |= SLI4_IO_DNRX;
4259 			}
4260 		}
4261 
4262 		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4263 		if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size,
4264 					&io->def_sgl,
4265 					len,
4266 					io->indicator, io->reqtag,
4267 					SLI4_CQ_DEFAULT,
4268 					iparam->fcp_tgt.ox_id,
4269 					rpi, rnode,
4270 					flags, iparam->fcp_tgt.cs_ctl,
4271 					io->is_port_owned,
4272 					iparam->fcp_tgt.app_id)) {
4273 			ocs_log_err(hw->os, "TRSP WQE error\n");
4274 			rc = OCS_HW_RTN_ERROR;
4275 		}
4276 
4277 		break;
4278 	}
4279 	default:
4280 		ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4281 		rc = OCS_HW_RTN_ERROR;
4282 	}
4283 
4284 	if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) {
4285 		if (io->wq == NULL) {
4286 			io->wq = ocs_hw_queue_next_wq(hw, io);
4287 			ocs_hw_assert(io->wq != NULL);
4288 		}
4289 
4290 		io->xbusy = TRUE;
4291 
4292 		/*
4293 		 * Add IO to active io wqe list before submitting, in case the
4294 		 * wcqe processing preempts this thread.
4295 		 */
4296 		OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
4297 		OCS_STAT(io->wq->use_count++);
4298 		ocs_hw_add_io_timed_wqe(hw, io);
4299 		rc = hw_wq_write(io->wq, &io->wqe);
4300 		if (rc >= 0) {
4301 			/* non-negative return is success */
4302 			rc = 0;
4303 		} else {
4304 			/* failed to write wqe, remove from active wqe list */
4305 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4306 			io->xbusy = FALSE;
4307 			ocs_hw_remove_io_timed_wqe(hw, io);
4308 		}
4309 	}
4310 
4311 	return rc;
4312 }
4313 
4314 /**
4315  * @brief Send a raw frame
4316  *
4317  * @par Description
4318  * Using the SEND_FRAME_WQE, a frame consisting of header and payload is sent.
4319  *
4320  * @param hw Pointer to HW object.
4321  * @param hdr Pointer to a little endian formatted FC header.
4322  * @param sof Value to use as the frame SOF.
4323  * @param eof Value to use as the frame EOF.
4324  * @param payload Pointer to payload DMA buffer.
4325  * @param ctx Pointer to caller provided send frame context.
4326  * @param callback Callback function.
4327  * @param arg Callback function argument.
4328  *
4329  * @return Returns 0 on success, or a negative error code value on failure.
4330  */
4331 ocs_hw_rtn_e
4332 ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
4333 		   ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
4334 {
4335 	int32_t rc;
4336 	ocs_hw_wqe_t *wqe;
4337 	uint32_t xri;
4338 	hw_wq_t *wq;
4339 
4340 	wqe = &ctx->wqe;
4341 
4342 	/* populate the callback object */
4343 	ctx->hw = hw;
4344 
4345 	/* Fetch and populate request tag */
4346 	ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg);
4347 	if (ctx->wqcb == NULL) {
4348 		ocs_log_err(hw->os, "can't allocate request tag\n");
4349 		return OCS_HW_RTN_NO_RESOURCES;
4350 	}
4351 
4352 	/* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */
4353 	wq = ocs_varray_iter_next(hw->wq_class_array[1]);
4354 	if (wq == NULL) {
4355 		wq = hw->hw_wq[0];
4356 	}
4357 
4358 	/* Set XRI and RX_ID in the header based on which WQ, and which send_frame_io we are using */
4359 	xri = wq->send_frame_io->indicator;
4360 
4361 	/* Build the send frame WQE */
4362 	rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload,
4363 				payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index);
4364 	if (rc) {
4365 		ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
4366 		return OCS_HW_RTN_ERROR;
4367 	}
4368 
4369 	/* Write to WQ */
4370 	rc = hw_wq_write(wq, wqe);
4371 	if (rc) {
4372 		ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc);
4373 		return OCS_HW_RTN_ERROR;
4374 	}
4375 
4376 	OCS_STAT(wq->use_count++);
4377 
4378 	return OCS_HW_RTN_SUCCESS;
4379 }
4380 
4381 ocs_hw_rtn_e
4382 ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count)
4383 {
4384 	if (sli_get_sgl_preregister(&hw->sli)) {
4385 		ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n");
4386 		return OCS_HW_RTN_ERROR;
4387 	}
4388 	io->ovfl_sgl = sgl;
4389 	io->ovfl_sgl_count = sgl_count;
4390 	io->ovfl_io = NULL;
4391 
4392 	return OCS_HW_RTN_SUCCESS;
4393 }
4394 
4395 static void
4396 ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4397 {
4398 	/* Restore the default */
4399 	io->sgl = &io->def_sgl;
4400 	io->sgl_count = io->def_sgl_count;
4401 
4402 	/*
4403 	 * For skyhawk, we need to free the IO allocated for the chained
4404 	 * SGL. For all devices, clear the overflow fields on the IO.
4405 	 *
4406 	 * Note: For DIF IOs, we may be using the same XRI for the sec_hio and
4407 	 *       the chained SGLs. If so, then we clear the ovfl_io field
4408 	 *       when the sec_hio is freed.
4409 	 */
4410 	if (io->ovfl_io != NULL) {
4411 		ocs_hw_io_free(hw, io->ovfl_io);
4412 		io->ovfl_io = NULL;
4413 	}
4414 
4415 	/* Clear the overflow SGL */
4416 	io->ovfl_sgl = NULL;
4417 	io->ovfl_sgl_count = 0;
4418 	io->ovfl_lsp = NULL;
4419 }
4420 
4421 /**
4422  * @ingroup io
4423  * @brief Initialize the scatter gather list entries of an IO.
4424  *
4425  * @param hw Hardware context.
4426  * @param io Previously-allocated HW IO object.
4427  * @param type Type of IO (target read, target response, and so on).
4428  *
4429  * @return Returns 0 on success, or a non-zero value on failure.
4430  */
4431 ocs_hw_rtn_e
4432 ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type)
4433 {
4434 	sli4_sge_t	*data = NULL;
4435 	uint32_t	i = 0;
4436 	uint32_t	skips = 0;
4437 
4438 	if (!hw || !io) {
4439 		ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n",
4440 			    hw, io);
4441 		return OCS_HW_RTN_ERROR;
4442 	}
4443 
4444 	/* Clear / reset the scatter-gather list */
4445 	io->sgl = &io->def_sgl;
4446 	io->sgl_count = io->def_sgl_count;
4447 	io->first_data_sge = 0;
4448 
4449 	ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
4450 	io->n_sge = 0;
4451 	io->sge_offset = 0;
4452 
4453 	io->type = type;
4454 
4455 	data = io->sgl->virt;
4456 
4457 	/*
4458 	 * Some IO types have underlying hardware requirements on the order
4459 	 * of SGEs. Process all special entries here.
4460 	 */
4461 	switch (type) {
4462 	case OCS_HW_IO_INITIATOR_READ:
4463 	case OCS_HW_IO_INITIATOR_WRITE:
4464 	case OCS_HW_IO_INITIATOR_NODATA:
4465 		/*
4466 		 * No skips, 2 special for initiator I/Os
4467 		 * The addresses and length are written later
4468 		 */
4469 		/* setup command pointer */
4470 		data->sge_type = SLI4_SGE_TYPE_DATA;
4471 		data++;
4472 
4473 		/* setup response pointer */
4474 		data->sge_type = SLI4_SGE_TYPE_DATA;
4475 
4476 		if (OCS_HW_IO_INITIATOR_NODATA == type) {
4477 			data->last = TRUE;
4478 		}
4479 		data++;
4480 
4481 		io->n_sge = 2;
4482 		break;
4483 	case OCS_HW_IO_TARGET_WRITE:
4484 #define OCS_TARGET_WRITE_SKIPS	2
4485 		skips = OCS_TARGET_WRITE_SKIPS;
4486 
4487 		/* populate host resident XFER_RDY buffer */
4488 		data->sge_type = SLI4_SGE_TYPE_DATA;
4489 		data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys);
4490 		data->buffer_address_low  = ocs_addr32_lo(io->xfer_rdy.phys);
4491 		data->buffer_length = io->xfer_rdy.size;
4492 		data++;
4493 
4494 		skips--;
4495 
4496 		io->n_sge = 1;
4497 		break;
4498 	case OCS_HW_IO_TARGET_READ:
4499 		/*
4500 		 * For FCP_TSEND64, the first 2 entries are SKIP SGE's
4501 		 */
4502 #define OCS_TARGET_READ_SKIPS	2
4503 		skips = OCS_TARGET_READ_SKIPS;
4504 		break;
4505 	case OCS_HW_IO_TARGET_RSP:
4506 		/*
4507 		 * No skips, etc. for FCP_TRSP64
4508 		 */
4509 		break;
4510 	default:
4511 		ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4512 		return OCS_HW_RTN_ERROR;
4513 	}
4514 
4515 	/*
4516 	 * Write skip entries
4517 	 */
4518 	for (i = 0; i < skips; i++) {
4519 		data->sge_type = SLI4_SGE_TYPE_SKIP;
4520 		data++;
4521 	}
4522 
4523 	io->n_sge += skips;
4524 
4525 	/*
4526 	 * Set last
4527 	 */
4528 	data->last = TRUE;
4529 
4530 	return OCS_HW_RTN_SUCCESS;
4531 }
4532 
4533 /**
4534  * @ingroup io
4535  * @brief Add a T10 PI seed scatter gather list entry.
4536  *
4537  * @param hw Hardware context.
4538  * @param io Previously-allocated HW IO object.
4539  * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
4540  *
4541  * @return Returns 0 on success, or a non-zero value on failure.
4542  */
4543 ocs_hw_rtn_e
4544 ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info)
4545 {
4546 	sli4_sge_t	*data = NULL;
4547 	sli4_diseed_sge_t *dif_seed;
4548 
4549 	/* If no dif_info, or dif_oper is disabled, then just return success */
4550 	if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) {
4551 		return OCS_HW_RTN_SUCCESS;
4552 	}
4553 
4554 	if (!hw || !io) {
4555 		ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n",
4556 			    hw, io, dif_info);
4557 		return OCS_HW_RTN_ERROR;
4558 	}
4559 
4560 	data = io->sgl->virt;
4561 	data += io->n_sge;
4562 
4563 	/* If we are doing T10 DIF add the DIF Seed SGE */
4564 	ocs_memset(data, 0, sizeof(sli4_diseed_sge_t));
4565 	dif_seed = (sli4_diseed_sge_t *)data;
4566 	dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp;
4567 	dif_seed->ref_tag_repl = dif_info->ref_tag_repl;
4568 	dif_seed->app_tag_repl = dif_info->app_tag_repl;
4569 	dif_seed->repl_app_tag = dif_info->repl_app_tag;
4570 	if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) {
4571 		dif_seed->atrt = dif_info->disable_app_ref_ffff;
4572 		dif_seed->at = dif_info->disable_app_ffff;
4573 	}
4574 	dif_seed->sge_type = SLI4_SGE_TYPE_DISEED;
4575 	/* Workaround for SKH (BZ157233) */
4576 	if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4577 		(SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) {
4578 		dif_seed->sge_type = SLI4_SGE_TYPE_SKIP;
4579 	}
4580 
4581 	dif_seed->app_tag_cmp = dif_info->app_tag_cmp;
4582 	dif_seed->dif_blk_size = dif_info->blk_size;
4583 	dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag;
4584 	dif_seed->check_app_tag = dif_info->check_app_tag;
4585 	dif_seed->check_ref_tag = dif_info->check_ref_tag;
4586 	dif_seed->check_crc = dif_info->check_guard;
4587 	dif_seed->new_ref_tag = dif_info->repl_ref_tag;
4588 
4589 	switch(dif_info->dif_oper) {
4590 	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
4591 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4592 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4593 		break;
4594 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
4595 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4596 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4597 		break;
4598 	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM:
4599 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4600 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4601 		break;
4602 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF:
4603 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4604 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4605 		break;
4606 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
4607 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4608 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4609 		break;
4610 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM:
4611 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4612 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4613 		break;
4614 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
4615 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4616 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4617 		break;
4618 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
4619 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4620 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4621 		break;
4622 	case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW:
4623 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4624 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4625 		break;
4626 	default:
4627 		ocs_log_err(hw->os, "unsupported DIF operation %#x\n",
4628 			    dif_info->dif_oper);
4629 		return OCS_HW_RTN_ERROR;
4630 	}
4631 
4632 	/*
4633 	 * Set last, clear previous last
4634 	 */
4635 	data->last = TRUE;
4636 	if (io->n_sge) {
4637 		data[-1].last = FALSE;
4638 	}
4639 
4640 	io->n_sge++;
4641 
4642 	return OCS_HW_RTN_SUCCESS;
4643 }
4644 
4645 static ocs_hw_rtn_e
4646 ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4647 {
4648 	sli4_lsp_sge_t *lsp;
4649 
4650 	/* fail if we're already pointing to the overflow SGL */
4651 	if (io->sgl == io->ovfl_sgl) {
4652 		return OCS_HW_RTN_ERROR;
4653 	}
4654 
4655 	/*
4656 	 * For skyhawk, we can use another SGL to extend the SGL list. The
4657 	 * Chained entry must not be in the first 4 entries.
4658 	 *
4659 	 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
4660 	 */
4661 	if (sli_get_sgl_preregister(&hw->sli) &&
4662 	    io->def_sgl_count > 4 &&
4663 	    io->ovfl_io == NULL &&
4664 	    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4665 		(SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
4666 		io->ovfl_io = ocs_hw_io_alloc(hw);
4667 		if (io->ovfl_io != NULL) {
4668 			/*
4669 			 * Note: We can't call ocs_hw_io_register_sgl() here
4670 			 * because it checks that SGLs are not pre-registered
4671 			 * and for shyhawk, preregistered SGLs are required.
4672 			 */
4673 			io->ovfl_sgl = &io->ovfl_io->def_sgl;
4674 			io->ovfl_sgl_count = io->ovfl_io->def_sgl_count;
4675 		}
4676 	}
4677 
4678 	/* fail if we don't have an overflow SGL registered */
4679 	if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) {
4680 		return OCS_HW_RTN_ERROR;
4681 	}
4682 
4683 	/*
4684 	 * Overflow, we need to put a link SGE in the last location of the current SGL, after
4685 	 * copying the the last SGE to the overflow SGL
4686 	 */
4687 
4688 	((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1];
4689 
4690 	lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1];
4691 	ocs_memset(lsp, 0, sizeof(*lsp));
4692 
4693 	if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4694 	    (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
4695 		sli_skh_chain_sge_build(&hw->sli,
4696 					(sli4_sge_t*)lsp,
4697 					io->ovfl_io->indicator,
4698 					0, /* frag_num */
4699 					0); /* offset */
4700 	} else {
4701 		lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys);
4702 		lsp->buffer_address_low  = ocs_addr32_lo(io->ovfl_sgl->phys);
4703 		lsp->sge_type = SLI4_SGE_TYPE_LSP;
4704 		lsp->last = 0;
4705 		io->ovfl_lsp = lsp;
4706 		io->ovfl_lsp->segment_length = sizeof(sli4_sge_t);
4707 	}
4708 
4709 	/* Update the current SGL pointer, and n_sgl */
4710 	io->sgl = io->ovfl_sgl;
4711 	io->sgl_count = io->ovfl_sgl_count;
4712 	io->n_sge = 1;
4713 
4714 	return OCS_HW_RTN_SUCCESS;
4715 }
4716 
4717 /**
4718  * @ingroup io
4719  * @brief Add a scatter gather list entry to an IO.
4720  *
4721  * @param hw Hardware context.
4722  * @param io Previously-allocated HW IO object.
4723  * @param addr Physical address.
4724  * @param length Length of memory pointed to by @c addr.
4725  *
4726  * @return Returns 0 on success, or a non-zero value on failure.
4727  */
4728 ocs_hw_rtn_e
4729 ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
4730 {
4731 	sli4_sge_t	*data = NULL;
4732 
4733 	if (!hw || !io || !addr || !length) {
4734 		ocs_log_err(hw ? hw->os : NULL,
4735 			    "bad parameter hw=%p io=%p addr=%lx length=%u\n",
4736 			    hw, io, addr, length);
4737 		return OCS_HW_RTN_ERROR;
4738 	}
4739 
4740 	if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
4741 		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4742 			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4743 			return OCS_HW_RTN_ERROR;
4744 		}
4745 	}
4746 
4747 	if (length > sli_get_max_sge(&hw->sli)) {
4748 		ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
4749 			    length, sli_get_max_sge(&hw->sli));
4750 		return OCS_HW_RTN_ERROR;
4751 	}
4752 
4753 	data = io->sgl->virt;
4754 	data += io->n_sge;
4755 
4756 	data->sge_type = SLI4_SGE_TYPE_DATA;
4757 	data->buffer_address_high = ocs_addr32_hi(addr);
4758 	data->buffer_address_low  = ocs_addr32_lo(addr);
4759 	data->buffer_length = length;
4760 	data->data_offset = io->sge_offset;
4761 	/*
4762 	 * Always assume this is the last entry and mark as such.
4763 	 * If this is not the first entry unset the "last SGE"
4764 	 * indication for the previous entry
4765 	 */
4766 	data->last = TRUE;
4767 	if (io->n_sge) {
4768 		data[-1].last = FALSE;
4769 	}
4770 
4771 	/* Set first_data_bde if not previously set */
4772 	if (io->first_data_sge == 0) {
4773 		io->first_data_sge = io->n_sge;
4774 	}
4775 
4776 	io->sge_offset += length;
4777 	io->n_sge++;
4778 
4779 	/* Update the linked segment length (only executed after overflow has begun) */
4780 	if (io->ovfl_lsp != NULL) {
4781 		io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
4782 	}
4783 
4784 	return OCS_HW_RTN_SUCCESS;
4785 }
4786 
4787 /**
4788  * @ingroup io
4789  * @brief Add a T10 DIF scatter gather list entry to an IO.
4790  *
4791  * @param hw Hardware context.
4792  * @param io Previously-allocated HW IO object.
4793  * @param addr DIF physical address.
4794  *
4795  * @return Returns 0 on success, or a non-zero value on failure.
4796  */
4797 ocs_hw_rtn_e
4798 ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
4799 {
4800 	sli4_dif_sge_t	*data = NULL;
4801 
4802 	if (!hw || !io || !addr) {
4803 		ocs_log_err(hw ? hw->os : NULL,
4804 			    "bad parameter hw=%p io=%p addr=%lx\n",
4805 			    hw, io, addr);
4806 		return OCS_HW_RTN_ERROR;
4807 	}
4808 
4809 	if ((io->n_sge + 1) > hw->config.n_sgl) {
4810 		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_ERROR) {
4811 			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4812 			return OCS_HW_RTN_ERROR;
4813 		}
4814 	}
4815 
4816 	data = io->sgl->virt;
4817 	data += io->n_sge;
4818 
4819 	data->sge_type = SLI4_SGE_TYPE_DIF;
4820 	/* Workaround for SKH (BZ157233) */
4821 	if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4822 		(SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) {
4823 		data->sge_type = SLI4_SGE_TYPE_SKIP;
4824 	}
4825 
4826 	data->buffer_address_high = ocs_addr32_hi(addr);
4827 	data->buffer_address_low  = ocs_addr32_lo(addr);
4828 
4829 	/*
4830 	 * Always assume this is the last entry and mark as such.
4831 	 * If this is not the first entry unset the "last SGE"
4832 	 * indication for the previous entry
4833 	 */
4834 	data->last = TRUE;
4835 	if (io->n_sge) {
4836 		data[-1].last = FALSE;
4837 	}
4838 
4839 	io->n_sge++;
4840 
4841 	return OCS_HW_RTN_SUCCESS;
4842 }
4843 
4844 /**
4845  * @ingroup io
4846  * @brief Abort a previously-started IO.
4847  *
4848  * @param hw Hardware context.
4849  * @param io_to_abort The IO to abort.
4850  * @param send_abts Boolean to have the hardware automatically
4851  * generate an ABTS.
4852  * @param cb Function call upon completion of the abort (may be NULL).
4853  * @param arg Argument to pass to abort completion function.
4854  *
4855  * @return Returns 0 on success, or a non-zero value on failure.
4856  */
4857 ocs_hw_rtn_e
4858 ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg)
4859 {
4860 	sli4_abort_type_e atype = SLI_ABORT_MAX;
4861 	uint32_t	id = 0, mask = 0;
4862 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
4863 	hw_wq_callback_t *wqcb;
4864 
4865 	if (!hw || !io_to_abort) {
4866 		ocs_log_err(hw ? hw->os : NULL,
4867 			    "bad parameter hw=%p io=%p\n",
4868 			    hw, io_to_abort);
4869 		return OCS_HW_RTN_ERROR;
4870 	}
4871 
4872 	if (hw->state != OCS_HW_STATE_ACTIVE) {
4873 		ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
4874 			    hw->state);
4875 		return OCS_HW_RTN_ERROR;
4876 	}
4877 
4878 	/* take a reference on IO being aborted */
4879 	if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) {
4880 		/* command no longer active */
4881 		ocs_log_test(hw ? hw->os : NULL,
4882 				"io not active xri=0x%x tag=0x%x\n",
4883 				io_to_abort->indicator, io_to_abort->reqtag);
4884 		return OCS_HW_RTN_IO_NOT_ACTIVE;
4885 	}
4886 
4887 	/* non-port owned XRI checks */
4888 	/* Must have a valid WQ reference */
4889 	if (io_to_abort->wq == NULL) {
4890 		ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
4891 				io_to_abort->indicator);
4892 		ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4893 		return OCS_HW_RTN_IO_NOT_ACTIVE;
4894 	}
4895 
4896 	/* Validation checks complete; now check to see if already being aborted */
4897 	ocs_lock(&hw->io_abort_lock);
4898 		if (io_to_abort->abort_in_progress) {
4899 			ocs_unlock(&hw->io_abort_lock);
4900 			ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4901 			ocs_log_debug(hw ? hw->os : NULL,
4902 				"io already being aborted xri=0x%x tag=0x%x\n",
4903 				io_to_abort->indicator, io_to_abort->reqtag);
4904 			return OCS_HW_RTN_IO_ABORT_IN_PROGRESS;
4905 		}
4906 
4907 		/*
4908 		 * This IO is not already being aborted. Set flag so we won't try to
4909 		 * abort it again. After all, we only have one abort_done callback.
4910 		 */
4911 		io_to_abort->abort_in_progress = 1;
4912 	ocs_unlock(&hw->io_abort_lock);
4913 
4914 	/*
4915 	 * If we got here, the possibilities are:
4916 	 * - host owned xri
4917 	 *	- io_to_abort->wq_index != UINT32_MAX
4918 	 *		- submit ABORT_WQE to same WQ
4919 	 * - port owned xri:
4920 	 *	- rxri: io_to_abort->wq_index == UINT32_MAX
4921 	 *		- submit ABORT_WQE to any WQ
4922 	 *	- non-rxri
4923 	 *		- io_to_abort->index != UINT32_MAX
4924 	 *			- submit ABORT_WQE to same WQ
4925 	 *		- io_to_abort->index == UINT32_MAX
4926 	 *			- submit ABORT_WQE to any WQ
4927 	 */
4928 	io_to_abort->abort_done = cb;
4929 	io_to_abort->abort_arg  = arg;
4930 
4931 	atype = SLI_ABORT_XRI;
4932 	id = io_to_abort->indicator;
4933 
4934 	/* Allocate a request tag for the abort portion of this IO */
4935 	wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort);
4936 	if (wqcb == NULL) {
4937 		ocs_log_err(hw->os, "can't allocate request tag\n");
4938 		return OCS_HW_RTN_NO_RESOURCES;
4939 	}
4940 	io_to_abort->abort_reqtag = wqcb->instance_index;
4941 
4942 	/*
4943 	 * If the wqe is on the pending list, then set this wqe to be
4944 	 * aborted when the IO's wqe is removed from the list.
4945 	 */
4946 	if (io_to_abort->wq != NULL) {
4947 		sli_queue_lock(io_to_abort->wq->queue);
4948 			if (ocs_list_on_list(&io_to_abort->wqe.link)) {
4949 				io_to_abort->wqe.abort_wqe_submit_needed = 1;
4950 				io_to_abort->wqe.send_abts = send_abts;
4951 				io_to_abort->wqe.id = id;
4952 				io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
4953 				sli_queue_unlock(io_to_abort->wq->queue);
4954 				return 0;
4955 		}
4956 		sli_queue_unlock(io_to_abort->wq->queue);
4957 	}
4958 
4959 	if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask,
4960 			  io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
4961 		ocs_log_err(hw->os, "ABORT WQE error\n");
4962 		io_to_abort->abort_reqtag = UINT32_MAX;
4963 		ocs_hw_reqtag_free(hw, wqcb);
4964 		rc = OCS_HW_RTN_ERROR;
4965 	}
4966 
4967 	if (OCS_HW_RTN_SUCCESS == rc) {
4968 		if (io_to_abort->wq == NULL) {
4969 			io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort);
4970 			ocs_hw_assert(io_to_abort->wq != NULL);
4971 		}
4972 		/* ABORT_WQE does not actually utilize an XRI on the Port,
4973 		 * therefore, keep xbusy as-is to track the exchange's state,
4974 		 * not the ABORT_WQE's state
4975 		 */
4976 		rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
4977 		if (rc > 0) {
4978 			/* non-negative return is success */
4979 			rc = 0;
4980 			/* can't abort an abort so skip adding to timed wqe list */
4981 		}
4982 	}
4983 
4984 	if (OCS_HW_RTN_SUCCESS != rc) {
4985 		ocs_lock(&hw->io_abort_lock);
4986 			io_to_abort->abort_in_progress = 0;
4987 		ocs_unlock(&hw->io_abort_lock);
4988 		ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4989 	}
4990 	return rc;
4991 }
4992 
4993 /**
4994  * @ingroup io
4995  * @brief Return the OX_ID/RX_ID of the IO.
4996  *
4997  * @param hw Hardware context.
4998  * @param io HW IO object.
4999  *
5000  * @return Returns X_ID on success, or -1 on failure.
5001  */
5002 int32_t
5003 ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io)
5004 {
5005 	if (!hw || !io) {
5006 		ocs_log_err(hw ? hw->os : NULL,
5007 			    "bad parameter hw=%p io=%p\n", hw, io);
5008 		return -1;
5009 	}
5010 
5011 	return io->indicator;
5012 }
5013 
5014 typedef struct ocs_hw_fw_write_cb_arg {
5015 	ocs_hw_fw_cb_t cb;
5016 	void *arg;
5017 } ocs_hw_fw_write_cb_arg_t;
5018 
5019 typedef struct ocs_hw_sfp_cb_arg {
5020 	ocs_hw_sfp_cb_t cb;
5021 	void *arg;
5022 	ocs_dma_t payload;
5023 } ocs_hw_sfp_cb_arg_t;
5024 
5025 typedef struct ocs_hw_temp_cb_arg {
5026 	ocs_hw_temp_cb_t cb;
5027 	void *arg;
5028 } ocs_hw_temp_cb_arg_t;
5029 
5030 typedef struct ocs_hw_link_stat_cb_arg {
5031 	ocs_hw_link_stat_cb_t cb;
5032 	void *arg;
5033 } ocs_hw_link_stat_cb_arg_t;
5034 
5035 typedef struct ocs_hw_host_stat_cb_arg {
5036 	ocs_hw_host_stat_cb_t cb;
5037 	void *arg;
5038 } ocs_hw_host_stat_cb_arg_t;
5039 
5040 typedef struct ocs_hw_dump_get_cb_arg {
5041 	ocs_hw_dump_get_cb_t cb;
5042 	void *arg;
5043 	void *mbox_cmd;
5044 } ocs_hw_dump_get_cb_arg_t;
5045 
5046 typedef struct ocs_hw_dump_clear_cb_arg {
5047 	ocs_hw_dump_clear_cb_t cb;
5048 	void *arg;
5049 	void *mbox_cmd;
5050 } ocs_hw_dump_clear_cb_arg_t;
5051 
5052 /**
5053  * @brief Write a portion of a firmware image to the device.
5054  *
5055  * @par Description
5056  * Calls the correct firmware write function based on the device type.
5057  *
5058  * @param hw Hardware context.
5059  * @param dma DMA structure containing the firmware image chunk.
5060  * @param size Size of the firmware image chunk.
5061  * @param offset Offset, in bytes, from the beginning of the firmware image.
5062  * @param last True if this is the last chunk of the image.
5063  * Causes the image to be committed to flash.
5064  * @param cb Pointer to a callback function that is called when the command completes.
5065  * The callback function prototype is
5066  * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
5067  * @param arg Pointer to be passed to the callback function.
5068  *
5069  * @return Returns 0 on success, or a non-zero value on failure.
5070  */
5071 ocs_hw_rtn_e
5072 ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5073 {
5074 	if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
5075 		return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
5076 	} else {
5077 		/* Write firmware_write for BE3/Skyhawk not supported */
5078 		return -1;
5079 	}
5080 }
5081 
5082 /**
5083  * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
5084  *
5085  * @par Description
5086  * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
5087  * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
5088  * the callback function ocs_hw_fw_write_cb() gets called to free the mailbox
5089  * and to signal the caller that the write has completed.
5090  *
5091  * @param hw Hardware context.
5092  * @param dma DMA structure containing the firmware image chunk.
5093  * @param size Size of the firmware image chunk.
5094  * @param offset Offset, in bytes, from the beginning of the firmware image.
5095  * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
5096  * @param cb Pointer to a callback function that is called when the command completes.
5097  * The callback function prototype is
5098  * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
5099  * @param arg Pointer to be passed to the callback function.
5100  *
5101  * @return Returns 0 on success, or a non-zero value on failure.
5102  */
5103 ocs_hw_rtn_e
5104 ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5105 {
5106 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5107 	uint8_t *mbxdata;
5108 	ocs_hw_fw_write_cb_arg_t *cb_arg;
5109 	int noc=0;	/* No Commit bit - set to 1 for testing */
5110 
5111 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
5112 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
5113 		return OCS_HW_RTN_ERROR;
5114 	}
5115 
5116 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5117 	if (mbxdata == NULL) {
5118 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5119 		return OCS_HW_RTN_NO_MEMORY;
5120 	}
5121 
5122 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
5123 	if (cb_arg == NULL) {
5124 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5125 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5126 		return OCS_HW_RTN_NO_MEMORY;
5127 	}
5128 
5129 	cb_arg->cb = cb;
5130 	cb_arg->arg = arg;
5131 
5132 	if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
5133 			size, offset, "/prg/", dma)) {
5134 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
5135 	}
5136 
5137 	if (rc != OCS_HW_RTN_SUCCESS) {
5138 		ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
5139 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5140 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5141 	}
5142 
5143 	return rc;
5144 
5145 }
5146 
5147 /**
5148  * @brief Called when the WRITE OBJECT command completes.
5149  *
5150  * @par Description
5151  * Get the number of bytes actually written out of the response, free the mailbox
5152  * that was malloc'd by ocs_hw_firmware_write(),
5153  * then call the callback and pass the status and bytes written.
5154  *
5155  * @param hw Hardware context.
5156  * @param status Status field from the mbox completion.
5157  * @param mqe Mailbox response structure.
5158  * @param arg Pointer to a callback function that signals the caller that the command is done.
5159  * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_written)</tt>.
5160  *
5161  * @return Returns 0.
5162  */
5163 static int32_t
5164 ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5165 {
5166 
5167 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
5168 	sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed);
5169 	ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
5170 	uint32_t bytes_written;
5171 	uint16_t mbox_status;
5172 	uint32_t change_status;
5173 
5174 	bytes_written = wr_obj_rsp->actual_write_length;
5175 	mbox_status = mbox_rsp->hdr.status;
5176 	change_status = wr_obj_rsp->change_status;
5177 
5178 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5179 
5180 	if (cb_arg) {
5181 		if (cb_arg->cb) {
5182 			if ((status == 0) && mbox_status) {
5183 				status = mbox_status;
5184 			}
5185 			cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
5186 		}
5187 
5188 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5189 	}
5190 
5191 	return 0;
5192 
5193 }
5194 
5195 /**
5196  * @brief Called when the READ_TRANSCEIVER_DATA command completes.
5197  *
5198  * @par Description
5199  * Get the number of bytes read out of the response, free the mailbox that was malloc'd
5200  * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes written.
5201  *
5202  * @param hw Hardware context.
5203  * @param status Status field from the mbox completion.
5204  * @param mqe Mailbox response structure.
5205  * @param arg Pointer to a callback function that signals the caller that the command is done.
5206  * The callback function prototype is
5207  * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t *data, void *arg)</tt>.
5208  *
5209  * @return Returns 0.
5210  */
5211 static int32_t
5212 ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5213 {
5214 
5215 	ocs_hw_sfp_cb_arg_t *cb_arg = arg;
5216 	ocs_dma_t *payload = NULL;
5217 	sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL;
5218 	uint32_t bytes_written;
5219 
5220 	if (cb_arg) {
5221 		payload = &(cb_arg->payload);
5222 		if (cb_arg->cb) {
5223 			mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt;
5224 			bytes_written = mbox_rsp->hdr.response_length;
5225 			if ((status == 0) && mbox_rsp->hdr.status) {
5226 				status = mbox_rsp->hdr.status;
5227 			}
5228 			cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg);
5229 		}
5230 
5231 		ocs_dma_free(hw->os, &cb_arg->payload);
5232 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5233 	}
5234 
5235 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5236 	return 0;
5237 }
5238 
5239 /**
5240  * @ingroup io
5241  * @brief Function to retrieve the SFP information.
5242  *
5243  * @param hw Hardware context.
5244  * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
5245  * @param cb Function call upon completion of sending the data (may be NULL).
5246  * @param arg Argument to pass to IO completion function.
5247  *
5248  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5249  */
5250 ocs_hw_rtn_e
5251 ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg)
5252 {
5253 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5254 	ocs_hw_sfp_cb_arg_t *cb_arg;
5255 	uint8_t *mbxdata;
5256 
5257 	/* mbxdata holds the header of the command */
5258 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5259 	if (mbxdata == NULL) {
5260 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5261 		return OCS_HW_RTN_NO_MEMORY;
5262 	}
5263 
5264 	/* cb_arg holds the data that will be passed to the callback on completion */
5265 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT);
5266 	if (cb_arg == NULL) {
5267 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5268 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5269 		return OCS_HW_RTN_NO_MEMORY;
5270 	}
5271 
5272 	cb_arg->cb = cb;
5273 	cb_arg->arg = arg;
5274 
5275 	/* payload holds the non-embedded portion */
5276 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t),
5277 			  OCS_MIN_DMA_ALIGNMENT)) {
5278 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
5279 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5280 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5281 		return OCS_HW_RTN_NO_MEMORY;
5282 	}
5283 
5284 	/* Send the HW command */
5285 	if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page,
5286 	    &cb_arg->payload)) {
5287 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg);
5288 	}
5289 
5290 	if (rc != OCS_HW_RTN_SUCCESS) {
5291 		ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n",
5292 				rc);
5293 		ocs_dma_free(hw->os, &cb_arg->payload);
5294 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5295 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5296 	}
5297 
5298 	return rc;
5299 }
5300 
5301 /**
5302  * @brief Function to retrieve the temperature information.
5303  *
5304  * @param hw Hardware context.
5305  * @param cb Function call upon completion of sending the data (may be NULL).
5306  * @param arg Argument to pass to IO completion function.
5307  *
5308  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5309  */
5310 ocs_hw_rtn_e
5311 ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg)
5312 {
5313 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5314 	ocs_hw_temp_cb_arg_t *cb_arg;
5315 	uint8_t *mbxdata;
5316 
5317 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5318 	if (mbxdata == NULL) {
5319 		ocs_log_err(hw->os, "failed to malloc mbox");
5320 		return OCS_HW_RTN_NO_MEMORY;
5321 	}
5322 
5323 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT);
5324 	if (cb_arg == NULL) {
5325 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5326 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5327 		return OCS_HW_RTN_NO_MEMORY;
5328 	}
5329 
5330 	cb_arg->cb = cb;
5331 	cb_arg->arg = arg;
5332 
5333 	if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5334 				SLI4_WKI_TAG_SAT_TEM)) {
5335 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg);
5336 	}
5337 
5338 	if (rc != OCS_HW_RTN_SUCCESS) {
5339 		ocs_log_test(hw->os, "DUMP_TYPE4 failed\n");
5340 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5341 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5342 	}
5343 
5344 	return rc;
5345 }
5346 
5347 /**
5348  * @brief Called when the DUMP command completes.
5349  *
5350  * @par Description
5351  * Get the temperature data out of the response, free the mailbox that was malloc'd
5352  * by ocs_hw_get_temperature(), then call the callback and pass the status and data.
5353  *
5354  * @param hw Hardware context.
5355  * @param status Status field from the mbox completion.
5356  * @param mqe Mailbox response structure.
5357  * @param arg Pointer to a callback function that signals the caller that the command is done.
5358  * The callback function prototype is defined by ocs_hw_temp_cb_t.
5359  *
5360  * @return Returns 0.
5361  */
5362 static int32_t
5363 ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5364 {
5365 
5366 	sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe;
5367 	ocs_hw_temp_cb_arg_t *cb_arg = arg;
5368 	uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */
5369 	uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6*/
5370 	uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */
5371 	uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */
5372 	uint32_t fan_off_thrshld = mbox_rsp->resp_data[4];   /* word 9 */
5373 	uint32_t fan_on_thrshld = mbox_rsp->resp_data[5];    /* word 10 */
5374 
5375 	if (cb_arg) {
5376 		if (cb_arg->cb) {
5377 			if ((status == 0) && mbox_rsp->hdr.status) {
5378 				status = mbox_rsp->hdr.status;
5379 			}
5380 			cb_arg->cb(status,
5381 				   curr_temp,
5382 				   crit_temp_thrshld,
5383 				   warn_temp_thrshld,
5384 				   norm_temp_thrshld,
5385 				   fan_off_thrshld,
5386 				   fan_on_thrshld,
5387 				   cb_arg->arg);
5388 		}
5389 
5390 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5391 	}
5392 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5393 
5394 	return 0;
5395 }
5396 
5397 /**
5398  * @brief Function to retrieve the link statistics.
5399  *
5400  * @param hw Hardware context.
5401  * @param req_ext_counters If TRUE, then the extended counters will be requested.
5402  * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
5403  * @param clear_all_counters If TRUE, the counters will be cleared.
5404  * @param cb Function call upon completion of sending the data (may be NULL).
5405  * @param arg Argument to pass to IO completion function.
5406  *
5407  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5408  */
5409 ocs_hw_rtn_e
5410 ocs_hw_get_link_stats(ocs_hw_t *hw,
5411 			uint8_t req_ext_counters,
5412 			uint8_t clear_overflow_flags,
5413 			uint8_t clear_all_counters,
5414 			ocs_hw_link_stat_cb_t cb,
5415 			void *arg)
5416 {
5417 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5418 	ocs_hw_link_stat_cb_arg_t *cb_arg;
5419 	uint8_t *mbxdata;
5420 
5421 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5422 	if (mbxdata == NULL) {
5423 		ocs_log_err(hw->os, "failed to malloc mbox");
5424 		return OCS_HW_RTN_NO_MEMORY;
5425 	}
5426 
5427 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT);
5428 	if (cb_arg == NULL) {
5429 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5430 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5431 		return OCS_HW_RTN_NO_MEMORY;
5432 	}
5433 
5434 	cb_arg->cb = cb;
5435 	cb_arg->arg = arg;
5436 
5437 	if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5438 				    req_ext_counters,
5439 				    clear_overflow_flags,
5440 				    clear_all_counters)) {
5441 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg);
5442 	}
5443 
5444 	if (rc != OCS_HW_RTN_SUCCESS) {
5445 		ocs_log_test(hw->os, "READ_LINK_STATS failed\n");
5446 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5447 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5448 	}
5449 
5450 	return rc;
5451 }
5452 
5453 /**
5454  * @brief Called when the READ_LINK_STAT command completes.
5455  *
5456  * @par Description
5457  * Get the counters out of the response, free the mailbox that was malloc'd
5458  * by ocs_hw_get_link_stats(), then call the callback and pass the status and data.
5459  *
5460  * @param hw Hardware context.
5461  * @param status Status field from the mbox completion.
5462  * @param mqe Mailbox response structure.
5463  * @param arg Pointer to a callback function that signals the caller that the command is done.
5464  * The callback function prototype is defined by ocs_hw_link_stat_cb_t.
5465  *
5466  * @return Returns 0.
5467  */
5468 static int32_t
5469 ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5470 {
5471 
5472 	sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe;
5473 	ocs_hw_link_stat_cb_arg_t *cb_arg = arg;
5474 	ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX];
5475 	uint32_t num_counters = (mbox_rsp->gec ? 20 : 13);
5476 
5477 	ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) *
5478 		   OCS_HW_LINK_STAT_MAX);
5479 
5480 	counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of;
5481 	counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of;
5482 	counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of;
5483 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of;
5484 	counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of;
5485 	counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of;
5486 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of;
5487 	counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of;
5488 	counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of;
5489 	counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of;
5490 	counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of;
5491 	counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of;
5492 	counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of;
5493 	counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of;
5494 	counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of;
5495 	counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of;
5496 	counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of;
5497 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of;
5498 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of;
5499 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of;
5500 
5501 	counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count;
5502 	counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count;
5503 	counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count;
5504 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count;
5505 	counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count;
5506 	counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count;
5507 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count;
5508 	counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count;
5509 	counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count;
5510 	counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit;
5511 	counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit;
5512 	counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit;
5513 	counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit;
5514 	counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count;
5515 	counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count;
5516 	counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count;
5517 	counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count;
5518 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count;
5519 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
5520 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;
5521 
5522 	if (cb_arg) {
5523 		if (cb_arg->cb) {
5524 			if ((status == 0) && mbox_rsp->hdr.status) {
5525 				status = mbox_rsp->hdr.status;
5526 			}
5527 			cb_arg->cb(status,
5528 				   num_counters,
5529 				   counts,
5530 				   cb_arg->arg);
5531 		}
5532 
5533 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5534 	}
5535 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5536 
5537 	return 0;
5538 }
5539 
5540 /**
5541  * @brief Function to retrieve the link and host statistics.
5542  *
5543  * @param hw Hardware context.
5544  * @param cc clear counters, if TRUE all counters will be cleared.
5545  * @param cb Function call upon completion of receiving the data.
5546  * @param arg Argument to pass to pointer fc hosts statistics structure.
5547  *
5548  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5549  */
5550 ocs_hw_rtn_e
5551 ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
5552 {
5553 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5554 	ocs_hw_host_stat_cb_arg_t *cb_arg;
5555 	uint8_t *mbxdata;
5556 
5557 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO);
5558 	if (mbxdata == NULL) {
5559 		ocs_log_err(hw->os, "failed to malloc mbox");
5560 		return OCS_HW_RTN_NO_MEMORY;
5561 	}
5562 
5563 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0);
5564 	if (cb_arg == NULL) {
5565 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5566 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5567 		return OCS_HW_RTN_NO_MEMORY;
5568 	 }
5569 
5570 	 cb_arg->cb = cb;
5571 	 cb_arg->arg = arg;
5572 
5573 	 /* Send the HW command to get the host stats */
5574 	if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
5575 		 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
5576 	}
5577 
5578 	if (rc != OCS_HW_RTN_SUCCESS) {
5579 		ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
5580 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5581 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5582 	}
5583 
5584 	return rc;
5585 }
5586 
5587 /**
5588  * @brief Called when the READ_STATUS command completes.
5589  *
5590  * @par Description
5591  * Get the counters out of the response, free the mailbox that was malloc'd
5592  * by ocs_hw_get_host_stats(), then call the callback and pass
5593  * the status and data.
5594  *
5595  * @param hw Hardware context.
5596  * @param status Status field from the mbox completion.
5597  * @param mqe Mailbox response structure.
5598  * @param arg Pointer to a callback function that signals the caller that the command is done.
5599  * The callback function prototype is defined by
5600  * ocs_hw_host_stat_cb_t.
5601  *
5602  * @return Returns 0.
5603  */
5604 static int32_t
5605 ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5606 {
5607 
5608 	sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe;
5609 	ocs_hw_host_stat_cb_arg_t *cb_arg = arg;
5610 	ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX];
5611 	uint32_t num_counters = OCS_HW_HOST_STAT_MAX;
5612 
5613 	ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) *
5614 		   OCS_HW_HOST_STAT_MAX);
5615 
5616 	counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count;
5617 	counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count;
5618 	counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count;
5619 	counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count;
5620 	counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count;
5621 	counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count;
5622 	counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator;
5623 	counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder;
5624 	counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count;
5625 	counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count;
5626 	counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count;
5627 	counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count;
5628 	counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count;
5629 	counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count;
5630 
5631 	if (cb_arg) {
5632 		if (cb_arg->cb) {
5633 			if ((status == 0) && mbox_rsp->hdr.status) {
5634 				status = mbox_rsp->hdr.status;
5635 			}
5636 			cb_arg->cb(status,
5637 				   num_counters,
5638 				   counts,
5639 				   cb_arg->arg);
5640 		}
5641 
5642 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5643 	}
5644 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5645 
5646 	return 0;
5647 }
5648 
5649 /**
5650  * @brief HW link configuration enum to the CLP string value mapping.
5651  *
5652  * This structure provides a mapping from the ocs_hw_linkcfg_e
5653  * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5654  * control) to the CLP string that is used
5655  * in the DMTF_CLP_CMD mailbox command.
5656  */
5657 typedef struct ocs_hw_linkcfg_map_s {
5658 	ocs_hw_linkcfg_e linkcfg;
5659 	const char *clp_str;
5660 } ocs_hw_linkcfg_map_t;
5661 
5662 /**
5663  * @brief Mapping from the HW linkcfg enum to the CLP command value
5664  * string.
5665  */
5666 static ocs_hw_linkcfg_map_t linkcfg_map[] = {
5667 	{OCS_HW_LINKCFG_4X10G, "ELX_4x10G"},
5668 	{OCS_HW_LINKCFG_1X40G, "ELX_1x40G"},
5669 	{OCS_HW_LINKCFG_2X16G, "ELX_2x16G"},
5670 	{OCS_HW_LINKCFG_4X8G, "ELX_4x8G"},
5671 	{OCS_HW_LINKCFG_4X1G, "ELX_4x1G"},
5672 	{OCS_HW_LINKCFG_2X10G, "ELX_2x10G"},
5673 	{OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}};
5674 
5675 /**
5676  * @brief HW link configuration enum to Skyhawk link config ID mapping.
5677  *
5678  * This structure provides a mapping from the ocs_hw_linkcfg_e
5679  * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5680  * control) to the link config ID numbers used by Skyhawk
5681  */
5682 typedef struct ocs_hw_skyhawk_linkcfg_map_s {
5683 	ocs_hw_linkcfg_e linkcfg;
5684 	uint32_t	config_id;
5685 } ocs_hw_skyhawk_linkcfg_map_t;
5686 
5687 /**
5688  * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs
5689  */
5690 static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = {
5691 	{OCS_HW_LINKCFG_4X10G, 0x0a},
5692 	{OCS_HW_LINKCFG_1X40G, 0x09},
5693 };
5694 
5695 /**
5696  * @brief Helper function for getting the HW linkcfg enum from the CLP
5697  * string value
5698  *
5699  * @param clp_str CLP string value from OEMELX_LinkConfig.
5700  *
5701  * @return Returns the HW linkcfg enum corresponding to clp_str.
5702  */
5703 static ocs_hw_linkcfg_e
5704 ocs_hw_linkcfg_from_clp(const char *clp_str)
5705 {
5706 	uint32_t i;
5707 	for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5708 		if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) {
5709 			return linkcfg_map[i].linkcfg;
5710 		}
5711 	}
5712 	return OCS_HW_LINKCFG_NA;
5713 }
5714 
5715 /**
5716  * @brief Helper function for getting the CLP string value from the HW
5717  * linkcfg enum.
5718  *
5719  * @param linkcfg HW linkcfg enum.
5720  *
5721  * @return Returns the OEMELX_LinkConfig CLP string value corresponding to
5722  * given linkcfg.
5723  */
5724 static const char *
5725 ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5726 {
5727 	uint32_t i;
5728 	for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5729 		if (linkcfg_map[i].linkcfg == linkcfg) {
5730 			return linkcfg_map[i].clp_str;
5731 		}
5732 	}
5733 	return NULL;
5734 }
5735 
5736 /**
5737  * @brief Helper function for getting a Skyhawk link config ID from the HW
5738  * linkcfg enum.
5739  *
5740  * @param linkcfg HW linkcfg enum.
5741  *
5742  * @return Returns the Skyhawk link config ID corresponding to
5743  * given linkcfg.
5744  */
5745 static uint32_t
5746 ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5747 {
5748 	uint32_t i;
5749 	for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5750 		if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) {
5751 			return skyhawk_linkcfg_map[i].config_id;
5752 		}
5753 	}
5754 	return 0;
5755 }
5756 
5757 /**
5758  * @brief Helper function for getting the HW linkcfg enum from a
5759  * Skyhawk config ID.
5760  *
5761  * @param config_id Skyhawk link config ID.
5762  *
5763  * @return Returns the HW linkcfg enum corresponding to config_id.
5764  */
5765 static ocs_hw_linkcfg_e
5766 ocs_hw_linkcfg_from_config_id(const uint32_t config_id)
5767 {
5768 	uint32_t i;
5769 	for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5770 		if (skyhawk_linkcfg_map[i].config_id == config_id) {
5771 			return skyhawk_linkcfg_map[i].linkcfg;
5772 		}
5773 	}
5774 	return OCS_HW_LINKCFG_NA;
5775 }
5776 
5777 /**
5778  * @brief Link configuration callback argument.
5779  */
5780 typedef struct ocs_hw_linkcfg_cb_arg_s {
5781 	ocs_hw_port_control_cb_t cb;
5782 	void *arg;
5783 	uint32_t opts;
5784 	int32_t status;
5785 	ocs_dma_t dma_cmd;
5786 	ocs_dma_t dma_resp;
5787 	uint32_t result_len;
5788 } ocs_hw_linkcfg_cb_arg_t;
5789 
5790 /**
5791  * @brief Set link configuration.
5792  *
5793  * @param hw Hardware context.
5794  * @param value Link configuration enum to which the link configuration is
5795  * set.
5796  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5797  * @param cb Callback function to invoke following mbx command.
5798  * @param arg Callback argument.
5799  *
5800  * @return Returns OCS_HW_RTN_SUCCESS on success.
5801  */
5802 static ocs_hw_rtn_e
5803 ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5804 {
5805 	if (!sli_link_is_configurable(&hw->sli)) {
5806 		ocs_log_debug(hw->os, "Function not supported\n");
5807 		return OCS_HW_RTN_ERROR;
5808 	}
5809 
5810 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
5811 		return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
5812 	} else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
5813 		   (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
5814 		return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
5815 	} else {
5816 		ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
5817 		return OCS_HW_RTN_ERROR;
5818 	}
5819 }
5820 
5821 /**
5822  * @brief Set link configuration for Lancer
5823  *
5824  * @param hw Hardware context.
5825  * @param value Link configuration enum to which the link configuration is
5826  * set.
5827  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5828  * @param cb Callback function to invoke following mbx command.
5829  * @param arg Callback argument.
5830  *
5831  * @return Returns OCS_HW_RTN_SUCCESS on success.
5832  */
5833 static ocs_hw_rtn_e
5834 ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5835 {
5836 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
5837 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
5838 	const char *value_str = NULL;
5839 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5840 
5841 	/* translate ocs_hw_linkcfg_e to CLP string */
5842 	value_str = ocs_hw_clp_from_linkcfg(value);
5843 
5844 	/* allocate memory for callback argument */
5845 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
5846 	if (cb_arg == NULL) {
5847 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5848 		return OCS_HW_RTN_NO_MEMORY;
5849 	}
5850 
5851 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
5852 	/* allocate DMA for command  */
5853 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
5854 		ocs_log_err(hw->os, "malloc failed\n");
5855 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5856 		return OCS_HW_RTN_NO_MEMORY;
5857 	}
5858 	ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
5859 	ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
5860 
5861 	/* allocate DMA for response */
5862 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
5863 		ocs_log_err(hw->os, "malloc failed\n");
5864 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5865 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5866 		return OCS_HW_RTN_NO_MEMORY;
5867 	}
5868 	cb_arg->cb = cb;
5869 	cb_arg->arg = arg;
5870 	cb_arg->opts = opts;
5871 
5872 	rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
5873 					opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
5874 
5875 	if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
5876 		/* if failed, or polling, free memory here; if success and not
5877 		 * polling, will free in callback function
5878 		 */
5879 		if (rc) {
5880 			ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
5881 					(char *)cb_arg->dma_cmd.virt);
5882 		}
5883 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5884 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
5885 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5886 	}
5887 	return rc;
5888 }
5889 
5890 /**
5891  * @brief Callback for ocs_hw_set_linkcfg_skyhawk
5892  *
5893  * @param hw Hardware context.
5894  * @param status Status from the RECONFIG_GET_LINK_INFO command.
5895  * @param mqe Mailbox response structure.
5896  * @param arg Pointer to a callback argument.
5897  *
5898  * @return none
5899  */
5900 static void
5901 ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5902 {
5903 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
5904 
5905 	if (status) {
5906 		ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
5907 	}
5908 
5909 	/* invoke callback */
5910 	if (cb_arg->cb) {
5911 		cb_arg->cb(status, 0, cb_arg->arg);
5912 	}
5913 
5914 	/* if polling, will free memory in calling function */
5915 	if (cb_arg->opts != OCS_CMD_POLL) {
5916 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5917 	}
5918 }
5919 
5920 /**
5921  * @brief Set link configuration for a Skyhawk
5922  *
5923  * @param hw Hardware context.
5924  * @param value Link configuration enum to which the link configuration is
5925  * set.
5926  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5927  * @param cb Callback function to invoke following mbx command.
5928  * @param arg Callback argument.
5929  *
5930  * @return Returns OCS_HW_RTN_SUCCESS on success.
5931  */
5932 static ocs_hw_rtn_e
5933 ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5934 {
5935 	uint8_t *mbxdata;
5936 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
5937 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5938 	uint32_t config_id;
5939 
5940 	config_id = ocs_hw_config_id_from_linkcfg(value);
5941 
5942 	if (config_id == 0) {
5943 		ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
5944 		return OCS_HW_RTN_ERROR;
5945 	}
5946 
5947 	/* mbxdata holds the header of the command */
5948 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5949 	if (mbxdata == NULL) {
5950 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5951 		return OCS_HW_RTN_NO_MEMORY;
5952 	}
5953 
5954 	/* cb_arg holds the data that will be passed to the callback on completion */
5955 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
5956 	if (cb_arg == NULL) {
5957 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5958 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5959 		return OCS_HW_RTN_NO_MEMORY;
5960 	}
5961 
5962 	cb_arg->cb = cb;
5963 	cb_arg->arg = arg;
5964 
5965 	if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) {
5966 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
5967 	}
5968 
5969 	if (rc != OCS_HW_RTN_SUCCESS) {
5970 		ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
5971 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5972 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5973 	} else if (opts == OCS_CMD_POLL) {
5974 		/* if we're polling we have to call the callback here. */
5975 		ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
5976 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5977 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5978 	} else {
5979 		/* We weren't poling, so the callback got called */
5980 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5981 	}
5982 
5983 	return rc;
5984 }
5985 
5986 /**
5987  * @brief Get link configuration.
5988  *
5989  * @param hw Hardware context.
5990  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5991  * @param cb Callback function to invoke following mbx command.
5992  * @param arg Callback argument.
5993  *
5994  * @return Returns OCS_HW_RTN_SUCCESS on success.
5995  */
5996 static ocs_hw_rtn_e
5997 ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5998 {
5999 	if (!sli_link_is_configurable(&hw->sli)) {
6000 		ocs_log_debug(hw->os, "Function not supported\n");
6001 		return OCS_HW_RTN_ERROR;
6002 	}
6003 
6004 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
6005 		return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg);
6006 	} else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
6007 		   (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
6008 		return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg);
6009 	} else {
6010 		ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
6011 		return OCS_HW_RTN_ERROR;
6012 	}
6013 }
6014 
6015 /**
6016  * @brief Get link configuration for a Lancer
6017  *
6018  * @param hw Hardware context.
6019  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6020  * @param cb Callback function to invoke following mbx command.
6021  * @param arg Callback argument.
6022  *
6023  * @return Returns OCS_HW_RTN_SUCCESS on success.
6024  */
6025 static ocs_hw_rtn_e
6026 ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6027 {
6028 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6029 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
6030 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6031 
6032 	/* allocate memory for callback argument */
6033 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6034 	if (cb_arg == NULL) {
6035 		ocs_log_err(hw->os, "failed to malloc cb_arg");
6036 		return OCS_HW_RTN_NO_MEMORY;
6037 	}
6038 
6039 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig");
6040 
6041 	/* allocate DMA for command  */
6042 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6043 		ocs_log_err(hw->os, "malloc failed\n");
6044 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6045 		return OCS_HW_RTN_NO_MEMORY;
6046 	}
6047 
6048 	/* copy CLP command to DMA command */
6049 	ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6050 	ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
6051 
6052 	/* allocate DMA for response */
6053 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6054 		ocs_log_err(hw->os, "malloc failed\n");
6055 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6056 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6057 		return OCS_HW_RTN_NO_MEMORY;
6058 	}
6059 	cb_arg->cb = cb;
6060 	cb_arg->arg = arg;
6061 	cb_arg->opts = opts;
6062 
6063 	rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
6064 					opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
6065 
6066 	if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6067 		/* if failed or polling, free memory here; if not polling and success,
6068 		 * will free in callback function
6069 		 */
6070 		if (rc) {
6071 			ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
6072 					(char *)cb_arg->dma_cmd.virt);
6073 		}
6074 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6075 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
6076 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6077 	}
6078 	return rc;
6079 }
6080 
6081 /**
6082  * @brief Get the link configuration callback.
6083  *
6084  * @param hw Hardware context.
6085  * @param status Status from the RECONFIG_GET_LINK_INFO command.
6086  * @param mqe Mailbox response structure.
6087  * @param arg Pointer to a callback argument.
6088  *
6089  * @return none
6090  */
6091 static void
6092 ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6093 {
6094 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6095 	sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
6096 	ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;
6097 
6098 	if (status) {
6099 		ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
6100 	} else {
6101 		/* Call was successful */
6102 		value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
6103 	}
6104 
6105 	/* invoke callback */
6106 	if (cb_arg->cb) {
6107 		cb_arg->cb(status, value, cb_arg->arg);
6108 	}
6109 
6110 	/* if polling, will free memory in calling function */
6111 	if (cb_arg->opts != OCS_CMD_POLL) {
6112 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6113 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6114 	}
6115 }
6116 
6117 /**
6118  * @brief Get link configuration for a Skyhawk.
6119  *
6120  * @param hw Hardware context.
6121  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6122  * @param cb Callback function to invoke following mbx command.
6123  * @param arg Callback argument.
6124  *
6125  * @return Returns OCS_HW_RTN_SUCCESS on success.
6126  */
6127 static ocs_hw_rtn_e
6128 ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6129 {
6130 	uint8_t *mbxdata;
6131 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
6132 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6133 
6134 	/* mbxdata holds the header of the command */
6135 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6136 	if (mbxdata == NULL) {
6137 		ocs_log_err(hw->os, "failed to malloc mbox\n");
6138 		return OCS_HW_RTN_NO_MEMORY;
6139 	}
6140 
6141 	/* cb_arg holds the data that will be passed to the callback on completion */
6142 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
6143 	if (cb_arg == NULL) {
6144 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6145 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6146 		return OCS_HW_RTN_NO_MEMORY;
6147 	}
6148 
6149 	cb_arg->cb = cb;
6150 	cb_arg->arg = arg;
6151 	cb_arg->opts = opts;
6152 
6153 	/* dma_mem holds the non-embedded portion */
6154 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
6155 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
6156 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6157 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6158 		return OCS_HW_RTN_NO_MEMORY;
6159 	}
6160 
6161 	if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
6162 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
6163 	}
6164 
6165 	if (rc != OCS_HW_RTN_SUCCESS) {
6166 		ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
6167 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6168 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6169 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6170 	} else if (opts == OCS_CMD_POLL) {
6171 		/* if we're polling we have to call the callback here. */
6172 		ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
6173 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6174 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6175 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6176 	} else {
6177 		/* We weren't poling, so the callback got called */
6178 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6179 	}
6180 
6181 	return rc;
6182 }
6183 
6184 /**
6185  * @brief Sets the DIF seed value.
6186  *
6187  * @param hw Hardware context.
6188  *
6189  * @return Returns OCS_HW_RTN_SUCCESS on success.
6190  */
6191 static ocs_hw_rtn_e
6192 ocs_hw_set_dif_seed(ocs_hw_t *hw)
6193 {
6194 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6195 	uint8_t buf[SLI4_BMBX_SIZE];
6196 	sli4_req_common_set_features_dif_seed_t seed_param;
6197 
6198 	ocs_memset(&seed_param, 0, sizeof(seed_param));
6199 	seed_param.seed = hw->config.dif_seed;
6200 
6201 	/* send set_features command */
6202 	if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6203 					SLI4_SET_FEATURES_DIF_SEED,
6204 					4,
6205 					(uint32_t*)&seed_param)) {
6206 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6207 		if (rc) {
6208 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6209 		} else {
6210 			ocs_log_debug(hw->os, "DIF seed set to 0x%x\n",
6211 					hw->config.dif_seed);
6212 		}
6213 	} else {
6214 		ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6215 		rc = OCS_HW_RTN_ERROR;
6216 	}
6217 	return rc;
6218 }
6219 
6220 /**
6221  * @brief Sets the DIF mode value.
6222  *
6223  * @param hw Hardware context.
6224  *
6225  * @return Returns OCS_HW_RTN_SUCCESS on success.
6226  */
6227 static ocs_hw_rtn_e
6228 ocs_hw_set_dif_mode(ocs_hw_t *hw)
6229 {
6230 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6231 	uint8_t buf[SLI4_BMBX_SIZE];
6232 	sli4_req_common_set_features_t10_pi_mem_model_t mode_param;
6233 
6234 	ocs_memset(&mode_param, 0, sizeof(mode_param));
6235 	mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1);
6236 
6237 	/* send set_features command */
6238 	if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6239 					SLI4_SET_FEATURES_DIF_MEMORY_MODE,
6240 					sizeof(mode_param),
6241 					(uint32_t*)&mode_param)) {
6242 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6243 		if (rc) {
6244 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6245 		} else {
6246 			ocs_log_test(hw->os, "DIF mode set to %s\n",
6247 				(hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate"));
6248 		}
6249 	} else {
6250 		ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6251 		rc = OCS_HW_RTN_ERROR;
6252 	}
6253 	return rc;
6254 }
6255 
6256 static void
6257 ocs_hw_watchdog_timer_cb(void *arg)
6258 {
6259 	ocs_hw_t *hw = (ocs_hw_t *)arg;
6260 
6261 	ocs_hw_config_watchdog_timer(hw);
6262 	return;
6263 }
6264 
6265 static void
6266 ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6267 {
6268 	uint16_t timeout = hw->watchdog_timeout;
6269 
6270 	if (status != 0) {
6271 		ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status);
6272 	} else {
6273 		if(timeout != 0) {
6274 			/* keeping callback 500ms before timeout to keep heartbeat alive */
6275 			ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500) );
6276 		}else {
6277 			ocs_del_timer(&hw->watchdog_timer);
6278 		}
6279 	}
6280 
6281 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6282 	return;
6283 }
6284 
6285 /**
6286  * @brief Set configuration parameters for watchdog timer feature.
6287  *
6288  * @param hw Hardware context.
6289  * @param timeout Timeout for watchdog timer in seconds
6290  *
6291  * @return Returns OCS_HW_RTN_SUCCESS on success.
6292  */
6293 static ocs_hw_rtn_e
6294 ocs_hw_config_watchdog_timer(ocs_hw_t *hw)
6295 {
6296 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6297 	uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
6298 
6299 	if (!buf) {
6300 		ocs_log_err(hw->os, "no buffer for command\n");
6301 		return OCS_HW_RTN_NO_MEMORY;
6302 	}
6303 
6304 	sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout);
6305 	rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL);
6306 	if (rc) {
6307 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
6308 		ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc);
6309 	}
6310 	return rc;
6311 }
6312 
6313 /**
6314  * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature.
6315  *
6316  * @param hw Hardware context.
6317  * @param buf Pointer to a mailbox buffer area.
6318  *
6319  * @return Returns OCS_HW_RTN_SUCCESS on success.
6320  */
6321 static ocs_hw_rtn_e
6322 ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf)
6323 {
6324 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6325 	sli4_req_common_set_features_xfer_rdy_t10pi_t param;
6326 
6327 	ocs_memset(&param, 0, sizeof(param));
6328 	param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1);
6329 	param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0);
6330 	param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1);
6331 	param.app_tag = hw->config.auto_xfer_rdy_app_tag_value;
6332 	param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
6333 
6334 	switch (hw->config.auto_xfer_rdy_p_type) {
6335 	case 1:
6336 		param.p_type = 0;
6337 		break;
6338 	case 3:
6339 		param.p_type = 2;
6340 		break;
6341 	default:
6342 		ocs_log_err(hw->os, "unsupported p_type %d\n",
6343 			hw->config.auto_xfer_rdy_p_type);
6344 		return OCS_HW_RTN_ERROR;
6345 	}
6346 
6347 	/* build the set_features command */
6348 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6349 				    SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI,
6350 				    sizeof(param),
6351 				    &param);
6352 
6353 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6354 	if (rc) {
6355 		ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6356 	} else {
6357 		ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n",
6358 				param.rtc, param.atv, param.p_type,
6359 				param.app_tag, param.blk_size);
6360 	}
6361 
6362 	return rc;
6363 }
6364 
6365 /**
6366  * @brief enable sli port health check
6367  *
6368  * @param hw Hardware context.
6369  * @param buf Pointer to a mailbox buffer area.
6370  * @param query current status of the health check feature enabled/disabled
6371  * @param enable if 1: enable 0: disable
6372  * @param buf Pointer to a mailbox buffer area.
6373  *
6374  * @return Returns OCS_HW_RTN_SUCCESS on success.
6375  */
6376 static ocs_hw_rtn_e
6377 ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable)
6378 {
6379 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6380 	uint8_t buf[SLI4_BMBX_SIZE];
6381 	sli4_req_common_set_features_health_check_t param;
6382 
6383 	ocs_memset(&param, 0, sizeof(param));
6384 	param.hck = enable;
6385 	param.qry = query;
6386 
6387 	/* build the set_features command */
6388 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6389 				    SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK,
6390 				    sizeof(param),
6391 				    &param);
6392 
6393 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6394 	if (rc) {
6395 		ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6396 	} else {
6397 		ocs_log_test(hw->os, "SLI Port Health Check is enabled \n");
6398 	}
6399 
6400 	return rc;
6401 }
6402 
6403 /**
6404  * @brief Set FTD transfer hint feature
6405  *
6406  * @param hw Hardware context.
6407  * @param fdt_xfer_hint size in bytes where read requests are segmented.
6408  *
6409  * @return Returns OCS_HW_RTN_SUCCESS on success.
6410  */
6411 static ocs_hw_rtn_e
6412 ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint)
6413 {
6414 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6415 	uint8_t buf[SLI4_BMBX_SIZE];
6416 	sli4_req_common_set_features_set_fdt_xfer_hint_t param;
6417 
6418 	ocs_memset(&param, 0, sizeof(param));
6419 	param.fdt_xfer_hint = fdt_xfer_hint;
6420 	/* build the set_features command */
6421 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6422 				    SLI4_SET_FEATURES_SET_FTD_XFER_HINT,
6423 				    sizeof(param),
6424 				    &param);
6425 
6426 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6427 	if (rc) {
6428 		ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc);
6429 	} else {
6430 		ocs_log_debug(hw->os, "Set FTD transfer hint to %d\n", param.fdt_xfer_hint);
6431 	}
6432 
6433 	return rc;
6434 }
6435 
6436 /**
6437  * @brief Get the link configuration callback.
6438  *
6439  * @param hw Hardware context.
6440  * @param status Status from the DMTF CLP command.
6441  * @param result_len Length, in bytes, of the DMTF CLP result.
6442  * @param arg Pointer to a callback argument.
6443  *
6444  * @return Returns OCS_HW_RTN_SUCCESS on success.
6445  */
6446 static void
6447 ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
6448 {
6449 	int32_t rval;
6450 	char retdata_str[64];
6451 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6452 	ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;
6453 
6454 	if (status) {
6455 		ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
6456 	} else {
6457 		/* parse CLP response to get return data */
6458 		rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
6459 						  sizeof(retdata_str),
6460 						  cb_arg->dma_resp.virt,
6461 						  result_len);
6462 
6463 		if (rval <= 0) {
6464 			ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
6465 		} else {
6466 			/* translate string into hw enum */
6467 			linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
6468 		}
6469 	}
6470 
6471 	/* invoke callback */
6472 	if (cb_arg->cb) {
6473 		cb_arg->cb(status, linkcfg, cb_arg->arg);
6474 	}
6475 
6476 	/* if polling, will free memory in calling function */
6477 	if (cb_arg->opts != OCS_CMD_POLL) {
6478 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6479 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
6480 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6481 	}
6482 }
6483 
6484 /**
6485  * @brief Set the Lancer dump location
6486  * @par Description
6487  * This function tells a Lancer chip to use a specific DMA
6488  * buffer as a dump location rather than the internal flash.
6489  *
6490  * @param hw Hardware context.
6491  * @param num_buffers The number of DMA buffers to hold the dump (1..n).
6492  * @param dump_buffers DMA buffers to hold the dump.
6493  *
6494  * @return Returns OCS_HW_RTN_SUCCESS on success.
6495  */
6496 ocs_hw_rtn_e
6497 ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
6498 {
6499 	uint8_t bus, dev, func;
6500 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6501 	uint8_t	buf[SLI4_BMBX_SIZE];
6502 
6503 	/*
6504 	 * Make sure the FW is new enough to support this command. If the FW
6505 	 * is too old, the FW will UE.
6506 	 */
6507 	if (hw->workaround.disable_dump_loc) {
6508 		ocs_log_test(hw->os, "FW version is too old for this feature\n");
6509 		return OCS_HW_RTN_ERROR;
6510 	}
6511 
6512 	/* This command is only valid for physical port 0 */
6513 	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
6514 	if (fdb == 0 && func != 0) {
6515 		ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
6516 			     func);
6517 		return OCS_HW_RTN_ERROR;
6518 	}
6519 
6520 	/*
6521 	 * If a single buffer is used, then it may be passed as is to the chip. For multiple buffers,
6522 	 * We must allocate a SGL list and then pass the address of the list to the chip.
6523 	 */
6524 	if (num_buffers > 1) {
6525 		uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
6526 		sli4_sge_t *sge;
6527 		uint32_t i;
6528 
6529 		if (hw->dump_sges.size < sge_size) {
6530 			ocs_dma_free(hw->os, &hw->dump_sges);
6531 			if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
6532 				ocs_log_err(hw->os, "SGE DMA allocation failed\n");
6533 				return OCS_HW_RTN_NO_MEMORY;
6534 			}
6535 		}
6536 		/* build the SGE list */
6537 		ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
6538 		hw->dump_sges.len = sge_size;
6539 		sge = hw->dump_sges.virt;
6540 		for (i = 0; i < num_buffers; i++) {
6541 			sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
6542 			sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
6543 			sge[i].last = (i == num_buffers - 1 ? 1 : 0);
6544 			sge[i].buffer_length = dump_buffers[i].size;
6545 		}
6546 		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6547 						      SLI4_BMBX_SIZE, FALSE, TRUE,
6548 						      &hw->dump_sges, fdb);
6549 	} else {
6550 		dump_buffers->len = dump_buffers->size;
6551 		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6552 						      SLI4_BMBX_SIZE, FALSE, FALSE,
6553 						      dump_buffers, fdb);
6554 	}
6555 
6556 	if (rc) {
6557 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL,
6558 				     NULL, NULL);
6559 		if (rc) {
6560 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n",
6561 				rc);
6562 		}
6563 	} else {
6564 		ocs_log_err(hw->os,
6565 			"sli_cmd_common_set_dump_location failed\n");
6566 		rc = OCS_HW_RTN_ERROR;
6567 	}
6568 
6569 	return rc;
6570 }
6571 
6572 /**
6573  * @brief Set the Ethernet license.
6574  *
6575  * @par Description
6576  * This function sends the appropriate mailbox command (DMTF
6577  * CLP) to set the Ethernet license to the given license value.
6578  * Since it is used during the time of ocs_hw_init(), the mailbox
6579  * command is sent via polling (the BMBX route).
6580  *
6581  * @param hw Hardware context.
6582  * @param license 32-bit license value.
6583  *
6584  * @return Returns OCS_HW_RTN_SUCCESS on success.
6585  */
6586 static ocs_hw_rtn_e
6587 ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
6588 {
6589 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6590 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6591 	ocs_dma_t dma_cmd;
6592 	ocs_dma_t dma_resp;
6593 
6594 	/* only for lancer right now */
6595 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6596 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6597 		return OCS_HW_RTN_ERROR;
6598 	}
6599 
6600 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
6601 	/* allocate DMA for command  */
6602 	if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6603 		ocs_log_err(hw->os, "malloc failed\n");
6604 		return OCS_HW_RTN_NO_MEMORY;
6605 	}
6606 	ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6607 	ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));
6608 
6609 	/* allocate DMA for response */
6610 	if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6611 		ocs_log_err(hw->os, "malloc failed\n");
6612 		ocs_dma_free(hw->os, &dma_cmd);
6613 		return OCS_HW_RTN_NO_MEMORY;
6614 	}
6615 
6616 	/* send DMTF CLP command mbx and poll */
6617 	if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
6618 		ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
6619 		rc = OCS_HW_RTN_ERROR;
6620 	}
6621 
6622 	ocs_dma_free(hw->os, &dma_cmd);
6623 	ocs_dma_free(hw->os, &dma_resp);
6624 	return rc;
6625 }
6626 
6627 /**
6628  * @brief Callback argument structure for the DMTF CLP commands.
6629  */
6630 typedef struct ocs_hw_clp_cb_arg_s {
6631 	ocs_hw_dmtf_clp_cb_t cb;
6632 	ocs_dma_t *dma_resp;
6633 	int32_t status;
6634 	uint32_t opts;
6635 	void *arg;
6636 } ocs_hw_clp_cb_arg_t;
6637 
6638 /**
6639  * @brief Execute the DMTF CLP command.
6640  *
6641  * @param hw Hardware context.
6642  * @param dma_cmd DMA buffer containing the CLP command.
6643  * @param dma_resp DMA buffer that will contain the response (if successful).
6644  * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
6645  * @param cb Callback function.
6646  * @param arg Callback argument.
6647  *
6648  * @return Returns the number of bytes written to the response
6649  * buffer on success, or a negative value if failed.
6650  */
6651 static ocs_hw_rtn_e
6652 ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg)
6653 {
6654 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6655 	ocs_hw_clp_cb_arg_t *cb_arg;
6656 	uint8_t *mbxdata;
6657 
6658 	/* allocate DMA for mailbox */
6659 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6660 	if (mbxdata == NULL) {
6661 		ocs_log_err(hw->os, "failed to malloc mbox\n");
6662 		return OCS_HW_RTN_NO_MEMORY;
6663 	}
6664 
6665 	/* allocate memory for callback argument */
6666 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6667 	if (cb_arg == NULL) {
6668 		ocs_log_err(hw->os, "failed to malloc cb_arg");
6669 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6670 		return OCS_HW_RTN_NO_MEMORY;
6671 	}
6672 
6673 	cb_arg->cb = cb;
6674 	cb_arg->arg = arg;
6675 	cb_arg->dma_resp = dma_resp;
6676 	cb_arg->opts = opts;
6677 
6678 	/* Send the HW command */
6679 	if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
6680 				      dma_cmd, dma_resp)) {
6681 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg);
6682 
6683 		if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) {
6684 			/* if we're polling, copy response and invoke callback to
6685 			 * parse result */
6686 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
6687 			ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg);
6688 
6689 			/* set rc to resulting or "parsed" status */
6690 			rc = cb_arg->status;
6691 		}
6692 
6693 		/* if failed, or polling, free memory here */
6694 		if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6695 			if (rc != OCS_HW_RTN_SUCCESS) {
6696 				ocs_log_test(hw->os, "ocs_hw_command failed\n");
6697 			}
6698 			ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6699 			ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6700 		}
6701 	} else {
6702 		ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n");
6703 		rc = OCS_HW_RTN_ERROR;
6704 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6705 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6706 	}
6707 
6708 	return rc;
6709 }
6710 
6711 /**
6712  * @brief Called when the DMTF CLP command completes.
6713  *
6714  * @param hw Hardware context.
6715  * @param status Status field from the mbox completion.
6716  * @param mqe Mailbox response structure.
6717  * @param arg Pointer to a callback argument.
6718  *
6719  * @return None.
6720  *
6721  */
6722 static void
6723 ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6724 {
6725 	int32_t cb_status = 0;
6726 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6727 	sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
6728 	ocs_hw_clp_cb_arg_t *cb_arg = arg;
6729 	uint32_t result_len = 0;
6730 	int32_t stat_len;
6731 	char stat_str[8];
6732 
6733 	/* there are several status codes here, check them all and condense
6734 	 * into a single callback status
6735 	 */
6736 	if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
6737 		ocs_log_debug(hw->os, "status=x%x/x%x/x%x  addl=x%x clp=x%x detail=x%x\n",
6738 			status,
6739 			mbox_rsp->hdr.status,
6740 			clp_rsp->hdr.status,
6741 			clp_rsp->hdr.additional_status,
6742 			clp_rsp->clp_status,
6743 			clp_rsp->clp_detailed_status);
6744 		if (status) {
6745 			cb_status = status;
6746 		} else if (mbox_rsp->hdr.status) {
6747 			cb_status = mbox_rsp->hdr.status;
6748 		} else {
6749 			cb_status = clp_rsp->clp_status;
6750 		}
6751 	} else {
6752 		result_len = clp_rsp->resp_length;
6753 	}
6754 
6755 	if (cb_status) {
6756 		goto ocs_hw_cb_dmtf_clp_done;
6757 	}
6758 
6759 	if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
6760 		ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
6761 			     cb_arg->dma_resp->size, result_len);
6762 		cb_status = -1;
6763 		goto ocs_hw_cb_dmtf_clp_done;
6764 	}
6765 
6766 	/* parse CLP response to get status */
6767 	stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
6768 					      sizeof(stat_str),
6769 					      cb_arg->dma_resp->virt,
6770 					      result_len);
6771 
6772 	if (stat_len <= 0) {
6773 		ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
6774 		cb_status = -1;
6775 		goto ocs_hw_cb_dmtf_clp_done;
6776 	}
6777 
6778 	if (ocs_strcmp(stat_str, "0") != 0) {
6779 		ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
6780 		cb_status = -1;
6781 		goto ocs_hw_cb_dmtf_clp_done;
6782 	}
6783 
6784 ocs_hw_cb_dmtf_clp_done:
6785 
6786 	/* save status in cb_arg for callers with NULL cb's + polling */
6787 	cb_arg->status = cb_status;
6788 	if (cb_arg->cb) {
6789 		cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
6790 	}
6791 	/* if polling, caller will free memory */
6792 	if (cb_arg->opts != OCS_CMD_POLL) {
6793 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6794 		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6795 	}
6796 }
6797 
6798 /**
6799  * @brief Parse the CLP result and get the value corresponding to the given
6800  * keyword.
6801  *
6802  * @param hw Hardware context.
6803  * @param keyword CLP keyword for which the value is returned.
6804  * @param value Location to which the resulting value is copied.
6805  * @param value_len Length of the value parameter.
6806  * @param resp Pointer to the response buffer that is searched
6807  * for the keyword and value.
6808  * @param resp_len Length of response buffer passed in.
6809  *
6810  * @return Returns the number of bytes written to the value
6811  * buffer on success, or a negative vaue on failure.
6812  */
6813 static int32_t
6814 ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
6815 {
6816 	char *start = NULL;
6817 	char *end = NULL;
6818 
6819 	/* look for specified keyword in string */
6820 	start = ocs_strstr(resp, keyword);
6821 	if (start == NULL) {
6822 		ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
6823 			     keyword);
6824 		return -1;
6825 	}
6826 
6827 	/* now look for '=' and go one past */
6828 	start = ocs_strchr(start, '=');
6829 	if (start == NULL) {
6830 		ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
6831 			     keyword);
6832 		return -1;
6833 	}
6834 	start++;
6835 
6836 	/* \r\n terminates value */
6837 	end = ocs_strstr(start, "\r\n");
6838 	if (end == NULL) {
6839 		ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
6840 			     keyword);
6841 		return -1;
6842 	}
6843 
6844 	/* make sure given result array is big enough */
6845 	if ((end - start + 1) > value_len) {
6846 		ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
6847 			     value_len, (end-start));
6848 		return -1;
6849 	}
6850 
6851 	ocs_strncpy(value, start, (end - start));
6852 	value[end-start] = '\0';
6853 	return (end-start+1);
6854 }
6855 
6856 /**
6857  * @brief Cause chip to enter an unrecoverable error state.
6858  *
6859  * @par Description
6860  * Cause chip to enter an unrecoverable error state. This is
6861  * used when detecting unexpected FW behavior so that the FW can be
6862  * hwted from the driver as soon as the error is detected.
6863  *
6864  * @param hw Hardware context.
6865  * @param dump Generate dump as part of reset.
6866  *
6867  * @return Returns 0 on success, or a non-zero value on failure.
6868  *
6869  */
6870 ocs_hw_rtn_e
6871 ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
6872 {
6873 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6874 
6875 	if (sli_raise_ue(&hw->sli, dump) != 0) {
6876 		rc = OCS_HW_RTN_ERROR;
6877 	} else {
6878 		if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
6879 			hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
6880 		}
6881 	}
6882 
6883 	return rc;
6884 }
6885 
6886 /**
6887  * @brief Called when the OBJECT_GET command completes.
6888  *
6889  * @par Description
6890  * Get the number of bytes actually written out of the response, free the mailbox
6891  * that was malloc'd by ocs_hw_dump_get(), then call the callback
6892  * and pass the status and bytes read.
6893  *
6894  * @param hw Hardware context.
6895  * @param status Status field from the mbox completion.
6896  * @param mqe Mailbox response structure.
6897  * @param arg Pointer to a callback function that signals the caller that the command is done.
6898  * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_read)</tt>.
6899  *
6900  * @return Returns 0.
6901  */
6902 static int32_t
6903 ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6904 {
6905 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6906 	sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed;
6907 	ocs_hw_dump_get_cb_arg_t *cb_arg = arg;
6908 	uint32_t bytes_read;
6909 	uint8_t eof;
6910 
6911 	bytes_read = rd_obj_rsp->actual_read_length;
6912 	eof = rd_obj_rsp->eof;
6913 
6914 	if (cb_arg) {
6915 		if (cb_arg->cb) {
6916 			if ((status == 0) && mbox_rsp->hdr.status) {
6917 				status = mbox_rsp->hdr.status;
6918 			}
6919 			cb_arg->cb(status, bytes_read, eof, cb_arg->arg);
6920 		}
6921 
6922 		ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
6923 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
6924 	}
6925 
6926 	return 0;
6927 }
6928 
6929 /**
6930  * @brief Read a dump image to the host.
6931  *
6932  * @par Description
6933  * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a
6934  * dump image chunk, then sends the command with the ocs_hw_command(). On completion,
6935  * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox
6936  * and signal the caller that the read has completed.
6937  *
6938  * @param hw Hardware context.
6939  * @param dma DMA structure to transfer the dump chunk into.
6940  * @param size Size of the dump chunk.
6941  * @param offset Offset, in bytes, from the beginning of the dump.
6942  * @param cb Pointer to a callback function that is called when the command completes.
6943  * The callback function prototype is
6944  * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>.
6945  * @param arg Pointer to be passed to the callback function.
6946  *
6947  * @return Returns 0 on success, or a non-zero value on failure.
6948  */
6949 ocs_hw_rtn_e
6950 ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg)
6951 {
6952 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6953 	uint8_t *mbxdata;
6954 	ocs_hw_dump_get_cb_arg_t *cb_arg;
6955 	uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
6956 
6957 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6958 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6959 		return OCS_HW_RTN_ERROR;
6960 	}
6961 
6962 	if (1 != sli_dump_is_present(&hw->sli)) {
6963 		ocs_log_test(hw->os, "No dump is present\n");
6964 		return OCS_HW_RTN_ERROR;
6965 	}
6966 
6967 	if (1 == sli_reset_required(&hw->sli)) {
6968 		ocs_log_test(hw->os, "device reset required\n");
6969 		return OCS_HW_RTN_ERROR;
6970 	}
6971 
6972 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6973 	if (mbxdata == NULL) {
6974 		ocs_log_err(hw->os, "failed to malloc mbox\n");
6975 		return OCS_HW_RTN_NO_MEMORY;
6976 	}
6977 
6978 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT);
6979 	if (cb_arg == NULL) {
6980 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6981 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6982 		return OCS_HW_RTN_NO_MEMORY;
6983 	}
6984 
6985 	cb_arg->cb = cb;
6986 	cb_arg->arg = arg;
6987 	cb_arg->mbox_cmd = mbxdata;
6988 
6989 	if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
6990 			size, offset, "/dbg/dump.bin", dma)) {
6991 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg);
6992 		if (rc == 0 && opts == OCS_CMD_POLL) {
6993 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
6994 			rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg);
6995 		}
6996 	}
6997 
6998 	if (rc != OCS_HW_RTN_SUCCESS) {
6999 		ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n");
7000 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7001 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
7002 	}
7003 
7004 	return rc;
7005 }
7006 
7007 /**
7008  * @brief Called when the OBJECT_DELETE command completes.
7009  *
7010  * @par Description
7011  * Free the mailbox that was malloc'd
7012  * by ocs_hw_dump_clear(), then call the callback and pass the status.
7013  *
7014  * @param hw Hardware context.
7015  * @param status Status field from the mbox completion.
7016  * @param mqe Mailbox response structure.
7017  * @param arg Pointer to a callback function that signals the caller that the command is done.
7018  * The callback function prototype is <tt>void cb(int32_t status, void *arg)</tt>.
7019  *
7020  * @return Returns 0.
7021  */
7022 static int32_t
7023 ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
7024 {
7025 	ocs_hw_dump_clear_cb_arg_t *cb_arg = arg;
7026 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7027 
7028 	if (cb_arg) {
7029 		if (cb_arg->cb) {
7030 			if ((status == 0) && mbox_rsp->hdr.status) {
7031 				status = mbox_rsp->hdr.status;
7032 			}
7033 			cb_arg->cb(status, cb_arg->arg);
7034 		}
7035 
7036 		ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
7037 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7038 	}
7039 
7040 	return 0;
7041 }
7042 
7043 /**
7044  * @brief Clear a dump image from the device.
7045  *
7046  * @par Description
7047  * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear
7048  * the dump, then sends the command with ocs_hw_command(). On completion,
7049  * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox
7050  * and to signal the caller that the write has completed.
7051  *
7052  * @param hw Hardware context.
7053  * @param cb Pointer to a callback function that is called when the command completes.
7054  * The callback function prototype is
7055  * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
7056  * @param arg Pointer to be passed to the callback function.
7057  *
7058  * @return Returns 0 on success, or a non-zero value on failure.
7059  */
7060 ocs_hw_rtn_e
7061 ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg)
7062 {
7063 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7064 	uint8_t *mbxdata;
7065 	ocs_hw_dump_clear_cb_arg_t *cb_arg;
7066 	uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
7067 
7068 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
7069 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
7070 		return OCS_HW_RTN_ERROR;
7071 	}
7072 
7073 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7074 	if (mbxdata == NULL) {
7075 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7076 		return OCS_HW_RTN_NO_MEMORY;
7077 	}
7078 
7079 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT);
7080 	if (cb_arg == NULL) {
7081 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7082 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7083 		return OCS_HW_RTN_NO_MEMORY;
7084 	}
7085 
7086 	cb_arg->cb = cb;
7087 	cb_arg->arg = arg;
7088 	cb_arg->mbox_cmd = mbxdata;
7089 
7090 	if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7091 			"/dbg/dump.bin")) {
7092 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg);
7093 		if (rc == 0 && opts == OCS_CMD_POLL) {
7094 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7095 			rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg);
7096 		}
7097 	}
7098 
7099 	if (rc != OCS_HW_RTN_SUCCESS) {
7100 		ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n");
7101 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7102 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7103 	}
7104 
7105 	return rc;
7106 }
7107 
7108 typedef struct ocs_hw_get_port_protocol_cb_arg_s {
7109 	ocs_get_port_protocol_cb_t cb;
7110 	void *arg;
7111 	uint32_t pci_func;
7112 	ocs_dma_t payload;
7113 } ocs_hw_get_port_protocol_cb_arg_t;
7114 
7115 /**
7116  * @brief Called for the completion of get_port_profile for a
7117  *        user request.
7118  *
7119  * @param hw Hardware context.
7120  * @param status The status from the MQE.
7121  * @param mqe Pointer to mailbox command buffer.
7122  * @param arg Pointer to a callback argument.
7123  *
7124  * @return Returns 0 on success, or a non-zero value on failure.
7125  */
7126 static int32_t
7127 ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status,
7128 			    uint8_t *mqe, void *arg)
7129 {
7130 	ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg;
7131 	ocs_dma_t *payload = &(cb_arg->payload);
7132 	sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7133 	ocs_hw_port_protocol_e port_protocol;
7134 	int num_descriptors;
7135 	sli4_resource_descriptor_v1_t *desc_p;
7136 	sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7137 	int i;
7138 
7139 	port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7140 
7141 	num_descriptors = response->desc_count;
7142 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7143 	for (i=0; i<num_descriptors; i++) {
7144 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7145 			pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7146 			if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7147 				switch(pcie_desc_p->pf_type) {
7148 				case 0x02:
7149 					port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI;
7150 					break;
7151 				case 0x04:
7152 					port_protocol = OCS_HW_PORT_PROTOCOL_FCOE;
7153 					break;
7154 				case 0x10:
7155 					port_protocol = OCS_HW_PORT_PROTOCOL_FC;
7156 					break;
7157 				default:
7158 					port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7159 					break;
7160 				}
7161 			}
7162 		}
7163 
7164 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7165 	}
7166 
7167 	if (cb_arg->cb) {
7168 		cb_arg->cb(status, port_protocol, cb_arg->arg);
7169 	}
7170 
7171 	ocs_dma_free(hw->os, &cb_arg->payload);
7172 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7173 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7174 
7175 	return 0;
7176 }
7177 
7178 /**
7179  * @ingroup io
7180  * @brief  Get the current port protocol.
7181  * @par Description
7182  * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox.  When the
7183  * command completes the provided mgmt callback function is
7184  * called.
7185  *
7186  * @param hw Hardware context.
7187  * @param pci_func PCI function to query for current protocol.
7188  * @param cb Callback function to be called when the command completes.
7189  * @param ul_arg An argument that is passed to the callback function.
7190  *
7191  * @return
7192  * - OCS_HW_RTN_SUCCESS on success.
7193  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7194  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7195  *   context.
7196  * - OCS_HW_RTN_ERROR on any other error.
7197  */
7198 ocs_hw_rtn_e
7199 ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func,
7200 	ocs_get_port_protocol_cb_t cb, void* ul_arg)
7201 {
7202 	uint8_t *mbxdata;
7203 	ocs_hw_get_port_protocol_cb_arg_t *cb_arg;
7204 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7205 
7206 	/* Only supported on Skyhawk */
7207 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7208 		return OCS_HW_RTN_ERROR;
7209 	}
7210 
7211 	/* mbxdata holds the header of the command */
7212 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7213 	if (mbxdata == NULL) {
7214 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7215 		return OCS_HW_RTN_NO_MEMORY;
7216 	}
7217 
7218 	/* cb_arg holds the data that will be passed to the callback on completion */
7219 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7220 	if (cb_arg == NULL) {
7221 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7222 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7223 		return OCS_HW_RTN_NO_MEMORY;
7224 	}
7225 
7226 	cb_arg->cb = cb;
7227 	cb_arg->arg = ul_arg;
7228 	cb_arg->pci_func = pci_func;
7229 
7230 	/* dma_mem holds the non-embedded portion */
7231 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7232 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7233 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7234 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7235 		return OCS_HW_RTN_NO_MEMORY;
7236 	}
7237 
7238 	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7239 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg);
7240 	}
7241 
7242 	if (rc != OCS_HW_RTN_SUCCESS) {
7243 		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7244 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7245 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
7246 		ocs_dma_free(hw->os, &cb_arg->payload);
7247 	}
7248 
7249 	return rc;
7250 
7251 }
7252 
7253 typedef struct ocs_hw_set_port_protocol_cb_arg_s {
7254 	ocs_set_port_protocol_cb_t cb;
7255 	void *arg;
7256 	ocs_dma_t payload;
7257 	uint32_t new_protocol;
7258 	uint32_t pci_func;
7259 } ocs_hw_set_port_protocol_cb_arg_t;
7260 
7261 /**
7262  * @brief Called for the completion of set_port_profile for a
7263  *        user request.
7264  *
7265  * @par Description
7266  * This is the second of two callbacks for the set_port_protocol
7267  * function. The set operation is a read-modify-write. This
7268  * callback is called when the write (SET_PROFILE_CONFIG)
7269  * completes.
7270  *
7271  * @param hw Hardware context.
7272  * @param status The status from the MQE.
7273  * @param mqe Pointer to mailbox command buffer.
7274  * @param arg Pointer to a callback argument.
7275  *
7276  * @return 0 on success, non-zero otherwise
7277  */
7278 static int32_t
7279 ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7280 {
7281 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7282 
7283 	if (cb_arg->cb) {
7284 		cb_arg->cb( status, cb_arg->arg);
7285 	}
7286 
7287 	ocs_dma_free(hw->os, &(cb_arg->payload));
7288 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7289 	ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7290 
7291 	return 0;
7292 }
7293 
7294 /**
7295  * @brief Called for the completion of set_port_profile for a
7296  *        user request.
7297  *
7298  * @par Description
7299  * This is the first of two callbacks for the set_port_protocol
7300  * function.  The set operation is a read-modify-write.  This
7301  * callback is called when the read completes
7302  * (GET_PROFILE_CONFG).  It will updated the resource
7303  * descriptors, then queue the write (SET_PROFILE_CONFIG).
7304  *
7305  * On entry there are three memory areas that were allocated by
7306  * ocs_hw_set_port_protocol.  If a failure is detected in this
7307  * function those need to be freed.  If this function succeeds
7308  * it allocates three more areas.
7309  *
7310  * @param hw Hardware context.
7311  * @param status The status from the MQE
7312  * @param mqe Pointer to mailbox command buffer.
7313  * @param arg Pointer to a callback argument.
7314  *
7315  * @return Returns 0 on success, or a non-zero value otherwise.
7316  */
7317 static int32_t
7318 ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7319 {
7320 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7321 	ocs_dma_t *payload = &(cb_arg->payload);
7322 	sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7323 	int num_descriptors;
7324 	sli4_resource_descriptor_v1_t *desc_p;
7325 	sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7326 	int i;
7327 	ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg;
7328 	ocs_hw_port_protocol_e new_protocol;
7329 	uint8_t *dst;
7330 	sli4_isap_resouce_descriptor_v1_t *isap_desc_p;
7331 	uint8_t *mbxdata;
7332 	int pci_descriptor_count;
7333 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7334 	int num_fcoe_ports = 0;
7335 	int num_iscsi_ports = 0;
7336 
7337 	new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol;
7338 
7339 	num_descriptors = response->desc_count;
7340 
7341 	/* Count PCI descriptors */
7342 	pci_descriptor_count = 0;
7343 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7344 	for (i=0; i<num_descriptors; i++) {
7345 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7346 			++pci_descriptor_count;
7347 		}
7348 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7349 	}
7350 
7351 	/* mbxdata holds the header of the command */
7352 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7353 	if (mbxdata == NULL) {
7354 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7355 		return OCS_HW_RTN_NO_MEMORY;
7356 	}
7357 
7358 	/* cb_arg holds the data that will be passed to the callback on completion */
7359 	new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7360 	if (new_cb_arg == NULL) {
7361 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7362 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7363 		return OCS_HW_RTN_NO_MEMORY;
7364 	}
7365 
7366 	new_cb_arg->cb = cb_arg->cb;
7367 	new_cb_arg->arg = cb_arg->arg;
7368 
7369 	/* Allocate memory for the descriptors we're going to send.  This is
7370 	 * one for each PCI descriptor plus one ISAP descriptor. */
7371 	if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) +
7372 			  (pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) +
7373 			  sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) {
7374 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7375 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7376 		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7377 		return OCS_HW_RTN_NO_MEMORY;
7378 	}
7379 
7380 	sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7381 						   &new_cb_arg->payload,
7382 						   0, pci_descriptor_count+1, 1);
7383 
7384 	/* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */
7385 	dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc);
7386 
7387 	/* Loop over all descriptors.  If the descriptor is a PCIe descriptor, copy it
7388 	 * to the SET_PROFILE_CONFIG command to be written back.  If it's the descriptor
7389 	 * that we're trying to change also set its pf_type.
7390 	 */
7391 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7392 	for (i=0; i<num_descriptors; i++) {
7393 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7394 			pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7395 			if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7396 				/* This is the PCIe descriptor for this OCS instance.
7397 				 * Update it with the new pf_type */
7398 				switch(new_protocol) {
7399 				case OCS_HW_PORT_PROTOCOL_FC:
7400 					pcie_desc_p->pf_type = SLI4_PROTOCOL_FC;
7401 					break;
7402 				case OCS_HW_PORT_PROTOCOL_FCOE:
7403 					pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE;
7404 					break;
7405 				case OCS_HW_PORT_PROTOCOL_ISCSI:
7406 					pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI;
7407 					break;
7408 				default:
7409 					pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT;
7410 					break;
7411 				}
7412 			}
7413 
7414 			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) {
7415 				++num_fcoe_ports;
7416 			}
7417 			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) {
7418 				++num_iscsi_ports;
7419 			}
7420 			ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t));
7421 			dst += sizeof(sli4_pcie_resource_descriptor_v1_t);
7422 		}
7423 
7424 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7425 	}
7426 
7427 	/* Create an ISAP resource descriptor */
7428 	isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst;
7429 	isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP;
7430 	isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t);
7431 	if (num_iscsi_ports > 0) {
7432 		isap_desc_p->iscsi_tgt = 1;
7433 		isap_desc_p->iscsi_ini = 1;
7434 		isap_desc_p->iscsi_dif = 1;
7435 	}
7436 	if (num_fcoe_ports > 0) {
7437 		isap_desc_p->fcoe_tgt = 1;
7438 		isap_desc_p->fcoe_ini = 1;
7439 		isap_desc_p->fcoe_dif = 1;
7440 	}
7441 
7442 	/* At this point we're done with the memory allocated by ocs_port_set_protocol */
7443 	ocs_dma_free(hw->os, &cb_arg->payload);
7444 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7445 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7446 
7447 	/* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */
7448 	rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg);
7449 	if (rc) {
7450 		ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n");
7451 		/* Call the upper level callback to report a failure */
7452 		if (new_cb_arg->cb) {
7453 			new_cb_arg->cb( rc, new_cb_arg->arg);
7454 		}
7455 
7456 		/* Free the memory allocated by this function */
7457 		ocs_dma_free(hw->os, &new_cb_arg->payload);
7458 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7459 		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7460 	}
7461 
7462 	return rc;
7463 }
7464 
7465 /**
7466  * @ingroup io
7467  * @brief  Set the port protocol.
7468  * @par Description
7469  * Setting the port protocol is a read-modify-write operation.
7470  * This function submits a GET_PROFILE_CONFIG command to read
7471  * the current settings.  The callback function will modify the
7472  * settings and issue the write.
7473  *
7474  * On successful completion this function will have allocated
7475  * two regular memory areas and one dma area which will need to
7476  * get freed later in the callbacks.
7477  *
7478  * @param hw Hardware context.
7479  * @param new_protocol New protocol to use.
7480  * @param pci_func PCI function to configure.
7481  * @param cb Callback function to be called when the command completes.
7482  * @param ul_arg An argument that is passed to the callback function.
7483  *
7484  * @return
7485  * - OCS_HW_RTN_SUCCESS on success.
7486  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7487  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7488  *   context.
7489  * - OCS_HW_RTN_ERROR on any other error.
7490  */
7491 ocs_hw_rtn_e
7492 ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol,
7493 		uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg)
7494 {
7495 	uint8_t *mbxdata;
7496 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg;
7497 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7498 
7499 	/* Only supported on Skyhawk */
7500 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7501 		return OCS_HW_RTN_ERROR;
7502 	}
7503 
7504 	/* mbxdata holds the header of the command */
7505 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7506 	if (mbxdata == NULL) {
7507 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7508 		return OCS_HW_RTN_NO_MEMORY;
7509 	}
7510 
7511 	/* cb_arg holds the data that will be passed to the callback on completion */
7512 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7513 	if (cb_arg == NULL) {
7514 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7515 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7516 		return OCS_HW_RTN_NO_MEMORY;
7517 	}
7518 
7519 	cb_arg->cb = cb;
7520 	cb_arg->arg = ul_arg;
7521 	cb_arg->new_protocol = new_protocol;
7522 	cb_arg->pci_func = pci_func;
7523 
7524 	/* dma_mem holds the non-embedded portion */
7525 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7526 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7527 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7528 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7529 		return OCS_HW_RTN_NO_MEMORY;
7530 	}
7531 
7532 	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7533 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg);
7534 	}
7535 
7536 	if (rc != OCS_HW_RTN_SUCCESS) {
7537 		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7538 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7539 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
7540 		ocs_dma_free(hw->os, &cb_arg->payload);
7541 	}
7542 
7543 	return rc;
7544 }
7545 
7546 typedef struct ocs_hw_get_profile_list_cb_arg_s {
7547 	ocs_get_profile_list_cb_t cb;
7548 	void *arg;
7549 	ocs_dma_t payload;
7550 } ocs_hw_get_profile_list_cb_arg_t;
7551 
7552 /**
7553  * @brief Called for the completion of get_profile_list for a
7554  *        user request.
7555  * @par Description
7556  * This function is called when the COMMMON_GET_PROFILE_LIST
7557  * mailbox completes.  The response will be in
7558  * ctx->non_embedded_mem.virt.  This function parses the
7559  * response and creates a ocs_hw_profile_list, then calls the
7560  * mgmt_cb callback function and passes that list to it.
7561  *
7562  * @param hw Hardware context.
7563  * @param status The status from the MQE
7564  * @param mqe Pointer to mailbox command buffer.
7565  * @param arg Pointer to a callback argument.
7566  *
7567  * @return Returns 0 on success, or a non-zero value on failure.
7568  */
7569 static int32_t
7570 ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7571 {
7572 	ocs_hw_profile_list_t *list;
7573 	ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg;
7574 	ocs_dma_t *payload = &(cb_arg->payload);
7575 	sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt;
7576 	int i;
7577 	int num_descriptors;
7578 
7579 	list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO);
7580 	list->num_descriptors = response->profile_descriptor_count;
7581 
7582 	num_descriptors = list->num_descriptors;
7583 	if (num_descriptors > OCS_HW_MAX_PROFILES) {
7584 		num_descriptors = OCS_HW_MAX_PROFILES;
7585 	}
7586 
7587 	for (i=0; i<num_descriptors; i++) {
7588 		list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id;
7589 		list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index;
7590 		ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description);
7591 	}
7592 
7593 	if (cb_arg->cb) {
7594 		cb_arg->cb(status, list, cb_arg->arg);
7595 	} else {
7596 		ocs_free(hw->os, list, sizeof(*list));
7597 	}
7598 
7599 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7600 	ocs_dma_free(hw->os, &cb_arg->payload);
7601 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7602 
7603 	return 0;
7604 }
7605 
7606 /**
7607  * @ingroup io
7608  * @brief  Get a list of available profiles.
7609  * @par Description
7610  * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox.  When the
7611  * command completes the provided mgmt callback function is
7612  * called.
7613  *
7614  * @param hw Hardware context.
7615  * @param cb Callback function to be called when the
7616  *      	  command completes.
7617  * @param ul_arg An argument that is passed to the callback
7618  *      	 function.
7619  *
7620  * @return
7621  * - OCS_HW_RTN_SUCCESS on success.
7622  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7623  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7624  *   context.
7625  * - OCS_HW_RTN_ERROR on any other error.
7626  */
7627 ocs_hw_rtn_e
7628 ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg)
7629 {
7630 	uint8_t *mbxdata;
7631 	ocs_hw_get_profile_list_cb_arg_t *cb_arg;
7632 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7633 
7634 	/* Only supported on Skyhawk */
7635 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7636 		return OCS_HW_RTN_ERROR;
7637 	}
7638 
7639 	/* mbxdata holds the header of the command */
7640 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7641 	if (mbxdata == NULL) {
7642 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7643 		return OCS_HW_RTN_NO_MEMORY;
7644 	}
7645 
7646 	/* cb_arg holds the data that will be passed to the callback on completion */
7647 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT);
7648 	if (cb_arg == NULL) {
7649 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7650 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7651 		return OCS_HW_RTN_NO_MEMORY;
7652 	}
7653 
7654 	cb_arg->cb = cb;
7655 	cb_arg->arg = ul_arg;
7656 
7657 	/* dma_mem holds the non-embedded portion */
7658 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) {
7659 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7660 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7661 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7662 		return OCS_HW_RTN_NO_MEMORY;
7663 	}
7664 
7665 	if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) {
7666 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg);
7667 	}
7668 
7669 	if (rc != OCS_HW_RTN_SUCCESS) {
7670 		ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n");
7671 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7672 		ocs_dma_free(hw->os, &cb_arg->payload);
7673 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7674 	}
7675 
7676 	return rc;
7677 }
7678 
7679 typedef struct ocs_hw_get_active_profile_cb_arg_s {
7680 	ocs_get_active_profile_cb_t cb;
7681 	void *arg;
7682 } ocs_hw_get_active_profile_cb_arg_t;
7683 
7684 /**
7685  * @brief Called for the completion of get_active_profile for a
7686  *        user request.
7687  *
7688  * @param hw Hardware context.
7689  * @param status The status from the MQE
7690  * @param mqe Pointer to mailbox command buffer.
7691  * @param arg Pointer to a callback argument.
7692  *
7693  * @return Returns 0 on success, or a non-zero value on failure.
7694  */
7695 static int32_t
7696 ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7697 {
7698 	ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg;
7699 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7700 	sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed;
7701 	uint32_t active_profile;
7702 
7703 	active_profile = response->active_profile_id;
7704 
7705 	if (cb_arg->cb) {
7706 		cb_arg->cb(status, active_profile, cb_arg->arg);
7707 	}
7708 
7709 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7710 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7711 
7712 	return 0;
7713 }
7714 
7715 /**
7716  * @ingroup io
7717  * @brief  Get the currently active profile.
7718  * @par Description
7719  * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the
7720  * command completes the provided mgmt callback function is
7721  * called.
7722  *
7723  * @param hw Hardware context.
7724  * @param cb Callback function to be called when the
7725  *	     command completes.
7726  * @param ul_arg An argument that is passed to the callback
7727  *      	 function.
7728  *
7729  * @return
7730  * - OCS_HW_RTN_SUCCESS on success.
7731  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7732  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7733  *   context.
7734  * - OCS_HW_RTN_ERROR on any other error.
7735  */
7736 int32_t
7737 ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg)
7738 {
7739 	uint8_t *mbxdata;
7740 	ocs_hw_get_active_profile_cb_arg_t *cb_arg;
7741 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7742 
7743 	/* Only supported on Skyhawk */
7744 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7745 		return OCS_HW_RTN_ERROR;
7746 	}
7747 
7748 	/* mbxdata holds the header of the command */
7749 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7750 	if (mbxdata == NULL) {
7751 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7752 		return OCS_HW_RTN_NO_MEMORY;
7753 	}
7754 
7755 	/* cb_arg holds the data that will be passed to the callback on completion */
7756 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT);
7757 	if (cb_arg == NULL) {
7758 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7759 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7760 		return OCS_HW_RTN_NO_MEMORY;
7761 	}
7762 
7763 	cb_arg->cb = cb;
7764 	cb_arg->arg = ul_arg;
7765 
7766 	if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7767 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg);
7768 	}
7769 
7770 	if (rc != OCS_HW_RTN_SUCCESS) {
7771 		ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n");
7772 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7773 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7774 	}
7775 
7776 	return rc;
7777 }
7778 
7779 typedef struct ocs_hw_get_nvparms_cb_arg_s {
7780 	ocs_get_nvparms_cb_t cb;
7781 	void *arg;
7782 } ocs_hw_get_nvparms_cb_arg_t;
7783 
7784 /**
7785  * @brief Called for the completion of get_nvparms for a
7786  *        user request.
7787  *
7788  * @param hw Hardware context.
7789  * @param status The status from the MQE.
7790  * @param mqe Pointer to mailbox command buffer.
7791  * @param arg Pointer to a callback argument.
7792  *
7793  * @return 0 on success, non-zero otherwise
7794  */
7795 static int32_t
7796 ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7797 {
7798 	ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg;
7799 	sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe;
7800 
7801 	if (cb_arg->cb) {
7802 		cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa,
7803 				mbox_rsp->preferred_d_id, cb_arg->arg);
7804 	}
7805 
7806 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7807 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7808 
7809 	return 0;
7810 }
7811 
7812 /**
7813  * @ingroup io
7814  * @brief  Read non-volatile parms.
7815  * @par Description
7816  * Issues a SLI-4 READ_NVPARMS mailbox. When the
7817  * command completes the provided mgmt callback function is
7818  * called.
7819  *
7820  * @param hw Hardware context.
7821  * @param cb Callback function to be called when the
7822  *	  command completes.
7823  * @param ul_arg An argument that is passed to the callback
7824  *	  function.
7825  *
7826  * @return
7827  * - OCS_HW_RTN_SUCCESS on success.
7828  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7829  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7830  *   context.
7831  * - OCS_HW_RTN_ERROR on any other error.
7832  */
7833 int32_t
7834 ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg)
7835 {
7836 	uint8_t *mbxdata;
7837 	ocs_hw_get_nvparms_cb_arg_t *cb_arg;
7838 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7839 
7840 	/* mbxdata holds the header of the command */
7841 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7842 	if (mbxdata == NULL) {
7843 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7844 		return OCS_HW_RTN_NO_MEMORY;
7845 	}
7846 
7847 	/* cb_arg holds the data that will be passed to the callback on completion */
7848 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT);
7849 	if (cb_arg == NULL) {
7850 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7851 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7852 		return OCS_HW_RTN_NO_MEMORY;
7853 	}
7854 
7855 	cb_arg->cb = cb;
7856 	cb_arg->arg = ul_arg;
7857 
7858 	if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7859 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg);
7860 	}
7861 
7862 	if (rc != OCS_HW_RTN_SUCCESS) {
7863 		ocs_log_test(hw->os, "READ_NVPARMS failed\n");
7864 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7865 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7866 	}
7867 
7868 	return rc;
7869 }
7870 
7871 typedef struct ocs_hw_set_nvparms_cb_arg_s {
7872 	ocs_set_nvparms_cb_t cb;
7873 	void *arg;
7874 } ocs_hw_set_nvparms_cb_arg_t;
7875 
7876 /**
7877  * @brief Called for the completion of set_nvparms for a
7878  *        user request.
7879  *
7880  * @param hw Hardware context.
7881  * @param status The status from the MQE.
7882  * @param mqe Pointer to mailbox command buffer.
7883  * @param arg Pointer to a callback argument.
7884  *
7885  * @return Returns 0 on success, or a non-zero value on failure.
7886  */
7887 static int32_t
7888 ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7889 {
7890 	ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg;
7891 
7892 	if (cb_arg->cb) {
7893 		cb_arg->cb(status, cb_arg->arg);
7894 	}
7895 
7896 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7897 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7898 
7899 	return 0;
7900 }
7901 
7902 /**
7903  * @ingroup io
7904  * @brief  Write non-volatile parms.
7905  * @par Description
7906  * Issues a SLI-4 WRITE_NVPARMS mailbox. When the
7907  * command completes the provided mgmt callback function is
7908  * called.
7909  *
7910  * @param hw Hardware context.
7911  * @param cb Callback function to be called when the
7912  *	  command completes.
7913  * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
7914  * @param wwnn Port's WWNN in big-endian order, or NULL to use default.
7915  * @param hard_alpa A hard AL_PA address setting used during loop
7916  * initialization. If no hard AL_PA is required, set to 0.
7917  * @param preferred_d_id A preferred D_ID address setting
7918  * that may be overridden with the CONFIG_LINK mailbox command.
7919  * If there is no preference, set to 0.
7920  * @param ul_arg An argument that is passed to the callback
7921  *	  function.
7922  *
7923  * @return
7924  * - OCS_HW_RTN_SUCCESS on success.
7925  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7926  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7927  *   context.
7928  * - OCS_HW_RTN_ERROR on any other error.
7929  */
7930 int32_t
7931 ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn,
7932 		uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg)
7933 {
7934 	uint8_t *mbxdata;
7935 	ocs_hw_set_nvparms_cb_arg_t *cb_arg;
7936 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7937 
7938 	/* mbxdata holds the header of the command */
7939 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7940 	if (mbxdata == NULL) {
7941 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7942 		return OCS_HW_RTN_NO_MEMORY;
7943 	}
7944 
7945 	/* cb_arg holds the data that will be passed to the callback on completion */
7946 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT);
7947 	if (cb_arg == NULL) {
7948 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7949 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7950 		return OCS_HW_RTN_NO_MEMORY;
7951 	}
7952 
7953 	cb_arg->cb = cb;
7954 	cb_arg->arg = ul_arg;
7955 
7956 	if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) {
7957 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg);
7958 	}
7959 
7960 	if (rc != OCS_HW_RTN_SUCCESS) {
7961 		ocs_log_test(hw->os, "SET_NVPARMS failed\n");
7962 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7963 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7964 	}
7965 
7966 	return rc;
7967 }
7968 
7969 /**
7970  * @brief Called to obtain the count for the specified type.
7971  *
7972  * @param hw Hardware context.
7973  * @param io_count_type IO count type (inuse, free, wait_free).
7974  *
7975  * @return Returns the number of IOs on the specified list type.
7976  */
7977 uint32_t
7978 ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type)
7979 {
7980 	ocs_hw_io_t *io = NULL;
7981 	uint32_t count = 0;
7982 
7983 	ocs_lock(&hw->io_lock);
7984 
7985 	switch (io_count_type) {
7986 	case OCS_HW_IO_INUSE_COUNT :
7987 		ocs_list_foreach(&hw->io_inuse, io) {
7988 			count++;
7989 		}
7990 		break;
7991 	case OCS_HW_IO_FREE_COUNT :
7992 		 ocs_list_foreach(&hw->io_free, io) {
7993 			 count++;
7994 		 }
7995 		 break;
7996 	case OCS_HW_IO_WAIT_FREE_COUNT :
7997 		 ocs_list_foreach(&hw->io_wait_free, io) {
7998 			 count++;
7999 		 }
8000 		 break;
8001 	case OCS_HW_IO_PORT_OWNED_COUNT:
8002 		 ocs_list_foreach(&hw->io_port_owned, io) {
8003 			 count++;
8004 		 }
8005 		 break;
8006 	case OCS_HW_IO_N_TOTAL_IO_COUNT :
8007 		count = hw->config.n_io;
8008 		break;
8009 	}
8010 
8011 	ocs_unlock(&hw->io_lock);
8012 
8013 	return count;
8014 }
8015 
8016 /**
8017  * @brief Called to obtain the count of produced RQs.
8018  *
8019  * @param hw Hardware context.
8020  *
8021  * @return Returns the number of RQs produced.
8022  */
8023 uint32_t
8024 ocs_hw_get_rqes_produced_count(ocs_hw_t *hw)
8025 {
8026 	uint32_t count = 0;
8027 	uint32_t i;
8028 	uint32_t j;
8029 
8030 	for (i = 0; i < hw->hw_rq_count; i++) {
8031 		hw_rq_t *rq = hw->hw_rq[i];
8032 		if (rq->rq_tracker != NULL) {
8033 			for (j = 0; j < rq->entry_count; j++) {
8034 				if (rq->rq_tracker[j] != NULL) {
8035 					count++;
8036 				}
8037 			}
8038 		}
8039 	}
8040 
8041 	return count;
8042 }
8043 
8044 typedef struct ocs_hw_set_active_profile_cb_arg_s {
8045 	ocs_set_active_profile_cb_t cb;
8046 	void *arg;
8047 } ocs_hw_set_active_profile_cb_arg_t;
8048 
8049 /**
8050  * @brief Called for the completion of set_active_profile for a
8051  *        user request.
8052  *
8053  * @param hw Hardware context.
8054  * @param status The status from the MQE
8055  * @param mqe Pointer to mailbox command buffer.
8056  * @param arg Pointer to a callback argument.
8057  *
8058  * @return Returns 0 on success, or a non-zero value on failure.
8059  */
8060 static int32_t
8061 ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
8062 {
8063 	ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg;
8064 
8065 	if (cb_arg->cb) {
8066 		cb_arg->cb(status, cb_arg->arg);
8067 	}
8068 
8069 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
8070 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
8071 
8072 	return 0;
8073 }
8074 
8075 /**
8076  * @ingroup io
8077  * @brief  Set the currently active profile.
8078  * @par Description
8079  * Issues a SLI4 COMMON_GET_ACTIVE_PROFILE mailbox. When the
8080  * command completes the provided mgmt callback function is
8081  * called.
8082  *
8083  * @param hw Hardware context.
8084  * @param profile_id Profile ID to activate.
8085  * @param cb Callback function to be called when the command completes.
8086  * @param ul_arg An argument that is passed to the callback function.
8087  *
8088  * @return
8089  * - OCS_HW_RTN_SUCCESS on success.
8090  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
8091  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
8092  *   context.
8093  * - OCS_HW_RTN_ERROR on any other error.
8094  */
8095 int32_t
8096 ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg)
8097 {
8098 	uint8_t *mbxdata;
8099 	ocs_hw_set_active_profile_cb_arg_t *cb_arg;
8100 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
8101 
8102 	/* Only supported on Skyhawk */
8103 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
8104 		return OCS_HW_RTN_ERROR;
8105 	}
8106 
8107 	/* mbxdata holds the header of the command */
8108 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
8109 	if (mbxdata == NULL) {
8110 		ocs_log_err(hw->os, "failed to malloc mbox\n");
8111 		return OCS_HW_RTN_NO_MEMORY;
8112 	}
8113 
8114 	/* cb_arg holds the data that will be passed to the callback on completion */
8115 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT);
8116 	if (cb_arg == NULL) {
8117 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
8118 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8119 		return OCS_HW_RTN_NO_MEMORY;
8120 	}
8121 
8122 	cb_arg->cb = cb;
8123 	cb_arg->arg = ul_arg;
8124 
8125 	if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) {
8126 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
8127 	}
8128 
8129 	if (rc != OCS_HW_RTN_SUCCESS) {
8130 		ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
8131 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8132 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8133 	}
8134 
8135 	return rc;
8136 }
8137 
8138 /*
8139  * Private functions
8140  */
8141 
8142 /**
8143  * @brief Update the queue hash with the ID and index.
8144  *
8145  * @param hash Pointer to hash table.
8146  * @param id ID that was created.
8147  * @param index The index into the hash object.
8148  */
8149 static void
8150 ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
8151 {
8152 	uint32_t	hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);
8153 
8154 	/*
8155 	 * Since the hash is always bigger than the number of queues, then we
8156 	 * never have to worry about an infinite loop.
8157 	 */
8158 	while(hash[hash_index].in_use) {
8159 		hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8160 	}
8161 
8162 	/* not used, claim the entry */
8163 	hash[hash_index].id = id;
8164 	hash[hash_index].in_use = 1;
8165 	hash[hash_index].index = index;
8166 }
8167 
8168 /**
8169  * @brief Find index given queue ID.
8170  *
8171  * @param hash Pointer to hash table.
8172  * @param id ID to find.
8173  *
8174  * @return Returns the index into the HW cq array or -1 if not found.
8175  */
8176 int32_t
8177 ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id)
8178 {
8179 	int32_t	rc = -1;
8180 	int32_t	index = id & (OCS_HW_Q_HASH_SIZE - 1);
8181 
8182 	/*
8183 	 * Since the hash is always bigger than the maximum number of Qs, then we
8184 	 * never have to worry about an infinite loop. We will always find an
8185 	 * unused entry.
8186 	 */
8187 	do {
8188 		if (hash[index].in_use &&
8189 		    hash[index].id == id) {
8190 			rc = hash[index].index;
8191 		} else {
8192 			index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8193 		}
8194 	} while(rc == -1 && hash[index].in_use);
8195 
8196 	return rc;
8197 }
8198 
8199 static int32_t
8200 ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain)
8201 {
8202 	int32_t		rc = OCS_HW_RTN_ERROR;
8203 	uint16_t	fcfi = UINT16_MAX;
8204 
8205 	if ((hw == NULL) || (domain == NULL)) {
8206 		ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8207 				hw, domain);
8208 		return OCS_HW_RTN_ERROR;
8209 	}
8210 
8211 	fcfi = domain->fcf_indicator;
8212 
8213 	if (fcfi < SLI4_MAX_FCFI) {
8214 		uint16_t	fcf_index = UINT16_MAX;
8215 
8216 		ocs_log_debug(hw->os, "adding domain %p @ %#x\n",
8217 				domain, fcfi);
8218 		hw->domains[fcfi] = domain;
8219 
8220 		/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8221 		if (hw->workaround.override_fcfi) {
8222 			if (hw->first_domain_idx < 0) {
8223 				hw->first_domain_idx = fcfi;
8224 			}
8225 		}
8226 
8227 		fcf_index = domain->fcf;
8228 
8229 		if (fcf_index < SLI4_MAX_FCF_INDEX) {
8230 			ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n",
8231 				      fcf_index, fcfi);
8232 			hw->fcf_index_fcfi[fcf_index] = fcfi;
8233 			rc = OCS_HW_RTN_SUCCESS;
8234 		} else {
8235 			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8236 				     fcf_index, SLI4_MAX_FCF_INDEX);
8237 			hw->domains[fcfi] = NULL;
8238 		}
8239 	} else {
8240 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8241 				fcfi, SLI4_MAX_FCFI);
8242 	}
8243 
8244 	return rc;
8245 }
8246 
8247 static int32_t
8248 ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain)
8249 {
8250 	int32_t		rc = OCS_HW_RTN_ERROR;
8251 	uint16_t	fcfi = UINT16_MAX;
8252 
8253 	if ((hw == NULL) || (domain == NULL)) {
8254 		ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8255 				hw, domain);
8256 		return OCS_HW_RTN_ERROR;
8257 	}
8258 
8259 	fcfi = domain->fcf_indicator;
8260 
8261 	if (fcfi < SLI4_MAX_FCFI) {
8262 		uint16_t	fcf_index = UINT16_MAX;
8263 
8264 		ocs_log_debug(hw->os, "deleting domain %p @ %#x\n",
8265 				domain, fcfi);
8266 
8267 		if (domain != hw->domains[fcfi]) {
8268 			ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n",
8269 				     domain, hw->domains[fcfi]);
8270 			return OCS_HW_RTN_ERROR;
8271 		}
8272 
8273 		hw->domains[fcfi] = NULL;
8274 
8275 		/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8276 		if (hw->workaround.override_fcfi) {
8277 			if (hw->first_domain_idx == fcfi) {
8278 				hw->first_domain_idx = -1;
8279 			}
8280 		}
8281 
8282 		fcf_index = domain->fcf;
8283 
8284 		if (fcf_index < SLI4_MAX_FCF_INDEX) {
8285 			if (hw->fcf_index_fcfi[fcf_index] == fcfi) {
8286 				hw->fcf_index_fcfi[fcf_index] = 0;
8287 				rc = OCS_HW_RTN_SUCCESS;
8288 			} else {
8289 				ocs_log_test(hw->os, "indexed FCFI %#x doesn't match provided %#x @ %d\n",
8290 					     hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index);
8291 			}
8292 		} else {
8293 			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8294 				     fcf_index, SLI4_MAX_FCF_INDEX);
8295 		}
8296 	} else {
8297 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8298 				fcfi, SLI4_MAX_FCFI);
8299 	}
8300 
8301 	return rc;
8302 }
8303 
8304 ocs_domain_t *
8305 ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi)
8306 {
8307 
8308 	if (hw == NULL) {
8309 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8310 		return NULL;
8311 	}
8312 
8313 	if (fcfi < SLI4_MAX_FCFI) {
8314 		return hw->domains[fcfi];
8315 	} else {
8316 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8317 				fcfi, SLI4_MAX_FCFI);
8318 		return NULL;
8319 	}
8320 }
8321 
8322 static ocs_domain_t *
8323 ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index)
8324 {
8325 
8326 	if (hw == NULL) {
8327 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8328 		return NULL;
8329 	}
8330 
8331 	if (fcf_index < SLI4_MAX_FCF_INDEX) {
8332 		return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]);
8333 	} else {
8334 		ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8335 			     fcf_index, SLI4_MAX_FCF_INDEX);
8336 		return NULL;
8337 	}
8338 }
8339 
8340 /**
8341  * @brief Quaratine an IO by taking a reference count and adding it to the
8342  *        quarantine list. When the IO is popped from the list then the
8343  *        count is released and the IO MAY be freed depending on whether
8344  *        it is still referenced by the IO.
8345  *
8346  *        @n @b Note: BZ 160124 - If this is a target write or an initiator read using
8347  *        DIF, then we must add the XRI to a quarantine list until we receive
8348  *        4 more completions of this same type.
8349  *
8350  * @param hw Hardware context.
8351  * @param wq Pointer to the WQ associated with the IO object to quarantine.
8352  * @param io Pointer to the io object to quarantine.
8353  */
8354 static void
8355 ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io)
8356 {
8357 	ocs_quarantine_info_t *q_info = &wq->quarantine_info;
8358 	uint32_t	index;
8359 	ocs_hw_io_t	*free_io = NULL;
8360 
8361 	/* return if the QX bit was clear */
8362 	if (!io->quarantine) {
8363 		return;
8364 	}
8365 
8366 	/* increment the IO refcount to prevent it from being freed before the quarantine is over */
8367 	if (ocs_ref_get_unless_zero(&io->ref) == 0) {
8368 		/* command no longer active */
8369 		ocs_log_debug(hw ? hw->os : NULL,
8370 			      "io not active xri=0x%x tag=0x%x\n",
8371 			      io->indicator, io->reqtag);
8372 		return;
8373 	}
8374 
8375 	sli_queue_lock(wq->queue);
8376 		index = q_info->quarantine_index;
8377 		free_io = q_info->quarantine_ios[index];
8378 		q_info->quarantine_ios[index] = io;
8379 		q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH;
8380 	sli_queue_unlock(wq->queue);
8381 
8382 	if (free_io != NULL) {
8383 		ocs_ref_put(&free_io->ref); /* ocs_ref_get(): same function */
8384 	}
8385 }
8386 
8387 /**
8388  * @brief Process entries on the given completion queue.
8389  *
8390  * @param hw Hardware context.
8391  * @param cq Pointer to the HW completion queue object.
8392  *
8393  * @return None.
8394  */
8395 void
8396 ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq)
8397 {
8398 	uint8_t		cqe[sizeof(sli4_mcqe_t)];
8399 	uint16_t	rid = UINT16_MAX;
8400 	sli4_qentry_e	ctype;		/* completion type */
8401 	int32_t		status;
8402 	uint32_t	n_processed = 0;
8403 	time_t		tstart;
8404 	time_t		telapsed;
8405 
8406 	tstart = ocs_msectime();
8407 
8408 	while (!sli_queue_read(&hw->sli, cq->queue, cqe)) {
8409 		status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
8410 		/*
8411 		 * The sign of status is significant. If status is:
8412 		 * == 0 : call completed correctly and the CQE indicated success
8413 		 *  > 0 : call completed correctly and the CQE indicated an error
8414 		 *  < 0 : call failed and no information is available about the CQE
8415 		 */
8416 		if (status < 0) {
8417 			if (status == -2) {
8418 				/* Notification that an entry was consumed, but not completed */
8419 				continue;
8420 			}
8421 
8422 			break;
8423 		}
8424 
8425 		switch (ctype) {
8426 		case SLI_QENTRY_ASYNC:
8427 			CPUTRACE("async");
8428 			sli_cqe_async(&hw->sli, cqe);
8429 			break;
8430 		case SLI_QENTRY_MQ:
8431 			/*
8432 			 * Process MQ entry. Note there is no way to determine
8433 			 * the MQ_ID from the completion entry.
8434 			 */
8435 			CPUTRACE("mq");
8436 			ocs_hw_mq_process(hw, status, hw->mq);
8437 			break;
8438 		case SLI_QENTRY_OPT_WRITE_CMD:
8439 			ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe);
8440 			break;
8441 		case SLI_QENTRY_OPT_WRITE_DATA:
8442 			ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe);
8443 			break;
8444 		case SLI_QENTRY_WQ:
8445 			CPUTRACE("wq");
8446 			ocs_hw_wq_process(hw, cq, cqe, status, rid);
8447 			break;
8448 		case SLI_QENTRY_WQ_RELEASE: {
8449 			uint32_t wq_id = rid;
8450 			int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id);
8451 
8452 			if (unlikely(index < 0)) {
8453 				ocs_log_err(hw->os, "unknown idx=%#x rid=%#x\n",
8454 					    index, rid);
8455 				break;
8456 			}
8457 
8458 			hw_wq_t *wq = hw->hw_wq[index];
8459 
8460 			/* Submit any HW IOs that are on the WQ pending list */
8461 			hw_wq_submit_pending(wq, wq->wqec_set_count);
8462 
8463 			break;
8464 		}
8465 
8466 		case SLI_QENTRY_RQ:
8467 			CPUTRACE("rq");
8468 			ocs_hw_rqpair_process_rq(hw, cq, cqe);
8469 			break;
8470 		case SLI_QENTRY_XABT: {
8471 			CPUTRACE("xabt");
8472 			ocs_hw_xabt_process(hw, cq, cqe, rid);
8473 			break;
8474 		}
8475 		default:
8476 			ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid);
8477 			break;
8478 		}
8479 
8480 		n_processed++;
8481 		if (n_processed == cq->queue->proc_limit) {
8482 			break;
8483 		}
8484 
8485 		if (cq->queue->n_posted >= (cq->queue->posted_limit)) {
8486 			sli_queue_arm(&hw->sli, cq->queue, FALSE);
8487 		}
8488 	}
8489 
8490 	sli_queue_arm(&hw->sli, cq->queue, TRUE);
8491 
8492 	if (n_processed > cq->queue->max_num_processed) {
8493 		cq->queue->max_num_processed = n_processed;
8494 	}
8495 	telapsed = ocs_msectime() - tstart;
8496 	if (telapsed > cq->queue->max_process_time) {
8497 		cq->queue->max_process_time = telapsed;
8498 	}
8499 }
8500 
8501 /**
8502  * @brief Process WQ completion queue entries.
8503  *
8504  * @param hw Hardware context.
8505  * @param cq Pointer to the HW completion queue object.
8506  * @param cqe Pointer to WQ completion queue.
8507  * @param status Completion status.
8508  * @param rid Resource ID (IO tag).
8509  *
8510  * @return none
8511  */
8512 void
8513 ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid)
8514 {
8515 	hw_wq_callback_t *wqcb;
8516 
8517 	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id,
8518 			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
8519 
8520 	if(rid == OCS_HW_REQUE_XRI_REGTAG) {
8521 		if(status) {
8522 			ocs_log_err(hw->os, "reque xri failed, status = %d \n", status);
8523 		}
8524 		return;
8525 	}
8526 
8527 	wqcb = ocs_hw_reqtag_get_instance(hw, rid);
8528 	if (wqcb == NULL) {
8529 		ocs_log_err(hw->os, "invalid request tag: x%x\n", rid);
8530 		return;
8531 	}
8532 
8533 	if (wqcb->callback == NULL) {
8534 		ocs_log_err(hw->os, "wqcb callback is NULL\n");
8535 		return;
8536 	}
8537 
8538 	(*wqcb->callback)(wqcb->arg, cqe, status);
8539 }
8540 
8541 /**
8542  * @brief Process WQ completions for IO requests
8543  *
8544  * @param arg Generic callback argument
8545  * @param cqe Pointer to completion queue entry
8546  * @param status Completion status
8547  *
8548  * @par Description
8549  * @n @b Note:  Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized
8550  * in ocs_hw_setup_io(), and don't need to be returned to the hw->wq_reqtag_pool.
8551  *
8552  * @return None.
8553  */
8554 static void
8555 ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status)
8556 {
8557 	ocs_hw_io_t *io = arg;
8558 	ocs_hw_t *hw = io->hw;
8559 	sli4_fc_wcqe_t *wcqe = (void *)cqe;
8560 	uint32_t	len = 0;
8561 	uint32_t ext = 0;
8562 	uint8_t out_of_order_axr_cmd = 0;
8563 	uint8_t out_of_order_axr_data = 0;
8564 	uint8_t lock_taken = 0;
8565 #if defined(OCS_DISC_SPIN_DELAY)
8566 	uint32_t delay = 0;
8567 	char prop_buf[32];
8568 #endif
8569 
8570 	/*
8571 	 * For the primary IO, this will also be used for the
8572 	 * response. So it is important to only set/clear this
8573 	 * flag on the first data phase of the IO because
8574 	 * subsequent phases will be done on the secondary XRI.
8575 	 */
8576 	if (io->quarantine && io->quarantine_first_phase) {
8577 		io->quarantine = (wcqe->qx == 1);
8578 		ocs_hw_io_quarantine(hw, io->wq, io);
8579 	}
8580 	io->quarantine_first_phase = FALSE;
8581 
8582 	/* BZ 161832 - free secondary HW IO */
8583 	if (io->sec_hio != NULL &&
8584 	    io->sec_hio->quarantine) {
8585 		/*
8586 		 * If the quarantine flag is set on the
8587 		 * IO, then set it on the secondary IO
8588 		 * based on the quarantine XRI (QX) bit
8589 		 * sent by the FW.
8590 		 */
8591 		io->sec_hio->quarantine = (wcqe->qx == 1);
8592 		/* use the primary io->wq because it is not set on the secondary IO. */
8593 		ocs_hw_io_quarantine(hw, io->wq, io->sec_hio);
8594 	}
8595 
8596 	ocs_hw_remove_io_timed_wqe(hw, io);
8597 
8598 	/* clear xbusy flag if WCQE[XB] is clear */
8599 	if (io->xbusy && wcqe->xb == 0) {
8600 		io->xbusy = FALSE;
8601 	}
8602 
8603 	/* get extended CQE status */
8604 	switch (io->type) {
8605 	case OCS_HW_BLS_ACC:
8606 	case OCS_HW_BLS_ACC_SID:
8607 		break;
8608 	case OCS_HW_ELS_REQ:
8609 		sli_fc_els_did(&hw->sli, cqe, &ext);
8610 		len = sli_fc_response_length(&hw->sli, cqe);
8611 		break;
8612 	case OCS_HW_ELS_RSP:
8613 	case OCS_HW_ELS_RSP_SID:
8614 	case OCS_HW_FC_CT_RSP:
8615 		break;
8616 	case OCS_HW_FC_CT:
8617 		len = sli_fc_response_length(&hw->sli, cqe);
8618 		break;
8619 	case OCS_HW_IO_TARGET_WRITE:
8620 		len = sli_fc_io_length(&hw->sli, cqe);
8621 #if defined(OCS_DISC_SPIN_DELAY)
8622 		if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
8623 			delay = ocs_strtoul(prop_buf, 0, 0);
8624 			ocs_udelay(delay);
8625 		}
8626 #endif
8627 		break;
8628 	case OCS_HW_IO_TARGET_READ:
8629 		len = sli_fc_io_length(&hw->sli, cqe);
8630 		/*
8631 		 * if_type == 2 seems to return 0 "total length placed" on
8632 		 * FCP_TSEND64_WQE completions. If this appears to happen,
8633 		 * use the CTIO data transfer length instead.
8634 		 */
8635 		if (hw->workaround.retain_tsend_io_length && !len && !status) {
8636 			len = io->length;
8637 		}
8638 
8639 		break;
8640 	case OCS_HW_IO_TARGET_RSP:
8641 		if(io->is_port_owned) {
8642 			ocs_lock(&io->axr_lock);
8643 			lock_taken = 1;
8644 			if(io->axr_buf->call_axr_cmd) {
8645 				out_of_order_axr_cmd = 1;
8646 			}
8647 			if(io->axr_buf->call_axr_data) {
8648 				out_of_order_axr_data = 1;
8649 			}
8650 		}
8651 		break;
8652 	case OCS_HW_IO_INITIATOR_READ:
8653 		len = sli_fc_io_length(&hw->sli, cqe);
8654 		break;
8655 	case OCS_HW_IO_INITIATOR_WRITE:
8656 		len = sli_fc_io_length(&hw->sli, cqe);
8657 		break;
8658 	case OCS_HW_IO_INITIATOR_NODATA:
8659 		break;
8660 	case OCS_HW_IO_DNRX_REQUEUE:
8661 		/* release the count for re-posting the buffer */
8662 		//ocs_hw_io_free(hw, io);
8663 		break;
8664 	default:
8665 		ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n",
8666 			     io->type, io->indicator);
8667 		break;
8668 	}
8669 	if (status) {
8670 		ext = sli_fc_ext_status(&hw->sli, cqe);
8671 		/* Emulate IAAB=0 for initiator WQEs only; i.e. automatically
8672 		 * abort exchange if an error occurred and exchange is still busy.
8673 		 */
8674 		if (hw->config.i_only_aab &&
8675 		    (ocs_hw_iotype_is_originator(io->type)) &&
8676 		    (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) {
8677 			ocs_hw_rtn_e rc;
8678 
8679 			ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
8680 				      io->indicator, io->reqtag);
8681 			/*
8682 			 * Because the initiator will not issue another IO phase, then it is OK to to issue the
8683 			 * callback on the abort completion, but for consistency with the target, wait for the
8684 			 * XRI_ABORTED CQE to issue the IO callback.
8685 			 */
8686 			rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL);
8687 
8688 			if (rc == OCS_HW_RTN_SUCCESS) {
8689 				/* latch status to return after abort is complete */
8690 				io->status_saved = 1;
8691 				io->saved_status = status;
8692 				io->saved_ext = ext;
8693 				io->saved_len = len;
8694 				goto exit_ocs_hw_wq_process_io;
8695 			} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8696 				/*
8697 				 * Already being aborted by someone else (ABTS
8698 				 * perhaps). Just fall through and return original
8699 				 * error.
8700 				 */
8701 				ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8702 					      io->indicator, io->reqtag);
8703 
8704 			} else {
8705 				/* Failed to abort for some other reason, log error */
8706 				ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8707 					     io->indicator, io->reqtag, rc);
8708 			}
8709 		}
8710 
8711 		/*
8712 		 * If we're not an originator IO, and XB is set, then issue abort for the IO from within the HW
8713 		 */
8714 		if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) {
8715 			ocs_hw_rtn_e rc;
8716 
8717 			ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag);
8718 
8719 			/*
8720 			 * Because targets may send a response when the IO completes using the same XRI, we must
8721 			 * wait for the XRI_ABORTED CQE to issue the IO callback
8722 			 */
8723 			rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
8724 			if (rc == OCS_HW_RTN_SUCCESS) {
8725 				/* latch status to return after abort is complete */
8726 				io->status_saved = 1;
8727 				io->saved_status = status;
8728 				io->saved_ext = ext;
8729 				io->saved_len = len;
8730 				goto exit_ocs_hw_wq_process_io;
8731 			} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8732 				/*
8733 				 * Already being aborted by someone else (ABTS
8734 				 * perhaps). Just fall through and return original
8735 				 * error.
8736 				 */
8737 				ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8738 					      io->indicator, io->reqtag);
8739 
8740 			} else {
8741 				/* Failed to abort for some other reason, log error */
8742 				ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8743 					     io->indicator, io->reqtag, rc);
8744 			}
8745 		}
8746 	}
8747 	/* BZ 161832 - free secondary HW IO */
8748 	if (io->sec_hio != NULL) {
8749 		ocs_hw_io_free(hw, io->sec_hio);
8750 		io->sec_hio = NULL;
8751 	}
8752 
8753 	if (io->done != NULL) {
8754 		ocs_hw_done_t  done = io->done;
8755 		void		*arg = io->arg;
8756 
8757 		io->done = NULL;
8758 
8759 		if (io->status_saved) {
8760 			/* use latched status if exists */
8761 			status = io->saved_status;
8762 			len = io->saved_len;
8763 			ext = io->saved_ext;
8764 			io->status_saved = 0;
8765 		}
8766 
8767 		/* Restore default SGL */
8768 		ocs_hw_io_restore_sgl(hw, io);
8769 		done(io, io->rnode, len, status, ext, arg);
8770 	}
8771 
8772 	if(out_of_order_axr_cmd) {
8773 		/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
8774 		if (hw->config.bounce) {
8775 			fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt;
8776 			uint32_t s_id = fc_be24toh(hdr->s_id);
8777 			uint32_t d_id = fc_be24toh(hdr->d_id);
8778 			uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
8779 			if (hw->callback.bounce != NULL) {
8780 				(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id);
8781 			}
8782 		}else {
8783 			hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq);
8784 		}
8785 
8786 		if(out_of_order_axr_data) {
8787 			/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
8788 			if (hw->config.bounce) {
8789 				fc_header_t *hdr = io->axr_buf->seq.header->dma.virt;
8790 				uint32_t s_id = fc_be24toh(hdr->s_id);
8791 				uint32_t d_id = fc_be24toh(hdr->d_id);
8792 				uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
8793 				if (hw->callback.bounce != NULL) {
8794 					(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id);
8795 				}
8796 			}else {
8797 				hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq);
8798 			}
8799 		}
8800 	}
8801 
8802 exit_ocs_hw_wq_process_io:
8803 	if(lock_taken) {
8804 		ocs_unlock(&io->axr_lock);
8805 	}
8806 }
8807 
8808 /**
8809  * @brief Process WQ completions for abort requests.
8810  *
8811  * @param arg Generic callback argument.
8812  * @param cqe Pointer to completion queue entry.
8813  * @param status Completion status.
8814  *
8815  * @return None.
8816  */
8817 static void
8818 ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
8819 {
8820 	ocs_hw_io_t *io = arg;
8821 	ocs_hw_t *hw = io->hw;
8822 	uint32_t ext = 0;
8823 	uint32_t len = 0;
8824 	hw_wq_callback_t *wqcb;
8825 
8826 	/*
8827 	 * For IOs that were aborted internally, we may need to issue the callback here depending
8828 	 * on whether a XRI_ABORTED CQE is expected ot not. If the status is Local Reject/No XRI, then
8829 	 * issue the callback now.
8830 	*/
8831 	ext = sli_fc_ext_status(&hw->sli, cqe);
8832 	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
8833 	    ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
8834 		io->done != NULL) {
8835 		ocs_hw_done_t  done = io->done;
8836 		void		*arg = io->arg;
8837 
8838 		io->done = NULL;
8839 
8840 		/*
8841 		 * Use latched status as this is always saved for an internal abort
8842 		 *
8843 		 * Note: We wont have both a done and abort_done function, so don't worry about
8844 		 *       clobbering the len, status and ext fields.
8845 		 */
8846 		status = io->saved_status;
8847 		len = io->saved_len;
8848 		ext = io->saved_ext;
8849 		io->status_saved = 0;
8850 		done(io, io->rnode, len, status, ext, arg);
8851 	}
8852 
8853 	if (io->abort_done != NULL) {
8854 		ocs_hw_done_t  done = io->abort_done;
8855 		void		*arg = io->abort_arg;
8856 
8857 		io->abort_done = NULL;
8858 
8859 		done(io, io->rnode, len, status, ext, arg);
8860 	}
8861 	ocs_lock(&hw->io_abort_lock);
8862 		/* clear abort bit to indicate abort is complete */
8863 		io->abort_in_progress = 0;
8864 	ocs_unlock(&hw->io_abort_lock);
8865 
8866 	/* Free the WQ callback */
8867 	ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
8868 	wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
8869 	ocs_hw_reqtag_free(hw, wqcb);
8870 
8871 	/*
8872 	 * Call ocs_hw_io_free() because this releases the WQ reservation as
8873 	 * well as doing the refcount put. Don't duplicate the code here.
8874 	 */
8875 	(void)ocs_hw_io_free(hw, io);
8876 }
8877 
8878 /**
8879  * @brief Process XABT completions
8880  *
8881  * @param hw Hardware context.
8882  * @param cq Pointer to the HW completion queue object.
8883  * @param cqe Pointer to WQ completion queue.
8884  * @param rid Resource ID (IO tag).
8885  *
8886  *
8887  * @return None.
8888  */
8889 void
8890 ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid)
8891 {
8892 	/* search IOs wait free list */
8893 	ocs_hw_io_t *io = NULL;
8894 
8895 	io = ocs_hw_io_lookup(hw, rid);
8896 
8897 	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
8898 			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
8899 	if (io == NULL) {
8900 		/* IO lookup failure should never happen */
8901 		ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid);
8902 		return;
8903 	}
8904 
8905 	if (!io->xbusy) {
8906 		ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
8907 	} else {
8908 		/* mark IO as no longer busy */
8909 		io->xbusy = FALSE;
8910 	}
8911 
8912        if (io->is_port_owned) {
8913                ocs_lock(&hw->io_lock);
8914                /* Take reference so that below callback will not free io before reque */
8915                ocs_ref_get(&io->ref);
8916                ocs_unlock(&hw->io_lock);
8917        }
8918 
8919 	/* For IOs that were aborted internally, we need to issue any pending callback here. */
8920 	if (io->done != NULL) {
8921 		ocs_hw_done_t  done = io->done;
8922 		void		*arg = io->arg;
8923 
8924 		/* Use latched status as this is always saved for an internal abort */
8925 		int32_t status = io->saved_status;
8926 		uint32_t len = io->saved_len;
8927 		uint32_t ext = io->saved_ext;
8928 
8929 		io->done = NULL;
8930 		io->status_saved = 0;
8931 
8932 		done(io, io->rnode, len, status, ext, arg);
8933 	}
8934 
8935 	/* Check to see if this is a port owned XRI */
8936 	if (io->is_port_owned) {
8937 		ocs_lock(&hw->io_lock);
8938 		ocs_hw_reque_xri(hw, io);
8939 		ocs_unlock(&hw->io_lock);
8940 		/* Not hanlding reque xri completion, free io */
8941 		ocs_hw_io_free(hw, io);
8942 		return;
8943 	}
8944 
8945 	ocs_lock(&hw->io_lock);
8946 		if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) {
8947 			/* if on wait_free list, caller has already freed IO;
8948 			 * remove from wait_free list and add to free list.
8949 			 * if on in-use list, already marked as no longer busy;
8950 			 * just leave there and wait for caller to free.
8951 			 */
8952 			if (io->state == OCS_HW_IO_STATE_WAIT_FREE) {
8953 				io->state = OCS_HW_IO_STATE_FREE;
8954 				ocs_list_remove(&hw->io_wait_free, io);
8955 				ocs_hw_io_free_move_correct_list(hw, io);
8956 			}
8957 		}
8958 	ocs_unlock(&hw->io_lock);
8959 }
8960 
8961 /**
8962  * @brief Adjust the number of WQs and CQs within the HW.
8963  *
8964  * @par Description
8965  * Calculates the number of WQs and associated CQs needed in the HW based on
8966  * the number of IOs. Calculates the starting CQ index for each WQ, RQ and
8967  * MQ.
8968  *
8969  * @param hw Hardware context allocated by the caller.
8970  */
8971 static void
8972 ocs_hw_adjust_wqs(ocs_hw_t *hw)
8973 {
8974 	uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ);
8975 	uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ];
8976 	uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ];
8977 
8978 	/*
8979 	 * possibly adjust the the size of the WQs so that the CQ is twice as
8980 	 * big as the WQ to allow for 2 completions per IO. This allows us to
8981 	 * handle multi-phase as well as aborts.
8982 	 */
8983 	if (max_cq_entries < max_wq_entries * 2) {
8984 		max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2;
8985 	}
8986 
8987 	/*
8988 	 * Calculate the number of WQs to use base on the number of IOs.
8989 	 *
8990 	 * Note: We need to reserve room for aborts which must be sent down
8991 	 *       the same WQ as the IO. So we allocate enough WQ space to
8992 	 *       handle 2 times the number of IOs. Half of the space will be
8993 	 *       used for normal IOs and the other hwf is reserved for aborts.
8994 	 */
8995 	hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries;
8996 
8997 	/*
8998 	 * For performance reasons, it is best to use use a minimum of 4 WQs
8999 	 * for BE3 and Skyhawk.
9000 	 */
9001 	if (hw->config.n_wq < 4 &&
9002 	    SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
9003 		hw->config.n_wq = 4;
9004 	}
9005 
9006 	/*
9007 	 * For dual-chute support, we need to have at least one WQ per chute.
9008 	 */
9009 	if (hw->config.n_wq < 2 &&
9010 	    ocs_hw_get_num_chutes(hw) > 1) {
9011 		hw->config.n_wq = 2;
9012 	}
9013 
9014 	/* make sure we haven't exceeded the max supported in the HW */
9015 	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
9016 		hw->config.n_wq = OCS_HW_MAX_NUM_WQ;
9017 	}
9018 
9019 	/* make sure we haven't exceeded the chip maximum */
9020 	if (hw->config.n_wq > max_wq_num) {
9021 		hw->config.n_wq = max_wq_num;
9022 	}
9023 
9024 	/*
9025 	 * Using Queue Topology string, we divide by number of chutes
9026 	 */
9027 	hw->config.n_wq /= ocs_hw_get_num_chutes(hw);
9028 }
9029 
9030 static int32_t
9031 ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size)
9032 {
9033 	ocs_command_ctx_t *ctx = NULL;
9034 
9035 	ocs_lock(&hw->cmd_lock);
9036 		if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) {
9037 			ocs_log_err(hw->os, "XXX no command context?!?\n");
9038 			ocs_unlock(&hw->cmd_lock);
9039 			return -1;
9040 		}
9041 
9042 		hw->cmd_head_count--;
9043 
9044 		/* Post any pending requests */
9045 		ocs_hw_cmd_submit_pending(hw);
9046 
9047 	ocs_unlock(&hw->cmd_lock);
9048 
9049 	if (ctx->cb) {
9050 		if (ctx->buf) {
9051 			ocs_memcpy(ctx->buf, mqe, size);
9052 		}
9053 		ctx->cb(hw, status, ctx->buf, ctx->arg);
9054 	}
9055 
9056 	ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t));
9057 	ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t));
9058 
9059 	return 0;
9060 }
9061 
9062 /**
9063  * @brief Process entries on the given mailbox queue.
9064  *
9065  * @param hw Hardware context.
9066  * @param status CQE status.
9067  * @param mq Pointer to the mailbox queue object.
9068  *
9069  * @return Returns 0 on success, or a non-zero value on failure.
9070  */
9071 static int32_t
9072 ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq)
9073 {
9074 	uint8_t		mqe[SLI4_BMBX_SIZE];
9075 
9076 	if (!sli_queue_read(&hw->sli, mq, mqe)) {
9077 		ocs_hw_command_process(hw, status, mqe, mq->size);
9078 	}
9079 
9080 	return 0;
9081 }
9082 
9083 /**
9084  * @brief Read a FCF table entry.
9085  *
9086  * @param hw Hardware context.
9087  * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first
9088  * read and the next_index field from the FCOE_READ_FCF_TABLE command
9089  * for subsequent reads.
9090  *
9091  * @return Returns 0 on success, or a non-zero value on failure.
9092  */
9093 static ocs_hw_rtn_e
9094 ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index)
9095 {
9096 	uint8_t		*buf = NULL;
9097 	int32_t		rc = OCS_HW_RTN_ERROR;
9098 
9099 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9100 	if (!buf) {
9101 		ocs_log_err(hw->os, "no buffer for command\n");
9102 		return OCS_HW_RTN_NO_MEMORY;
9103 	}
9104 
9105 	if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem,
9106 			index)) {
9107 		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem);
9108 	}
9109 
9110 	if (rc != OCS_HW_RTN_SUCCESS) {
9111 		ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n");
9112 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9113 	}
9114 
9115 	return rc;
9116 }
9117 
9118 /**
9119  * @brief Callback function for the FCOE_READ_FCF_TABLE command.
9120  *
9121  * @par Description
9122  * Note that the caller has allocated:
9123  *  - DMA memory to hold the table contents
9124  *  - DMA memory structure
9125  *  - Command/results buffer
9126  *  .
9127  * Each of these must be freed here.
9128  *
9129  * @param hw Hardware context.
9130  * @param status Hardware status.
9131  * @param mqe Pointer to the mailbox command/results buffer.
9132  * @param arg Pointer to the DMA memory structure.
9133  *
9134  * @return Returns 0 on success, or a non-zero value on failure.
9135  */
9136 static int32_t
9137 ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9138 {
9139 	ocs_dma_t	*dma = arg;
9140 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9141 
9142 	if (status || hdr->status) {
9143 		ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n",
9144 				status, hdr->status);
9145 	} else if (dma->virt) {
9146 		sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt;
9147 
9148 		/* if FC or FCOE and FCF entry valid, process it */
9149 		if (read_fcf->fcf_entry.fc ||
9150 				(read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) {
9151 			if (hw->callback.domain != NULL) {
9152 				ocs_domain_record_t drec = {0};
9153 
9154 				if (read_fcf->fcf_entry.fc) {
9155 					/*
9156 					 * This is a pseudo FCF entry. Create a domain
9157 					 * record based on the read topology information
9158 					 */
9159 					drec.speed = hw->link.speed;
9160 					drec.fc_id = hw->link.fc_id;
9161 					drec.is_fc = TRUE;
9162 					if (SLI_LINK_TOPO_LOOP == hw->link.topology) {
9163 						drec.is_loop = TRUE;
9164 						ocs_memcpy(drec.map.loop, hw->link.loop_map,
9165 							   sizeof(drec.map.loop));
9166 					} else if (SLI_LINK_TOPO_NPORT == hw->link.topology) {
9167 						drec.is_nport = TRUE;
9168 					}
9169 				} else {
9170 					drec.index = read_fcf->fcf_entry.fcf_index;
9171 					drec.priority = read_fcf->fcf_entry.fip_priority;
9172 
9173 					/* copy address, wwn and vlan_bitmap */
9174 					ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address,
9175 						   sizeof(drec.address));
9176 					ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id,
9177 						   sizeof(drec.wwn));
9178 					ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap,
9179 						   sizeof(drec.map.vlan));
9180 
9181 					drec.is_ethernet = TRUE;
9182 					drec.is_nport = TRUE;
9183 				}
9184 
9185 				hw->callback.domain(hw->args.domain,
9186 						OCS_HW_DOMAIN_FOUND,
9187 						&drec);
9188 			}
9189 		} else {
9190 			/* if FCOE and FCF is not valid, ignore it */
9191 			ocs_log_test(hw->os, "ignore invalid FCF entry\n");
9192 		}
9193 
9194 		if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) {
9195 			ocs_hw_read_fcf(hw, read_fcf->next_index);
9196 		}
9197 	}
9198 
9199 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9200 	//ocs_dma_free(hw->os, dma);
9201 	//ocs_free(hw->os, dma, sizeof(ocs_dma_t));
9202 
9203 	return 0;
9204 }
9205 
9206 /**
9207  * @brief Callback function for the SLI link events.
9208  *
9209  * @par Description
9210  * This function allocates memory which must be freed in its callback.
9211  *
9212  * @param ctx Hardware context pointer (that is, ocs_hw_t *).
9213  * @param e Event structure pointer (that is, sli4_link_event_t *).
9214  *
9215  * @return Returns 0 on success, or a non-zero value on failure.
9216  */
9217 static int32_t
9218 ocs_hw_cb_link(void *ctx, void *e)
9219 {
9220 	ocs_hw_t	*hw = ctx;
9221 	sli4_link_event_t *event = e;
9222 	ocs_domain_t	*d = NULL;
9223 	uint32_t	i = 0;
9224 	int32_t		rc = OCS_HW_RTN_ERROR;
9225 	ocs_t 		*ocs = hw->os;
9226 
9227 	ocs_hw_link_event_init(hw);
9228 
9229 	switch (event->status) {
9230 	case SLI_LINK_STATUS_UP:
9231 
9232 		hw->link = *event;
9233 
9234 		if (SLI_LINK_TOPO_NPORT == event->topology) {
9235 			device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed);
9236 			ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9237 		} else if (SLI_LINK_TOPO_LOOP == event->topology) {
9238 			uint8_t	*buf = NULL;
9239 			device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed);
9240 
9241 			buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9242 			if (!buf) {
9243 				ocs_log_err(hw->os, "no buffer for command\n");
9244 				break;
9245 			}
9246 
9247 			if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) {
9248 				rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL);
9249 			}
9250 
9251 			if (rc != OCS_HW_RTN_SUCCESS) {
9252 				ocs_log_test(hw->os, "READ_TOPOLOGY failed\n");
9253 				ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9254 			}
9255 		} else {
9256 			device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n",
9257 					event->topology, event->speed);
9258 		}
9259 		break;
9260 	case SLI_LINK_STATUS_DOWN:
9261 		device_printf(ocs->dev, "Link Down\n");
9262 
9263 		hw->link.status = event->status;
9264 
9265 		for (i = 0; i < SLI4_MAX_FCFI; i++) {
9266 			d = hw->domains[i];
9267 			if (d != NULL &&
9268 			    hw->callback.domain != NULL) {
9269 				hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d);
9270 			}
9271 		}
9272 		break;
9273 	default:
9274 		ocs_log_test(hw->os, "unhandled link status %#x\n", event->status);
9275 		break;
9276 	}
9277 
9278 	return 0;
9279 }
9280 
9281 static int32_t
9282 ocs_hw_cb_fip(void *ctx, void *e)
9283 {
9284 	ocs_hw_t	*hw = ctx;
9285 	ocs_domain_t	*domain = NULL;
9286 	sli4_fip_event_t *event = e;
9287 
9288 	ocs_hw_assert(event);
9289 	ocs_hw_assert(hw);
9290 
9291 	/* Find the associated domain object */
9292 	if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) {
9293 		ocs_domain_t *d = NULL;
9294 		uint32_t	i = 0;
9295 
9296 		/* Clear VLINK is different from the other FIP events as it passes back
9297 		 * a VPI instead of a FCF index. Check all attached SLI ports for a
9298 		 * matching VPI */
9299 		for (i = 0; i < SLI4_MAX_FCFI; i++) {
9300 			d = hw->domains[i];
9301 			if (d != NULL) {
9302 				ocs_sport_t	*sport = NULL;
9303 
9304 				ocs_list_foreach(&d->sport_list, sport) {
9305 					if (sport->indicator == event->index) {
9306 						domain = d;
9307 						break;
9308 					}
9309 				}
9310 
9311 				if (domain != NULL) {
9312 					break;
9313 				}
9314 			}
9315 		}
9316 	} else {
9317 		domain = ocs_hw_domain_get_indexed(hw, event->index);
9318 	}
9319 
9320 	switch (event->type) {
9321 	case SLI4_FCOE_FIP_FCF_DISCOVERED:
9322 		ocs_hw_read_fcf(hw, event->index);
9323 		break;
9324 	case SLI4_FCOE_FIP_FCF_DEAD:
9325 		if (domain != NULL &&
9326 		    hw->callback.domain != NULL) {
9327 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9328 		}
9329 		break;
9330 	case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
9331 		if (domain != NULL &&
9332 		    hw->callback.domain != NULL) {
9333 			/*
9334 			 * We will want to issue rediscover FCF when this domain is free'd  in order
9335 			 * to invalidate the FCF table
9336 			 */
9337 			domain->req_rediscover_fcf = TRUE;
9338 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9339 		}
9340 		break;
9341 	case SLI4_FCOE_FIP_FCF_MODIFIED:
9342 		if (domain != NULL &&
9343 		    hw->callback.domain != NULL) {
9344 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9345 		}
9346 
9347 		ocs_hw_read_fcf(hw, event->index);
9348 		break;
9349 	default:
9350 		ocs_log_test(hw->os, "unsupported event %#x\n", event->type);
9351 	}
9352 
9353 	return 0;
9354 }
9355 
9356 static int32_t
9357 ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9358 {
9359 	ocs_remote_node_t *rnode = arg;
9360 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9361 	ocs_hw_remote_node_event_e	evt = 0;
9362 
9363 	if (status || hdr->status) {
9364 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9365 				hdr->status);
9366 		ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
9367 		rnode->attached = FALSE;
9368 		ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9369 		evt = OCS_HW_NODE_ATTACH_FAIL;
9370 	} else {
9371 		rnode->attached = TRUE;
9372 		ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1);
9373 		evt = OCS_HW_NODE_ATTACH_OK;
9374 	}
9375 
9376 	if (hw->callback.rnode != NULL) {
9377 		hw->callback.rnode(hw->args.rnode, evt, rnode);
9378 	}
9379 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9380 
9381 	return 0;
9382 }
9383 
9384 static int32_t
9385 ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9386 {
9387 	ocs_remote_node_t *rnode = arg;
9388 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9389 	ocs_hw_remote_node_event_e	evt = OCS_HW_NODE_FREE_FAIL;
9390 	int32_t		rc = 0;
9391 
9392 	if (status || hdr->status) {
9393 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9394 				hdr->status);
9395 
9396 		/*
9397 		 * In certain cases, a non-zero MQE status is OK (all must be true):
9398 		 *   - node is attached
9399 		 *   - if High Login Mode is enabled, node is part of a node group
9400 		 *   - status is 0x1400
9401 		 */
9402 		if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) ||
9403 				(hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) {
9404 			rc = -1;
9405 		}
9406 	}
9407 
9408 	if (rc == 0) {
9409 		rnode->node_group = FALSE;
9410 		rnode->attached = FALSE;
9411 
9412 		if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) {
9413 			ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9414 		}
9415 
9416 		evt = OCS_HW_NODE_FREE_OK;
9417 	}
9418 
9419 	if (hw->callback.rnode != NULL) {
9420 		hw->callback.rnode(hw->args.rnode, evt, rnode);
9421 	}
9422 
9423 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9424 
9425 	return rc;
9426 }
9427 
9428 static int32_t
9429 ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9430 {
9431 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9432 	ocs_hw_remote_node_event_e	evt = OCS_HW_NODE_FREE_FAIL;
9433 	int32_t		rc = 0;
9434 	uint32_t	i;
9435 
9436 	if (status || hdr->status) {
9437 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9438 				hdr->status);
9439 	} else {
9440 		evt = OCS_HW_NODE_FREE_ALL_OK;
9441 	}
9442 
9443 	if (evt == OCS_HW_NODE_FREE_ALL_OK) {
9444 		for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) {
9445 			ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0);
9446 		}
9447 
9448 		if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) {
9449 			ocs_log_test(hw->os, "FCOE_RPI free all failure\n");
9450 			rc = -1;
9451 		}
9452 	}
9453 
9454 	if (hw->callback.rnode != NULL) {
9455 		hw->callback.rnode(hw->args.rnode, evt, NULL);
9456 	}
9457 
9458 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9459 
9460 	return rc;
9461 }
9462 
9463 /**
9464  * @brief Initialize the pool of HW IO objects.
9465  *
9466  * @param hw Hardware context.
9467  *
9468  * @return Returns 0 on success, or a non-zero value on failure.
9469  */
9470 static ocs_hw_rtn_e
9471 ocs_hw_setup_io(ocs_hw_t *hw)
9472 {
9473 	uint32_t	i = 0;
9474 	ocs_hw_io_t	*io = NULL;
9475 	uintptr_t	xfer_virt = 0;
9476 	uintptr_t	xfer_phys = 0;
9477 	uint32_t	index;
9478 	uint8_t		new_alloc = TRUE;
9479 
9480 	if (NULL == hw->io) {
9481 		hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT);
9482 
9483 		if (NULL == hw->io) {
9484 			ocs_log_err(hw->os, "IO pointer memory allocation failed, %d Ios at size %zu\n",
9485 				    hw->config.n_io,
9486 				    sizeof(ocs_hw_io_t *));
9487 			return OCS_HW_RTN_NO_MEMORY;
9488 		}
9489 		for (i = 0; i < hw->config.n_io; i++) {
9490 			hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t),
9491 						OCS_M_ZERO | OCS_M_NOWAIT);
9492 			if (hw->io[i] == NULL) {
9493 				ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i);
9494 				goto error;
9495 			}
9496 		}
9497 
9498 		/* Create WQE buffs for IO */
9499 		hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size,
9500 				OCS_M_ZERO | OCS_M_NOWAIT);
9501 		if (NULL == hw->wqe_buffs) {
9502 			ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t));
9503 			ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d Ios at size %zu\n",
9504 					__func__, hw->config.n_io, hw->sli.config.wqe_size);
9505 			return OCS_HW_RTN_NO_MEMORY;
9506 		}
9507 
9508 	} else {
9509 		/* re-use existing IOs, including SGLs */
9510 		new_alloc = FALSE;
9511 	}
9512 
9513 	if (new_alloc) {
9514 		if (ocs_dma_alloc(hw->os, &hw->xfer_rdy,
9515 					sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io,
9516 					4/*XXX what does this need to be? */)) {
9517 			ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n");
9518 			return OCS_HW_RTN_NO_MEMORY;
9519 		}
9520 	}
9521 	xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
9522 	xfer_phys = hw->xfer_rdy.phys;
9523 
9524 	for (i = 0; i < hw->config.n_io; i++) {
9525 		hw_wq_callback_t *wqcb;
9526 
9527 		io = hw->io[i];
9528 
9529 		/* initialize IO fields */
9530 		io->hw = hw;
9531 
9532 		/* Assign a WQE buff */
9533 		io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size];
9534 
9535 		/* Allocate the request tag for this IO */
9536 		wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io);
9537 		if (wqcb == NULL) {
9538 			ocs_log_err(hw->os, "can't allocate request tag\n");
9539 			return OCS_HW_RTN_NO_RESOURCES;
9540 		}
9541 		io->reqtag = wqcb->instance_index;
9542 
9543 		/* Now for the fields that are initialized on each free */
9544 		ocs_hw_init_free_io(io);
9545 
9546 		/* The XB flag isn't cleared on IO free, so initialize it to zero here */
9547 		io->xbusy = 0;
9548 
9549 		if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) {
9550 			ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i);
9551 			return OCS_HW_RTN_NO_MEMORY;
9552 		}
9553 
9554 		if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) {
9555 			ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i);
9556 			ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t));
9557 			return OCS_HW_RTN_NO_MEMORY;
9558 		}
9559 		io->def_sgl_count = hw->config.n_sgl;
9560 		io->sgl = &io->def_sgl;
9561 		io->sgl_count = io->def_sgl_count;
9562 
9563 		if (hw->xfer_rdy.size) {
9564 			io->xfer_rdy.virt = (void *)xfer_virt;
9565 			io->xfer_rdy.phys = xfer_phys;
9566 			io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t);
9567 
9568 			xfer_virt += sizeof(fcp_xfer_rdy_iu_t);
9569 			xfer_phys += sizeof(fcp_xfer_rdy_iu_t);
9570 		}
9571 	}
9572 
9573 	return OCS_HW_RTN_SUCCESS;
9574 error:
9575 	for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
9576 		ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
9577 		hw->io[i] = NULL;
9578 	}
9579 
9580 	return OCS_HW_RTN_NO_MEMORY;
9581 }
9582 
9583 static ocs_hw_rtn_e
9584 ocs_hw_init_io(ocs_hw_t *hw)
9585 {
9586 	uint32_t        i = 0, io_index = 0;
9587 	uint32_t        prereg = 0;
9588 	ocs_hw_io_t	*io = NULL;
9589 	uint8_t		cmd[SLI4_BMBX_SIZE];
9590 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
9591 	uint32_t	nremaining;
9592 	uint32_t	n = 0;
9593 	uint32_t	sgls_per_request = 256;
9594 	ocs_dma_t	**sgls = NULL;
9595 	ocs_dma_t	reqbuf = { 0 };
9596 
9597 	prereg = sli_get_sgl_preregister(&hw->sli);
9598 
9599 	if (prereg) {
9600 		sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT);
9601 		if (sgls == NULL) {
9602 			ocs_log_err(hw->os, "ocs_malloc sgls failed\n");
9603 			return OCS_HW_RTN_NO_MEMORY;
9604 		}
9605 
9606 		rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT);
9607 		if (rc) {
9608 			ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n");
9609 			ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9610 			return OCS_HW_RTN_NO_MEMORY;
9611 		}
9612 	}
9613 
9614 	io = hw->io[io_index];
9615 	for (nremaining = hw->config.n_io; nremaining; nremaining -= n) {
9616 		if (prereg) {
9617 			/* Copy address of SGL's into local sgls[] array, break out if the xri
9618 			 * is not contiguous.
9619 			 */
9620 			for (n = 0; n < MIN(sgls_per_request, nremaining); n++) {
9621 				/* Check that we have contiguous xri values */
9622 				if (n > 0) {
9623 					if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) {
9624 						break;
9625 					}
9626 				}
9627 				sgls[n] = hw->io[io_index + n]->sgl;
9628 			}
9629 
9630 			if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd),
9631 						io->indicator, n, sgls, NULL, &reqbuf)) {
9632 				if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) {
9633 					rc = OCS_HW_RTN_ERROR;
9634 					ocs_log_err(hw->os, "SGL post failed\n");
9635 					break;
9636 				}
9637 			}
9638 		} else {
9639 			n = nremaining;
9640 		}
9641 
9642 		/* Add to tail if successful */
9643 		for (i = 0; i < n; i ++) {
9644 			io->is_port_owned = 0;
9645 			io->state = OCS_HW_IO_STATE_FREE;
9646 			ocs_list_add_tail(&hw->io_free, io);
9647 			io = hw->io[io_index+1];
9648 			io_index++;
9649 		}
9650 	}
9651 
9652 	if (prereg) {
9653 		ocs_dma_free(hw->os, &reqbuf);
9654 		ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9655 	}
9656 
9657 	return rc;
9658 }
9659 
9660 static int32_t
9661 ocs_hw_flush(ocs_hw_t *hw)
9662 {
9663 	uint32_t	i = 0;
9664 
9665 	/* Process any remaining completions */
9666 	for (i = 0; i < hw->eq_count; i++) {
9667 		ocs_hw_process(hw, i, ~0);
9668 	}
9669 
9670 	return 0;
9671 }
9672 
9673 static int32_t
9674 ocs_hw_command_cancel(ocs_hw_t *hw)
9675 {
9676 
9677 	ocs_lock(&hw->cmd_lock);
9678 
9679 	/*
9680 	 * Manually clean up remaining commands. Note: since this calls
9681 	 * ocs_hw_command_process(), we'll also process the cmd_pending
9682 	 * list, so no need to manually clean that out.
9683 	 */
9684 	while (!ocs_list_empty(&hw->cmd_head)) {
9685 		uint8_t		mqe[SLI4_BMBX_SIZE] = { 0 };
9686 		ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head);
9687 
9688 		ocs_log_test(hw->os, "hung command %08x\n",
9689 				NULL == ctx ? UINT32_MAX :
9690 				(NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf)));
9691 		ocs_unlock(&hw->cmd_lock);
9692 		ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE);
9693 		ocs_lock(&hw->cmd_lock);
9694 	}
9695 
9696 	ocs_unlock(&hw->cmd_lock);
9697 
9698 	return 0;
9699 }
9700 
9701 /**
9702  * @brief Find IO given indicator (xri).
9703  *
9704  * @param hw Hal context.
9705  * @param indicator Indicator (xri) to look for.
9706  *
9707  * @return Returns io if found, NULL otherwise.
9708  */
9709 ocs_hw_io_t *
9710 ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri)
9711 {
9712 	uint32_t ioindex;
9713 	ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0];
9714 	return hw->io[ioindex];
9715 }
9716 
9717 /**
9718  * @brief Issue any pending callbacks for an IO and remove off the timer and pending lists.
9719  *
9720  * @param hw Hal context.
9721  * @param io Pointer to the IO to cleanup.
9722  */
9723 static void
9724 ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io)
9725 {
9726 	ocs_hw_done_t  done = io->done;
9727 	ocs_hw_done_t  abort_done = io->abort_done;
9728 
9729 	/* first check active_wqe list and remove if there */
9730 	if (ocs_list_on_list(&io->wqe_link)) {
9731 		ocs_list_remove(&hw->io_timed_wqe, io);
9732 	}
9733 
9734 	/* Remove from WQ pending list */
9735 	if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) {
9736 		ocs_list_remove(&io->wq->pending_list, io);
9737 	}
9738 
9739 	if (io->done) {
9740 		void		*arg = io->arg;
9741 
9742 		io->done = NULL;
9743 		ocs_unlock(&hw->io_lock);
9744 		done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg);
9745 		ocs_lock(&hw->io_lock);
9746 	}
9747 
9748 	if (io->abort_done != NULL) {
9749 		void		*abort_arg = io->abort_arg;
9750 
9751 		io->abort_done = NULL;
9752 		ocs_unlock(&hw->io_lock);
9753 		abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg);
9754 		ocs_lock(&hw->io_lock);
9755 	}
9756 }
9757 
9758 static int32_t
9759 ocs_hw_io_cancel(ocs_hw_t *hw)
9760 {
9761 	ocs_hw_io_t	*io = NULL;
9762 	ocs_hw_io_t	*tmp_io = NULL;
9763 	uint32_t	iters = 100; /* One second limit */
9764 
9765 	/*
9766 	 * Manually clean up outstanding IO.
9767 	 * Only walk through list once: the backend will cleanup any IOs when done/abort_done is called.
9768 	 */
9769 	ocs_lock(&hw->io_lock);
9770 	ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) {
9771 		ocs_hw_done_t  done = io->done;
9772 		ocs_hw_done_t  abort_done = io->abort_done;
9773 
9774 		ocs_hw_io_cancel_cleanup(hw, io);
9775 
9776 		/*
9777 		 * Since this is called in a reset/shutdown
9778 		 * case, If there is no callback, then just
9779 		 * free the IO.
9780 		 *
9781 		 * Note: A port owned XRI cannot be on
9782 		 *       the in use list. We cannot call
9783 		 *       ocs_hw_io_free() because we already
9784 		 *       hold the io_lock.
9785 		 */
9786 		if (done == NULL &&
9787 		    abort_done == NULL) {
9788 			/*
9789 			 * Since this is called in a reset/shutdown
9790 			 * case, If there is no callback, then just
9791 			 * free the IO.
9792 			 */
9793 			ocs_hw_io_free_common(hw, io);
9794 			ocs_list_remove(&hw->io_inuse, io);
9795 			ocs_hw_io_free_move_correct_list(hw, io);
9796 		}
9797 	}
9798 
9799 	/*
9800 	 * For port owned XRIs, they are not on the in use list, so
9801 	 * walk though XRIs and issue any callbacks.
9802 	 */
9803 	ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) {
9804 		/* check  list and remove if there */
9805 		if (ocs_list_on_list(&io->dnrx_link)) {
9806 			ocs_list_remove(&hw->io_port_dnrx, io);
9807 			ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
9808 		}
9809 		ocs_hw_io_cancel_cleanup(hw, io);
9810 		ocs_list_remove(&hw->io_port_owned, io);
9811 		ocs_hw_io_free_common(hw, io);
9812 	}
9813 	ocs_unlock(&hw->io_lock);
9814 
9815 	/* Give time for the callbacks to complete */
9816 	do {
9817 		ocs_udelay(10000);
9818 		iters--;
9819 	} while (!ocs_list_empty(&hw->io_inuse) && iters);
9820 
9821 	/* Leave a breadcrumb that cleanup is not yet complete. */
9822 	if (!ocs_list_empty(&hw->io_inuse)) {
9823 		ocs_log_test(hw->os, "io_inuse list is not empty\n");
9824 	}
9825 
9826 	return 0;
9827 }
9828 
9829 static int32_t
9830 ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size,
9831 		ocs_dma_t *rsp)
9832 {
9833 	sli4_sge_t	*data = NULL;
9834 
9835 	if (!hw || !io) {
9836 		ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io);
9837 		return OCS_HW_RTN_ERROR;
9838 	}
9839 
9840 	data = io->def_sgl.virt;
9841 
9842 	/* setup command pointer */
9843 	data->buffer_address_high = ocs_addr32_hi(cmnd->phys);
9844 	data->buffer_address_low  = ocs_addr32_lo(cmnd->phys);
9845 	data->buffer_length = cmnd_size;
9846 	data++;
9847 
9848 	/* setup response pointer */
9849 	data->buffer_address_high = ocs_addr32_hi(rsp->phys);
9850 	data->buffer_address_low  = ocs_addr32_lo(rsp->phys);
9851 	data->buffer_length = rsp->size;
9852 
9853 	return 0;
9854 }
9855 
9856 static int32_t
9857 __ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9858 {
9859 	sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe;
9860 
9861 	if (status || read_topo->hdr.status) {
9862 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n",
9863 				status, read_topo->hdr.status);
9864 		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9865 		return -1;
9866 	}
9867 
9868 	switch (read_topo->attention_type) {
9869 	case SLI4_READ_TOPOLOGY_LINK_UP:
9870 		hw->link.status = SLI_LINK_STATUS_UP;
9871 		break;
9872 	case SLI4_READ_TOPOLOGY_LINK_DOWN:
9873 		hw->link.status = SLI_LINK_STATUS_DOWN;
9874 		break;
9875 	case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
9876 		hw->link.status = SLI_LINK_STATUS_NO_ALPA;
9877 		break;
9878 	default:
9879 		hw->link.status = SLI_LINK_STATUS_MAX;
9880 		break;
9881 	}
9882 
9883 	switch (read_topo->topology) {
9884 	case SLI4_READ_TOPOLOGY_NPORT:
9885 		hw->link.topology = SLI_LINK_TOPO_NPORT;
9886 		break;
9887 	case SLI4_READ_TOPOLOGY_FC_AL:
9888 		hw->link.topology = SLI_LINK_TOPO_LOOP;
9889 		if (SLI_LINK_STATUS_UP == hw->link.status) {
9890 			hw->link.loop_map = hw->loop_map.virt;
9891 		}
9892 		hw->link.fc_id = read_topo->acquired_al_pa;
9893 		break;
9894 	default:
9895 		hw->link.topology = SLI_LINK_TOPO_MAX;
9896 		break;
9897 	}
9898 
9899 	hw->link.medium = SLI_LINK_MEDIUM_FC;
9900 
9901 	switch (read_topo->link_current.link_speed) {
9902 	case SLI4_READ_TOPOLOGY_SPEED_1G:
9903 		hw->link.speed =  1 * 1000;
9904 		break;
9905 	case SLI4_READ_TOPOLOGY_SPEED_2G:
9906 		hw->link.speed =  2 * 1000;
9907 		break;
9908 	case SLI4_READ_TOPOLOGY_SPEED_4G:
9909 		hw->link.speed =  4 * 1000;
9910 		break;
9911 	case SLI4_READ_TOPOLOGY_SPEED_8G:
9912 		hw->link.speed =  8 * 1000;
9913 		break;
9914 	case SLI4_READ_TOPOLOGY_SPEED_16G:
9915 		hw->link.speed = 16 * 1000;
9916 		hw->link.loop_map = NULL;
9917 		break;
9918 	case SLI4_READ_TOPOLOGY_SPEED_32G:
9919 		hw->link.speed = 32 * 1000;
9920 		hw->link.loop_map = NULL;
9921 		break;
9922 	}
9923 
9924 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9925 
9926 	ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9927 
9928 	return 0;
9929 }
9930 
9931 static int32_t
9932 __ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9933 {
9934 	ocs_sli_port_t	*sport = ctx->app;
9935 	ocs_hw_t	*hw = sport->hw;
9936 
9937 	smtrace("port");
9938 
9939 	switch (evt) {
9940 	case OCS_EVT_EXIT:
9941 		/* ignore */
9942 		break;
9943 
9944 	case OCS_EVT_HW_PORT_REQ_FREE:
9945 	case OCS_EVT_HW_PORT_REQ_ATTACH:
9946 		if (data != NULL) {
9947 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9948 		}
9949 		/* fall through */
9950 	default:
9951 		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
9952 		break;
9953 	}
9954 
9955 	return 0;
9956 }
9957 
9958 static void *
9959 __ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9960 {
9961 	ocs_sli_port_t	*sport = ctx->app;
9962 	ocs_hw_t	*hw = sport->hw;
9963 
9964 	smtrace("port");
9965 
9966 	switch (evt) {
9967 	case OCS_EVT_ENTER:
9968 		if (data != NULL) {
9969 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9970 		}
9971 		if (hw->callback.port != NULL) {
9972 			hw->callback.port(hw->args.port,
9973 					OCS_HW_PORT_FREE_FAIL, sport);
9974 		}
9975 		break;
9976 	default:
9977 		break;
9978 	}
9979 
9980 	return NULL;
9981 }
9982 
9983 static void *
9984 __ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9985 {
9986 	ocs_sli_port_t	*sport = ctx->app;
9987 	ocs_hw_t	*hw = sport->hw;
9988 
9989 	smtrace("port");
9990 
9991 	switch (evt) {
9992 	case OCS_EVT_ENTER:
9993 		/* free SLI resource */
9994 		if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) {
9995 			ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id);
9996 		}
9997 
9998 		/* free mailbox buffer */
9999 		if (data != NULL) {
10000 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10001 		}
10002 		if (hw->callback.port != NULL) {
10003 			hw->callback.port(hw->args.port,
10004 					OCS_HW_PORT_FREE_OK, sport);
10005 		}
10006 		break;
10007 	default:
10008 		break;
10009 	}
10010 
10011 	return NULL;
10012 }
10013 
10014 static void *
10015 __ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10016 {
10017 	ocs_sli_port_t	*sport = ctx->app;
10018 	ocs_hw_t	*hw = sport->hw;
10019 
10020 	smtrace("port");
10021 
10022 	switch (evt) {
10023 	case OCS_EVT_ENTER:
10024 		/* free SLI resource */
10025 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10026 
10027 		/* free mailbox buffer */
10028 		if (data != NULL) {
10029 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10030 		}
10031 
10032 		if (hw->callback.port != NULL) {
10033 			hw->callback.port(hw->args.port,
10034 					OCS_HW_PORT_ATTACH_FAIL, sport);
10035 		}
10036 		if (sport->sm_free_req_pending) {
10037 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10038 		}
10039 		break;
10040 	default:
10041 		__ocs_hw_port_common(__func__, ctx, evt, data);
10042 		break;
10043 	}
10044 
10045 	return NULL;
10046 }
10047 
10048 static void *
10049 __ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10050 {
10051 	ocs_sli_port_t	*sport = ctx->app;
10052 	ocs_hw_t	*hw = sport->hw;
10053 	uint8_t		*cmd = NULL;
10054 
10055 	smtrace("port");
10056 
10057 	switch (evt) {
10058 	case OCS_EVT_ENTER:
10059 		/* allocate memory and send unreg_vpi */
10060 		cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10061 		if (!cmd) {
10062 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10063 			break;
10064 		}
10065 
10066 		if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator,
10067 					   SLI4_UNREG_TYPE_PORT)) {
10068 			ocs_log_err(hw->os, "UNREG_VPI format failure\n");
10069 			ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10070 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10071 			break;
10072 		}
10073 
10074 		if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10075 			ocs_log_err(hw->os, "UNREG_VPI command failure\n");
10076 			ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10077 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10078 			break;
10079 		}
10080 		break;
10081 	case OCS_EVT_RESPONSE:
10082 		ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10083 		break;
10084 	case OCS_EVT_ERROR:
10085 		ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10086 		break;
10087 	default:
10088 		__ocs_hw_port_common(__func__, ctx, evt, data);
10089 		break;
10090 	}
10091 
10092 	return NULL;
10093 }
10094 
10095 static void *
10096 __ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10097 {
10098 	ocs_sli_port_t	*sport = ctx->app;
10099 	ocs_hw_t	*hw = sport->hw;
10100 
10101 	smtrace("port");
10102 
10103 	switch (evt) {
10104 	case OCS_EVT_ENTER:
10105 		/* Forward to execute in mailbox completion processing context */
10106 		if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) {
10107 			ocs_log_err(hw->os, "ocs_hw_async_call failed\n");
10108 		}
10109 		break;
10110 	case OCS_EVT_RESPONSE:
10111 		ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10112 		break;
10113 	case OCS_EVT_ERROR:
10114 		ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10115 		break;
10116 	default:
10117 		break;
10118 	}
10119 
10120 	return NULL;
10121 }
10122 
10123 static void *
10124 __ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10125 {
10126 	ocs_sli_port_t	*sport = ctx->app;
10127 	ocs_hw_t	*hw = sport->hw;
10128 
10129 	smtrace("port");
10130 
10131 	switch (evt) {
10132 	case OCS_EVT_ENTER:
10133 		if (data != NULL) {
10134 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10135 		}
10136 		if (hw->callback.port != NULL) {
10137 			hw->callback.port(hw->args.port,
10138 					OCS_HW_PORT_ATTACH_OK, sport);
10139 		}
10140 		if (sport->sm_free_req_pending) {
10141 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10142 		}
10143 		break;
10144 	case OCS_EVT_HW_PORT_REQ_FREE:
10145 		/* virtual/physical port request free */
10146 		ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10147 		break;
10148 	default:
10149 		__ocs_hw_port_common(__func__, ctx, evt, data);
10150 		break;
10151 	}
10152 
10153 	return NULL;
10154 }
10155 
10156 static void *
10157 __ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10158 {
10159 	ocs_sli_port_t	*sport = ctx->app;
10160 	ocs_hw_t	*hw = sport->hw;
10161 
10162 	smtrace("port");
10163 
10164 	switch (evt) {
10165 	case OCS_EVT_ENTER:
10166 		if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) {
10167 			ocs_log_err(hw->os, "REG_VPI format failure\n");
10168 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10169 			break;
10170 		}
10171 
10172 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10173 			ocs_log_err(hw->os, "REG_VPI command failure\n");
10174 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10175 			break;
10176 		}
10177 		break;
10178 	case OCS_EVT_RESPONSE:
10179 		ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10180 		break;
10181 	case OCS_EVT_ERROR:
10182 		ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data);
10183 		break;
10184 	case OCS_EVT_HW_PORT_REQ_FREE:
10185 		/* Wait for attach response and then free */
10186 		sport->sm_free_req_pending = 1;
10187 		break;
10188 	default:
10189 		__ocs_hw_port_common(__func__, ctx, evt, data);
10190 		break;
10191 	}
10192 
10193 	return NULL;
10194 }
10195 
10196 static void *
10197 __ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10198 {
10199 	ocs_sli_port_t	*sport = ctx->app;
10200 	ocs_hw_t	*hw = sport->hw;
10201 
10202 	smtrace("port");
10203 
10204 	switch (evt) {
10205 	case OCS_EVT_ENTER:
10206 		/* free SLI resource */
10207 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10208 
10209 		/* free mailbox buffer */
10210 		if (data != NULL) {
10211 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10212 		}
10213 		break;
10214 	default:
10215 		__ocs_hw_port_common(__func__, ctx, evt, data);
10216 		break;
10217 	}
10218 
10219 	return NULL;
10220 }
10221 
10222 static void *
10223 __ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10224 {
10225 	ocs_sli_port_t	*sport = ctx->app;
10226 	ocs_hw_t	*hw = sport->hw;
10227 
10228 	smtrace("port");
10229 
10230 	switch (evt) {
10231 	case OCS_EVT_ENTER:
10232 		if (data != NULL) {
10233 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10234 		}
10235 		if (hw->callback.port != NULL) {
10236 			hw->callback.port(hw->args.port,
10237 					OCS_HW_PORT_ALLOC_OK, sport);
10238 		}
10239 		/* If there is a pending free request, then handle it now */
10240 		if (sport->sm_free_req_pending) {
10241 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10242 		}
10243 		break;
10244 	case OCS_EVT_HW_PORT_REQ_ATTACH:
10245 		/* virtual port requests attach */
10246 		ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data);
10247 		break;
10248 	case OCS_EVT_HW_PORT_ATTACH_OK:
10249 		/* physical port attached (as part of attaching domain) */
10250 		ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10251 		break;
10252 	case OCS_EVT_HW_PORT_REQ_FREE:
10253 		/* virtual port request free */
10254 		if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
10255 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10256 		} else {
10257 			/*
10258 			 * Note: BE3/Skyhawk will respond with a status of 0x20
10259 			 *       unless the reg_vpi has been issued, so we can
10260 			 *       skip the unreg_vpi for these adapters.
10261 			 *
10262 			 * Send a nop to make sure that free doesn't occur in
10263 			 * same context
10264 			 */
10265 			ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL);
10266 		}
10267 		break;
10268 	default:
10269 		__ocs_hw_port_common(__func__, ctx, evt, data);
10270 		break;
10271 	}
10272 
10273 	return NULL;
10274 }
10275 
10276 static void *
10277 __ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10278 {
10279 	ocs_sli_port_t	*sport = ctx->app;
10280 	ocs_hw_t	*hw = sport->hw;
10281 
10282 	smtrace("port");
10283 
10284 	switch (evt) {
10285 	case OCS_EVT_ENTER:
10286 		/* free SLI resource */
10287 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10288 
10289 		/* free mailbox buffer */
10290 		if (data != NULL) {
10291 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10292 		}
10293 
10294 		if (hw->callback.port != NULL) {
10295 			hw->callback.port(hw->args.port,
10296 					OCS_HW_PORT_ALLOC_FAIL, sport);
10297 		}
10298 
10299 		/* If there is a pending free request, then handle it now */
10300 		if (sport->sm_free_req_pending) {
10301 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10302 		}
10303 		break;
10304 	default:
10305 		__ocs_hw_port_common(__func__, ctx, evt, data);
10306 		break;
10307 	}
10308 
10309 	return NULL;
10310 }
10311 
10312 static void *
10313 __ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10314 {
10315 	ocs_sli_port_t	*sport = ctx->app;
10316 	ocs_hw_t	*hw = sport->hw;
10317 	uint8_t		*payload = NULL;
10318 
10319 	smtrace("port");
10320 
10321 	switch (evt) {
10322 	case OCS_EVT_ENTER:
10323 		/* allocate memory for the service parameters */
10324 		if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) {
10325 			ocs_log_err(hw->os, "Failed to allocate DMA memory\n");
10326 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10327 			break;
10328 		}
10329 
10330 		if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10331 					&sport->dma, sport->indicator)) {
10332 			ocs_log_err(hw->os, "READ_SPARM64 allocation failure\n");
10333 			ocs_dma_free(hw->os, &sport->dma);
10334 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10335 			break;
10336 		}
10337 
10338 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10339 			ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10340 			ocs_dma_free(hw->os, &sport->dma);
10341 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10342 			break;
10343 		}
10344 		break;
10345 	case OCS_EVT_RESPONSE:
10346 		payload = sport->dma.virt;
10347 
10348 		ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload);
10349 
10350 		ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
10351 				sizeof(sport->sli_wwpn));
10352 		ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
10353 				sizeof(sport->sli_wwnn));
10354 
10355 		ocs_dma_free(hw->os, &sport->dma);
10356 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data);
10357 		break;
10358 	case OCS_EVT_ERROR:
10359 		ocs_dma_free(hw->os, &sport->dma);
10360 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10361 		break;
10362 	case OCS_EVT_HW_PORT_REQ_FREE:
10363 		/* Wait for attach response and then free */
10364 		sport->sm_free_req_pending = 1;
10365 		break;
10366 	case OCS_EVT_EXIT:
10367 		break;
10368 	default:
10369 		__ocs_hw_port_common(__func__, ctx, evt, data);
10370 		break;
10371 	}
10372 
10373 	return NULL;
10374 }
10375 
10376 static void *
10377 __ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10378 {
10379 	ocs_sli_port_t	*sport = ctx->app;
10380 
10381 	smtrace("port");
10382 
10383 	switch (evt) {
10384 	case OCS_EVT_ENTER:
10385 		/* no-op */
10386 		break;
10387 	case OCS_EVT_HW_PORT_ALLOC_OK:
10388 		ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL);
10389 		break;
10390 	case OCS_EVT_HW_PORT_ALLOC_FAIL:
10391 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL);
10392 		break;
10393 	case OCS_EVT_HW_PORT_REQ_FREE:
10394 		/* Wait for attach response and then free */
10395 		sport->sm_free_req_pending = 1;
10396 		break;
10397 	default:
10398 		__ocs_hw_port_common(__func__, ctx, evt, data);
10399 		break;
10400 	}
10401 
10402 	return NULL;
10403 }
10404 
10405 static void *
10406 __ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10407 {
10408 	ocs_sli_port_t	*sport = ctx->app;
10409 	ocs_hw_t	*hw = sport->hw;
10410 
10411 	smtrace("port");
10412 
10413 	switch (evt) {
10414 	case OCS_EVT_ENTER:
10415 		/* If there is a pending free request, then handle it now */
10416 		if (sport->sm_free_req_pending) {
10417 			ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL);
10418 			return NULL;
10419 		}
10420 
10421 		/* TODO XXX transitioning to done only works if this is called
10422 		 * directly from ocs_hw_port_alloc BUT not if called from
10423 		 * read_sparm64. In the later case, we actually want to go
10424 		 * through report_ok/fail
10425 		 */
10426 		if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE,
10427 					sport->indicator, sport->domain->indicator)) {
10428 			ocs_log_err(hw->os, "INIT_VPI allocation failure\n");
10429 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10430 			break;
10431 		}
10432 
10433 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10434 			ocs_log_err(hw->os, "INIT_VPI command failure\n");
10435 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10436 			break;
10437 		}
10438 		break;
10439 	case OCS_EVT_RESPONSE:
10440 		ocs_sm_transition(ctx, __ocs_hw_port_allocated, data);
10441 		break;
10442 	case OCS_EVT_ERROR:
10443 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10444 		break;
10445 	case OCS_EVT_HW_PORT_REQ_FREE:
10446 		/* Wait for attach response and then free */
10447 		sport->sm_free_req_pending = 1;
10448 		break;
10449 	case OCS_EVT_EXIT:
10450 		break;
10451 	default:
10452 		__ocs_hw_port_common(__func__, ctx, evt, data);
10453 		break;
10454 	}
10455 
10456 	return NULL;
10457 }
10458 
10459 static int32_t
10460 __ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10461 {
10462 	ocs_sli_port_t *sport = arg;
10463 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
10464 	ocs_sm_event_t	evt;
10465 
10466 	if (status || hdr->status) {
10467 		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10468 			      sport->indicator, status, hdr->status);
10469 		evt = OCS_EVT_ERROR;
10470 	} else {
10471 		evt = OCS_EVT_RESPONSE;
10472 	}
10473 
10474 	ocs_sm_post_event(&sport->ctx, evt, mqe);
10475 
10476 	return 0;
10477 }
10478 
10479 static int32_t
10480 __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10481 {
10482 	ocs_sli_port_t *sport = arg;
10483 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
10484 	ocs_sm_event_t	evt;
10485 	uint8_t *mqecpy;
10486 
10487 	if (status || hdr->status) {
10488 		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10489 			      sport->indicator, status, hdr->status);
10490 		evt = OCS_EVT_ERROR;
10491 	} else {
10492 		evt = OCS_EVT_RESPONSE;
10493 	}
10494 
10495 	/*
10496 	 * In this case we have to malloc a mailbox command buffer, as it is reused
10497 	 * in the state machine post event call, and eventually freed
10498 	 */
10499 	mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10500 	if (mqecpy == NULL) {
10501 		ocs_log_err(hw->os, "malloc mqecpy failed\n");
10502 		return -1;
10503 	}
10504 	ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE);
10505 
10506 	ocs_sm_post_event(&sport->ctx, evt, mqecpy);
10507 
10508 	return 0;
10509 }
10510 
10511 /***************************************************************************
10512  * Domain state machine
10513  */
10514 
10515 static int32_t
10516 __ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10517 {
10518 	ocs_domain_t	*domain = ctx->app;
10519 	ocs_hw_t	*hw = domain->hw;
10520 
10521 	smtrace("domain");
10522 
10523 	switch (evt) {
10524 	case OCS_EVT_EXIT:
10525 		/* ignore */
10526 		break;
10527 
10528 	default:
10529 		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
10530 		break;
10531 	}
10532 
10533 	return 0;
10534 }
10535 
10536 static void *
10537 __ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10538 {
10539 	ocs_domain_t	*domain = ctx->app;
10540 	ocs_hw_t	*hw = domain->hw;
10541 
10542 	smtrace("domain");
10543 
10544 	switch (evt) {
10545 	case OCS_EVT_ENTER:
10546 		/* free command buffer */
10547 		if (data != NULL) {
10548 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10549 		}
10550 		/* free SLI resources */
10551 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10552 		/* TODO how to free FCFI (or do we at all)? */
10553 
10554 		if (hw->callback.domain != NULL) {
10555 			hw->callback.domain(hw->args.domain,
10556 					OCS_HW_DOMAIN_ALLOC_FAIL,
10557 					domain);
10558 		}
10559 		break;
10560 	default:
10561 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10562 		break;
10563 	}
10564 
10565 	return NULL;
10566 }
10567 
10568 static void *
10569 __ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10570 {
10571 	ocs_domain_t	*domain = ctx->app;
10572 	ocs_hw_t	*hw = domain->hw;
10573 
10574 	smtrace("domain");
10575 
10576 	switch (evt) {
10577 	case OCS_EVT_ENTER:
10578 		/* free mailbox buffer and send alloc ok to physical sport */
10579 		ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10580 		ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL);
10581 
10582 		/* now inform registered callbacks */
10583 		if (hw->callback.domain != NULL) {
10584 			hw->callback.domain(hw->args.domain,
10585 					OCS_HW_DOMAIN_ATTACH_OK,
10586 					domain);
10587 		}
10588 		break;
10589 	case OCS_EVT_HW_DOMAIN_REQ_FREE:
10590 		ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10591 		break;
10592 	default:
10593 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10594 		break;
10595 	}
10596 
10597 	return NULL;
10598 }
10599 
10600 static void *
10601 __ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10602 {
10603 	ocs_domain_t	*domain = ctx->app;
10604 	ocs_hw_t	*hw = domain->hw;
10605 
10606 	smtrace("domain");
10607 
10608 	switch (evt) {
10609 	case OCS_EVT_ENTER:
10610 		if (data != NULL) {
10611 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10612 		}
10613 		/* free SLI resources */
10614 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10615 		/* TODO how to free FCFI (or do we at all)? */
10616 
10617 		if (hw->callback.domain != NULL) {
10618 			hw->callback.domain(hw->args.domain,
10619 					OCS_HW_DOMAIN_ATTACH_FAIL,
10620 					domain);
10621 		}
10622 		break;
10623 	case OCS_EVT_EXIT:
10624 		break;
10625 	default:
10626 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10627 		break;
10628 	}
10629 
10630 	return NULL;
10631 }
10632 
10633 static void *
10634 __ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10635 {
10636 	ocs_domain_t	*domain = ctx->app;
10637 	ocs_hw_t	*hw = domain->hw;
10638 
10639 	smtrace("domain");
10640 
10641 	switch (evt) {
10642 	case OCS_EVT_ENTER:
10643 
10644 		ocs_display_sparams("", "reg vpi", 0, NULL, domain->dma.virt);
10645 
10646 		if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) {
10647 			ocs_log_err(hw->os, "REG_VFI format failure\n");
10648 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10649 			break;
10650 		}
10651 
10652 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10653 			ocs_log_err(hw->os, "REG_VFI command failure\n");
10654 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10655 			break;
10656 		}
10657 		break;
10658 	case OCS_EVT_RESPONSE:
10659 		ocs_sm_transition(ctx, __ocs_hw_domain_attached, data);
10660 		break;
10661 	case OCS_EVT_ERROR:
10662 		ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data);
10663 		break;
10664 	default:
10665 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10666 		break;
10667 	}
10668 
10669 	return NULL;
10670 }
10671 
10672 static void *
10673 __ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10674 {
10675 	ocs_domain_t	*domain = ctx->app;
10676 	ocs_hw_t	*hw = domain->hw;
10677 
10678 	smtrace("domain");
10679 
10680 	switch (evt) {
10681 	case OCS_EVT_ENTER:
10682 		/* free mailbox buffer and send alloc ok to physical sport */
10683 		ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10684 		ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL);
10685 
10686 		ocs_hw_domain_add(hw, domain);
10687 
10688 		/* now inform registered callbacks */
10689 		if (hw->callback.domain != NULL) {
10690 			hw->callback.domain(hw->args.domain,
10691 					OCS_HW_DOMAIN_ALLOC_OK,
10692 					domain);
10693 		}
10694 		break;
10695 	case OCS_EVT_HW_DOMAIN_REQ_ATTACH:
10696 		ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data);
10697 		break;
10698 	case OCS_EVT_HW_DOMAIN_REQ_FREE:
10699 		/* unreg_fcfi/vfi */
10700 		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10701 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL);
10702 		} else {
10703 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10704 		}
10705 		break;
10706 	default:
10707 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10708 		break;
10709 	}
10710 
10711 	return NULL;
10712 }
10713 
10714 static void *
10715 __ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10716 {
10717 	ocs_domain_t	*domain = ctx->app;
10718 	ocs_hw_t	*hw = domain->hw;
10719 
10720 	smtrace("domain");
10721 
10722 	switch (evt) {
10723 	case OCS_EVT_ENTER:
10724 		if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10725 					&domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) {
10726 			ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
10727 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10728 			break;
10729 		}
10730 
10731 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10732 			ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10733 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10734 			break;
10735 		}
10736 		break;
10737 	case OCS_EVT_EXIT:
10738 		break;
10739 	case OCS_EVT_RESPONSE:
10740 		ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt);
10741 
10742 		ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data);
10743 		break;
10744 	case OCS_EVT_ERROR:
10745 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10746 		break;
10747 	default:
10748 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10749 		break;
10750 	}
10751 
10752 	return NULL;
10753 }
10754 
10755 static void *
10756 __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10757 {
10758 	ocs_domain_t	*domain = ctx->app;
10759 	ocs_sli_port_t	*sport = domain->sport;
10760 	ocs_hw_t	*hw = domain->hw;
10761 
10762 	smtrace("domain");
10763 
10764 	switch (evt) {
10765 	case OCS_EVT_ENTER:
10766 		if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator,
10767 					domain->fcf_indicator, sport->indicator)) {
10768 			ocs_log_err(hw->os, "INIT_VFI format failure\n");
10769 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10770 			break;
10771 		}
10772 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10773 			ocs_log_err(hw->os, "INIT_VFI command failure\n");
10774 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10775 			break;
10776 		}
10777 		break;
10778 	case OCS_EVT_EXIT:
10779 		break;
10780 	case OCS_EVT_RESPONSE:
10781 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10782 		break;
10783 	case OCS_EVT_ERROR:
10784 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10785 		break;
10786 	default:
10787 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10788 		break;
10789 	}
10790 
10791 	return NULL;
10792 }
10793 
10794 static void *
10795 __ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10796 {
10797 	ocs_domain_t	*domain = ctx->app;
10798 	ocs_hw_t	*hw = domain->hw;
10799 
10800 	smtrace("domain");
10801 
10802 	switch (evt) {
10803 	case OCS_EVT_ENTER: {
10804 		sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
10805 		uint32_t i;
10806 
10807 		/* Set the filter match/mask values from hw's filter_def values */
10808 		for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
10809 			rq_cfg[i].rq_id = 0xffff;
10810 			rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
10811 			rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
10812 			rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
10813 			rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
10814 		}
10815 
10816 		/* Set the rq_id for each, in order of RQ definition */
10817 		for (i = 0; i < hw->hw_rq_count; i++) {
10818 			if (i >= ARRAY_SIZE(rq_cfg)) {
10819 				ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n");
10820 				break;
10821 			}
10822 			rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id;
10823 		}
10824 
10825 		if (!data) {
10826 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10827 			break;
10828 		}
10829 
10830 		if (hw->hw_mrq_count) {
10831 			if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE,
10832 				 domain->vlan_id, domain->fcf)) {
10833 				ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n");
10834 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10835 				break;
10836 			}
10837 
10838 		} else {
10839 			if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf,
10840 						rq_cfg, domain->vlan_id)) {
10841 				ocs_log_err(hw->os, "REG_FCFI format failure\n");
10842 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10843 				break;
10844 			}
10845 		}
10846 
10847 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10848 			ocs_log_err(hw->os, "REG_FCFI command failure\n");
10849 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10850 			break;
10851 		}
10852 		break;
10853 	}
10854 	case OCS_EVT_EXIT:
10855 		break;
10856 	case OCS_EVT_RESPONSE:
10857 		if (!data) {
10858 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10859 			break;
10860 		}
10861 
10862 		domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi;
10863 
10864 		/*
10865 		 * IF_TYPE 0 devices do not support explicit VFI and VPI initialization
10866 		 * and instead rely on implicit initialization during VFI registration.
10867 		 * Short circuit normal processing here for those devices.
10868 		 */
10869 		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10870 			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10871 		} else {
10872 			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data);
10873 		}
10874 		break;
10875 	case OCS_EVT_ERROR:
10876 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10877 		break;
10878 	default:
10879 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10880 		break;
10881 	}
10882 
10883 	return NULL;
10884 }
10885 
10886 static void *
10887 __ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10888 {
10889 	ocs_domain_t	*domain = ctx->app;
10890 	ocs_hw_t	*hw = domain->hw;
10891 
10892 	smtrace("domain");
10893 
10894 	switch (evt) {
10895 	case OCS_EVT_ENTER:
10896 		if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
10897 			/*
10898 			 * For FC, the HW alread registered a FCFI
10899 			 * Copy FCF information into the domain and jump to INIT_VFI
10900 			 */
10901 			domain->fcf_indicator = hw->fcf_indicator;
10902 			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data);
10903 		} else {
10904 			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data);
10905 		}
10906 		break;
10907 	default:
10908 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10909 		break;
10910 	}
10911 
10912 	return NULL;
10913 }
10914 
10915 static void *
10916 __ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10917 {
10918 	ocs_domain_t	*domain = ctx->app;
10919 
10920 	smtrace("domain");
10921 
10922 	switch (evt) {
10923 	case OCS_EVT_ENTER:
10924 		if (domain != NULL) {
10925 			ocs_hw_t	*hw = domain->hw;
10926 
10927 			ocs_hw_domain_del(hw, domain);
10928 
10929 			if (hw->callback.domain != NULL) {
10930 				hw->callback.domain(hw->args.domain,
10931 						     OCS_HW_DOMAIN_FREE_FAIL,
10932 						     domain);
10933 			}
10934 		}
10935 
10936 		/* free command buffer */
10937 		if (data != NULL) {
10938 			ocs_free(domain != NULL ? domain->hw->os : NULL, data, SLI4_BMBX_SIZE);
10939 		}
10940 		break;
10941 	case OCS_EVT_EXIT:
10942 		break;
10943 	default:
10944 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10945 		break;
10946 	}
10947 
10948 	return NULL;
10949 }
10950 
10951 static void *
10952 __ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10953 {
10954 	ocs_domain_t	*domain = ctx->app;
10955 
10956 	smtrace("domain");
10957 
10958 	switch (evt) {
10959 	case OCS_EVT_ENTER:
10960 		/* Free DMA and mailbox buffer */
10961 		if (domain != NULL) {
10962 			ocs_hw_t *hw = domain->hw;
10963 
10964 			/* free VFI resource */
10965 			sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI,
10966 					  domain->indicator);
10967 
10968 			ocs_hw_domain_del(hw, domain);
10969 
10970 			/* inform registered callbacks */
10971 			if (hw->callback.domain != NULL) {
10972 				hw->callback.domain(hw->args.domain,
10973 						     OCS_HW_DOMAIN_FREE_OK,
10974 						     domain);
10975 			}
10976 		}
10977 		if (data != NULL) {
10978 			ocs_free(NULL, data, SLI4_BMBX_SIZE);
10979 		}
10980 		break;
10981 	case OCS_EVT_EXIT:
10982 		break;
10983 	default:
10984 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10985 		break;
10986 	}
10987 
10988 	return NULL;
10989 }
10990 
10991 static void *
10992 __ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10993 {
10994 	ocs_domain_t	*domain = ctx->app;
10995 	ocs_hw_t	*hw = domain->hw;
10996 
10997 	smtrace("domain");
10998 
10999 	switch (evt) {
11000 	case OCS_EVT_ENTER:
11001 		/* if we're in the middle of a teardown, skip sending rediscover */
11002 		if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) {
11003 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11004 			break;
11005 		}
11006 		if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) {
11007 			ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n");
11008 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11009 			break;
11010 		}
11011 
11012 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11013 			ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n");
11014 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11015 		}
11016 		break;
11017 	case OCS_EVT_RESPONSE:
11018 	case OCS_EVT_ERROR:
11019 		/* REDISCOVER_FCF can fail if none exist */
11020 		ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11021 		break;
11022 	case OCS_EVT_EXIT:
11023 		break;
11024 	default:
11025 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11026 		break;
11027 	}
11028 
11029 	return NULL;
11030 }
11031 
11032 static void *
11033 __ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11034 {
11035 	ocs_domain_t	*domain = ctx->app;
11036 	ocs_hw_t	*hw = domain->hw;
11037 
11038 	smtrace("domain");
11039 
11040 	switch (evt) {
11041 	case OCS_EVT_ENTER:
11042 		if (data == NULL) {
11043 			data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11044 			if (!data) {
11045 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11046 				break;
11047 			}
11048 		}
11049 
11050 		if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) {
11051 			ocs_log_err(hw->os, "UNREG_FCFI format failure\n");
11052 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11053 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11054 			break;
11055 		}
11056 
11057 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11058 			ocs_log_err(hw->os, "UNREG_FCFI command failure\n");
11059 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11060 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11061 			break;
11062 		}
11063 		break;
11064 	case OCS_EVT_RESPONSE:
11065 		if (domain->req_rediscover_fcf) {
11066 			domain->req_rediscover_fcf = FALSE;
11067 			ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data);
11068 		} else {
11069 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11070 		}
11071 		break;
11072 	case OCS_EVT_ERROR:
11073 		ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11074 		break;
11075 	case OCS_EVT_EXIT:
11076 		break;
11077 	default:
11078 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11079 		break;
11080 	}
11081 
11082 	return NULL;
11083 }
11084 
11085 static void *
11086 __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11087 {
11088 	ocs_domain_t	*domain = ctx->app;
11089 	ocs_hw_t	*hw = domain->hw;
11090 	uint8_t		is_fc = FALSE;
11091 
11092 	smtrace("domain");
11093 
11094 	is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC);
11095 
11096 	switch (evt) {
11097 	case OCS_EVT_ENTER:
11098 		if (data == NULL) {
11099 			data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11100 			if (!data) {
11101 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11102 				break;
11103 			}
11104 		}
11105 
11106 		if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain,
11107 					SLI4_UNREG_TYPE_DOMAIN)) {
11108 			ocs_log_err(hw->os, "UNREG_VFI format failure\n");
11109 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11110 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11111 			break;
11112 		}
11113 
11114 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11115 			ocs_log_err(hw->os, "UNREG_VFI command failure\n");
11116 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11117 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11118 			break;
11119 		}
11120 		break;
11121 	case OCS_EVT_ERROR:
11122 		if (is_fc) {
11123 			ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11124 		} else {
11125 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11126 		}
11127 		break;
11128 	case OCS_EVT_RESPONSE:
11129 		if (is_fc) {
11130 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11131 		} else {
11132 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11133 		}
11134 		break;
11135 	default:
11136 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11137 		break;
11138 	}
11139 
11140 	return NULL;
11141 }
11142 
11143 /* callback for domain alloc/attach/free */
11144 static int32_t
11145 __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11146 {
11147 	ocs_domain_t	*domain = arg;
11148 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
11149 	ocs_sm_event_t	evt;
11150 
11151 	if (status || hdr->status) {
11152 		ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n",
11153 			      domain->indicator, status, hdr->status);
11154 		evt = OCS_EVT_ERROR;
11155 	} else {
11156 		evt = OCS_EVT_RESPONSE;
11157 	}
11158 
11159 	ocs_sm_post_event(&domain->sm, evt, mqe);
11160 
11161 	return 0;
11162 }
11163 
11164 static int32_t
11165 target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11166 {
11167 	ocs_hw_io_t *io = NULL;
11168 	ocs_hw_io_t *io_next = NULL;
11169 	uint64_t ticks_current = ocs_get_os_ticks();
11170 	uint32_t sec_elapsed;
11171 	ocs_hw_rtn_e rc;
11172 
11173 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
11174 
11175 	if (status || hdr->status) {
11176 		ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n",
11177 			      status, hdr->status);
11178 		/* go ahead and proceed with wqe timer checks... */
11179 	}
11180 
11181 	/* loop through active WQE list and check for timeouts */
11182 	ocs_lock(&hw->io_lock);
11183 	ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) {
11184 		sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq());
11185 
11186 		/*
11187 		 * If elapsed time > timeout, abort it. No need to check type since
11188 		 * it wouldn't be on this list unless it was a target WQE
11189 		 */
11190 		if (sec_elapsed > io->tgt_wqe_timeout) {
11191 			ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n",
11192 				     io->indicator, io->reqtag, io->type);
11193 
11194 			/* remove from active_wqe list so won't try to abort again */
11195 			ocs_list_remove(&hw->io_timed_wqe, io);
11196 
11197 			/* save status of "timed out" for when abort completes */
11198 			io->status_saved = 1;
11199 			io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT;
11200 			io->saved_ext = 0;
11201 			io->saved_len = 0;
11202 
11203 			/* now abort outstanding IO */
11204 			rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
11205 			if (rc) {
11206 				ocs_log_test(hw->os,
11207 					"abort failed xri=%#x tag=%#x rc=%d\n",
11208 					io->indicator, io->reqtag, rc);
11209 			}
11210 		}
11211 		/*
11212 		 * need to go through entire list since each IO could have a
11213 		 * different timeout value
11214 		 */
11215 	}
11216 	ocs_unlock(&hw->io_lock);
11217 
11218 	/* if we're not in the middle of shutting down, schedule next timer */
11219 	if (!hw->active_wqe_timer_shutdown) {
11220 		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS);
11221 	}
11222 	hw->in_active_wqe_timer = FALSE;
11223 	return 0;
11224 }
11225 
11226 static void
11227 target_wqe_timer_cb(void *arg)
11228 {
11229 	ocs_hw_t *hw = (ocs_hw_t *)arg;
11230 
11231 	/* delete existing timer; will kick off new timer after checking wqe timeouts */
11232 	hw->in_active_wqe_timer = TRUE;
11233 	ocs_del_timer(&hw->wqe_timer);
11234 
11235 	/* Forward timer callback to execute in the mailbox completion processing context */
11236 	if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) {
11237 		ocs_log_test(hw->os, "ocs_hw_async_call failed\n");
11238 	}
11239 }
11240 
11241 static void
11242 shutdown_target_wqe_timer(ocs_hw_t *hw)
11243 {
11244 	uint32_t	iters = 100;
11245 
11246 	if (hw->config.emulate_tgt_wqe_timeout) {
11247 		/* request active wqe timer shutdown, then wait for it to complete */
11248 		hw->active_wqe_timer_shutdown = TRUE;
11249 
11250 		/* delete WQE timer and wait for timer handler to complete (if necessary) */
11251 		ocs_del_timer(&hw->wqe_timer);
11252 
11253 		/* now wait for timer handler to complete (if necessary) */
11254 		while (hw->in_active_wqe_timer && iters) {
11255 			/*
11256 			 * if we happen to have just sent NOP mailbox command, make sure
11257 			 * completions are being processed
11258 			 */
11259 			ocs_hw_flush(hw);
11260 			iters--;
11261 		}
11262 
11263 		if (iters == 0) {
11264 			ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n");
11265 		}
11266 	}
11267 }
11268 
11269 /**
11270  * @brief Determine if HW IO is owned by the port.
11271  *
11272  * @par Description
11273  * Determines if the given HW IO has been posted to the chip.
11274  *
11275  * @param hw Hardware context allocated by the caller.
11276  * @param io HW IO.
11277  *
11278  * @return Returns TRUE if given HW IO is port-owned.
11279  */
11280 uint8_t
11281 ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
11282 {
11283 	/* Check to see if this is a port owned XRI */
11284 	return io->is_port_owned;
11285 }
11286 
11287 /**
11288  * @brief Return TRUE if exchange is port-owned.
11289  *
11290  * @par Description
11291  * Test to see if the xri is a port-owned xri.
11292  *
11293  * @param hw Hardware context.
11294  * @param xri Exchange indicator.
11295  *
11296  * @return Returns TRUE if XRI is a port owned XRI.
11297  */
11298 
11299 uint8_t
11300 ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri)
11301 {
11302 	ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri);
11303 	return (io == NULL ? FALSE : io->is_port_owned);
11304 }
11305 
11306 /**
11307  * @brief Returns an XRI from the port owned list to the host.
11308  *
11309  * @par Description
11310  * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes.
11311  *
11312  * @param hw Hardware context.
11313  * @param xri_base The starting XRI number.
11314  * @param xri_count The number of XRIs to free from the base.
11315  */
11316 static void
11317 ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count)
11318 {
11319 	ocs_hw_io_t	*io;
11320 	uint32_t i;
11321 
11322 	for (i = 0; i < xri_count; i++) {
11323 		io = ocs_hw_io_lookup(hw, xri_base + i);
11324 
11325 		/*
11326 		 * if this is an auto xfer rdy XRI, then we need to release any
11327 		 * buffer attached to the XRI before moving the XRI back to the free pool.
11328 		 */
11329 		if (hw->auto_xfer_rdy_enabled) {
11330 			ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
11331 		}
11332 
11333 		ocs_lock(&hw->io_lock);
11334 			ocs_list_remove(&hw->io_port_owned, io);
11335 			io->is_port_owned = 0;
11336 			ocs_list_add_tail(&hw->io_free, io);
11337 		ocs_unlock(&hw->io_lock);
11338 	}
11339 }
11340 
11341 /**
11342  * @brief Called when the POST_XRI command completes.
11343  *
11344  * @par Description
11345  * Free the mailbox command buffer and reclaim the XRIs on failure.
11346  *
11347  * @param hw Hardware context.
11348  * @param status Status field from the mbox completion.
11349  * @param mqe Mailbox response structure.
11350  * @param arg Pointer to a callback function that signals the caller that the command is done.
11351  *
11352  * @return Returns 0.
11353  */
11354 static int32_t
11355 ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
11356 {
11357 	sli4_cmd_post_xri_t	*post_xri = (sli4_cmd_post_xri_t*)mqe;
11358 
11359 	/* Reclaim the XRIs as host owned if the command fails */
11360 	if (status != 0) {
11361 		ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt =x%x\n",
11362 			      status, post_xri->xri_base, post_xri->xri_count);
11363 		ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count);
11364 	}
11365 
11366 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11367 	return 0;
11368 }
11369 
11370 /**
11371  * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port.
11372  *
11373  * @param hw Hardware context.
11374  * @param xri_start The starting XRI to post.
11375  * @param num_to_post The number of XRIs to post.
11376  *
11377  * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS.
11378  */
11379 
11380 static ocs_hw_rtn_e
11381 ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post)
11382 {
11383 	uint8_t	*post_xri;
11384 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11385 
11386 	/* Since we need to allocate for mailbox queue, just always allocate */
11387 	post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11388 	if (post_xri == NULL) {
11389 		ocs_log_err(hw->os, "no buffer for command\n");
11390 		return OCS_HW_RTN_NO_MEMORY;
11391 	}
11392 
11393 	/* Register the XRIs */
11394 	if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE,
11395 			     xri_start, num_to_post)) {
11396 		rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL);
11397 		if (rc != OCS_HW_RTN_SUCCESS) {
11398 			ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE);
11399 			ocs_log_err(hw->os, "post_xri failed\n");
11400 		}
11401 	}
11402 	return rc;
11403 }
11404 
11405 /**
11406  * @brief Move XRIs from the host-controlled pool to the port.
11407  *
11408  * @par Description
11409  * Removes IOs from the free list and moves them to the port.
11410  *
11411  * @param hw Hardware context.
11412  * @param num_xri The number of XRIs being requested to move to the chip.
11413  *
11414  * @return Returns the number of XRIs that were moved.
11415  */
11416 
11417 uint32_t
11418 ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri)
11419 {
11420 	ocs_hw_io_t	*io;
11421 	uint32_t i;
11422 	uint32_t num_posted = 0;
11423 
11424 	/*
11425 	 * Note: We cannot use ocs_hw_io_alloc() because that would place the
11426 	 *       IO on the io_inuse list. We need to move from the io_free to
11427 	 *       the io_port_owned list.
11428 	 */
11429 	ocs_lock(&hw->io_lock);
11430 
11431 	for (i = 0; i < num_xri; i++) {
11432 		if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
11433 			ocs_hw_rtn_e rc;
11434 
11435 			/*
11436 			 * if this is an auto xfer rdy XRI, then we need to attach a
11437 			 * buffer to the XRI before submitting it to the chip. If a
11438 			 * buffer is unavailable, then we cannot post it, so return it
11439 			 * to the free pool.
11440 			 */
11441 			if (hw->auto_xfer_rdy_enabled) {
11442 				/* Note: uses the IO lock to get the auto xfer rdy buffer */
11443 				ocs_unlock(&hw->io_lock);
11444 				rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io);
11445 				ocs_lock(&hw->io_lock);
11446 				if (rc != OCS_HW_RTN_SUCCESS) {
11447 					ocs_list_add_head(&hw->io_free, io);
11448 					break;
11449 				}
11450 			}
11451 			ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator);
11452 			io->is_port_owned = 1;
11453 			ocs_list_add_tail(&hw->io_port_owned, io);
11454 
11455 			/* Post XRI */
11456 			if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS ) {
11457 				ocs_hw_reclaim_xri(hw, io->indicator, i);
11458 				break;
11459 			}
11460 			num_posted++;
11461 		} else {
11462 			/* no more free XRIs */
11463 			break;
11464 		}
11465 	}
11466 	ocs_unlock(&hw->io_lock);
11467 
11468 	return num_posted;
11469 }
11470 
11471 /**
11472  * @brief Called when the RELEASE_XRI command completes.
11473  *
11474  * @par Description
11475  * Move the IOs back to the free pool on success.
11476  *
11477  * @param hw Hardware context.
11478  * @param status Status field from the mbox completion.
11479  * @param mqe Mailbox response structure.
11480  * @param arg Pointer to a callback function that signals the caller that the command is done.
11481  *
11482  * @return Returns 0.
11483  */
11484 static int32_t
11485 ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
11486 {
11487 	sli4_cmd_release_xri_t	*release_xri = (sli4_cmd_release_xri_t*)mqe;
11488 	uint8_t i;
11489 
11490 	/* Reclaim the XRIs as host owned if the command fails */
11491 	if (status != 0) {
11492 		ocs_log_err(hw->os, "Status 0x%x\n", status);
11493 	} else {
11494 		for (i = 0; i < release_xri->released_xri_count; i++) {
11495 			uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 :
11496 					release_xri->xri_tbl[i/2].xri_tag1);
11497 			ocs_hw_reclaim_xri(hw, xri, 1);
11498 		}
11499 	}
11500 
11501 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11502 	return 0;
11503 }
11504 
11505 /**
11506  * @brief Move XRIs from the port-controlled pool to the host.
11507  *
11508  * Requests XRIs from the FW to return to the host-owned pool.
11509  *
11510  * @param hw Hardware context.
11511  * @param num_xri The number of XRIs being requested to moved from the chip.
11512  *
11513  * @return Returns 0 for success, or a negative error code value for failure.
11514  */
11515 
11516 ocs_hw_rtn_e
11517 ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri)
11518 {
11519 	uint8_t	*release_xri;
11520 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11521 
11522 	/* non-local buffer required for mailbox queue */
11523 	release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11524 	if (release_xri == NULL) {
11525 		ocs_log_err(hw->os, "no buffer for command\n");
11526 		return OCS_HW_RTN_NO_MEMORY;
11527 	}
11528 
11529 	/* release the XRIs */
11530 	if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) {
11531 		rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL);
11532 		if (rc != OCS_HW_RTN_SUCCESS) {
11533 			ocs_log_err(hw->os, "release_xri failed\n");
11534 		}
11535 	}
11536 	/* If we are polling or an error occurred, then free the mailbox buffer */
11537 	if (release_xri != NULL && rc != OCS_HW_RTN_SUCCESS) {
11538 		ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE);
11539 	}
11540 	return rc;
11541 }
11542 
11543 /**
11544  * @brief Allocate an ocs_hw_rx_buffer_t array.
11545  *
11546  * @par Description
11547  * An ocs_hw_rx_buffer_t array is allocated, along with the required DMA memory.
11548  *
11549  * @param hw Pointer to HW object.
11550  * @param rqindex RQ index for this buffer.
11551  * @param count Count of buffers in array.
11552  * @param size Size of buffer.
11553  *
11554  * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array.
11555  */
11556 static ocs_hw_rq_buffer_t *
11557 ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size)
11558 {
11559 	ocs_t *ocs = hw->os;
11560 	ocs_hw_rq_buffer_t *rq_buf = NULL;
11561 	ocs_hw_rq_buffer_t *prq;
11562 	uint32_t i;
11563 
11564 	if (count != 0) {
11565 		rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO);
11566 		if (rq_buf == NULL) {
11567 			ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n");
11568 			return NULL;
11569 		}
11570 
11571 		for (i = 0, prq = rq_buf; i < count; i ++, prq++) {
11572 			prq->rqindex = rqindex;
11573 			if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) {
11574 				ocs_log_err(hw->os, "DMA allocation failed\n");
11575 				ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11576 				rq_buf = NULL;
11577 				break;
11578 			}
11579 		}
11580 	}
11581 	return rq_buf;
11582 }
11583 
11584 /**
11585  * @brief Free an ocs_hw_rx_buffer_t array.
11586  *
11587  * @par Description
11588  * The ocs_hw_rx_buffer_t array is freed, along with allocated DMA memory.
11589  *
11590  * @param hw Pointer to HW object.
11591  * @param rq_buf Pointer to ocs_hw_rx_buffer_t array.
11592  * @param count Count of buffers in array.
11593  *
11594  * @return None.
11595  */
11596 static void
11597 ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count)
11598 {
11599 	ocs_t *ocs = hw->os;
11600 	uint32_t i;
11601 	ocs_hw_rq_buffer_t *prq;
11602 
11603 	if (rq_buf != NULL) {
11604 		for (i = 0, prq = rq_buf; i < count; i++, prq++) {
11605 			ocs_dma_free(ocs, &prq->dma);
11606 		}
11607 		ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11608 	}
11609 }
11610 
11611 /**
11612  * @brief Allocate the RQ data buffers.
11613  *
11614  * @param hw Pointer to HW object.
11615  *
11616  * @return Returns 0 on success, or a non-zero value on failure.
11617  */
11618 ocs_hw_rtn_e
11619 ocs_hw_rx_allocate(ocs_hw_t *hw)
11620 {
11621 	ocs_t *ocs = hw->os;
11622 	uint32_t i;
11623 	int32_t rc = OCS_HW_RTN_SUCCESS;
11624 	uint32_t rqindex = 0;
11625 	hw_rq_t *rq;
11626 	uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR;
11627 	uint32_t payload_size = hw->config.rq_default_buffer_size;
11628 
11629 	rqindex = 0;
11630 
11631 	for (i = 0; i < hw->hw_rq_count; i++) {
11632 		rq = hw->hw_rq[i];
11633 
11634 		/* Allocate header buffers */
11635 		rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size);
11636 		if (rq->hdr_buf == NULL) {
11637 			ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n");
11638 			rc = OCS_HW_RTN_ERROR;
11639 			break;
11640 		}
11641 
11642 		ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header  %4d by %4d bytes\n", i, rq->hdr->id,
11643 			      rq->entry_count, hdr_size);
11644 
11645 		rqindex++;
11646 
11647 		/* Allocate payload buffers */
11648 		rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size);
11649 		if (rq->payload_buf == NULL) {
11650 			ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc fb_buf failed\n");
11651 			rc = OCS_HW_RTN_ERROR;
11652 			break;
11653 		}
11654 		ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id,
11655 			      rq->entry_count, payload_size);
11656 		rqindex++;
11657 	}
11658 
11659 	return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
11660 }
11661 
11662 /**
11663  * @brief Post the RQ data buffers to the chip.
11664  *
11665  * @param hw Pointer to HW object.
11666  *
11667  * @return Returns 0 on success, or a non-zero value on failure.
11668  */
11669 ocs_hw_rtn_e
11670 ocs_hw_rx_post(ocs_hw_t *hw)
11671 {
11672 	uint32_t i;
11673 	uint32_t idx;
11674 	uint32_t rq_idx;
11675 	int32_t rc = 0;
11676 
11677 	/*
11678 	 * In RQ pair mode, we MUST post the header and payload buffer at the
11679 	 * same time.
11680 	 */
11681 	for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
11682 		hw_rq_t *rq = hw->hw_rq[rq_idx];
11683 
11684 		for (i = 0; i < rq->entry_count-1; i++) {
11685 			ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++);
11686 			ocs_hw_assert(seq != NULL);
11687 
11688 			seq->header = &rq->hdr_buf[i];
11689 
11690 			seq->payload = &rq->payload_buf[i];
11691 
11692 			rc = ocs_hw_sequence_free(hw, seq);
11693 			if (rc) {
11694 				break;
11695 			}
11696 		}
11697 		if (rc) {
11698 			break;
11699 		}
11700 	}
11701 
11702 	return rc;
11703 }
11704 
11705 /**
11706  * @brief Free the RQ data buffers.
11707  *
11708  * @param hw Pointer to HW object.
11709  *
11710  */
11711 void
11712 ocs_hw_rx_free(ocs_hw_t *hw)
11713 {
11714 	hw_rq_t *rq;
11715 	uint32_t i;
11716 
11717 	/* Free hw_rq buffers */
11718 	for (i = 0; i < hw->hw_rq_count; i++) {
11719 		rq = hw->hw_rq[i];
11720 		if (rq != NULL) {
11721 			ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count);
11722 			rq->hdr_buf = NULL;
11723 			ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count);
11724 			rq->payload_buf = NULL;
11725 		}
11726 	}
11727 }
11728 
11729 /**
11730  * @brief HW async call context structure.
11731  */
11732 typedef struct {
11733 	ocs_hw_async_cb_t callback;
11734 	void *arg;
11735 	uint8_t cmd[SLI4_BMBX_SIZE];
11736 } ocs_hw_async_call_ctx_t;
11737 
11738 /**
11739  * @brief HW async callback handler
11740  *
11741  * @par Description
11742  * This function is called when the NOP mailbox command completes.  The callback stored
11743  * in the requesting context is invoked.
11744  *
11745  * @param hw Pointer to HW object.
11746  * @param status Completion status.
11747  * @param mqe Pointer to mailbox completion queue entry.
11748  * @param arg Caller-provided argument.
11749  *
11750  * @return None.
11751  */
11752 static void
11753 ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11754 {
11755 	ocs_hw_async_call_ctx_t *ctx = arg;
11756 
11757 	if (ctx != NULL) {
11758 		if (ctx->callback != NULL) {
11759 			(*ctx->callback)(hw, status, mqe, ctx->arg);
11760 		}
11761 		ocs_free(hw->os, ctx, sizeof(*ctx));
11762 	}
11763 }
11764 
11765 /**
11766  * @brief Make an async callback using NOP mailbox command
11767  *
11768  * @par Description
11769  * Post a NOP mailbox command; the callback with argument is invoked upon completion
11770  * while in the event processing context.
11771  *
11772  * @param hw Pointer to HW object.
11773  * @param callback Pointer to callback function.
11774  * @param arg Caller-provided callback.
11775  *
11776  * @return Returns 0 on success, or a negative error code value on failure.
11777  */
11778 int32_t
11779 ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg)
11780 {
11781 	ocs_hw_async_call_ctx_t *ctx;
11782 
11783 	/*
11784 	 * Allocate a callback context (which includes the mailbox command buffer), we need
11785 	 * this to be persistent as the mailbox command submission may be queued and executed later
11786 	 * execution.
11787 	 */
11788 	ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT);
11789 	if (ctx == NULL) {
11790 		ocs_log_err(hw->os, "failed to malloc async call context\n");
11791 		return OCS_HW_RTN_NO_MEMORY;
11792 	}
11793 	ctx->callback = callback;
11794 	ctx->arg = arg;
11795 
11796 	/* Build and send a NOP mailbox command */
11797 	if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) {
11798 		ocs_log_err(hw->os, "COMMON_NOP format failure\n");
11799 		ocs_free(hw->os, ctx, sizeof(*ctx));
11800 		return OCS_HW_RTN_ERROR;
11801 	}
11802 
11803 	if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) {
11804 		ocs_log_err(hw->os, "COMMON_NOP command failure\n");
11805 		ocs_free(hw->os, ctx, sizeof(*ctx));
11806 		return OCS_HW_RTN_ERROR;
11807 	}
11808 	return OCS_HW_RTN_SUCCESS;
11809 }
11810 
11811 /**
11812  * @brief Initialize the reqtag pool.
11813  *
11814  * @par Description
11815  * The WQ request tag pool is initialized.
11816  *
11817  * @param hw Pointer to HW object.
11818  *
11819  * @return Returns 0 on success, or a negative error code value on failure.
11820  */
11821 ocs_hw_rtn_e
11822 ocs_hw_reqtag_init(ocs_hw_t *hw)
11823 {
11824 	if (hw->wq_reqtag_pool == NULL) {
11825 		hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE);
11826 		if (hw->wq_reqtag_pool == NULL) {
11827 			ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n");
11828 			return OCS_HW_RTN_NO_MEMORY;
11829 		}
11830 	}
11831 	ocs_hw_reqtag_reset(hw);
11832 	return OCS_HW_RTN_SUCCESS;
11833 }
11834 
11835 /**
11836  * @brief Allocate a WQ request tag.
11837  *
11838  * Allocate and populate a WQ request tag from the WQ request tag pool.
11839  *
11840  * @param hw Pointer to HW object.
11841  * @param callback Callback function.
11842  * @param arg Pointer to callback argument.
11843  *
11844  * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated.
11845  */
11846 hw_wq_callback_t *
11847 ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
11848 {
11849 	hw_wq_callback_t *wqcb;
11850 
11851 	ocs_hw_assert(callback != NULL);
11852 
11853 	wqcb = ocs_pool_get(hw->wq_reqtag_pool);
11854 	if (wqcb != NULL) {
11855 		ocs_hw_assert(wqcb->callback == NULL);
11856 		wqcb->callback = callback;
11857 		wqcb->arg = arg;
11858 	}
11859 	return wqcb;
11860 }
11861 
11862 /**
11863  * @brief Free a WQ request tag.
11864  *
11865  * Free the passed in WQ request tag.
11866  *
11867  * @param hw Pointer to HW object.
11868  * @param wqcb Pointer to WQ request tag object to free.
11869  *
11870  * @return None.
11871  */
11872 void
11873 ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb)
11874 {
11875 	ocs_hw_assert(wqcb->callback != NULL);
11876 	wqcb->callback = NULL;
11877 	wqcb->arg = NULL;
11878 	ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11879 }
11880 
11881 /**
11882  * @brief Return WQ request tag by index.
11883  *
11884  * @par Description
11885  * Return pointer to WQ request tag object given an index.
11886  *
11887  * @param hw Pointer to HW object.
11888  * @param instance_index Index of WQ request tag to return.
11889  *
11890  * @return Pointer to WQ request tag, or NULL.
11891  */
11892 hw_wq_callback_t *
11893 ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index)
11894 {
11895 	hw_wq_callback_t *wqcb;
11896 
11897 	wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index);
11898 	if (wqcb == NULL) {
11899 		ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index);
11900 	}
11901 	return wqcb;
11902 }
11903 
11904 /**
11905  * @brief Reset the WQ request tag pool.
11906  *
11907  * @par Description
11908  * Reset the WQ request tag pool, returning all to the free list.
11909  *
11910  * @param hw pointer to HW object.
11911  *
11912  * @return None.
11913  */
11914 void
11915 ocs_hw_reqtag_reset(ocs_hw_t *hw)
11916 {
11917 	hw_wq_callback_t *wqcb;
11918 	uint32_t i;
11919 
11920 	/* Remove all from freelist */
11921 	while(ocs_pool_get(hw->wq_reqtag_pool) != NULL) {
11922 		;
11923 	}
11924 
11925 	/* Put them all back */
11926 	for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) {
11927 		wqcb->instance_index = i;
11928 		wqcb->callback = NULL;
11929 		wqcb->arg = NULL;
11930 		ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11931 	}
11932 }
11933 
11934 /**
11935  * @brief Handle HW assertion
11936  *
11937  * HW assert, display diagnostic message, and abort.
11938  *
11939  * @param cond string describing failing assertion condition
11940  * @param filename file name
11941  * @param linenum line number
11942  *
11943  * @return none
11944  */
11945 void
11946 _ocs_hw_assert(const char *cond, const char *filename, int linenum)
11947 {
11948 	ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond);
11949 	ocs_abort();
11950 		/* no return */
11951 }
11952 
11953 /**
11954  * @brief Handle HW verify
11955  *
11956  * HW verify, display diagnostic message, dump stack and return.
11957  *
11958  * @param cond string describing failing verify condition
11959  * @param filename file name
11960  * @param linenum line number
11961  *
11962  * @return none
11963  */
11964 void
11965 _ocs_hw_verify(const char *cond, const char *filename, int linenum)
11966 {
11967 	ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond);
11968 	ocs_print_stack();
11969 }
11970 
11971 /**
11972  * @brief Reque XRI
11973  *
11974  * @par Description
11975  * Reque XRI
11976  *
11977  * @param hw Pointer to HW object.
11978  * @param io Pointer to HW IO
11979  *
11980  * @return Return 0 if successful else returns -1
11981  */
11982 int32_t
11983 ocs_hw_reque_xri( ocs_hw_t *hw, ocs_hw_io_t *io )
11984 {
11985 	int32_t rc = 0;
11986 
11987 	rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1);
11988 	if (rc) {
11989 		ocs_list_add_tail(&hw->io_port_dnrx, io);
11990 		rc = -1;
11991 		goto exit_ocs_hw_reque_xri;
11992 	}
11993 
11994 	io->auto_xfer_rdy_dnrx = 0;
11995 	io->type = OCS_HW_IO_DNRX_REQUEUE;
11996 	if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) {
11997 		/* Clear buffer from XRI */
11998 		ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
11999 		io->axr_buf = NULL;
12000 
12001 		ocs_log_err(hw->os, "requeue_xri WQE error\n");
12002 		ocs_list_add_tail(&hw->io_port_dnrx, io);
12003 
12004 		rc = -1;
12005 		goto exit_ocs_hw_reque_xri;
12006 	}
12007 
12008 	if (io->wq == NULL) {
12009 		io->wq = ocs_hw_queue_next_wq(hw, io);
12010 		ocs_hw_assert(io->wq != NULL);
12011 	}
12012 
12013 	/*
12014 	 * Add IO to active io wqe list before submitting, in case the
12015 	 * wcqe processing preempts this thread.
12016 	 */
12017 	OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
12018 	OCS_STAT(io->wq->use_count++);
12019 
12020 	rc = hw_wq_write(io->wq, &io->wqe);
12021 	if (rc < 0) {
12022 		ocs_log_err(hw->os, "sli_queue_write reque xri failed: %d\n", rc);
12023 		rc = -1;
12024 	}
12025 
12026 exit_ocs_hw_reque_xri:
12027 	return 0;
12028 }
12029 
12030 uint32_t
12031 ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn)
12032 {
12033 	sli4_t *sli4 = &ocs->hw.sli;
12034 	ocs_dma_t       dma;
12035 	uint8_t		*payload = NULL;
12036 
12037 	int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan;
12038 
12039 	/* allocate memory for the service parameters */
12040 	if (ocs_dma_alloc(ocs, &dma, 112, 4)) {
12041 		ocs_log_err(ocs, "Failed to allocate DMA memory\n");
12042 		return 1;
12043 	}
12044 
12045 	if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
12046 				&dma, indicator)) {
12047 		ocs_log_err(ocs, "READ_SPARM64 allocation failure\n");
12048 		ocs_dma_free(ocs, &dma);
12049 		return 1;
12050 	}
12051 
12052 	if (sli_bmbx_command(sli4)) {
12053 		ocs_log_err(ocs, "READ_SPARM64 command failure\n");
12054 		ocs_dma_free(ocs, &dma);
12055 		return 1;
12056 	}
12057 
12058 	payload = dma.virt;
12059 	ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn));
12060 	ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn));
12061 	ocs_dma_free(ocs, &dma);
12062 	return 0;
12063 }
12064 
12065 /**
12066  * @page fc_hw_api_overview HW APIs
12067  * - @ref devInitShutdown
12068  * - @ref domain
12069  * - @ref port
12070  * - @ref node
12071  * - @ref io
12072  * - @ref interrupt
12073  *
12074  * <div class="overview">
12075  * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
12076  * message details, but the higher level code must still manage domains, ports,
12077  * IT nexuses, and IOs. The HW API is designed to help the higher level manage
12078  * these objects.<br><br>
12079  *
12080  * The HW uses function callbacks to notify the higher-level code of events
12081  * that are received from the chip. There are currently three types of
12082  * functions that may be registered:
12083  *
12084  * <ul><li>domain – This function is called whenever a domain event is generated
12085  * within the HW. Examples include a new FCF is discovered, a connection
12086  * to a domain is disrupted, and allocation callbacks.</li>
12087  * <li>unsolicited – This function is called whenever new data is received in
12088  * the SLI-4 receive queue.</li>
12089  * <li>rnode – This function is called for remote node events, such as attach status
12090  * and  allocation callbacks.</li></ul>
12091  *
12092  * Upper layer functions may be registered by using the ocs_hw_callback() function.
12093  *
12094  * <img src="elx_fc_hw.jpg" alt="FC/FCoE HW" title="FC/FCoE HW" align="right"/>
12095  * <h2>FC/FCoE HW API</h2>
12096  * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
12097  * interface for creating the necessary common objects and sending I/Os. It may be used
12098  * “as is” in customer implementations or it can serve as an example of typical interactions
12099  * between a driver and the SLI-4 hardware. The broad categories of functionality include:
12100  *
12101  * <ul><li>Setting-up and tearing-down of the HW.</li>
12102  * <li>Allocating and using the common objects (SLI Port, domain, remote node).</li>
12103  * <li>Sending and receiving I/Os.</li></ul>
12104  *
12105  * <h3>HW Setup</h3>
12106  * To set up the HW:
12107  *
12108  * <ol>
12109  * <li>Set up the HW object using ocs_hw_setup().<br>
12110  * This step performs a basic configuration of the SLI-4 component and the HW to
12111  * enable querying the hardware for its capabilities. At this stage, the HW is not
12112  * capable of general operations (such as, receiving events or sending I/Os).</li><br><br>
12113  * <li>Configure the HW according to the driver requirements.<br>
12114  * The HW provides functions to discover hardware capabilities (ocs_hw_get()), as
12115  * well as configures the amount of resources required (ocs_hw_set()). The driver
12116  * must also register callback functions (ocs_hw_callback()) to receive notification of
12117  * various asynchronous events.<br><br>
12118  * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
12119  * step creates the underlying queues, commits resources to the hardware, and
12120  * prepares the hardware for operation. While the hardware is operational, the
12121  * port is not online, and cannot send or receive data.</li><br><br>
12122  * <br><br>
12123  * <li>Finally, the driver can bring the port online (ocs_hw_port_control()).<br>
12124  * When the link comes up, the HW determines if a domain is present and notifies the
12125  * driver using the domain callback function. This is the starting point of the driver's
12126  * interaction with the common objects.<br><br>
12127  * @b Note: For FCoE, there may be more than one domain available and, therefore,
12128  * more than one callback.</li>
12129  * </ol>
12130  *
12131  * <h3>Allocating and Using Common Objects</h3>
12132  * Common objects provide a mechanism through which the various OneCore Storage
12133  * driver components share and track information. These data structures are primarily
12134  * used to track SLI component information but can be extended by other components, if
12135  * needed. The main objects are:
12136  *
12137  * <ul><li>DMA – the ocs_dma_t object describes a memory region suitable for direct
12138  * memory access (DMA) transactions.</li>
12139  * <li>SCSI domain – the ocs_domain_t object represents the SCSI domain, including
12140  * any infrastructure devices such as FC switches and FC forwarders. The domain
12141  * object contains both an FCFI and a VFI.</li>
12142  * <li>SLI Port (sport) – the ocs_sli_port_t object represents the connection between
12143  * the driver and the SCSI domain. The SLI Port object contains a VPI.</li>
12144  * <li>Remote node – the ocs_remote_node_t represents a connection between the SLI
12145  * Port and another device in the SCSI domain. The node object contains an RPI.</li></ul>
12146  *
12147  * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote
12148  * node common objects and establish the connections between them. The goal is to
12149  * connect the driver to the SCSI domain to exchange I/Os with other devices. These
12150  * common object connections are shown in the following figure, FC Driver Common Objects:
12151  * <img src="elx_fc_common_objects.jpg"
12152  * alt="FC Driver Common Objects" title="FC Driver Common Objects" align="center"/>
12153  *
12154  * The first step is to create a connection to the domain by allocating an SLI Port object.
12155  * The SLI Port object represents a particular FC ID and must be initialized with one. With
12156  * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying
12157  * a domain, the driver allocates a domain object and attaches to it using the previous SLI
12158  * port object.<br><br>
12159  *
12160  * @b Note: In some cases, the driver may need to negotiate service parameters (that is,
12161  * FLOGI) with the domain before attaching.<br><br>
12162  *
12163  * Once attached to the domain, the driver can discover and attach to other devices
12164  * (remote nodes). The exact discovery method depends on the driver, but it typically
12165  * includes using a position map, querying the fabric name server, or an out-of-band
12166  * method. In most cases, it is necessary to log in with devices before performing I/Os.
12167  * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must
12168  * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is
12169  * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port
12170  * before exchanging FCP I/O.<br><br>
12171  *
12172  * @b Note: The HW manages both the well known fabric address and the name server as
12173  * nodes in the domain. Therefore, the driver must allocate node objects prior to
12174  * communicating with either of these entities.
12175  *
12176  * <h3>Sending and Receiving I/Os</h3>
12177  * The HW provides separate interfaces for sending BLS/ ELS/ FC-CT and FCP, but the
12178  * commands are conceptually similar. Since the commands complete asynchronously,
12179  * the caller must provide a HW I/O object that maintains the I/O state, as well as
12180  * provide a callback function. The driver may use the same callback function for all I/O
12181  * operations, but each operation must use a unique HW I/O object. In the SLI-4
12182  * architecture, there is a direct association between the HW I/O object and the SGL used
12183  * to describe the data. Therefore, a driver typically performs the following operations:
12184  *
12185  * <ul><li>Allocates a HW I/O object (ocs_hw_io_alloc()).</li>
12186  * <li>Formats the SGL, specifying both the HW I/O object and the SGL.
12187  * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).</li>
12188  * <li>Sends the HW I/O (ocs_hw_io_send()).</li></ul>
12189  *
12190  * <h3>HW Tear Down</h3>
12191  * To tear-down the HW:
12192  *
12193  * <ol><li>Take the port offline (ocs_hw_port_control()) to prevent receiving further
12194  * data andevents.</li>
12195  * <li>Destroy the HW object (ocs_hw_teardown()).</li>
12196  * <li>Free any memory used by the HW, such as buffers for unsolicited data.</li></ol>
12197  * <br>
12198  * </div><!-- overview -->
12199  *
12200  */
12201 
12202 /**
12203  * This contains all hw runtime workaround code.  Based on the asic type,
12204  * asic revision, and range of fw revisions, a particular workaround may be enabled.
12205  *
12206  * A workaround may consist of overriding a particular HW/SLI4 value that was initialized
12207  * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue
12208  * sizes). Or if required, elements of the ocs_hw_workaround_t structure may be set to
12209  * control specific runtime behavior.
12210  *
12211  * It is intended that the controls in ocs_hw_workaround_t be defined functionally.  So we
12212  * would have the driver look like:  "if (hw->workaround.enable_xxx) then ...", rather than
12213  * what we might previously see as "if this is a BE3, then do xxx"
12214  *
12215  */
12216 
12217 #define HW_FWREV_ZERO		(0ull)
12218 #define HW_FWREV_MAX		(~0ull)
12219 
12220 #define SLI4_ASIC_TYPE_ANY	0
12221 #define SLI4_ASIC_REV_ANY	0
12222 
12223 /**
12224  * @brief Internal definition of workarounds
12225  */
12226 
12227 typedef enum {
12228 	HW_WORKAROUND_TEST = 1,
12229 	HW_WORKAROUND_MAX_QUEUE,	/**< Limits all queues */
12230 	HW_WORKAROUND_MAX_RQ,		/**< Limits only the RQ */
12231 	HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH,
12232 	HW_WORKAROUND_WQE_COUNT_METHOD,
12233 	HW_WORKAROUND_RQE_COUNT_METHOD,
12234 	HW_WORKAROUND_USE_UNREGISTERD_RPI,
12235 	HW_WORKAROUND_DISABLE_AR_TGT_DIF, /**< Disable of auto-response target DIF */
12236 	HW_WORKAROUND_DISABLE_SET_DUMP_LOC,
12237 	HW_WORKAROUND_USE_DIF_QUARANTINE,
12238 	HW_WORKAROUND_USE_DIF_SEC_XRI,		/**< Use secondary xri for multiple data phases */
12239 	HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB,	/**< FCFI reported in SRB not correct, use "first" registered domain */
12240 	HW_WORKAROUND_FW_VERSION_TOO_LOW,	/**< The FW version is not the min version supported by this driver */
12241 	HW_WORKAROUND_SGLC_MISREPORTED,	/**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */
12242 	HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE,	/**< Don't use SEND_FRAME capable if FW version is too old */
12243 } hw_workaround_e;
12244 
12245 /**
12246  * @brief Internal workaround structure instance
12247  */
12248 
12249 typedef struct {
12250 	sli4_asic_type_e asic_type;
12251 	sli4_asic_rev_e asic_rev;
12252 	uint64_t fwrev_low;
12253 	uint64_t fwrev_high;
12254 
12255 	hw_workaround_e workaround;
12256 	uint32_t value;
12257 } hw_workaround_t;
12258 
12259 static hw_workaround_t hw_workarounds[] = {
12260 	{SLI4_ASIC_TYPE_ANY,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12261 		HW_WORKAROUND_TEST, 999},
12262 
12263 	/* Bug: 127585: if_type == 2 returns 0 for total length placed on
12264 	 * FCP_TSEND64_WQE completions.   Note, original driver code enables this
12265 	 * workaround for all asic types
12266 	 */
12267 	{SLI4_ASIC_TYPE_ANY,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12268 		HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0},
12269 
12270 	/* Bug: unknown, Lancer A0 has mis-reported max queue depth */
12271 	{SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX,
12272 		HW_WORKAROUND_MAX_QUEUE, 2048},
12273 
12274 	/* Bug: 143399, BE3 has mis-reported max RQ queue depth */
12275 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0),
12276 		HW_WORKAROUND_MAX_RQ, 2048},
12277 
12278 	/* Bug: 143399, skyhawk has mis-reported max RQ queue depth */
12279 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0),
12280 		HW_WORKAROUND_MAX_RQ, 2048},
12281 
12282 	/* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */
12283 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12284 		HW_WORKAROUND_WQE_COUNT_METHOD, 1},
12285 
12286 	/* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */
12287 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12288 		HW_WORKAROUND_RQE_COUNT_METHOD, 1},
12289 
12290 	/* Bug: 142968, BE3 UE with RPI == 0xffff */
12291 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12292 		HW_WORKAROUND_USE_UNREGISTERD_RPI, 0},
12293 
12294 	/* Bug: unknown, Skyhawk won't support auto-response on target T10-PI  */
12295 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12296 		HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0},
12297 
12298 	{SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0),
12299 		HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0},
12300 
12301 	/* Bug: 160124, Skyhawk quarantine DIF XRIs  */
12302 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12303 		HW_WORKAROUND_USE_DIF_QUARANTINE, 0},
12304 
12305 	/* Bug: 161832, Skyhawk use secondary XRI for multiple data phase TRECV */
12306 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12307 		HW_WORKAROUND_USE_DIF_SEC_XRI, 0},
12308 
12309 	/* Bug: xxxxxx, FCFI reported in SRB not corrrect */
12310 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12311 		HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0},
12312 #if 0
12313 	/* Bug: 165642, FW version check for driver */
12314 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER),
12315 		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12316 #endif
12317 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK),
12318 		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12319 
12320 	/* Bug 177061, Lancer FW does not set the SGLC bit */
12321 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12322 		HW_WORKAROUND_SGLC_MISREPORTED, 0},
12323 
12324 	/* BZ 181208/183914, enable this workaround for ALL revisions */
12325 	{SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12326 		HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0},
12327 };
12328 
12329 /**
12330  * @brief Function prototypes
12331  */
12332 
12333 static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w);
12334 
12335 /**
12336  * @brief Parse the firmware version (name)
12337  *
12338  * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined
12339  * by the HW_FWREV() macro
12340  *
12341  * @param fwrev_string pointer to the firmware string
12342  *
12343  * @return packed firmware revision value
12344  */
12345 
12346 static uint64_t
12347 parse_fw_version(const char *fwrev_string)
12348 {
12349 	int v[4] = {0};
12350 	const char *p;
12351 	int i;
12352 
12353 	for (p = fwrev_string, i = 0; *p && (i < 4); i ++) {
12354 		v[i] = ocs_strtoul(p, 0, 0);
12355 		while(*p && *p != '.') {
12356 			p ++;
12357 		}
12358 		if (*p) {
12359 			p ++;
12360 		}
12361 	}
12362 
12363 	/* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */
12364 	if (v[2] == 9999) {
12365 		return HW_FWREV_MAX;
12366 	} else {
12367 		return HW_FWREV(v[0], v[1], v[2], v[3]);
12368 	}
12369 }
12370 
12371 /**
12372  * @brief Test for a workaround match
12373  *
12374  * Looks at the asic type, asic revision, and fw revision, and returns TRUE if match.
12375  *
12376  * @param hw Pointer to the HW structure
12377  * @param w Pointer to a workaround structure entry
12378  *
12379  * @return Return TRUE for a match
12380  */
12381 
12382 static int32_t
12383 ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w)
12384 {
12385 	return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) &&
12386 		    ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) &&
12387 		    (w->fwrev_low <= hw->workaround.fwrev) &&
12388 		    ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high)));
12389 }
12390 
12391 /**
12392  * @brief Setup HW runtime workarounds
12393  *
12394  * The function is called at the end of ocs_hw_setup() to setup any runtime workarounds
12395  * based on the HW/SLI setup.
12396  *
12397  * @param hw Pointer to HW structure
12398  *
12399  * @return none
12400  */
12401 
12402 void
12403 ocs_hw_workaround_setup(struct ocs_hw_s *hw)
12404 {
12405 	hw_workaround_t *w;
12406 	sli4_t *sli4 = &hw->sli;
12407 	uint32_t i;
12408 
12409 	/* Initialize the workaround settings */
12410 	ocs_memset(&hw->workaround, 0, sizeof(hw->workaround));
12411 
12412 	/* If hw_war_version is non-null, then its a value that was set by a module parameter
12413 	 * (sorry for the break in abstraction, but workarounds are ... well, workarounds)
12414 	 */
12415 
12416 	if (hw->hw_war_version) {
12417 		hw->workaround.fwrev = parse_fw_version(hw->hw_war_version);
12418 	} else {
12419 		hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]);
12420 	}
12421 
12422 	/* Walk the workaround list, if a match is found, then handle it */
12423 	for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) {
12424 		if (ocs_hw_workaround_match(hw, w)) {
12425 			switch(w->workaround) {
12426 			case HW_WORKAROUND_TEST: {
12427 				ocs_log_debug(hw->os, "Override: test: %d\n", w->value);
12428 				break;
12429 			}
12430 
12431 			case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: {
12432 				ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n");
12433 				hw->workaround.retain_tsend_io_length = 1;
12434 				break;
12435 			}
12436 			case HW_WORKAROUND_MAX_QUEUE: {
12437 				sli4_qtype_e q;
12438 
12439 				ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value);
12440 				for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
12441 					if (hw->num_qentries[q] > w->value) {
12442 						hw->num_qentries[q] = w->value;
12443 					}
12444 				}
12445 				break;
12446 			}
12447 			case HW_WORKAROUND_MAX_RQ: {
12448 				ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value);
12449 				if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) {
12450 					hw->num_qentries[SLI_QTYPE_RQ] = w->value;
12451 				}
12452 				break;
12453 			}
12454 			case HW_WORKAROUND_WQE_COUNT_METHOD: {
12455 				ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value);
12456 				sli4->config.count_method[SLI_QTYPE_WQ] = w->value;
12457 				sli_calc_max_qentries(sli4);
12458 				break;
12459 			}
12460 			case HW_WORKAROUND_RQE_COUNT_METHOD: {
12461 				ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value);
12462 				sli4->config.count_method[SLI_QTYPE_RQ] = w->value;
12463 				sli_calc_max_qentries(sli4);
12464 				break;
12465 			}
12466 			case HW_WORKAROUND_USE_UNREGISTERD_RPI:
12467 				ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n");
12468 				hw->workaround.use_unregistered_rpi = TRUE;
12469 				/*
12470 				 * Allocate an RPI that is never registered, to be used in the case where
12471 				 * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF
12472 				 */
12473 				if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid,
12474 					&hw->workaround.unregistered_index)) {
12475 					ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n");
12476 					hw->workaround.use_unregistered_rpi = FALSE;
12477 				}
12478 				break;
12479 			case HW_WORKAROUND_DISABLE_AR_TGT_DIF:
12480 				ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n");
12481 				hw->workaround.disable_ar_tgt_dif = TRUE;
12482 				break;
12483 			case HW_WORKAROUND_DISABLE_SET_DUMP_LOC:
12484 				ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n");
12485 				hw->workaround.disable_dump_loc = TRUE;
12486 				break;
12487 			case HW_WORKAROUND_USE_DIF_QUARANTINE:
12488 				ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n");
12489 				hw->workaround.use_dif_quarantine = TRUE;
12490 				break;
12491 			case HW_WORKAROUND_USE_DIF_SEC_XRI:
12492 				ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n");
12493 				hw->workaround.use_dif_sec_xri = TRUE;
12494 				break;
12495 			case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB:
12496 				ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n");
12497 				hw->workaround.override_fcfi = TRUE;
12498 				break;
12499 
12500 			case HW_WORKAROUND_FW_VERSION_TOO_LOW:
12501 				ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n");
12502 				hw->workaround.fw_version_too_low = TRUE;
12503 				break;
12504 			case HW_WORKAROUND_SGLC_MISREPORTED:
12505 				ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n");
12506 				hw->workaround.sglc_misreported = TRUE;
12507 				break;
12508 			case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE:
12509 				ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n");
12510 				hw->workaround.ignore_send_frame = TRUE;
12511 				break;
12512 			} /* switch(w->workaround) */
12513 		}
12514 	}
12515 }
12516