/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Defines and implements the Hardware Abstraction Layer (HW).
 * All interaction with the hardware is performed through the HW, which abstracts
 * the details of the underlying SLI-4 implementation.
 */

/**
 * @defgroup devInitShutdown Device Initialization and Shutdown
 * @defgroup domain Domain Functions
 * @defgroup port Port Functions
 * @defgroup node Remote Node Functions
 * @defgroup io IO Functions
 * @defgroup interrupt Interrupt handling
 * @defgroup os OS Required Functions
 */
#include "ocs.h"
#include "ocs_os.h"
#include "ocs_hw.h"
#include "ocs_hw_queues.h"

#define OCS_HW_MQ_DEPTH	128
#define OCS_HW_READ_FCF_SIZE	4096
#define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS	256
#define OCS_HW_WQ_TIMER_PERIOD_MS	500

/* values used for setting the auto xfer rdy parameters */
#define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT		0 /* 512 bytes */
#define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT	TRUE
#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT	FALSE
#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT	0
#define OCS_HW_REQUE_XRI_REGTAG			65534
/* max command and response buffer lengths -- arbitrary at the moment */
#define OCS_HW_DMTF_CLP_CMD_MAX	256
#define OCS_HW_DMTF_CLP_RSP_MAX	256

/* HW global data */
ocs_hw_global_t hw_global;

static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
static int32_t ocs_hw_cb_link(void *, void *);
static int32_t ocs_hw_cb_fip(void *, void *);
static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
static int32_t ocs_hw_flush(ocs_hw_t *);
static int32_t ocs_hw_command_cancel(ocs_hw_t *);
static int32_t ocs_hw_io_cancel(ocs_hw_t *);
static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);

static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
static void ocs_hw_io_free_internal(void *arg);
static void ocs_hw_io_free_port_owned(void *arg);
static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);

/* HW domain database operations */
static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);

/* Port state machine */
static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);

/* Domain state machine */
static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);

/* BZ 161832 */
static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);

/* WQE timeouts */
static void target_wqe_timer_cb(void *arg);
static void shutdown_target_wqe_timer(ocs_hw_t *hw);

/* WQE timeout for initiator IOs */
static inline uint8_t
ocs_hw_set_io_wqe_timeout(ocs_hw_io_t *io, uint32_t timeout)
{
	if (timeout > 255) {
		io->wqe_timeout = timeout;
		return 0;
	} else {
		return timeout;
	}
}
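
/*
 * Illustrative sketch (not driver code) of the two timeout regimes above:
 * a timeout that fits the WQE's 8-bit timer field is returned so that the
 * hardware enforces it; anything larger is stashed in io->wqe_timeout and
 * enforced in software via the io_timed_wqe list.
 *
 *	uint8_t tov;
 *
 *	tov = ocs_hw_set_io_wqe_timeout(io, 60);	returns 60; HW enforces it
 *	tov = ocs_hw_set_io_wqe_timeout(io, 600);	returns 0; wqe_timeout = 600
 *	(build the WQE using tov, then:)
 *	ocs_hw_add_io_timed_wqe(hw, io);	no-op unless emulate_wqe_timeout
 *						and io->wqe_timeout are set
 */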

static inline void
ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (hw->config.emulate_wqe_timeout && io->wqe_timeout) {
		/*
		 * Active WQE list currently only used for
		 * target WQE timeouts.
		 */
		ocs_lock(&hw->io_lock);
			ocs_list_add_tail(&hw->io_timed_wqe, io);
			getmicrouptime(&io->submit_time);
		ocs_unlock(&hw->io_lock);
	}
}

static inline void
ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (hw->config.emulate_wqe_timeout) {
		/*
		 * If target wqe timeouts are enabled,
		 * remove from active wqe list.
		 */
		ocs_lock(&hw->io_lock);
			if (ocs_list_on_list(&io->wqe_link)) {
				ocs_list_remove(&hw->io_timed_wqe, io);
			}
		ocs_unlock(&hw->io_lock);
	}
}

static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type)
{
	switch (io_type) {
	case OCS_HW_IO_INITIATOR_READ:
	case OCS_HW_IO_INITIATOR_WRITE:
	case OCS_HW_IO_INITIATOR_NODATA:
	case OCS_HW_FC_CT:
	case OCS_HW_ELS_REQ:
		return 1;
	default:
		return 0;
	}
}

static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
{
	/* if exchange not active, nothing to abort */
	if (!xb) {
		return FALSE;
	}
	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
		switch (ext) {
		/* exceptions where abort is not needed */
		case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */
		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */
			return FALSE;
		default:
			break;
		}
	}
	return TRUE;
}
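
/*
 * Worked example (illustrative only) of the decision above for a WCQE:
 *
 *	xb == 0 (exchange no longer active)           -> FALSE
 *	LOCAL_REJECT with ext INVALID_RPI             -> FALSE
 *	LOCAL_REJECT with ext ABORT_REQUESTED         -> FALSE
 *	any other status while xb == 1                -> TRUE (issue an abort)
 */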

/**
 * @brief Determine the number of chutes on the device.
 *
 * @par Description
 * Some devices require queue resources allocated per protocol processor
 * (chute). This function returns the number of chutes on this device.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns the number of chutes on the device.
 */
static uint32_t
ocs_hw_get_num_chutes(ocs_hw_t *hw)
{
	uint32_t num_chutes = 1;

	if (sli_get_is_dual_ulp_capable(&hw->sli) &&
	    sli_get_is_ulp_enabled(&hw->sli, 0) &&
	    sli_get_is_ulp_enabled(&hw->sli, 1)) {
		num_chutes = 2;
	}
	return num_chutes;
}

static ocs_hw_rtn_e
ocs_hw_link_event_init(ocs_hw_t *hw)
{
	ocs_hw_assert(hw);

	hw->link.status = SLI_LINK_STATUS_MAX;
	hw->link.topology = SLI_LINK_TOPO_NONE;
	hw->link.medium = SLI_LINK_MEDIUM_MAX;
	hw->link.speed = 0;
	hw->link.loop_map = NULL;
	hw->link.fc_id = UINT32_MAX;

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup devInitShutdown
 * @brief If this is physical port 0, then read the max dump size.
 *
 * @par Description
 * Queries the FW for the maximum dump size.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static ocs_hw_rtn_e
ocs_hw_read_max_dump_size(ocs_hw_t *hw)
{
	uint8_t	buf[SLI4_BMBX_SIZE];
	uint8_t bus, dev, func;
	int	rc;

	/* lancer only */
	if ((SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) &&
	    (SLI4_IF_TYPE_LANCER_G7 != sli_get_if_type(&hw->sli))) {
		ocs_log_debug(hw->os, "Function only supported for I/F type 2\n");
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Make sure the FW is new enough to support this command. If the FW
	 * is too old, the FW will UE.
	 */
	if (hw->workaround.disable_dump_loc) {
		ocs_log_test(hw->os, "FW version is too old for this feature\n");
		return OCS_HW_RTN_ERROR;
	}

	/* attempt to determine the dump size for function 0 only. */
	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
	if (func == 0) {
		if (sli_cmd_common_set_dump_location(&hw->sli, buf,
							SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
			sli4_res_common_set_dump_location_t *rsp =
				(sli4_res_common_set_dump_location_t *)
				(buf + offsetof(sli4_cmd_sli_config_t,
						payload.embed));

			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_test(hw->os, "set dump location command failed\n");
				return rc;
			} else {
				hw->dump_size = rsp->buffer_length;
				ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);
			}
		}
	}
	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup devInitShutdown
 * @brief Set up the Hardware Abstraction Layer module.
 *
 * @par Description
 * Calls set up to configure the hardware.
 *
 * @param hw Hardware context allocated by the caller.
 * @param os Device abstraction.
 * @param port_type Protocol type of port, such as FC and NIC.
 *
 * @todo Why is port_type a parameter?
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
{
	uint32_t i;
	char prop_buf[32];

	if (hw == NULL) {
		ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->hw_setup_called) {
		/* Setup run-time workarounds.
		 * Call for each setup, to allow for hw_war_version.
		 */
		ocs_hw_workaround_setup(hw);
		return OCS_HW_RTN_SUCCESS;
	}

	/*
	 * ocs_hw_init() relies on NULL pointers indicating that a structure
	 * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
	 * free/realloc that memory.
	 */
	ocs_memset(hw, 0, sizeof(ocs_hw_t));

	hw->hw_setup_called = TRUE;

	hw->os = os;

	ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
	ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
	ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
	hw->cmd_head_count = 0;

	ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
	ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));

	ocs_atomic_init(&hw->io_alloc_failed_count, 0);

	hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
	hw->config.dif_seed = 0;
	hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
	hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
	hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
	hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;

	if (sli_setup(&hw->sli, hw->os, port_type)) {
		ocs_log_err(hw->os, "SLI setup failed\n");
		return OCS_HW_RTN_ERROR;
	}

	ocs_memset(hw->domains, 0, sizeof(hw->domains));

	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

	ocs_hw_link_event_init(hw);

	sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
	sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);

	/*
	 * Set all the queue sizes to the maximum allowed. These values may
	 * be changed later by the adjust and workaround functions.
	 */
	for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
		hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);
	}

	/*
	 * The RQ assignment for RQ pair mode.
	 */
	hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
	hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
	if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
		hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);
	}

	/* by default, enable initiator-only auto-ABTS emulation */
	hw->config.i_only_aab = TRUE;

	/* Setup run-time workarounds */
	ocs_hw_workaround_setup(hw);

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (hw->workaround.override_fcfi) {
		hw->first_domain_idx = -1;
	}

	/* Must be done after the workaround setup */
	if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) ||
	    (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))) {
		(void)ocs_hw_read_max_dump_size(hw);
	}

	/* calculate the number of WQs required. */
	ocs_hw_adjust_wqs(hw);

	/* Set the default dif mode */
	if (!sli_is_dif_inline_capable(&hw->sli)) {
		ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
		hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;
	}
	/* Workaround: BZ 161832 */
	if (hw->workaround.use_dif_sec_xri) {
		ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
	}

	/*
	 * Figure out the starting and max ULP to spread the WQs across the
	 * ULPs.
	 */
	if (sli_get_is_dual_ulp_capable(&hw->sli)) {
		if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
		    sli_get_is_ulp_enabled(&hw->sli, 1)) {
			hw->ulp_start = 0;
			hw->ulp_max   = 1;
		} else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
			hw->ulp_start = 0;
			hw->ulp_max   = 0;
		} else {
			hw->ulp_start = 1;
			hw->ulp_max   = 1;
		}
	} else {
		if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
			hw->ulp_start = 0;
			hw->ulp_max   = 0;
		} else {
			hw->ulp_start = 1;
			hw->ulp_max   = 1;
		}
	}
	ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
		hw->ulp_start, hw->ulp_max);
	hw->config.queue_topology = hw_global.queue_topology_string;

	hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);

	hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
	hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
	hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
	hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
	hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];

	/* Verify qtop configuration against driver supported configuration */
	if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
		ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
				OCE_HW_MAX_NUM_MRQ_PAIRS);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
		ocs_log_crit(hw->os, "Max supported EQs = %d\n",
				OCS_HW_MAX_NUM_EQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
		ocs_log_crit(hw->os, "Max supported CQs = %d\n",
				OCS_HW_MAX_NUM_CQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
		ocs_log_crit(hw->os, "Max supported WQs = %d\n",
				OCS_HW_MAX_NUM_WQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
		ocs_log_crit(hw->os, "Max supported MQs = %d\n",
				OCS_HW_MAX_NUM_MQ);
		return OCS_HW_RTN_ERROR;
	}

	return OCS_HW_RTN_SUCCESS;
}
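
/*
 * Bring-up sketch (illustrative only; error handling omitted) of the
 * intended calling order of the entry points in this file, per the
 * function descriptions above and below:
 *
 *	ocs_hw_setup(hw, os, port_type);	one-time SLI/config setup
 *	ocs_hw_init(hw);			queues, IOs, FCFI registration
 *	...
 *	ocs_hw_reset(hw, OCS_HW_RESET_FUNCTION);	optional recovery path;
 *	ocs_hw_init(hw);				init must run again
 *	...
 *	ocs_hw_teardown(hw);			free queues and memory
 */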

/**
 * @ingroup devInitShutdown
 * @brief Allocate memory structures to prepare for the device operation.
 *
 * @par Description
 * Allocates memory structures needed by the device and prepares the device
 * for operation.
 * @n @n @b Note: This function may be called more than once (for example, at
 * initialization and then after a reset), but the size of the internal resources
 * may not be changed without tearing down the HW (ocs_hw_teardown()).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_init(ocs_hw_t *hw)
{
	ocs_hw_rtn_e	rc;
	uint32_t	i = 0;
	uint8_t		buf[SLI4_BMBX_SIZE];
	uint32_t	max_rpi;
	int		rem_count;
	int		written_size = 0;
	uint32_t	count;
	char		prop_buf[32];
	uint32_t ramdisc_blocksize = 512;
	uint32_t q_count = 0;

	/*
	 * Make sure the command lists are empty. If this is start-of-day,
	 * they'll be empty since they were just initialized in ocs_hw_setup.
	 * If we've just gone through a reset, the command and command pending
	 * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
	 */
	ocs_lock(&hw->cmd_lock);
		if (!ocs_list_empty(&hw->cmd_head)) {
			ocs_log_test(hw->os, "command found on cmd list\n");
			ocs_unlock(&hw->cmd_lock);
			return OCS_HW_RTN_ERROR;
		}
		if (!ocs_list_empty(&hw->cmd_pending)) {
			ocs_log_test(hw->os, "command found on pending list\n");
			ocs_unlock(&hw->cmd_lock);
			return OCS_HW_RTN_ERROR;
		}
	ocs_unlock(&hw->cmd_lock);

	/* Free RQ buffers if previously allocated */
	ocs_hw_rx_free(hw);

	/*
	 * The IO queues must be initialized here for the reset case. The
	 * ocs_hw_init_io() function will re-add the IOs to the free list.
	 * The cmd_head list should be OK since we free all entries in
	 * ocs_hw_command_cancel() that is called in the ocs_hw_reset().
	 */

	/* If we are in this function due to a reset, there may be stale items
	 * on lists that need to be removed.  Clean them up.
	 */
	rem_count = 0;
	if (ocs_list_valid(&hw->io_wait_free)) {
		while ((!ocs_list_empty(&hw->io_wait_free))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_wait_free);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);
		}
	}
	rem_count = 0;
	if (ocs_list_valid(&hw->io_inuse)) {
		while ((!ocs_list_empty(&hw->io_inuse))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_inuse);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);
		}
	}
	rem_count = 0;
	if (ocs_list_valid(&hw->io_free)) {
		while ((!ocs_list_empty(&hw->io_free))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_free);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);
		}
	}
	if (ocs_list_valid(&hw->io_port_owned)) {
		while ((!ocs_list_empty(&hw->io_port_owned))) {
			ocs_list_remove_head(&hw->io_port_owned);
		}
	}
	ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
	ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);

	/* If MRQ is not required, make sure we don't request the feature. */
	if (hw->config.n_rq == 1) {
		hw->sli.config.features.flag.mrqp = FALSE;
	}

	if (sli_init(&hw->sli)) {
		ocs_log_err(hw->os, "SLI failed to initialize\n");
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Enable the auto xfer rdy feature if requested.
	 */
	hw->auto_xfer_rdy_enabled = FALSE;
	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
	    hw->config.auto_xfer_rdy_size > 0) {
		if (hw->config.esoc) {
			if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
				ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
			}
			written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
		} else {
			written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);
		}
		if (written_size) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "config auto xfer rdy failed\n");
				return rc;
			}
		}
		hw->auto_xfer_rdy_enabled = TRUE;

		if (hw->config.auto_xfer_rdy_t10_enable) {
			rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");
				return rc;
			}
		}
	}

	if (hw->sliport_healthcheck) {
		rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "Enabling SLI port health check failed\n");
			return rc;
		}
	}

	/*
	 * Set FDT transfer hint; only works on Lancer.
	 */
	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
		/*
		 * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on
		 * devices with legacy firmware that do not support the OCS_HW_FDT_XFER_HINT feature.
		 */
		ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
	}

	/*
	 * Verify that we have not exceeded any queue sizes
	 */
	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
					OCS_HW_MAX_NUM_EQ);
	if (hw->config.n_eq > q_count) {
		ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
			    hw->config.n_eq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
					OCS_HW_MAX_NUM_CQ);
	if (hw->config.n_cq > q_count) {
		ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
			    hw->config.n_cq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
					OCS_HW_MAX_NUM_MQ);
	if (hw->config.n_mq > q_count) {
		ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
			    hw->config.n_mq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
					OCS_HW_MAX_NUM_RQ);
	if (hw->config.n_rq > q_count) {
		ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
			    hw->config.n_rq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
					OCS_HW_MAX_NUM_WQ);
	if (hw->config.n_wq > q_count) {
		ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
			    hw->config.n_wq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	/* zero the hashes */
	ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
	ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
			OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);

	ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
	ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
			OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);

	ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
	ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
			OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);

	rc = ocs_hw_init_queues(hw, hw->qtop);
	if (rc != OCS_HW_RTN_SUCCESS) {
		return rc;
	}

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
	if (i) {
		ocs_dma_t payload_memory;

		rc = OCS_HW_RTN_ERROR;

		if (hw->rnode_mem.size) {
			ocs_dma_free(hw->os, &hw->rnode_mem);
		}

		if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
			ocs_log_err(hw->os, "remote node memory allocation fail\n");
			return OCS_HW_RTN_NO_MEMORY;
		}

		payload_memory.size = 0;
		if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
					&hw->rnode_mem, UINT16_MAX, &payload_memory)) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

			if (payload_memory.size != 0) {
				/* The command was non-embedded - need to free the dma buffer */
				ocs_dma_free(hw->os, &payload_memory);
			}
		}

		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "header template registration failed\n");
			return rc;
		}
	}

	/* Allocate and post RQ buffers */
	rc = ocs_hw_rx_allocate(hw);
	if (rc) {
		ocs_log_err(hw->os, "rx_allocate failed\n");
		return rc;
	}

	/* Populate hw->seq_free_list */
	if (hw->seq_pool == NULL) {
		uint32_t count = 0;
		uint32_t i;

		/* Sum up the total number of RQ entries, to use to allocate the sequence object pool */
		for (i = 0; i < hw->hw_rq_count; i++) {
			count += hw->hw_rq[i]->entry_count;
		}

		hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
		if (hw->seq_pool == NULL) {
			ocs_log_err(hw->os, "malloc seq_pool failed\n");
			return OCS_HW_RTN_NO_MEMORY;
		}
	}

	if (ocs_hw_rx_post(hw)) {
		ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");
	}

	/* Allocate rpi_ref if not previously allocated */
	if (hw->rpi_ref == NULL) {
		hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
					  OCS_M_ZERO | OCS_M_NOWAIT);
		if (hw->rpi_ref == NULL) {
			ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
			return OCS_HW_RTN_NO_MEMORY;
		}
	}

	for (i = 0; i < max_rpi; i++) {
		ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
		ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
	}

	ocs_memset(hw->domains, 0, sizeof(hw->domains));

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (hw->workaround.override_fcfi) {
		hw->first_domain_idx = -1;
	}

	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

	/* Register a FCFI to allow unsolicited frames to be routed to the driver */
	if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
		if (hw->hw_mrq_count) {
			ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");

			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");
				return rc;
			}

			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");
				return rc;
			}
		} else {
			sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];

			ocs_log_debug(hw->os, "using REG_FCFI standard\n");

			/* Set the filter match/mask values from hw's filter_def values */
			for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
				rq_cfg[i].rq_id = 0xffff;
				rq_cfg[i].r_ctl_mask =	(uint8_t)  hw->config.filter_def[i];
				rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
				rq_cfg[i].type_mask =	(uint8_t) (hw->config.filter_def[i] >> 16);
				rq_cfg[i].type_match =	(uint8_t) (hw->config.filter_def[i] >> 24);
			}
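
			/*
			 * For reference (derived from the unpacking above),
			 * each 32-bit filter_def word packs the REG_FCFI RQ
			 * filter fields as:
			 *
			 *	bits  7:0	r_ctl_mask
			 *	bits 15:8	r_ctl_match
			 *	bits 23:16	type_mask
			 *	bits 31:24	type_match
			 */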

			/*
			 * Update the rq_id's of the FCF configuration (don't update more than the number
			 * of rq_cfg elements)
			 */
			for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
				hw_rq_t *rq = hw->hw_rq[i];
				uint32_t j;
				for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
					uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
					if (mask & (1U << j)) {
						rq_cfg[j].rq_id = rq->hdr->id;
						ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
							j, hw->config.filter_def[j], i, rq->hdr->id);
					}
				}
			}

			rc = OCS_HW_RTN_ERROR;

			if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
				rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			}

			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "FCFI registration failed\n");
				return rc;
			}
			hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;
		}
	}

	/*
	 * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
	 * thus the pool allocation size of 64k)
	 */
	rc = ocs_hw_reqtag_init(hw);
	if (rc) {
		ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);
		return rc;
	}

	rc = ocs_hw_setup_io(hw);
	if (rc) {
		ocs_log_err(hw->os, "IO allocation failure\n");
		return rc;
	}

	rc = ocs_hw_init_io(hw);
	if (rc) {
		ocs_log_err(hw->os, "IO initialization failure\n");
		return rc;
	}

	ocs_queue_history_init(hw->os, &hw->q_hist);

	/* get hw link config; polling, so callback will be called immediately */
	hw->linkcfg = OCS_HW_LINKCFG_NA;
	ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);

	/* if lancer ethernet, ethernet ports need to be enabled */
	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
	    (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
		if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
			/* log warning but continue */
			ocs_log_err(hw->os, "Failed to set ethernet license\n");
		}
	}

	/* Set the DIF seed - only for lancer right now */
	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) &&
	    ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(hw->os, "Failed to set DIF seed value\n");
		return OCS_HW_RTN_ERROR;
	}

	/* Set the DIF mode - skyhawk only */
	if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
	    sli_get_dif_capable(&hw->sli)) {
		rc = ocs_hw_set_dif_mode(hw);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "Failed to set DIF mode value\n");
			return rc;
		}
	}

	/*
	 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries
	 */
	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);
	}

	/*
	 * Initialize RQ hash
	 */
	for (i = 0; i < hw->rq_count; i++) {
		ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
	}

	/*
	 * Initialize WQ hash
	 */
	for (i = 0; i < hw->wq_count; i++) {
		ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
	}

	/*
	 * Arming the CQ allows (e.g.) MQ completions to write CQ entries
	 */
	for (i = 0; i < hw->cq_count; i++) {
		ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
		sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
	}

	/* record the fact that the queues are functional */
	hw->state = OCS_HW_STATE_ACTIVE;

	/* Note: must be after the IOs are set up and the state is active */
	if (ocs_hw_rqpair_init(hw)) {
		ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");
	}

	/* finally kick off periodic timer to check for timed out target WQEs */
	if (hw->config.emulate_wqe_timeout) {
		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
				OCS_HW_WQ_TIMER_PERIOD_MS);
	}

	/*
	 * Allocate HW IOs for send frame: one for each Class 1 WQ, or if there
	 * are none of those, one for WQ[0].
	 */
	if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
		for (i = 0; i < count; i++) {
			hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
			wq->send_frame_io = ocs_hw_io_alloc(hw);
			if (wq->send_frame_io == NULL) {
				ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
			}
		}
	} else {
		hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
		if (hw->hw_wq[0]->send_frame_io == NULL) {
			ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
		}
	}

	/* Initialize send frame sequence id */
	ocs_atomic_init(&hw->send_frame_seq_id, 0);

	/* Initialize watchdog timer if enabled by user */
	hw->expiration_logged = 0;
	if (hw->watchdog_timeout) {
		if ((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
			ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n");
		} else if (!ocs_hw_config_watchdog_timer(hw)) {
			ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds\n", hw->watchdog_timeout);
		}
	}

	if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
		ocs_log_err(hw->os, "domain node memory allocation fail\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
		ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map,
				SLI4_MIN_LOOP_MAP_BYTES, 4)) {
		ocs_log_err(hw->os, "Loop dma alloc failed size:%d\n", hw->loop_map.size);
	}

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Configure Multi-RQ
 *
 * @param hw	Hardware context allocated by the caller.
 * @param mode	1 to set MRQ filters and 0 to set FCFI index
 * @param vlanid    valid in mode 0
 * @param fcf_index valid in mode 0
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static int32_t
ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
{
	uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
	hw_rq_t *rq;
	sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
	uint32_t i, j;
	sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
	int32_t rc;

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		goto issue_cmd;
	}

	/* Set the filter match/mask values from hw's filter_def values */
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		rq_filter[i].rq_id = 0xffff;
		rq_filter[i].r_ctl_mask  = (uint8_t)  hw->config.filter_def[i];
		rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
		rq_filter[i].type_mask   = (uint8_t) (hw->config.filter_def[i] >> 16);
		rq_filter[i].type_match  = (uint8_t) (hw->config.filter_def[i] >> 24);
	}

	/* Accumulate counts for each filter type used, build rq_ids[] list */
	for (i = 0; i < hw->hw_rq_count; i++) {
		rq = hw->hw_rq[i];
		for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
			if (rq->filter_mask & (1U << j)) {
				if (rq_filter[j].rq_id != 0xffff) {
					/* Already used. Bail out if it's not the RQ set case. */
					if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
						ocs_log_err(hw->os, "Wrong queue topology.\n");
						return OCS_HW_RTN_ERROR;
					}
					continue;
				}

				if (rq->is_mrq) {
					rq_filter[j].rq_id = rq->base_mrq_id;
					mrq_bitmask |= (1U << j);
				} else {
					rq_filter[j].rq_id = rq->hdr->id;
				}
			}
		}
	}

issue_cmd:
	/* Invoke REG_FCFI_MRQ */
	rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
				 buf,					/* buf */
				 SLI4_BMBX_SIZE,			/* size */
				 mode,					/* mode */
				 fcf_index,				/* fcf_index */
				 vlanid,				/* vlan_id */
				 hw->config.rq_selection_policy,	/* RQ selection policy */
				 mrq_bitmask,				/* MRQ bitmask */
				 hw->hw_mrq_count,			/* num_mrqs */
				 rq_filter);				/* RQ filter */
	if (rc == 0) {
		ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
		return OCS_HW_RTN_ERROR;
	}

	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

	rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;

	if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
		ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
			    rsp->hdr.command, rsp->hdr.status);
		return OCS_HW_RTN_ERROR;
	}

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		hw->fcf_indicator = rsp->fcfi;
	}
	return 0;
}
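
/*
 * Usage note (see ocs_hw_init() above): when MRQs are enabled this routine
 * is invoked twice, once per REG_FCFI_MRQ mode:
 *
 *	ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
 *	ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
 *
 * The first call registers the FCFI (recording hw->fcf_indicator); the
 * second programs the MRQ filters built from hw->config.filter_def.
 */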

/**
 * @brief Callback function for getting linkcfg during HW initialization.
 *
 * @param status Status of the linkcfg get operation.
 * @param value Link configuration enum to which the link configuration is set.
 * @param arg Callback argument (ocs_hw_t *).
 *
 * @return None.
 */
static void
ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
{
	ocs_hw_t *hw = (ocs_hw_t *)arg;
	if (status == 0) {
		hw->linkcfg = (ocs_hw_linkcfg_e)value;
	} else {
		hw->linkcfg = OCS_HW_LINKCFG_NA;
	}
	ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
}

/**
 * @ingroup devInitShutdown
 * @brief Tear down the Hardware Abstraction Layer module.
 *
 * @par Description
 * Frees memory structures needed by the device, and shuts down the device. Does
 * not free the HW context memory (which is done by the caller).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_teardown(ocs_hw_t *hw)
{
	uint32_t	i = 0;
	uint32_t	iters = 10;/*XXX*/
	uint32_t	max_rpi;
	uint32_t destroy_queues;
	uint32_t free_memory;

	if (!hw) {
		ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;
	}

	destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
	free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);

	/* shutdown target wqe timer */
	shutdown_target_wqe_timer(hw);

	/* Cancel watchdog timer if enabled */
	if (hw->watchdog_timeout) {
		hw->watchdog_timeout = 0;
		ocs_hw_config_watchdog_timer(hw);
	}

	/* Cancel Sliport Healthcheck */
	if (hw->sliport_healthcheck) {
		hw->sliport_healthcheck = 0;
		ocs_hw_config_sli_port_health_check(hw, 0, 0);
	}

	if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;

		ocs_hw_flush(hw);

		/* If there are outstanding commands, wait for them to complete */
		while (!ocs_list_empty(&hw->cmd_head) && iters) {
			ocs_udelay(10000);
			ocs_hw_flush(hw);
			iters--;
		}

		if (ocs_list_empty(&hw->cmd_head)) {
			ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
		} else {
			ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
		}

		/* Cancel any remaining commands */
		ocs_hw_command_cancel(hw);
	} else {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
	}

	ocs_lock_free(&hw->cmd_lock);

	/* Free unregistered RPI if workaround is in force */
	if (hw->workaround.use_unregistered_rpi) {
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
	}

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	if (hw->rpi_ref) {
		for (i = 0; i < max_rpi; i++) {
			if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
				ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
						i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
			}
		}
		ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
		hw->rpi_ref = NULL;
	}

	ocs_dma_free(hw->os, &hw->rnode_mem);

	if (hw->io) {
		for (i = 0; i < hw->config.n_io; i++) {
			if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
			    (hw->io[i]->sgl->virt != NULL)) {
				if (hw->io[i]->is_port_owned) {
					ocs_lock_free(&hw->io[i]->axr_lock);
				}
				ocs_dma_free(hw->os, hw->io[i]->sgl);
			}
			ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
			hw->io[i] = NULL;
		}
		ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
		hw->wqe_buffs = NULL;
		ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
		hw->io = NULL;
	}

	ocs_dma_free(hw->os, &hw->xfer_rdy);
	ocs_dma_free(hw->os, &hw->dump_sges);
	ocs_dma_free(hw->os, &hw->loop_map);

	ocs_lock_free(&hw->io_lock);
	ocs_lock_free(&hw->io_abort_lock);

	for (i = 0; i < hw->wq_count; i++) {
		sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->rq_count; i++) {
		sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->mq_count; i++) {
		sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->cq_count; i++) {
		sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
	}

	ocs_hw_qtop_free(hw->qtop);

	/* Free rq buffers */
	ocs_hw_rx_free(hw);

	hw_queue_teardown(hw);

	ocs_hw_rqpair_teardown(hw);

	if (sli_teardown(&hw->sli)) {
		ocs_log_err(hw->os, "SLI teardown failed\n");
	}

	ocs_queue_history_free(&hw->q_hist);

	/* record the fact that the queues are non-functional */
	hw->state = OCS_HW_STATE_UNINITIALIZED;

	/* free sequence free pool */
	ocs_array_free(hw->seq_pool);
	hw->seq_pool = NULL;

	/* free hw_wq_callback pool */
	ocs_pool_free(hw->wq_reqtag_pool);

	ocs_dma_free(hw->os, &hw->domain_dmem);
	ocs_dma_free(hw->os, &hw->fcf_dmem);
	/* Mark HW setup as not having been called */
	hw->hw_setup_called = FALSE;

	return OCS_HW_RTN_SUCCESS;
}

ocs_hw_rtn_e
ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
{
	uint32_t	i;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint32_t	iters;
	ocs_hw_state_e prev_state = hw->state;

	if (hw->state != OCS_HW_STATE_ACTIVE) {
		ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
	}

	hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;

	/* shutdown target wqe timer */
	shutdown_target_wqe_timer(hw);

	ocs_hw_flush(hw);

	/*
	 * If a mailbox command requiring a DMA is outstanding (i.e. SFP/DDM),
	 * then the FW will UE when the reset is issued. So attempt to complete
	 * all mailbox commands.
	 */
	iters = 10;
	while (!ocs_list_empty(&hw->cmd_head) && iters) {
		ocs_udelay(10000);
		ocs_hw_flush(hw);
		iters--;
	}

	if (ocs_list_empty(&hw->cmd_head)) {
		ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
	} else {
		ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
	}

	/* Reset the chip */
	switch (reset) {
	case OCS_HW_RESET_FUNCTION:
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_RESET_FIRMWARE:
		ocs_log_debug(hw->os, "issuing firmware reset\n");
		if (sli_fw_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_fw_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		/*
		 * Because the FW reset leaves the FW in a non-running state,
		 * follow that with a regular reset.
		 */
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	default:
		ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
		hw->state = prev_state;
		return OCS_HW_RTN_ERROR;
	}

	/* Not safe to walk command/io lists unless they've been initialized */
	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		ocs_hw_command_cancel(hw);

		/* Clean up the inuse list, the free list and the wait free list */
		ocs_hw_io_cancel(hw);

		ocs_memset(hw->domains, 0, sizeof(hw->domains));
		ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

		ocs_hw_link_event_init(hw);

		ocs_lock(&hw->io_lock);
			/* The io lists should be empty, but remove any that didn't get cleaned up. */
			while (!ocs_list_empty(&hw->io_timed_wqe)) {
				ocs_list_remove_head(&hw->io_timed_wqe);
			}
			/* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */

			while (!ocs_list_empty(&hw->io_free)) {
				ocs_list_remove_head(&hw->io_free);
			}
			while (!ocs_list_empty(&hw->io_wait_free)) {
				ocs_list_remove_head(&hw->io_wait_free);
			}

			/* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
			ocs_hw_reqtag_reset(hw);

		ocs_unlock(&hw->io_lock);
	}

	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		for (i = 0; i < hw->wq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->wq[i]);
		}

		for (i = 0; i < hw->rq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->rq[i]);
		}

		for (i = 0; i < hw->hw_rq_count; i++) {
			hw_rq_t *rq = hw->hw_rq[i];
			if (rq->rq_tracker != NULL) {
				uint32_t j;

				for (j = 0; j < rq->entry_count; j++) {
					rq->rq_tracker[j] = NULL;
				}
			}
		}

		for (i = 0; i < hw->mq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->mq[i]);
		}

		for (i = 0; i < hw->cq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->cq[i]);
		}

		for (i = 0; i < hw->eq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->eq[i]);
		}

		/* Free rq buffers */
		ocs_hw_rx_free(hw);

		/* Teardown the HW queue topology */
		hw_queue_teardown(hw);
	} else {
		/* Free rq buffers */
		ocs_hw_rx_free(hw);
	}

	/*
	 * Re-apply the run-time workarounds after clearing the SLI config
	 * fields in sli_reset.
	 */
	ocs_hw_workaround_setup(hw);
	hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;

	return rc;
}
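
/*
 * Note for callers (a sketch, per the ocs_hw_init() description above): a
 * successful reset leaves the HW in OCS_HW_STATE_QUEUES_ALLOCATED, so
 * ocs_hw_init() must run again before new work is issued:
 *
 *	if (ocs_hw_reset(hw, OCS_HW_RESET_FUNCTION) == OCS_HW_RTN_SUCCESS) {
 *		(re-initialize via ocs_hw_init(hw) before issuing IO)
 *	}
 */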

int32_t
ocs_hw_get_num_eq(ocs_hw_t *hw)
{
	return hw->eq_count;
}

static int32_t
ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
{
	/*
	 * The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf.
	 * No further explanation is given in the document.
	 */
	return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
		sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
}

ocs_hw_rtn_e
ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
{
	ocs_hw_rtn_e		rc = OCS_HW_RTN_SUCCESS;
	int32_t			tmp;

	if (!value) {
		return OCS_HW_RTN_ERROR;
	}

	*value = 0;

	switch (prop) {
	case OCS_HW_N_IO:
		*value = hw->config.n_io;
		break;
	case OCS_HW_N_SGL:
		*value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED);
		break;
	case OCS_HW_MAX_IO:
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
		break;
	case OCS_HW_MAX_NODES:
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
		break;
	case OCS_HW_MAX_RQ_ENTRIES:
		*value = hw->num_qentries[SLI_QTYPE_RQ];
		break;
	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
		*value = hw->config.rq_default_buffer_size;
		break;
	case OCS_HW_AUTO_XFER_RDY_CAPABLE:
		*value = sli_get_auto_xfer_rdy_capable(&hw->sli);
		break;
	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
		*value = hw->config.auto_xfer_rdy_xri_cnt;
		break;
	case OCS_HW_AUTO_XFER_RDY_SIZE:
		*value = hw->config.auto_xfer_rdy_size;
		break;
	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
		switch (hw->config.auto_xfer_rdy_blk_size_chip) {
		case 0:
			*value = 512;
			break;
		case 1:
			*value = 1024;
			break;
		case 2:
			*value = 2048;
			break;
		case 3:
			*value = 4096;
			break;
		case 4:
			*value = 520;
			break;
		default:
			*value = 0;
			rc = OCS_HW_RTN_ERROR;
			break;
		}
		break;
	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
		*value = hw->config.auto_xfer_rdy_t10_enable;
		break;
	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
		*value = hw->config.auto_xfer_rdy_p_type;
		break;
	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
		*value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
		break;
	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
		*value = hw->config.auto_xfer_rdy_app_tag_valid;
		break;
	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
		*value = hw->config.auto_xfer_rdy_app_tag_value;
		break;
	case OCS_HW_MAX_SGE:
		*value = sli_get_max_sge(&hw->sli);
		break;
	case OCS_HW_MAX_SGL:
		*value = sli_get_max_sgl(&hw->sli);
		break;
	case OCS_HW_TOPOLOGY:
		/*
		 * Infer link.status based on link.speed.
		 * Report OCS_HW_TOPOLOGY_NONE if the link is down.
		 */
		if (hw->link.speed == 0) {
			*value = OCS_HW_TOPOLOGY_NONE;
			break;
		}
		switch (hw->link.topology) {
		case SLI_LINK_TOPO_NPORT:
			*value = OCS_HW_TOPOLOGY_NPORT;
			break;
		case SLI_LINK_TOPO_LOOP:
			*value = OCS_HW_TOPOLOGY_LOOP;
			break;
		case SLI_LINK_TOPO_NONE:
			*value = OCS_HW_TOPOLOGY_NONE;
			break;
		default:
			ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
			rc = OCS_HW_RTN_ERROR;
			break;
		}
		break;
1591 		*value = hw->config.topology;
1592 		break;
1593 	case OCS_HW_LINK_SPEED:
1594 		*value = hw->link.speed;
1595 		break;
1596 	case OCS_HW_LINK_CONFIG_SPEED:
1597 		switch (hw->config.speed) {
1598 		case FC_LINK_SPEED_10G:
1599 			*value = 10000;
1600 			break;
1601 		case FC_LINK_SPEED_AUTO_16_8_4:
1602 			*value = 0;
1603 			break;
1604 		case FC_LINK_SPEED_2G:
1605 			*value = 2000;
1606 			break;
1607 		case FC_LINK_SPEED_4G:
1608 			*value = 4000;
1609 			break;
1610 		case FC_LINK_SPEED_8G:
1611 			*value = 8000;
1612 			break;
1613 		case FC_LINK_SPEED_16G:
1614 			*value = 16000;
1615 			break;
1616 		case FC_LINK_SPEED_32G:
1617 			*value = 32000;
1618 			break;
1619 		default:
1620 			ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
1621 			rc = OCS_HW_RTN_ERROR;
1622 			break;
1623 		}
1624 		break;
1625 	case OCS_HW_IF_TYPE:
1626 		*value = sli_get_if_type(&hw->sli);
1627 		break;
1628 	case OCS_HW_SLI_REV:
1629 		*value = sli_get_sli_rev(&hw->sli);
1630 		break;
1631 	case OCS_HW_SLI_FAMILY:
1632 		*value = sli_get_sli_family(&hw->sli);
1633 		break;
1634 	case OCS_HW_DIF_CAPABLE:
1635 		*value = sli_get_dif_capable(&hw->sli);
1636 		break;
1637 	case OCS_HW_DIF_SEED:
1638 		*value = hw->config.dif_seed;
1639 		break;
1640 	case OCS_HW_DIF_MODE:
1641 		*value = hw->config.dif_mode;
1642 		break;
1643 	case OCS_HW_DIF_MULTI_SEPARATE:
1644 		/* Only Lancer supports multiple DIF separate buffers */
1645 		if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
1646 			*value = TRUE;
1647 		} else {
1648 			*value = FALSE;
1649 		}
1650 		break;
1651 	case OCS_HW_DUMP_MAX_SIZE:
1652 		*value = hw->dump_size;
1653 		break;
1654 	case OCS_HW_DUMP_READY:
1655 		*value = sli_dump_is_ready(&hw->sli);
1656 		break;
1657 	case OCS_HW_DUMP_PRESENT:
1658 		*value = sli_dump_is_present(&hw->sli);
1659 		break;
1660 	case OCS_HW_RESET_REQUIRED:
1661 		tmp = sli_reset_required(&hw->sli);
1662 		if (tmp < 0) {
1663 			rc = OCS_HW_RTN_ERROR;
1664 		} else {
1665 			*value = tmp;
1666 		}
1667 		break;
1668 	case OCS_HW_FW_ERROR:
1669 		*value = sli_fw_error_status(&hw->sli);
1670 		break;
1671 	case OCS_HW_FW_READY:
1672 		*value = sli_fw_ready(&hw->sli);
1673 		break;
1674 	case OCS_HW_FW_TIMED_OUT:
1675 		*value = ocs_hw_get_fw_timed_out(hw);
1676 		break;
1677 	case OCS_HW_HIGH_LOGIN_MODE:
1678 		*value = sli_get_hlm_capable(&hw->sli);
1679 		break;
1680 	case OCS_HW_PREREGISTER_SGL:
1681 		*value = sli_get_sgl_preregister_required(&hw->sli);
1682 		break;
1683 	case OCS_HW_HW_REV1:
1684 		*value = sli_get_hw_revision(&hw->sli, 0);
1685 		break;
1686 	case OCS_HW_HW_REV2:
1687 		*value = sli_get_hw_revision(&hw->sli, 1);
1688 		break;
1689 	case OCS_HW_HW_REV3:
1690 		*value = sli_get_hw_revision(&hw->sli, 2);
1691 		break;
1692 	case OCS_HW_LINKCFG:
1693 		*value = hw->linkcfg;
1694 		break;
1695 	case OCS_HW_ETH_LICENSE:
1696 		*value = hw->eth_license;
1697 		break;
1698 	case OCS_HW_LINK_MODULE_TYPE:
1699 		*value = sli_get_link_module_type(&hw->sli);
1700 		break;
1701 	case OCS_HW_NUM_CHUTES:
1702 		*value = ocs_hw_get_num_chutes(hw);
1703 		break;
1704 	case OCS_HW_DISABLE_AR_TGT_DIF:
1705 		*value = hw->workaround.disable_ar_tgt_dif;
1706 		break;
1707 	case OCS_HW_EMULATE_I_ONLY_AAB:
1708 		*value = hw->config.i_only_aab;
1709 		break;
1710 	case OCS_HW_EMULATE_WQE_TIMEOUT:
1711 		*value = hw->config.emulate_wqe_timeout;
1712 		break;
1713 	case OCS_HW_VPD_LEN:
1714 		*value = sli_get_vpd_len(&hw->sli);
1715 		break;
1716 	case OCS_HW_SGL_CHAINING_CAPABLE:
1717 		*value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
1718 		break;
1719 	case OCS_HW_SGL_CHAINING_ALLOWED:
1720 		/*
1721 		 * SGL Chaining is allowed in the following cases:
1722 		 *   1. Lancer with host SGL Lists
1723 		 *   2. Skyhawk with pre-registered SGL Lists
1724 		 */
1725 		*value = FALSE;
1726 		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1727 		    !sli_get_sgl_preregister(&hw->sli) &&
1728 		    SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
1729 			*value = TRUE;
1730 		}
1731 
1732 		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1733 		    sli_get_sgl_preregister(&hw->sli) &&
1734 		    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
1735 			(SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
1736 			*value = TRUE;
1737 		}
1738 		break;
1739 	case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
1740 		/* Only lancer supports host allocated SGL Chaining buffers. */
1741 		*value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1742 			  (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)));
1743 		break;
1744 	case OCS_HW_SEND_FRAME_CAPABLE:
1745 		if (hw->workaround.ignore_send_frame) {
1746 			*value = 0;
1747 		} else {
1748 			/* Only lancer is capable */
1749 			*value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
1750 		}
1751 		break;
1752 	case OCS_HW_RQ_SELECTION_POLICY:
1753 		*value = hw->config.rq_selection_policy;
1754 		break;
1755 	case OCS_HW_RR_QUANTA:
1756 		*value = hw->config.rr_quanta;
1757 		break;
1758 	case OCS_HW_MAX_VPORTS:
1759 		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
1760 		break;
1761 	default:
1762 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1763 		rc = OCS_HW_RTN_ERROR;
1764 	}
1765 
1766 	return rc;
1767 }
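
/**
 * @par Example
 * A minimal usage sketch for ocs_hw_get() (hypothetical caller; the
 * property enum values are real, but the surrounding logic is
 * illustrative only):
 * @code
 * uint32_t max_io = 0;
 * uint32_t topology = 0;
 *
 * if (ocs_hw_get(hw, OCS_HW_MAX_IO, &max_io) != OCS_HW_RTN_SUCCESS) {
 *         return OCS_HW_RTN_ERROR;
 * }
 * if ((ocs_hw_get(hw, OCS_HW_TOPOLOGY, &topology) == OCS_HW_RTN_SUCCESS) &&
 *     (topology == OCS_HW_TOPOLOGY_NONE)) {
 *         ocs_log_debug(hw->os, "link down, max XRIs %d\n", max_io);
 * }
 * @endcode
 */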
1768 
1769 void *
1770 ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
1771 {
1772 	void	*rc = NULL;
1773 
1774 	switch (prop) {
1775 	case OCS_HW_WWN_NODE:
1776 		rc = sli_get_wwn_node(&hw->sli);
1777 		break;
1778 	case OCS_HW_WWN_PORT:
1779 		rc = sli_get_wwn_port(&hw->sli);
1780 		break;
1781 	case OCS_HW_VPD:
1782 		/* make sure VPD length is non-zero */
1783 		if (sli_get_vpd_len(&hw->sli)) {
1784 			rc = sli_get_vpd(&hw->sli);
1785 		}
1786 		break;
1787 	case OCS_HW_FW_REV:
1788 		rc = sli_get_fw_name(&hw->sli, 0);
1789 		break;
1790 	case OCS_HW_FW_REV2:
1791 		rc = sli_get_fw_name(&hw->sli, 1);
1792 		break;
1793 	case OCS_HW_IPL:
1794 		rc = sli_get_ipl_name(&hw->sli);
1795 		break;
1796 	case OCS_HW_PORTNUM:
1797 		rc = sli_get_portnum(&hw->sli);
1798 		break;
1799 	case OCS_HW_BIOS_VERSION_STRING:
1800 		rc = sli_get_bios_version_string(&hw->sli);
1801 		break;
1802 	default:
1803 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1804 	}
1805 
1806 	return rc;
1807 }
1808 
1809 ocs_hw_rtn_e
1810 ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value)
1811 {
1812 	ocs_hw_rtn_e		rc = OCS_HW_RTN_SUCCESS;
1813 
1814 	switch (prop) {
1815 	case OCS_HW_N_IO:
1816 		if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) ||
1817 		    value == 0) {
1818 			ocs_log_test(hw->os, "IO value out of range %d vs %d\n",
1819 					value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI));
1820 			rc = OCS_HW_RTN_ERROR;
1821 		} else {
1822 			hw->config.n_io = value;
1823 		}
1824 		break;
1825 	case OCS_HW_N_SGL:
1826 		value += SLI4_SGE_MAX_RESERVED;
1827 		if (value > sli_get_max_sgl(&hw->sli)) {
1828 			ocs_log_test(hw->os, "SGL value out of range %d vs %d\n",
1829 					value, sli_get_max_sgl(&hw->sli));
1830 			rc = OCS_HW_RTN_ERROR;
1831 		} else {
1832 			hw->config.n_sgl = value;
1833 		}
1834 		break;
1835 	case OCS_HW_TOPOLOGY:
1836 		if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) &&
1837 				(value != OCS_HW_TOPOLOGY_AUTO)) {
1838 			ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n",
1839 					value, sli_get_medium(&hw->sli));
1840 			rc = OCS_HW_RTN_ERROR;
1841 			break;
1842 		}
1843 
1844 		switch (value) {
1845 		case OCS_HW_TOPOLOGY_AUTO:
1846 			if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
1847 				sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC);
1848 			} else {
1849 				sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE);
1850 			}
1851 			break;
1852 		case OCS_HW_TOPOLOGY_NPORT:
1853 			sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA);
1854 			break;
1855 		case OCS_HW_TOPOLOGY_LOOP:
1856 			sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL);
1857 			break;
1858 		default:
1859 			ocs_log_test(hw->os, "unsupported topology %#x\n", value);
1860 			rc = OCS_HW_RTN_ERROR;
1861 		}
1862 		hw->config.topology = value;
1863 		break;
1864 	case OCS_HW_LINK_SPEED:
1865 		if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) {
1866 			switch (value) {
1867 			case 0: 	/* Auto-speed negotiation */
1868 			case 10000:	/* FCoE speed */
1869 				hw->config.speed = FC_LINK_SPEED_10G;
1870 				break;
1871 			default:
1872 				ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n",
1873 						value, sli_get_medium(&hw->sli));
1874 				rc = OCS_HW_RTN_ERROR;
1875 			}
1876 			break;
1877 		}
1878 
1879 		switch (value) {
1880 		case 0:		/* Auto-speed negotiation */
1881 			hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
1882 			break;
1883 		case 2000:	/* FC speeds */
1884 			hw->config.speed = FC_LINK_SPEED_2G;
1885 			break;
1886 		case 4000:
1887 			hw->config.speed = FC_LINK_SPEED_4G;
1888 			break;
1889 		case 8000:
1890 			hw->config.speed = FC_LINK_SPEED_8G;
1891 			break;
1892 		case 16000:
1893 			hw->config.speed = FC_LINK_SPEED_16G;
1894 			break;
1895 		case 32000:
1896 			hw->config.speed = FC_LINK_SPEED_32G;
1897 			break;
1898 		default:
1899 			ocs_log_test(hw->os, "unsupported speed %d\n", value);
1900 			rc = OCS_HW_RTN_ERROR;
1901 		}
1902 		break;
1903 	case OCS_HW_DIF_SEED:
1904 		/* Set the DIF seed - only for lancer right now */
1905 		if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
1906 			ocs_log_test(hw->os, "DIF seed not supported for this device\n");
1907 			rc = OCS_HW_RTN_ERROR;
1908 		} else {
1909 			hw->config.dif_seed = value;
1910 		}
1911 		break;
1912 	case OCS_HW_DIF_MODE:
1913 		switch (value) {
1914 		case OCS_HW_DIF_MODE_INLINE:
1915 			/*
1916 			 *  Make sure we support inline DIF.
1917 			 *
1918 			 * Note: Having both bits clear means that we have old
1919 			 *	FW that doesn't set the bits.
1920 			 */
1921 			if (sli_is_dif_inline_capable(&hw->sli)) {
1922 				hw->config.dif_mode = value;
1923 			} else {
1924 				ocs_log_test(hw->os, "chip does not support DIF inline\n");
1925 				rc = OCS_HW_RTN_ERROR;
1926 			}
1927 			break;
1928 		case OCS_HW_DIF_MODE_SEPARATE:
1929 			/* Make sure we support DIF separates. */
1930 			if (sli_is_dif_separate_capable(&hw->sli)) {
1931 				hw->config.dif_mode = value;
1932 			} else {
1933 				ocs_log_test(hw->os, "chip does not support DIF separate\n");
1934 				rc = OCS_HW_RTN_ERROR;
1935 			}
1936 		}
1937 		break;
1938 	case OCS_HW_RQ_PROCESS_LIMIT: {
1939 		hw_rq_t *rq;
1940 		uint32_t i;
1941 
1942 		/* For each hw_rq object, set its parent CQ limit value */
1943 		for (i = 0; i < hw->hw_rq_count; i++) {
1944 			rq = hw->hw_rq[i];
1945 			hw->cq[rq->cq->instance].proc_limit = value;
1946 		}
1947 		break;
1948 	}
1949 	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1950 		hw->config.rq_default_buffer_size = value;
1951 		break;
1952 	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1953 		hw->config.auto_xfer_rdy_xri_cnt = value;
1954 		break;
1955 	case OCS_HW_AUTO_XFER_RDY_SIZE:
1956 		hw->config.auto_xfer_rdy_size = value;
1957 		break;
1958 	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1959 		switch (value) {
1960 		case 512:
1961 			hw->config.auto_xfer_rdy_blk_size_chip = 0;
1962 			break;
1963 		case 1024:
1964 			hw->config.auto_xfer_rdy_blk_size_chip = 1;
1965 			break;
1966 		case 2048:
1967 			hw->config.auto_xfer_rdy_blk_size_chip = 2;
1968 			break;
1969 		case 4096:
1970 			hw->config.auto_xfer_rdy_blk_size_chip = 3;
1971 			break;
1972 		case 520:
1973 			hw->config.auto_xfer_rdy_blk_size_chip = 4;
1974 			break;
1975 		default:
1976 			ocs_log_err(hw->os, "Invalid block size %d\n",
1977 				    value);
1978 			rc = OCS_HW_RTN_ERROR;
1979 		}
1980 		break;
1981 	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1982 		hw->config.auto_xfer_rdy_t10_enable = value;
1983 		break;
1984 	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1985 		hw->config.auto_xfer_rdy_p_type = value;
1986 		break;
1987 	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1988 		hw->config.auto_xfer_rdy_ref_tag_is_lba = value;
1989 		break;
1990 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1991 		hw->config.auto_xfer_rdy_app_tag_valid = value;
1992 		break;
1993 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1994 		hw->config.auto_xfer_rdy_app_tag_value = value;
1995 		break;
1996 	case OCS_ESOC:
1997 		hw->config.esoc = value;
1998 		break;
1999 	case OCS_HW_HIGH_LOGIN_MODE:
2000 		rc = sli_set_hlm(&hw->sli, value);
2001 		break;
2002 	case OCS_HW_PREREGISTER_SGL:
2003 		rc = sli_set_sgl_preregister(&hw->sli, value);
2004 		break;
2005 	case OCS_HW_ETH_LICENSE:
2006 		hw->eth_license = value;
2007 		break;
2008 	case OCS_HW_EMULATE_I_ONLY_AAB:
2009 		hw->config.i_only_aab = value;
2010 		break;
2011 	case OCS_HW_EMULATE_WQE_TIMEOUT:
2012 		hw->config.emulate_wqe_timeout = value;
2013 		break;
2014 	case OCS_HW_BOUNCE:
2015 		hw->config.bounce = value;
2016 		break;
2017 	case OCS_HW_RQ_SELECTION_POLICY:
2018 		hw->config.rq_selection_policy = value;
2019 		break;
2020 	case OCS_HW_RR_QUANTA:
2021 		hw->config.rr_quanta = value;
2022 		break;
2023 	default:
2024 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2025 		rc = OCS_HW_RTN_ERROR;
2026 	}
2027 
2028 	return rc;
2029 }
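
/**
 * @par Example
 * A sketch of configuring the link through ocs_hw_set() (hypothetical
 * sequence; per the switch above, a speed value of 0 selects auto-speed
 * negotiation):
 * @code
 * if ((ocs_hw_set(hw, OCS_HW_TOPOLOGY, OCS_HW_TOPOLOGY_AUTO) != OCS_HW_RTN_SUCCESS) ||
 *     (ocs_hw_set(hw, OCS_HW_LINK_SPEED, 0) != OCS_HW_RTN_SUCCESS)) {
 *         return OCS_HW_RTN_ERROR;
 * }
 * @endcode
 */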
2030 
2031 ocs_hw_rtn_e
2032 ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value)
2033 {
2034 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2035 
2036 	switch (prop) {
2037 	case OCS_HW_WAR_VERSION:
2038 		hw->hw_war_version = value;
2039 		break;
2040 	case OCS_HW_FILTER_DEF: {
2041 		char *p = value;
2042 		uint32_t idx = 0;
2043 
2044 		for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) {
2045 			hw->config.filter_def[idx] = 0;
2046 		}
2047 
2048 		for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) {
2049 			hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0);
2050 			p = ocs_strchr(p, ',');
2051 			if (p != NULL) {
2052 				p++;
2053 			}
2054 		}
2055 
2056 		break;
2057 	}
2058 	default:
2059 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2060 		rc = OCS_HW_RTN_ERROR;
2061 		break;
2062 	}
2063 	return rc;
2064 }
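
/**
 * @par Example
 * OCS_HW_FILTER_DEF takes a comma-separated string of numeric filter
 * definitions; each entry is parsed with ocs_strtoul() using base 0, so
 * hex values are accepted. A hypothetical caller with illustrative
 * filter values:
 * @code
 * char filters[] = "0x7ef8,0x68ff,0x18ff,0xff";
 *
 * (void)ocs_hw_set_ptr(hw, OCS_HW_FILTER_DEF, filters);
 * @endcode
 */
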
2065 /**
2066  * @ingroup interrupt
2067  * @brief Check for the events associated with the interrupt vector.
2068  *
2069  * @param hw Hardware context.
2070  * @param vector Zero-based interrupt vector number.
2071  *
2072  * @return Returns 0 on success, or a non-zero value on failure.
2073  */
2074 int32_t
2075 ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector)
2076 {
2077 	int32_t rc = 0;
2078 
2079 	if (!hw) {
2080 		ocs_log_err(NULL, "HW context NULL?!?\n");
2081 		return -1;
2082 	}
2083 
2084 	if (vector >= hw->eq_count) {	/* vector is zero-based */
2085 		ocs_log_err(hw->os, "vector %d. max %d\n",
2086 				vector, hw->eq_count);
2087 		return -1;
2088 	}
2089 
2090 	/*
2091 	 * The caller should disable interrupts if they wish to prevent us
2092 	 * from processing during a shutdown. The following states are defined:
2093 	 *   OCS_HW_STATE_UNINITIALIZED - No queues allocated
2094 	 *   OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2095 	 *                                    queues are cleared.
2096 	 *   OCS_HW_STATE_ACTIVE - Chip and queues are operational
2097 	 *   OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2098 	 *   OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2099 	 *                                        completions.
2100 	 */
2101 	if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
2102 		rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]);
2103 
2104 		/* Re-arm queue if there are no entries */
2105 		if (rc != 0) {
2106 			sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE);
2107 		}
2108 	}
2109 	return rc;
2110 }
2111 
2112 void
2113 ocs_hw_unsol_process_bounce(void *arg)
2114 {
2115 	ocs_hw_sequence_t *seq = arg;
2116 	ocs_hw_t *hw = seq->hw;
2117 
2118 	ocs_hw_assert(hw != NULL);
2119 	ocs_hw_assert(hw->callback.unsolicited != NULL);
2120 
2121 	hw->callback.unsolicited(hw->args.unsolicited, seq);
2122 }
2123 
2124 int32_t
2125 ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec)
2126 {
2127 	hw_eq_t *eq;
2128 	int32_t rc = 0;
2129 
2130 	CPUTRACE("");
2131 
2132 	/*
2133 	 * The caller should disable interrupts if they wish to prevent us
2134 	 * from processing during a shutdown. The following states are defined:
2135 	 *   OCS_HW_STATE_UNINITIALIZED - No queues allocated
2136 	 *   OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2137 	 *                                    queues are cleared.
2138 	 *   OCS_HW_STATE_ACTIVE - Chip and queues are operational
2139 	 *   OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2140 	 *   OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2141 	 *                                        completions.
2142 	 */
2143 	if (hw->state == OCS_HW_STATE_UNINITIALIZED) {
2144 		return 0;
2145 	}
2146 
2147 	/* Get pointer to hw_eq_t */
2148 	eq = hw->hw_eq[vector];
2149 
2150 	OCS_STAT(eq->use_count++);
2151 
2152 	rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec);
2153 
2154 	return rc;
2155 }
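
/**
 * @par Example
 * A sketch of a minimal per-vector interrupt handler built on
 * ocs_hw_process() (hypothetical handler name; the time budget shown is
 * illustrative only):
 * @code
 * static void
 * my_eq_isr(ocs_hw_t *hw, uint32_t vector)
 * {
 *         ocs_hw_process(hw, vector, OCS_HW_WQ_TIMER_PERIOD_MS);
 * }
 * @endcode
 */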
2156 
2157 /**
2158  * @ingroup interrupt
2159  * @brief Process events associated with an EQ.
2160  *
2161  * @par Description
2162  * Loop termination:
2163  * @n @n Without a mechanism to terminate the completion processing loop, it
2164  * is possible under some workload conditions for the loop to never terminate,
2165  * or at least to run longer than the OS will tolerate an interrupt handler
2166  * or kernel thread context holding a CPU without yielding.
2167  * @n @n The approach taken here is to periodically check how much time has
2168  * been spent in this processing loop, and if it exceeds a predetermined
2169  * limit (max_isr_time_msec), the loop is terminated and ocs_hw_process()
2170  * returns.
2171  *
2172  * @param hw Hardware context.
2173  * @param eq Pointer to HW EQ object.
2174  * @param max_isr_time_msec Maximum time in msec to stay in this function.
2175  *
2176  * @return Returns 0 on success, or a non-zero value on failure.
2177  */
2178 int32_t
2179 ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
2180 {
2181 	uint8_t		eqe[sizeof(sli4_eqe_t)] = { 0 };
2182 	uint32_t	done = FALSE;
2183 	uint32_t	tcheck_count;
2184 	time_t		tstart;
2185 	time_t		telapsed;
2186 
2187 	tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2188 	tstart = ocs_msectime();
2189 
2190 	CPUTRACE("");
2191 
2192 	while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
2193 		uint16_t	cq_id = 0;
2194 		int32_t		rc;
2195 
2196 		rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2197 		if (unlikely(rc)) {
2198 			if (rc > 0) {
2199 				uint32_t i;
2200 
2201 				/*
2202 				 * Received a sentinel EQE indicating the EQ is full.
2203 				 * Process all CQs
2204 				 */
2205 				for (i = 0; i < hw->cq_count; i++) {
2206 					ocs_hw_cq_process(hw, hw->hw_cq[i]);
2207 				}
2208 				continue;
2209 			} else {
2210 				return rc;
2211 			}
2212 		} else {
2213 			int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
2214 			if (likely(index >= 0)) {
2215 				ocs_hw_cq_process(hw, hw->hw_cq[index]);
2216 			} else {
2217 				ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
2218 			}
2219 		}
2220 
2221 		if (eq->queue->n_posted > (eq->queue->posted_limit)) {
2222 			sli_queue_arm(&hw->sli, eq->queue, FALSE);
2223 		}
2224 
2225 		if (tcheck_count && (--tcheck_count == 0)) {
2226 			tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2227 			telapsed = ocs_msectime() - tstart;
2228 			if (telapsed >= max_isr_time_msec) {
2229 				done = TRUE;
2230 			}
2231 		}
2232 	}
2233 	sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);
2234 
2235 	return 0;
2236 }
2237 
2238 /**
2239  * @brief Submit queued (pending) mbx commands.
2240  *
2241  * @par Description
2242  * Submit queued mailbox commands.
2243  * --- Assumes that hw->cmd_lock is held ---
2244  *
2245  * @param hw Hardware context.
2246  *
2247  * @return Returns 0 on success, or a negative error code value on failure.
2248  */
2249 static int32_t
2250 ocs_hw_cmd_submit_pending(ocs_hw_t *hw)
2251 {
2252 	ocs_command_ctx_t *ctx;
2253 	int32_t rc = 0;
2254 
2255 	/* Assumes lock held */
2256 
2257 	/* Only submit MQE if there's room */
2258 	while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) {
2259 		ctx = ocs_list_remove_head(&hw->cmd_pending);
2260 		if (ctx == NULL) {
2261 			break;
2262 		}
2263 		ocs_list_add_tail(&hw->cmd_head, ctx);
2264 		hw->cmd_head_count++;
2265 		if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) {
2266 			ocs_log_test(hw->os, "sli_queue_write failed\n");
2267 			rc = -1;
2268 			break;
2269 		}
2270 	}
2271 	return rc;
2272 }
2273 
2274 /**
2275  * @ingroup io
2276  * @brief Issue a SLI command.
2277  *
2278  * @par Description
2279  * Send a mailbox command to the hardware, and either wait for a completion
2280  * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT).
2281  *
2282  * @param hw Hardware context.
2283  * @param cmd Buffer containing a formatted command and results.
2284  * @param opts Command options:
2285  *  - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion.
2286  *  - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback.
2287  * @param cb Function callback used for asynchronous mode. May be NULL.
2288  * @n Prototype is <tt>(*cb)(void *arg, uint8_t *cmd)</tt>.
2289  * @n @n @b Note: If the
2290  * callback function pointer is NULL, the results of the command are silently
2291  * discarded, allowing this pointer to exist solely on the stack.
2292  * @param arg Argument passed to an asynchronous callback.
2293  *
2294  * @return Returns 0 on success, or a non-zero value on failure.
2295  */
2296 ocs_hw_rtn_e
2297 ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg)
2298 {
2299 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2300 
2301 	/*
2302 	 * If the chip is in an error state (UE'd) then reject this mailbox
2303 	 *  command.
2304 	 */
2305 	if (sli_fw_error_status(&hw->sli) > 0) {
2306 		uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1);
2307 		uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2);
2308 		if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) {
2309 			hw->expiration_logged = 1;
2310 			ocs_log_crit(hw->os, "Emulex: Heartbeat expired after %d seconds\n",
2311 					hw->watchdog_timeout);
2312 		}
2313 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2314 		ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
2315 			sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS),
2316 			err1, err2);
2317 
2318 		return OCS_HW_RTN_ERROR;
2319 	}
2320 
2321 	if (OCS_CMD_POLL == opts) {
2322 		ocs_lock(&hw->cmd_lock);
2323 		if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) {
2324 			/*
2325 			 * Can't issue Boot-strap mailbox command with other
2326 			 * mail-queue commands pending as this interaction is
2327 			 * undefined
2328 			 */
2329 			rc = OCS_HW_RTN_ERROR;
2330 		} else {
2331 			void *bmbx = hw->sli.bmbx.virt;
2332 
2333 			ocs_memset(bmbx, 0, SLI4_BMBX_SIZE);
2334 			ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
2335 
2336 			if (sli_bmbx_command(&hw->sli) == 0) {
2337 				rc = OCS_HW_RTN_SUCCESS;
2338 				ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
2339 			}
2340 		}
2341 		ocs_unlock(&hw->cmd_lock);
2342 	} else if (OCS_CMD_NOWAIT == opts) {
2343 		ocs_command_ctx_t	*ctx = NULL;
2344 
2345 		ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT);
2346 		if (!ctx) {
2347 			ocs_log_err(hw->os, "can't allocate command context\n");
2348 			return OCS_HW_RTN_NO_RESOURCES;
2349 		}
2350 
2351 		if (hw->state != OCS_HW_STATE_ACTIVE) {
2352 			ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state);
2353 			ocs_free(hw->os, ctx, sizeof(*ctx));
2354 			return OCS_HW_RTN_ERROR;
2355 		}
2356 
2357 		if (cb) {
2358 			ctx->cb = cb;
2359 			ctx->arg = arg;
2360 		}
2361 		ctx->buf = cmd;
2362 		ctx->ctx = hw;
2363 
2364 		ocs_lock(&hw->cmd_lock);
2365 
2366 			/* Add to pending list */
2367 			ocs_list_add_tail(&hw->cmd_pending, ctx);
2368 
2369 			/* Submit as much of the pending list as we can */
2370 			if (ocs_hw_cmd_submit_pending(hw) == 0) {
2371 				rc = OCS_HW_RTN_SUCCESS;
2372 			}
2373 
2374 		ocs_unlock(&hw->cmd_lock);
2375 	}
2376 
2377 	return rc;
2378 }
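
/**
 * @par Example
 * A sketch of issuing an asynchronous mailbox command (hypothetical
 * callback; CONFIG_LINK is used only because its wrapper already
 * appears in this file, and any formatted command is handled the same
 * way):
 * @code
 * static int32_t
 * my_cmd_done(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
 * {
 *         ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
 *         return 0;
 * }
 *
 * uint8_t *cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
 *
 * if (cmd != NULL) {
 *         if (!sli_cmd_config_link(&hw->sli, cmd, SLI4_BMBX_SIZE) ||
 *             (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, my_cmd_done, NULL) != OCS_HW_RTN_SUCCESS)) {
 *                 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
 *         }
 * }
 * @endcode
 */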
2379 
2380 /**
2381  * @ingroup devInitShutdown
2382  * @brief Register a callback for the given event.
2383  *
2384  * @param hw Hardware context.
2385  * @param which Event of interest.
2386  * @param func Function to call when the event occurs.
2387  * @param arg Argument passed to the callback function.
2388  *
2389  * @return Returns 0 on success, or a non-zero value on failure.
2390  */
2391 ocs_hw_rtn_e
2392 ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg)
2393 {
2394 
2395 	if (!hw || !func || (which >= OCS_HW_CB_MAX)) {
2396 		ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n",
2397 			    hw, which, func);
2398 		return OCS_HW_RTN_ERROR;
2399 	}
2400 
2401 	switch (which) {
2402 	case OCS_HW_CB_DOMAIN:
2403 		hw->callback.domain = func;
2404 		hw->args.domain = arg;
2405 		break;
2406 	case OCS_HW_CB_PORT:
2407 		hw->callback.port = func;
2408 		hw->args.port = arg;
2409 		break;
2410 	case OCS_HW_CB_UNSOLICITED:
2411 		hw->callback.unsolicited = func;
2412 		hw->args.unsolicited = arg;
2413 		break;
2414 	case OCS_HW_CB_REMOTE_NODE:
2415 		hw->callback.rnode = func;
2416 		hw->args.rnode = arg;
2417 		break;
2418 	case OCS_HW_CB_BOUNCE:
2419 		hw->callback.bounce = func;
2420 		hw->args.bounce = arg;
2421 		break;
2422 	default:
2423 		ocs_log_test(hw->os, "unknown callback %#x\n", which);
2424 		return OCS_HW_RTN_ERROR;
2425 	}
2426 
2427 	return OCS_HW_RTN_SUCCESS;
2428 }
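
/**
 * @par Example
 * A sketch of registering an unsolicited-frame handler (hypothetical
 * handler; the prototype mirrors the invocation in
 * ocs_hw_unsol_process_bounce() above):
 * @code
 * static int32_t
 * my_unsol_cb(void *arg, ocs_hw_sequence_t *seq)
 * {
 *         return 0;
 * }
 *
 * (void)ocs_hw_callback(hw, OCS_HW_CB_UNSOLICITED, my_unsol_cb, my_arg);
 * @endcode
 */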
2429 
2430 /**
2431  * @ingroup port
2432  * @brief Allocate a port object.
2433  *
2434  * @par Description
2435  * This function allocates a VPI object for the port and stores it in the
2436  * indicator field of the port object.
2437  *
2438  * @param hw Hardware context.
2439  * @param sport SLI port object used to connect to the domain.
2440  * @param domain Domain object associated with this port (may be NULL).
2441  * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
2442  *
2443  * @return Returns 0 on success, or a non-zero value on failure.
2444  */
2445 ocs_hw_rtn_e
2446 ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain,
2447 		uint8_t *wwpn)
2448 {
2449 	uint8_t	*cmd = NULL;
2450 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2451 	uint32_t index;
2452 
2453 	sport->indicator = UINT32_MAX;
2454 	sport->hw = hw;
2455 	sport->ctx.app = sport;
2456 	sport->sm_free_req_pending = 0;
2457 
2458 	/*
2459 	 * Check if the chip is in an error state (UE'd) before proceeding.
2460 	 */
2461 	if (sli_fw_error_status(&hw->sli) > 0) {
2462 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2463 		return OCS_HW_RTN_ERROR;
2464 	}
2465 
2466 	if (wwpn) {
2467 		ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
2468 	}
2469 
2470 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) {
2471 		ocs_log_err(hw->os, "FCOE_VPI allocation failure\n");
2472 		return OCS_HW_RTN_ERROR;
2473 	}
2474 
2475 	if (domain != NULL) {
2476 		ocs_sm_function_t	next = NULL;
2477 
2478 		cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2479 		if (!cmd) {
2480 			ocs_log_err(hw->os, "command memory allocation failed\n");
2481 			rc = OCS_HW_RTN_NO_MEMORY;
2482 			goto ocs_hw_port_alloc_out;
2483 		}
2484 
2485 		/* If the WWPN is NULL, fetch the default WWPN and WWNN before
2486 		 * initializing the VPI
2487 		 */
2488 		if (!wwpn) {
2489 			next = __ocs_hw_port_alloc_read_sparm64;
2490 		} else {
2491 			next = __ocs_hw_port_alloc_init_vpi;
2492 		}
2493 
2494 		ocs_sm_transition(&sport->ctx, next, cmd);
2495 	} else if (!wwpn) {
2496 		/* This is the convention for the HW, not SLI */
2497 		ocs_log_test(hw->os, "need WWN for physical port\n");
2498 		rc = OCS_HW_RTN_ERROR;
2499 	} else {
2500 		/* domain NULL and wwpn non-NULL */
2501 		ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL);
2502 	}
2503 
2504 ocs_hw_port_alloc_out:
2505 	if (rc != OCS_HW_RTN_SUCCESS) {
2506 		ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2507 
2508 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
2509 	}
2510 
2511 	return rc;
2512 }
2513 
2514 /**
2515  * @ingroup port
2516  * @brief Attach a physical/virtual SLI port to a domain.
2517  *
2518  * @par Description
2519  * This function registers a previously-allocated VPI with the
2520  * device.
2521  *
2522  * @param hw Hardware context.
2523  * @param sport Pointer to the SLI port object.
2524  * @param fc_id Fibre Channel ID to associate with this port.
2525  *
2526  * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
2527  */
2528 ocs_hw_rtn_e
2529 ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id)
2530 {
2531 	uint8_t	*buf = NULL;
2532 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2533 
2534 	if (!hw || !sport) {
2535 		ocs_log_err(hw ? hw->os : NULL,
2536 			"bad parameter(s) hw=%p sport=%p\n", hw,
2537 			sport);
2538 		return OCS_HW_RTN_ERROR;
2539 	}
2540 
2541 	/*
2542 	 * Check if the chip is in an error state (UE'd) before proceeding.
2543 	 */
2544 	if (sli_fw_error_status(&hw->sli) > 0) {
2545 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2546 		return OCS_HW_RTN_ERROR;
2547 	}
2548 
2549 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2550 	if (!buf) {
2551 		ocs_log_err(hw->os, "no buffer for command\n");
2552 		return OCS_HW_RTN_NO_MEMORY;
2553 	}
2554 
2555 	sport->fc_id = fc_id;
2556 	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf);
2557 	return rc;
2558 }
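
/**
 * @par Example
 * A sketch of the virtual-port bring-up sequence (hypothetical caller;
 * @c wwpn is in big-endian order, as noted for ocs_hw_port_alloc()):
 * @code
 * if (ocs_hw_port_alloc(hw, sport, domain, wwpn) == OCS_HW_RTN_SUCCESS) {
 *         if (ocs_hw_port_attach(hw, sport, fc_id) != OCS_HW_RTN_SUCCESS) {
 *                 (void)ocs_hw_port_free(hw, sport);
 *         }
 * }
 * @endcode
 */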
2559 
2560 /**
2561  * @brief Called when the port control command completes.
2562  *
2563  * @par Description
2564  * We only need to free the mailbox command buffer.
2565  *
2566  * @param hw Hardware context.
2567  * @param status Status field from the mbox completion.
2568  * @param mqe Mailbox response structure.
2569  * @param arg Pointer to a callback function that signals the caller that the command is done.
2570  *
2571  * @return Returns 0.
2572  */
2573 static int32_t
2574 ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
2575 {
2576 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2577 	return 0;
2578 }
2579 
2580 /**
2581  * @ingroup port
2582  * @brief Control a port (initialize, shutdown, or set link configuration).
2583  *
2584  * @par Description
2585  * This function controls a port depending on the @c ctrl parameter:
2586  * - @b OCS_HW_PORT_INIT -
2587  * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
2588  * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
2589  * .
2590  * - @b OCS_HW_PORT_SHUTDOWN -
2591  * Issues the DOWN_LINK command for the specified port.
2592  * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
2593  * .
2594  * - @b OCS_HW_PORT_SET_LINK_CONFIG -
2595  * Sets the link configuration.
2596  *
2597  * @param hw Hardware context.
2598  * @param ctrl Specifies the operation:
2599  * - OCS_HW_PORT_INIT
2600  * - OCS_HW_PORT_SHUTDOWN
2601  * - OCS_HW_PORT_SET_LINK_CONFIG
2602  *
2603  * @param value Operation-specific value.
2604  * - OCS_HW_PORT_INIT - Selective reset AL_PA
2605  * - OCS_HW_PORT_SHUTDOWN - N/A
2606  * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
2607  *
2608  * @param cb Callback function invoked when the operation completes:
2609  * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2610  * are handled by the OCS_HW_CB_DOMAIN callbacks).
2611  * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
2612  * completes.
2613  *
2614  * @param arg Argument passed to the callback when the command completes.
2615  * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2616  * are handled by the OCS_HW_CB_DOMAIN callbacks).
2617  * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
2618  * completes.
2619  *
2620  * @return Returns 0 on success, or a non-zero value on failure.
2621  */
2622 ocs_hw_rtn_e
2623 ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
2624 {
2625 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2626 
2627 	switch (ctrl) {
2628 	case OCS_HW_PORT_INIT:
2629 	{
2630 		uint8_t	*init_link;
2631 		uint32_t speed = 0;
2632 		uint8_t reset_alpa = 0;
2633 
2634 		if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
2635 			uint8_t	*cfg_link;
2636 
2637 			cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2638 			if (cfg_link == NULL) {
2639 				ocs_log_err(hw->os, "no buffer for command\n");
2640 				return OCS_HW_RTN_NO_MEMORY;
2641 			}
2642 
2643 			if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
2644 				rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
2645 							ocs_hw_cb_port_control, NULL);
2646 			}
2647 
2648 			if (rc != OCS_HW_RTN_SUCCESS) {
2649 				ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
2650 				ocs_log_err(hw->os, "CONFIG_LINK failed\n");
2651 				break;
2652 			}
2653 			speed = hw->config.speed;
2654 			reset_alpa = (uint8_t)(value & 0xff);
2655 		} else {
2656 			speed = FC_LINK_SPEED_10G;
2657 		}
2658 
2659 		/*
2660 		 * Bring link up, unless FW version is not supported
2661 		 */
2662 		if (hw->workaround.fw_version_too_low) {
2663 			if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
2664 				ocs_log_err(hw->os, "Cannot bring up link.  Please update firmware to %s or later (current version is %s)\n",
2665 					OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0));
2666 			} else {
2667 				ocs_log_err(hw->os, "Cannot bring up link.  Please update firmware to %s or later (current version is %s)\n",
2668 					OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0));
2669 			}
2670 
2671 			return OCS_HW_RTN_ERROR;
2672 		}
2673 
2674 		rc = OCS_HW_RTN_ERROR;
2675 
2676 		/* Allocate a new buffer for the init_link command */
2677 		init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2678 		if (init_link == NULL) {
2679 			ocs_log_err(hw->os, "no buffer for command\n");
2680 			return OCS_HW_RTN_NO_MEMORY;
2681 		}
2682 
2683 		if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) {
2684 			rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT,
2685 						ocs_hw_cb_port_control, NULL);
2686 		}
2687 		/* Free buffer on error, since no callback is coming */
2688 		if (rc != OCS_HW_RTN_SUCCESS) {
2689 			ocs_free(hw->os, init_link, SLI4_BMBX_SIZE);
2690 			ocs_log_err(hw->os, "INIT_LINK failed\n");
2691 		}
2692 		break;
2693 	}
2694 	case OCS_HW_PORT_SHUTDOWN:
2695 	{
2696 		uint8_t	*down_link;
2697 
2698 		down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2699 		if (down_link == NULL) {
2700 			ocs_log_err(hw->os, "no buffer for command\n");
2701 			return OCS_HW_RTN_NO_MEMORY;
2702 		}
2703 		if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) {
2704 			rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT,
2705 						ocs_hw_cb_port_control, NULL);
2706 		}
2707 		/* Free buffer on error, since no callback is coming */
2708 		if (rc != OCS_HW_RTN_SUCCESS) {
2709 			ocs_free(hw->os, down_link, SLI4_BMBX_SIZE);
2710 			ocs_log_err(hw->os, "DOWN_LINK failed\n");
2711 		}
2712 		break;
2713 	}
2714 	case OCS_HW_PORT_SET_LINK_CONFIG:
2715 		rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg);
2716 		break;
2717 	default:
2718 		ocs_log_test(hw->os, "unhandled control %#x\n", ctrl);
2719 		break;
2720 	}
2721 
2722 	return rc;
2723 }
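
/**
 * @par Example
 * A sketch of bringing the link up and later shutting it down
 * (hypothetical call sites; link state changes are delivered through
 * the OCS_HW_CB_DOMAIN callbacks, not through @c cb):
 * @code
 * if (ocs_hw_port_control(hw, OCS_HW_PORT_INIT, 0, NULL, NULL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "link bring-up failed\n");
 * }
 *
 * (void)ocs_hw_port_control(hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL);
 * @endcode
 */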
2724 
2725 /**
2726  * @ingroup port
2727  * @brief Free port resources.
2728  *
2729  * @par Description
2730  * Issue the UNREG_VPI command to free the assigned VPI context.
2731  *
2732  * @param hw Hardware context.
2733  * @param sport SLI port object used to connect to the domain.
2734  *
2735  * @return Returns 0 on success, or a non-zero value on failure.
2736  */
2737 ocs_hw_rtn_e
2738 ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport)
2739 {
2740 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
2741 
2742 	if (!hw || !sport) {
2743 		ocs_log_err(hw ? hw->os : NULL,
2744 			"bad parameter(s) hw=%p sport=%p\n", hw,
2745 			sport);
2746 		return OCS_HW_RTN_ERROR;
2747 	}
2748 
2749 	/*
2750 	 * Check if the chip is in an error state (UE'd) before proceeding.
2751 	 */
2752 	if (sli_fw_error_status(&hw->sli) > 0) {
2753 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2754 		return OCS_HW_RTN_ERROR;
2755 	}
2756 
2757 	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
2758 	return rc;
2759 }
2760 
2761 /**
2762  * @ingroup domain
2763  * @brief Allocate a fabric domain object.
2764  *
2765  * @par Description
2766  * This function starts a series of commands needed to connect to the domain, including
2767  *   - REG_FCFI
2768  *   - INIT_VFI
2769  *   - READ_SPARMS
2770  *   .
2771  * @b Note: Not all SLI interface types use all of the above commands.
2772  * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK
2773  * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event.
2774  *
2775  * @param hw Hardware context.
2776  * @param domain Pointer to the domain object.
2777  * @param fcf FCF index.
2778  * @param vlan VLAN ID.
2779  *
2780  * @return Returns 0 on success, or a non-zero value on failure.
2781  */
2782 ocs_hw_rtn_e
2783 ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan)
2784 {
2785 	uint8_t		*cmd = NULL;
2786 	uint32_t	index;
2787 
2788 	if (!hw || !domain || !domain->sport) {
2789 		ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n",
2790 				hw, domain, domain ? domain->sport : NULL);
2791 		return OCS_HW_RTN_ERROR;
2792 	}
2793 
2794 	/*
2795 	 * Check if the chip is in an error state (UE'd) before proceeding.
2796 	 */
2797 	if (sli_fw_error_status(&hw->sli) > 0) {
2798 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2799 		return OCS_HW_RTN_ERROR;
2800 	}
2801 
2802 	cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2803 	if (!cmd) {
2804 		ocs_log_err(hw->os, "command memory allocation failed\n");
2805 		return OCS_HW_RTN_NO_MEMORY;
2806 	}
2807 
2808 	domain->dma = hw->domain_dmem;
2809 
2810 	domain->hw = hw;
2811 	domain->sm.app = domain;
2812 	domain->fcf = fcf;
2813 	domain->fcf_indicator = UINT32_MAX;
2814 	domain->vlan_id = vlan;
2815 	domain->indicator = UINT32_MAX;
2816 
2817 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) {
2818 		ocs_log_err(hw->os, "FCOE_VFI allocation failure\n");
2819 
2820 		ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2821 
2822 		return OCS_HW_RTN_ERROR;
2823 	}
2824 
2825 	ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd);
2826 	return OCS_HW_RTN_SUCCESS;
2827 }
2828 
2829 /**
2830  * @ingroup domain
2831  * @brief Attach a SLI port to a domain.
2832  *
2833  * @param hw Hardware context.
2834  * @param domain Pointer to the domain object.
2835  * @param fc_id Fibre Channel ID to associate with this port.
2836  *
2837  * @return Returns 0 on success, or a non-zero value on failure.
2838  */
2839 ocs_hw_rtn_e
2840 ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id)
2841 {
2842 	uint8_t	*buf = NULL;
2843 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2844 
2845 	if (!hw || !domain) {
2846 		ocs_log_err(hw ? hw->os : NULL,
2847 			"bad parameter(s) hw=%p domain=%p\n",
2848 			hw, domain);
2849 		return OCS_HW_RTN_ERROR;
2850 	}
2851 
2852 	/*
2853 	 * Check if the chip is in an error state (UE'd) before proceeding.
2854 	 */
2855 	if (sli_fw_error_status(&hw->sli) > 0) {
2856 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2857 		return OCS_HW_RTN_ERROR;
2858 	}
2859 
2860 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2861 	if (!buf) {
2862 		ocs_log_err(hw->os, "no buffer for command\n");
2863 		return OCS_HW_RTN_NO_MEMORY;
2864 	}
2865 
2866 	domain->sport->fc_id = fc_id;
2867 	ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf);
2868 	return rc;
2869 }
2870 
2871 /**
2872  * @ingroup domain
2873  * @brief Free a fabric domain object.
2874  *
2875  * @par Description
2876  * Free both the driver and SLI port resources associated with the domain.
2877  *
2878  * @param hw Hardware context.
2879  * @param domain Pointer to the domain object.
2880  *
2881  * @return Returns 0 on success, or a non-zero value on failure.
2882  */
2883 ocs_hw_rtn_e
2884 ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain)
2885 {
2886 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
2887 
2888 	if (!hw || !domain) {
2889 		ocs_log_err(hw ? hw->os : NULL,
2890 			"bad parameter(s) hw=%p domain=%p\n",
2891 			hw, domain);
2892 		return OCS_HW_RTN_ERROR;
2893 	}
2894 
2895 	/*
2896 	 * Check if the chip is in an error state (UE'd) before proceeding.
2897 	 */
2898 	if (sli_fw_error_status(&hw->sli) > 0) {
2899 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2900 		return OCS_HW_RTN_ERROR;
2901 	}
2902 
2903 	ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL);
2904 	return rc;
2905 }
2906 
2907 /**
2908  * @ingroup domain
2909  * @brief Free a fabric domain object.
2910  *
2911  * @par Description
2912  * Free the driver resources associated with the domain. The difference between
2913  * this call and ocs_hw_domain_free() is that this call assumes resources no longer
2914  * exist on the SLI port, due to a reset or after some error conditions.
2915  *
2916  * @param hw Hardware context.
2917  * @param domain Pointer to the domain object.
2918  *
2919  * @return Returns 0 on success, or a non-zero value on failure.
2920  */
2921 ocs_hw_rtn_e
2922 ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain)
2923 {
2924 	if (!hw || !domain) {
2925 		ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain);
2926 		return OCS_HW_RTN_ERROR;
2927 	}
2928 
2929 	sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
2930 
2931 	return OCS_HW_RTN_SUCCESS;
2932 }
2933 
2934 /**
2935  * @ingroup node
2936  * @brief Allocate a remote node object.
2937  *
2938  * @param hw Hardware context.
2939  * @param rnode Allocated remote node object to initialize.
2940  * @param fc_addr FC address of the remote node.
2941  * @param sport SLI port used to connect to remote node.
2942  *
2943  * @return Returns 0 on success, or a non-zero value on failure.
2944  */
2945 ocs_hw_rtn_e
2946 ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr,
2947 		ocs_sli_port_t *sport)
2948 {
2949 	/* Check for invalid indicator */
2950 	if (UINT32_MAX != rnode->indicator) {
2951 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n",
2952 				fc_addr, rnode->indicator);
2953 		return OCS_HW_RTN_ERROR;
2954 	}
2955 
2956 	/*
2957 	 * Check if the chip is in an error state (UE'd) before proceeding.
2958 	 */
2959 	if (sli_fw_error_status(&hw->sli) > 0) {
2960 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2961 		return OCS_HW_RTN_ERROR;
2962 	}
2963 
2964 	/* NULL SLI port indicates an unallocated remote node */
2965 	rnode->sport = NULL;
2966 
2967 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) {
2968 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
2969 				fc_addr);
2970 		return OCS_HW_RTN_ERROR;
2971 	}
2972 
2973 	rnode->fc_id = fc_addr;
2974 	rnode->sport = sport;
2975 
2976 	return OCS_HW_RTN_SUCCESS;
2977 }
2978 
2979 /**
2980  * @ingroup node
2981  * @brief Update a remote node object with the remote port's service parameters.
2982  *
2983  * @param hw Hardware context.
2984  * @param rnode Allocated remote node object to initialize.
2985  * @param sparms DMA buffer containing the remote port's service parameters.
2986  *
2987  * @return Returns 0 on success, or a non-zero value on failure.
2988  */
2989 ocs_hw_rtn_e
2990 ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms)
2991 {
2992 	ocs_hw_rtn_e	rc = OCS_HW_RTN_ERROR;
2993 	uint8_t		*buf = NULL;
2994 	uint32_t	count = 0;
2995 
2996 	if (!hw || !rnode || !sparms) {
2997 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n",
2998 			    hw, rnode, sparms);
2999 		return OCS_HW_RTN_ERROR;
3000 	}
3001 
3002 	/*
3003 	 * Check if the chip is in an error state (UE'd) before proceeding.
3004 	 */
3005 	if (sli_fw_error_status(&hw->sli) > 0) {
3006 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3007 		return OCS_HW_RTN_ERROR;
3008 	}
3009 
3010 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3011 	if (!buf) {
3012 		ocs_log_err(hw->os, "no buffer for command\n");
3013 		return OCS_HW_RTN_NO_MEMORY;
3014 	}
3015 
3016 	/*
3017 	 * If the attach count is non-zero, this RPI has already been registered.
3018 	 * Otherwise, register the RPI
3019 	 */
3020 	if (rnode->index == UINT32_MAX) {
3021 		ocs_log_err(NULL, "bad parameter rnode->index invalid\n");
3022 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3023 		return OCS_HW_RTN_ERROR;
3024 	}
3025 	count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3026 	if (count) {
3027 		/*
3028 		 * Can't attach multiple FC_IDs to a node unless High Login
3029 		 * Mode is enabled
3030 		 */
3031 		if (sli_get_hlm(&hw->sli) == FALSE) {
3032 			ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n",
3033 					sli_get_hlm(&hw->sli), count);
3034 			rc = OCS_HW_RTN_SUCCESS;
3035 		} else {
3036 			rnode->node_group = TRUE;
3037 			rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached);
3038 			rc = rnode->attached  ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS;
3039 		}
3040 	} else {
3041 		rnode->node_group = FALSE;
3042 
3043 		ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt);
3044 		if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id,
3045 					rnode->indicator, rnode->sport->indicator,
3046 					sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) {
3047 			rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
3048 					ocs_hw_cb_node_attach, rnode);
3049 		}
3050 	}
3051 
3052 	if (count || rc) {
3053 		if (rc < OCS_HW_RTN_SUCCESS) {
3054 			ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3055 			ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI");
3056 		}
3057 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3058 	}
3059 
3060 	return rc;
3061 }
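
/**
 * @par Example
 * A sketch of the remote-node bring-up sequence (hypothetical caller;
 * @c sparms must contain the remote port's service parameters):
 * @code
 * rnode->indicator = UINT32_MAX;
 *
 * if (ocs_hw_node_alloc(hw, rnode, fc_addr, sport) == OCS_HW_RTN_SUCCESS) {
 *         rc = ocs_hw_node_attach(hw, rnode, sparms);
 *         if ((rc != OCS_HW_RTN_SUCCESS) && (rc != OCS_HW_RTN_SUCCESS_SYNC)) {
 *                 (void)ocs_hw_node_free_resources(hw, rnode);
 *         }
 * }
 * @endcode
 */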
3062 
3063 /**
3064  * @ingroup node
3065  * @brief Free a remote node resource.
3066  *
3067  * @param hw Hardware context.
3068  * @param rnode Remote node object to free.
3069  *
3070  * @return Returns 0 on success, or a non-zero value on failure.
3071  */
3072 ocs_hw_rtn_e
3073 ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3074 {
3075 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
3076 
3077 	if (!hw || !rnode) {
3078 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3079 			    hw, rnode);
3080 		return OCS_HW_RTN_ERROR;
3081 	}
3082 
3083 	if (rnode->sport) {
3084 		if (!rnode->attached) {
3085 			if (rnode->indicator != UINT32_MAX) {
3086 				if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3087 					ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n",
3088 						    rnode->indicator, rnode->fc_id);
3089 					rc = OCS_HW_RTN_ERROR;
3090 				} else {
3091 					rnode->node_group = FALSE;
3092 					rnode->indicator = UINT32_MAX;
3093 					rnode->index = UINT32_MAX;
3094 					rnode->free_group = FALSE;
3095 				}
3096 			}
3097 		} else {
3098 			ocs_log_err(hw->os, "Error: rnode is still attached\n");
3099 			rc = OCS_HW_RTN_ERROR;
3100 		}
3101 	}
3102 
3103 	return rc;
3104 }
3105 
3106 /**
3107  * @ingroup node
3108  * @brief Free a remote node object.
3109  *
3110  * @param hw Hardware context.
3111  * @param rnode Remote node object to free.
3112  *
3113  * @return Returns 0 on success, or a non-zero value on failure.
3114  */
3115 ocs_hw_rtn_e
3116 ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3117 {
3118 	uint8_t	*buf = NULL;
3119 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS_SYNC;
3120 	uint32_t	index = UINT32_MAX;
3121 
3122 	if (!hw || !rnode) {
3123 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3124 			    hw, rnode);
3125 		return OCS_HW_RTN_ERROR;
3126 	}
3127 
3128 	/*
3129 	 * Check if the chip is in an error state (UE'd) before proceeding.
3130 	 */
3131 	if (sli_fw_error_status(&hw->sli) > 0) {
3132 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3133 		return OCS_HW_RTN_ERROR;
3134 	}
3135 
3136 	index = rnode->index;
3137 
3138 	if (rnode->sport) {
3139 		uint32_t	count = 0;
3140 		uint32_t	fc_id;
3141 
3142 		if (!rnode->attached) {
3143 			return OCS_HW_RTN_SUCCESS_SYNC;
3144 		}
3145 
3146 		buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3147 		if (!buf) {
3148 			ocs_log_err(hw->os, "no buffer for command\n");
3149 			return OCS_HW_RTN_NO_MEMORY;
3150 		}
3151 
3152 		count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1);
3153 
3154 		if (count <= 1) {
3155 			/* There are no other references to this RPI
3156 			 * so unregister it and free the resource. */
3157 			fc_id = UINT32_MAX;
3158 			rnode->node_group = FALSE;
3159 			rnode->free_group = TRUE;
3160 		} else {
3161 			if (sli_get_hlm(&hw->sli) == FALSE) {
3162 				ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n",
3163 						count);
3164 			}
3165 			fc_id = rnode->fc_id & 0x00ffffff;
3166 		}
3167 
3168 		rc = OCS_HW_RTN_ERROR;
3169 
3170 		if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator,
3171 					SLI_RSRC_FCOE_RPI, fc_id)) {
3172 			rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode);
3173 		}
3174 
3175 		if (rc != OCS_HW_RTN_SUCCESS) {
3176 			ocs_log_err(hw->os, "UNREG_RPI failed\n");
3177 			ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3178 			rc = OCS_HW_RTN_ERROR;
3179 		}
3180 	}
3181 
3182 	return rc;
3183 }
3184 
3185 /**
3186  * @ingroup node
3187  * @brief Free all remote node objects.
3188  *
3189  * @param hw Hardware context.
3190  *
3191  * @return Returns 0 on success, or a non-zero value on failure.
3192  */
3193 ocs_hw_rtn_e
3194 ocs_hw_node_free_all(ocs_hw_t *hw)
3195 {
3196 	uint8_t	*buf = NULL;
3197 	ocs_hw_rtn_e	rc = OCS_HW_RTN_ERROR;
3198 
3199 	if (!hw) {
3200 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
3201 		return OCS_HW_RTN_ERROR;
3202 	}
3203 
3204 	/*
3205 	 * Check if the chip is in an error state (UE'd) before proceeding.
3206 	 */
3207 	if (sli_fw_error_status(&hw->sli) > 0) {
3208 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3209 		return OCS_HW_RTN_ERROR;
3210 	}
3211 
3212 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3213 	if (!buf) {
3214 		ocs_log_err(hw->os, "no buffer for command\n");
3215 		return OCS_HW_RTN_NO_MEMORY;
3216 	}
3217 
3218 	if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff,
3219 				SLI_RSRC_FCOE_FCFI, UINT32_MAX)) {
3220 		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all,
3221 				NULL);
3222 	}
3223 
3224 	if (rc != OCS_HW_RTN_SUCCESS) {
3225 		ocs_log_err(hw->os, "UNREG_RPI failed\n");
3226 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3227 		rc = OCS_HW_RTN_ERROR;
3228 	}
3229 
3230 	return rc;
3231 }
3232 
3233 ocs_hw_rtn_e
3234 ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3235 {
3236 
3237 	if (!hw || !ngroup) {
3238 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3239 				hw, ngroup);
3240 		return OCS_HW_RTN_ERROR;
3241 	}
3242 
3243 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator,
3244 				&ngroup->index)) {
3245 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
3246 				ngroup->indicator);
3247 		return OCS_HW_RTN_ERROR;
3248 	}
3249 
3250 	return OCS_HW_RTN_SUCCESS;
3251 }
3252 
3253 ocs_hw_rtn_e
3254 ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode)
3255 {
3256 
3257 	if (!hw || !ngroup || !rnode) {
3258 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n",
3259 			    hw, ngroup, rnode);
3260 		return OCS_HW_RTN_ERROR;
3261 	}
3262 
3263 	if (rnode->attached) {
3264 		ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n",
3265 			    rnode->indicator, rnode->fc_id);
3266 		return OCS_HW_RTN_ERROR;
3267 	}
3268 
3269 	if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3270 		ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3271 				rnode->indicator);
3272 		return OCS_HW_RTN_ERROR;
3273 	}
3274 
3275 	rnode->indicator = ngroup->indicator;
3276 	rnode->index = ngroup->index;
3277 
3278 	return OCS_HW_RTN_SUCCESS;
3279 }
3280 
3281 ocs_hw_rtn_e
3282 ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3283 {
3284 	int	ref;
3285 
3286 	if (!hw || !ngroup) {
3287 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3288 				hw, ngroup);
3289 		return OCS_HW_RTN_ERROR;
3290 	}
3291 
3292 	ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count);
3293 	if (ref) {
3294 		/* The reference count is unexpectedly non-zero; free the RPI anyway */
3295 		ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n",
3296 				ref, ngroup->indicator);
3297 
3298 		if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) {
3299 			ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3300 				    ngroup->indicator);
3301 			return OCS_HW_RTN_ERROR;
3302 		}
3303 
3304 		ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0);
3305 	}
3306 
3307 	ngroup->indicator = UINT32_MAX;
3308 	ngroup->index = UINT32_MAX;
3309 
3310 	return OCS_HW_RTN_SUCCESS;
3311 }
3312 
3313 /**
3314  * @brief Initialize IO fields on each free call.
3315  *
3316  * @n @b Note: This is done on each free call (as opposed to each
3317  * alloc call) because port-owned XRIs are not
3318  * allocated with ocs_hw_io_alloc() but are freed with this
3319  * function.
3320  *
3321  * @param io Pointer to HW IO.
3322  */
3323 static inline void
3324 ocs_hw_init_free_io(ocs_hw_io_t *io)
3325 {
3326 	/*
3327 	 * Set io->done to NULL, to avoid any callbacks, should
3328 	 * a completion be received for one of these IOs
3329 	 */
3330 	io->done = NULL;
3331 	io->abort_done = NULL;
3332 	io->status_saved = 0;
3333 	io->abort_in_progress = FALSE;
3334 	io->port_owned_abort_count = 0;
3335 	io->rnode = NULL;
3336 	io->type = 0xFFFF;
3337 	io->wq = NULL;
3338 	io->ul_io = NULL;
3339 	io->wqe_timeout = 0;
3340 }
3341 
3342 /**
3343  * @ingroup io
3344  * @brief Lockless allocate a HW IO object.
3345  *
3346  * @par Description
3347  * Assume that hw->ocs_lock is held. This function is only used if
3348  * use_dif_sec_xri workaround is being used.
3349  *
3350  * @param hw Hardware context.
3351  *
3352  * @return Returns a pointer to an object on success, or NULL on failure.
3353  */
3354 static inline ocs_hw_io_t *
3355 _ocs_hw_io_alloc(ocs_hw_t *hw)
3356 {
3357 	ocs_hw_io_t	*io = NULL;
3358 
3359 	if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
3360 		ocs_list_add_tail(&hw->io_inuse, io);
3361 		io->state = OCS_HW_IO_STATE_INUSE;
3362 		io->quarantine = FALSE;
3363 		io->quarantine_first_phase = TRUE;
3364 		io->abort_reqtag = UINT32_MAX;
3365 		ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
3366 	} else {
3367 		ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
3368 	}
3369 
3370 	return io;
3371 }
3372 /**
3373  * @ingroup io
3374  * @brief Allocate a HW IO object.
3375  *
3376  * @par Description
3377  * @n @b Note: This function applies to non-port owned XRIs
3378  * only.
3379  *
3380  * @param hw Hardware context.
3381  *
3382  * @return Returns a pointer to an object on success, or NULL on failure.
3383  */
3384 ocs_hw_io_t *
3385 ocs_hw_io_alloc(ocs_hw_t *hw)
3386 {
3387 	ocs_hw_io_t	*io = NULL;
3388 
3389 	ocs_lock(&hw->io_lock);
3390 		io = _ocs_hw_io_alloc(hw);
3391 	ocs_unlock(&hw->io_lock);
3392 
3393 	return io;
3394 }
3395 
3396 /**
3397  * @ingroup io
3398  * @brief Allocate/Activate a port owned HW IO object.
3399  *
3400  * @par Description
3401  * This function is called by the transport layer when an XRI is
3402  * allocated by the SLI-Port. This will "activate" the HW IO
3403  * associated with the XRI received from the SLI-Port to mirror
3404  * the state of the XRI.
3405  * @n @n @b Note: This function applies to port owned XRIs only.
3406  *
3407  * @param hw Hardware context.
3408  * @param io Pointer to the HW IO to activate/allocate.
3409  *
3410  * @return Returns a pointer to an object on success, or NULL on failure.
3411  */
3412 ocs_hw_io_t *
3413 ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
3414 {
3415 	if (ocs_ref_read_count(&io->ref) > 0) {
3416 		ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
3417 		return NULL;
3418 	}
3419 
3420 	if (io->wq != NULL) {
3421 		ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
3422 		return NULL;
3423 	}
3424 
3425 	ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3426 	io->xbusy = TRUE;
3427 
3428 	return io;
3429 }
3430 
3431 /**
3432  * @ingroup io
3433  * @brief When an IO is freed, move it to the correct list based on the
3434  * exchange busy flag and any active workarounds.
3435  *
3436  * @par Description
3437  * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
3438  * from the busy or wait_free list.
3439  *
3440  * @param hw Hardware context.
3441  * @param io Pointer to the IO object to move.
3442  */
3443 static void
3444 ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io)
3445 {
3446 	if (io->xbusy) {
3447 		/* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */
3448 		ocs_list_add_tail(&hw->io_wait_free, io);
3449 		io->state = OCS_HW_IO_STATE_WAIT_FREE;
3450 	} else {
3451 		/* IO not busy, add to free list */
3452 		ocs_list_add_tail(&hw->io_free, io);
3453 		io->state = OCS_HW_IO_STATE_FREE;
3454 	}
3455 
3456 	/* BZ 161832 workaround */
3457 	if (hw->workaround.use_dif_sec_xri) {
3458 		ocs_hw_check_sec_hio_list(hw);
3459 	}
3460 }
3461 
3462 /**
3463  * @ingroup io
3464  * @brief Free a HW IO object. Perform cleanup common to
3465  * port and host-owned IOs.
3466  *
3467  * @param hw Hardware context.
3468  * @param io Pointer to the HW IO object.
3469  */
3470 static inline void
3471 ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io)
3472 {
3473 	/* initialize IO fields */
3474 	ocs_hw_init_free_io(io);
3475 
3476 	/* Restore default SGL */
3477 	ocs_hw_io_restore_sgl(hw, io);
3478 }
3479 
3480 /**
3481  * @ingroup io
3482  * @brief Free a HW IO object associated with a port-owned XRI.
3483  *
3484  * @param arg Pointer to the HW IO object.
3485  */
3486 static void
3487 ocs_hw_io_free_port_owned(void *arg)
3488 {
3489 	ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3490 	ocs_hw_t *hw = io->hw;
3491 
3492 	/*
3493 	 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs
3494 	 * waiting for buffers.
3495 	 */
3496 	if (io->auto_xfer_rdy_dnrx) {
3497 		ocs_lock(&hw->io_lock);
3498 			/* take a reference count because we still own the IO until the buffer is posted */
3499 			ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3500 			ocs_list_add_tail(&hw->io_port_dnrx, io);
3501 		ocs_unlock(&hw->io_lock);
3502 	}
3503 
3504 	/* perform common cleanup */
3505 	ocs_hw_io_free_common(hw, io);
3506 }
3507 
3508 /**
3509  * @ingroup io
3510  * @brief Free a previously-allocated HW IO object. Called when
3511  * IO refcount goes to zero (host-owned IOs only).
3512  *
3513  * @param arg Pointer to the HW IO object.
3514  */
3515 static void
3516 ocs_hw_io_free_internal(void *arg)
3517 {
3518 	ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3519 	ocs_hw_t *hw = io->hw;
3520 
3521 	/* perform common cleanup */
3522 	ocs_hw_io_free_common(hw, io);
3523 
3524 	ocs_lock(&hw->io_lock);
3525 		/* remove from in-use list */
3526 		ocs_list_remove(&hw->io_inuse, io);
3527 		ocs_hw_io_free_move_correct_list(hw, io);
3528 	ocs_unlock(&hw->io_lock);
3529 }
3530 
3531 /**
3532  * @ingroup io
3533  * @brief Free a previously-allocated HW IO object.
3534  *
3535  * @par Description
3536  * @n @b Note: This function applies to port and host owned XRIs.
3537  *
3538  * @param hw Hardware context.
3539  * @param io Pointer to the HW IO object.
3540  *
3541  * @return Returns a non-zero value if HW IO was freed, 0 if references
3542  * on the IO still exist, or a negative value if an error occurred.
3543  */
3544 int32_t
3545 ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io)
3546 {
3547 	/* just put refcount */
3548 	if (ocs_ref_read_count(&io->ref) <= 0) {
3549 		ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
3550 			    io->indicator, io->reqtag);
3551 		return -1;
3552 	}
3553 
3554 	return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */
3555 }
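
/**
 * @par Example
 * A minimal sketch of the host-owned IO life cycle (illustrative only; the
 * helper below is hypothetical and not part of this driver). The IO is
 * created with a reference count of one by ocs_hw_io_alloc(), and
 * ocs_hw_io_free() drops that reference; the IO is recycled onto the
 * free (or wait_free) list only when the count reaches zero.
 *
 * @code
 * static void
 * example_io_cycle(ocs_hw_t *hw)
 * {
 * 	ocs_hw_io_t *io = ocs_hw_io_alloc(hw);
 *
 * 	if (io == NULL) {
 * 		return;	// pool exhausted; io_alloc_failed_count was incremented
 * 	}
 *
 * 	// ... build the SGL and send the IO here ...
 *
 * 	if (ocs_hw_io_free(hw, io) > 0) {
 * 		// last reference dropped; IO moved to io_free or io_wait_free
 * 	}
 * }
 * @endcode
 */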
3556 
3557 /**
3558  * @ingroup io
3559  * @brief Check if given HW IO is in-use
3560  *
3561  * @par Description
3562  * This function returns TRUE if the given HW IO has been
3563  * allocated and is in-use, and FALSE otherwise. It applies to
3564  * port and host owned XRIs.
3565  *
3566  * @param hw Hardware context.
3567  * @param io Pointer to the HW IO object.
3568  *
3569  * @return TRUE if an IO is in use, or FALSE otherwise.
3570  */
3571 uint8_t
3572 ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
3573 {
3574 	return (ocs_ref_read_count(&io->ref) > 0);
3575 }
3576 
3577 /**
3578  * @brief Write a HW IO to a work queue.
3579  *
3580  * @par Description
3581  * A HW IO is written to a work queue.
3582  *
3583  * @param wq Pointer to work queue.
3584  * @param wqe Pointer to WQ entry.
3585  *
3586  * @n @b Note: Assumes the SLI-4 queue lock is held.
3587  *
3588  * @return Returns 0 on success, or a negative error code value on failure.
3589  */
3590 static int32_t
3591 _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3592 {
3593 	int32_t rc;
3594 	int32_t queue_rc;
3595 
3596 	/* Every so often, set the wqec bit to generate WQE-consumed completions */
3597 	if (wq->wqec_count) {
3598 		wq->wqec_count--;
3599 	}
3600 	if (wq->wqec_count == 0) {
3601 		sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf;
3602 		genwqe->wqec = 1;
3603 		wq->wqec_count = wq->wqec_set_count;
3604 	}
3605 
3606 	/* Decrement WQ free count */
3607 	wq->free_count--;
3608 
3609 	queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
3610 
3611 	if (queue_rc < 0) {
3612 		rc = -1;
3613 	} else {
3614 		rc = 0;
3615 		ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
3616 	}
3617 
3618 	return rc;
3619 }
3620 
3621 /**
3622  * @brief Write a HW IO to a work queue.
3623  *
3624  * @par Description
3625  * A HW IO is written to a work queue.
3626  *
3627  * @param wq Pointer to work queue.
3628  * @param wqe Pointer to WQE entry.
3629  *
3630  * @n @b Note: Takes the SLI-4 queue lock.
3631  *
3632  * @return Returns 0 on success, or a negative error code value on failure.
3633  */
3634 int32_t
3635 hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3636 {
3637 	int32_t rc = 0;
3638 
3639 	sli_queue_lock(wq->queue);
3640 		if ( ! ocs_list_empty(&wq->pending_list)) {
3641 			ocs_list_add_tail(&wq->pending_list, wqe);
3642 			OCS_STAT(wq->wq_pending_count++;)
3643 			while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3644 				rc = _hw_wq_write(wq, wqe);
3645 				if (rc < 0) {
3646 					break;
3647 				}
3648 				if (wqe->abort_wqe_submit_needed) {
3649 					wqe->abort_wqe_submit_needed = 0;
3650 					sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3651 							wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT );
3652 					ocs_list_add_tail(&wq->pending_list, wqe);
3653 					OCS_STAT(wq->wq_pending_count++;)
3654 				}
3655 			}
3656 		} else {
3657 			if (wq->free_count > 0) {
3658 				rc = _hw_wq_write(wq, wqe);
3659 			} else {
3660 				ocs_list_add_tail(&wq->pending_list, wqe);
3661 				OCS_STAT(wq->wq_pending_count++;)
3662 			}
3663 		}
3664 
3665 	sli_queue_unlock(wq->queue);
3666 
3667 	return rc;
3668 
3669 }
3670 
3671 /**
3672  * @brief Update free count and submit any pending HW IOs
3673  *
3674  * @par Description
3675  * The WQ free count is updated, and any pending HW IOs are submitted that
3676  * will fit in the queue.
3677  *
3678  * @param wq Pointer to work queue.
3679  * @param update_free_count Value added to the WQ's free count.
3680  *
3681  * @return None.
3682  */
3683 static void
3684 hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count)
3685 {
3686 	ocs_hw_wqe_t *wqe;
3687 
3688 	sli_queue_lock(wq->queue);
3689 
3690 		/* Update free count with value passed in */
3691 		wq->free_count += update_free_count;
3692 
3693 		while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3694 			_hw_wq_write(wq, wqe);
3695 
3696 			if (wqe->abort_wqe_submit_needed) {
3697 				wqe->abort_wqe_submit_needed = 0;
3698 				sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3699 						wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
3700 				ocs_list_add_tail(&wq->pending_list, wqe);
3701 				OCS_STAT(wq->wq_pending_count++;)
3702 			}
3703 		}
3704 
3705 	sli_queue_unlock(wq->queue);
3706 }
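
/**
 * @par Example
 * A minimal sketch of how the pending list provides back-pressure
 * (illustrative; example_wq_credit() is hypothetical). hw_wq_write()
 * either writes the WQE immediately when free_count permits, or parks it
 * on wq->pending_list; a completion path then returns credits with
 * hw_wq_submit_pending(), which drains the pending entries in order.
 *
 * @code
 * static void
 * example_wq_credit(hw_wq_t *wq, uint32_t n_completed)
 * {
 * 	// One credit per consumed WQE; queued entries are then written
 * 	hw_wq_submit_pending(wq, n_completed);
 * }
 * @endcode
 */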
3707 
3708 /**
3709  * @brief Check to see if there are any BZ 161832 workaround waiting IOs
3710  *
3711  * @par Description
3712  * Checks hw->sec_hio_wait_list; if an IO is waiting for a secondary HW IO,
3713  * try to allocate one and dispatch the waiting IO.
3714  *
3715  * @n @b Note: hw->io_lock MUST be taken when called.
3716  *
3717  * @param hw pointer to HW object
3718  *
3719  * @return none
3720  */
3721 static void
3722 ocs_hw_check_sec_hio_list(ocs_hw_t *hw)
3723 {
3724 	ocs_hw_io_t *io;
3725 	ocs_hw_io_t *sec_io;
3726 	int rc = 0;
3727 
3728 	while (!ocs_list_empty(&hw->sec_hio_wait_list)) {
3729 		uint16_t flags;
3730 
3731 		sec_io = _ocs_hw_io_alloc(hw);
3732 		if (sec_io == NULL) {
3733 			break;
3734 		}
3735 
3736 		io = ocs_list_remove_head(&hw->sec_hio_wait_list);
3737 		ocs_list_add_tail(&hw->io_inuse, io);
3738 		io->state = OCS_HW_IO_STATE_INUSE;
3739 		io->sec_hio = sec_io;
3740 
3741 		/* mark secondary XRI for second and subsequent data phase as quarantine */
3742 		if (io->xbusy) {
3743 			sec_io->quarantine = TRUE;
3744 		}
3745 
3746 		flags = io->sec_iparam.fcp_tgt.flags;
3747 		if (io->xbusy) {
3748 			flags |= SLI4_IO_CONTINUATION;
3749 		} else {
3750 			flags &= ~SLI4_IO_CONTINUATION;
3751 		}
3752 
3753 		io->wqe_timeout = io->sec_iparam.fcp_tgt.timeout;
3754 
3755 		/* Complete (continue) TRECV IO */
3756 		if (io->xbusy) {
3757 			if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3758 				io->first_data_sge,
3759 				io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator,
3760 				io->reqtag, SLI4_CQ_DEFAULT,
3761 				io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3762 				flags,
3763 				io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3764 					ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3765 					break;
3766 			}
3767 		} else {
3768 			if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3769 				io->first_data_sge,
3770 				io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator,
3771 				io->reqtag, SLI4_CQ_DEFAULT,
3772 				io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3773 				flags,
3774 				io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
3775 				io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3776 					ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3777 					break;
3778 			}
3779 		}
3780 
3781 		if (io->wq == NULL) {
3782 			io->wq = ocs_hw_queue_next_wq(hw, io);
3783 			ocs_hw_assert(io->wq != NULL);
3784 		}
3785 		io->xbusy = TRUE;
3786 
3787 		/*
3788 		 * Add IO to active io wqe list before submitting, in case the
3789 		 * wcqe processing preempts this thread.
3790 		 */
3791 		ocs_hw_add_io_timed_wqe(hw, io);
3792 		rc = hw_wq_write(io->wq, &io->wqe);
3793 		if (rc >= 0) {
3794 			/* non-negative return is success */
3795 			rc = 0;
3796 		} else {
3797 			/* failed to write wqe, remove from active wqe list */
3798 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3799 			io->xbusy = FALSE;
3800 			ocs_hw_remove_io_timed_wqe(hw, io);
3801 		}
3802 	}
3803 }
3804 
3805 /**
3806  * @ingroup io
3807  * @brief Send a Single Request/Response Sequence (SRRS).
3808  *
3809  * @par Description
3810  * This routine supports communication sequences consisting of a single
3811  * request and single response between two endpoints. Examples include:
3812  *  - Sending an ELS request.
3813  *  - Sending an ELS response - To send an ELS response, the caller must provide
3814  * the OX_ID from the received request.
3815  *  - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
3816  * the caller must provide the R_CTL, TYPE, and DF_CTL
3817  * values to place in the FC frame header.
3818  *  .
3819  * @n @b Note: The caller is expected to provide both send and receive
3820  * buffers for requests. In the case of sending a response, no receive buffer
3821  * is necessary and the caller may pass in a NULL pointer.
3822  *
3823  * @param hw Hardware context.
3824  * @param type Type of sequence (ELS request/response, FC-CT).
3825  * @param io Previously-allocated HW IO object.
3826  * @param send DMA memory holding data to send (for example, ELS request, BLS response).
3827  * @param len Length, in bytes, of data to send.
3828  * @param receive Optional DMA memory to hold a response.
3829  * @param rnode Destination of data (that is, a remote node).
3830  * @param iparam IO parameters (ELS response and FC-CT).
3831  * @param cb Function call upon completion of sending the data (may be NULL).
3832  * @param arg Argument to pass to IO completion function.
3833  *
3834  * @return Returns 0 on success, or a non-zero on failure.
3835  */
3836 ocs_hw_rtn_e
3837 ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
3838 		  ocs_dma_t *send, uint32_t len, ocs_dma_t *receive,
3839 		  ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam,
3840 		  ocs_hw_srrs_cb_t cb, void *arg)
3841 {
3842 	sli4_sge_t	*sge = NULL;
3843 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
3844 	uint16_t	local_flags = 0;
3845 
3846 	if (!hw || !io || !rnode || !iparam) {
3847 		ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n",
3848 			    hw, io, send, receive, rnode, iparam);
3849 		return OCS_HW_RTN_ERROR;
3850 	}
3851 
3852 	if (hw->state != OCS_HW_STATE_ACTIVE) {
3853 		ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state);
3854 		return OCS_HW_RTN_ERROR;
3855 	}
3856 
3857 	if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
3858 		/* We must set the XC bit for port owned XRIs */
3859 		local_flags |= SLI4_IO_CONTINUATION;
3860 	}
3861 	io->rnode = rnode;
3862 	io->type  = type;
3863 	io->done = cb;
3864 	io->arg  = arg;
3865 
3866 	sge = io->sgl->virt;
3867 
3868 	/* clear both SGE */
3869 	ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
3870 
3871 	if (send) {
3872 		sge[0].buffer_address_high = ocs_addr32_hi(send->phys);
3873 		sge[0].buffer_address_low  = ocs_addr32_lo(send->phys);
3874 		sge[0].sge_type = SLI4_SGE_TYPE_DATA;
3875 		sge[0].buffer_length = len;
3876 	}
3877 
3878 	if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) {
3879 		sge[1].buffer_address_high = ocs_addr32_hi(receive->phys);
3880 		sge[1].buffer_address_low  = ocs_addr32_lo(receive->phys);
3881 		sge[1].sge_type = SLI4_SGE_TYPE_DATA;
3882 		sge[1].buffer_length = receive->size;
3883 		sge[1].last = TRUE;
3884 	} else {
3885 		sge[0].last = TRUE;
3886 	}
3887 
3888 	switch (type) {
3889 	case OCS_HW_ELS_REQ:
3890 		if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl,
3891 							*((uint8_t *)(send->virt)), /* req_type */
3892 							len, receive->size,
3893 							iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) {
3894 			ocs_log_err(hw->os, "REQ WQE error\n");
3895 			rc = OCS_HW_RTN_ERROR;
3896 		}
3897 		break;
3898 	case OCS_HW_ELS_RSP:
3899 		if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3900 					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3901 					   iparam->els.ox_id,
3902 							rnode, local_flags, UINT32_MAX)) {
3903 			ocs_log_err(hw->os, "RSP WQE error\n");
3904 			rc = OCS_HW_RTN_ERROR;
3905 		}
3906 		break;
3907 	case OCS_HW_ELS_RSP_SID:
3908 		if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3909 					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3910 					   iparam->els_sid.ox_id,
3911 							rnode, local_flags, iparam->els_sid.s_id)) {
3912 			ocs_log_err(hw->os, "RSP (SID) WQE error\n");
3913 			rc = OCS_HW_RTN_ERROR;
3914 		}
3915 		break;
3916 	case OCS_HW_FC_CT:
3917 		if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3918 					  receive->size, iparam->fc_ct.timeout, io->indicator,
3919 					  io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl,
3920 					  iparam->fc_ct.type, iparam->fc_ct.df_ctl)) {
3921 			ocs_log_err(hw->os, "GEN WQE error\n");
3922 			rc = OCS_HW_RTN_ERROR;
3923 		}
3924 		break;
3925 	case OCS_HW_FC_CT_RSP:
3926 		if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3927 					  iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator,
3928 					  io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl,
3929 					  iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) {
3930 			ocs_log_err(hw->os, "XMIT SEQ WQE error\n");
3931 			rc = OCS_HW_RTN_ERROR;
3932 		}
3933 		break;
3934 	case OCS_HW_BLS_ACC:
3935 	case OCS_HW_BLS_RJT:
3936 	{
3937 		sli_bls_payload_t	bls;
3938 
3939 		if (OCS_HW_BLS_ACC == type) {
3940 			bls.type = SLI_BLS_ACC;
3941 			ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc));
3942 		} else {
3943 			bls.type = SLI_BLS_RJT;
3944 			ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt));
3945 		}
3946 
3947 		bls.ox_id = iparam->bls.ox_id;
3948 		bls.rx_id = iparam->bls.rx_id;
3949 
3950 		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3951 					   io->indicator, io->reqtag,
3952 					   SLI4_CQ_DEFAULT,
3953 					   rnode, UINT32_MAX)) {
3954 			ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
3955 			rc = OCS_HW_RTN_ERROR;
3956 		}
3957 		break;
3958 	}
3959 	case OCS_HW_BLS_ACC_SID:
3960 	{
3961 		sli_bls_payload_t	bls;
3962 
3963 		bls.type = SLI_BLS_ACC;
3964 		ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc));
3965 
3966 		bls.ox_id = iparam->bls_sid.ox_id;
3967 		bls.rx_id = iparam->bls_sid.rx_id;
3968 
3969 		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3970 					   io->indicator, io->reqtag,
3971 					   SLI4_CQ_DEFAULT,
3972 					   rnode, iparam->bls_sid.s_id)) {
3973 			ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
3974 			rc = OCS_HW_RTN_ERROR;
3975 		}
3976 		break;
3977 	}
3978 	case OCS_HW_BCAST:
3979 		if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3980 					iparam->bcast.timeout, io->indicator, io->reqtag,
3981 					SLI4_CQ_DEFAULT, rnode,
3982 					iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) {
3983 			ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n");
3984 			rc = OCS_HW_RTN_ERROR;
3985 		}
3986 		break;
3987 	default:
3988 		ocs_log_err(hw->os, "bad SRRS type %#x\n", type);
3989 		rc = OCS_HW_RTN_ERROR;
3990 	}
3991 
3992 	if (OCS_HW_RTN_SUCCESS == rc) {
3993 		if (io->wq == NULL) {
3994 			io->wq = ocs_hw_queue_next_wq(hw, io);
3995 			ocs_hw_assert(io->wq != NULL);
3996 		}
3997 		io->xbusy = TRUE;
3998 
3999 		/*
4000 		 * Add IO to active io wqe list before submitting, in case the
4001 		 * wcqe processing preempts this thread.
4002 		 */
4003 		OCS_STAT(io->wq->use_count++);
4004 		ocs_hw_add_io_timed_wqe(hw, io);
4005 		rc = hw_wq_write(io->wq, &io->wqe);
4006 		if (rc >= 0) {
4007 			/* non-negative return is success */
4008 			rc = 0;
4009 		} else {
4010 			/* failed to write wqe, remove from active wqe list */
4011 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4012 			io->xbusy = FALSE;
4013 			ocs_hw_remove_io_timed_wqe(hw, io);
4014 		}
4015 	}
4016 
4017 	return rc;
4018 }
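
/**
 * @par Example
 * A minimal sketch of issuing an ELS request (illustrative; the helper and
 * the timeout value are hypothetical). The caller is assumed to have
 * allocated the HW IO and the send/receive DMA buffers, and to have a
 * valid remote node.
 *
 * @code
 * static ocs_hw_rtn_e
 * example_send_els(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *req,
 * 		    uint32_t req_len, ocs_dma_t *rsp, ocs_remote_node_t *rnode)
 * {
 * 	ocs_hw_io_param_t iparam;
 *
 * 	ocs_memset(&iparam, 0, sizeof(iparam));
 * 	iparam.els.timeout = 30;	// seconds; illustrative value
 *
 * 	// OCS_HW_ELS_REQ requires both send and receive buffers
 * 	return ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, io, req, req_len, rsp,
 * 				rnode, &iparam, NULL, NULL);
 * }
 * @endcode
 */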
4019 
4020 /**
4021  * @ingroup io
4022  * @brief Send a read, write, or response IO.
4023  *
4024  * @par Description
4025  * This routine supports sending a higher-level IO (for example, FCP) between two endpoints
4026  * as a target or initiator. Examples include:
4027  *  - Sending read data and good response (target).
4028  *  - Sending a response (target with no data or after receiving write data).
4029  *  .
4030  * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
4031  * calling this routine, the data should be loaded using ocs_hw_io_add_sge().
4032  *
4033  * @param hw Hardware context.
4034  * @param type Type of IO (target read, target response, and so on).
4035  * @param io Previously-allocated HW IO object.
4036  * @param len Length, in bytes, of data to send.
4037  * @param iparam IO parameters.
4038  * @param rnode Destination of data (that is, a remote node).
4039  * @param cb Function call upon completion of sending data (may be NULL).
4040  * @param arg Argument to pass to IO completion function.
4041  *
4042  * @return Returns 0 on success, or a non-zero value on failure.
4043  *
4044  * @todo
4045  *  - Support specifying relative offset.
4046  *  - Use a WQ other than 0.
4047  */
4048 ocs_hw_rtn_e
4049 ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
4050 		uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
4051 		void *cb, void *arg)
4052 {
4053 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
4054 	uint32_t	rpi;
4055 	uint8_t		send_wqe = TRUE;
4056 	uint8_t		timeout = 0;
4057 
4058 	CPUTRACE("");
4059 
4060 	if (!hw || !io || !rnode || !iparam) {
4061 		ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
4062 			    hw, io, iparam, rnode);
4063 		return OCS_HW_RTN_ERROR;
4064 	}
4065 
4066 	if (hw->state != OCS_HW_STATE_ACTIVE) {
4067 		ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
4068 		return OCS_HW_RTN_ERROR;
4069 	}
4070 
4071 	rpi = rnode->indicator;
4072 
4073 	if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
4074 		rpi = hw->workaround.unregistered_rid;
4075 		ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
4076 	}
4077 
4078 	/*
4079 	 * Save state needed during later stages
4080 	 */
4081 	io->rnode = rnode;
4082 	io->type  = type;
4083 	io->done  = cb;
4084 	io->arg   = arg;
4085 
4086 	/*
4087 	 * Format the work queue entry used to send the IO
4088 	 */
4089 	switch (type) {
4090 	case OCS_HW_IO_INITIATOR_READ:
4091 		timeout = ocs_hw_set_io_wqe_timeout(io, iparam->fcp_ini.timeout);
4092 
4093 		/*
4094 		 * If the use_dif_quarantine workaround is in effect and DIF separate
4095 		 * mode is used, then mark the initiator read IO for quarantine
4096 		 */
4097 		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4098 		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4099 			io->quarantine = TRUE;
4100 		}
4101 
4102 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4103 				iparam->fcp_ini.rsp);
4104 
4105 		if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
4106 					io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
4107 					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4108 					timeout)) {
4109 			ocs_log_err(hw->os, "IREAD WQE error\n");
4110 			rc = OCS_HW_RTN_ERROR;
4111 		}
4112 		break;
4113 	case OCS_HW_IO_INITIATOR_WRITE:
4114 		timeout = ocs_hw_set_io_wqe_timeout(io, iparam->fcp_ini.timeout);
4115 
4116 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4117 				iparam->fcp_ini.rsp);
4118 
4119 		if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4120 					 len, iparam->fcp_ini.first_burst,
4121 					 io->indicator, io->reqtag,
4122 					SLI4_CQ_DEFAULT, rpi, rnode,
4123 					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4124 					timeout)) {
4125 			ocs_log_err(hw->os, "IWRITE WQE error\n");
4126 			rc = OCS_HW_RTN_ERROR;
4127 		}
4128 		break;
4129 	case OCS_HW_IO_INITIATOR_NODATA:
4130 		timeout = ocs_hw_set_io_wqe_timeout(io, iparam->fcp_ini.timeout);
4131 
4132 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4133 				iparam->fcp_ini.rsp);
4134 
4135 		if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
4136 					io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
4137 					rpi, rnode, timeout)) {
4138 			ocs_log_err(hw->os, "ICMND WQE error\n");
4139 			rc = OCS_HW_RTN_ERROR;
4140 		}
4141 		break;
4142 	case OCS_HW_IO_TARGET_WRITE: {
4143 		uint16_t flags = iparam->fcp_tgt.flags;
4144 		fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;
4145 
4146 		/*
4147 		 * Fill in the XFER_RDY for IF_TYPE 0 devices
4148 		 */
4149 		*((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
4150 		*((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
4151 		*((uint32_t *)xfer->rsvd) = 0;
4152 
4153 		if (io->xbusy) {
4154 			flags |= SLI4_IO_CONTINUATION;
4155 		} else {
4156 			flags &= ~SLI4_IO_CONTINUATION;
4157 		}
4158 
4159 		io->wqe_timeout = iparam->fcp_tgt.timeout;
4160 
4161 		/*
4162 		 * If the use_dif_quarantine workaround is in effect and this is a DIF-enabled IO,
4163 		 * then mark the target write IO for quarantine
4164 		 */
4165 		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4166 		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4167 			io->quarantine = TRUE;
4168 		}
4169 
4170 		/*
4171 		 * BZ 161832 Workaround:
4172 		 * Check for the use_dif_sec_xri workaround. Note that even though the first data
4173 		 * phase doesn't really need a secondary XRI, we allocate one anyway, as this
4174 		 * avoids the potential for deadlock where all XRIs are allocated as primaries
4175 		 * to IOs that are on hw->sec_hio_wait_list. If this secondary XRI is not for
4176 		 * the first data phase, it is marked for quarantine.
4177 		 */
4178 		if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4179 			/*
4180 			 * If we have allocated a chained SGL for skyhawk, then
4181 			 * we can re-use this for the sec_hio.
4182 			 */
4183 			if (io->ovfl_io != NULL) {
4184 				io->sec_hio = io->ovfl_io;
4185 				io->sec_hio->quarantine = TRUE;
4186 			} else {
4187 				io->sec_hio = ocs_hw_io_alloc(hw);
4188 			}
4189 			if (io->sec_hio == NULL) {
4190 				/* Failed to allocate, so save full request context and put
4191 				 * this IO on the wait list
4192 				 */
4193 				io->sec_iparam = *iparam;
4194 				io->sec_len = len;
4195 				ocs_lock(&hw->io_lock);
4196 					ocs_list_remove(&hw->io_inuse,  io);
4197 					ocs_list_add_tail(&hw->sec_hio_wait_list, io);
4198 					io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO;
4199 					hw->sec_hio_wait_count++;
4200 				ocs_unlock(&hw->io_lock);
4201 				send_wqe = FALSE;
4202 				/* Done */
4203 				break;
4204 			}
4205 			/* We quarantine the secondary IO if this is the second or subsequent data phase */
4206 			if (io->xbusy) {
4207 				io->sec_hio->quarantine = TRUE;
4208 			}
4209 		}
4210 
4211 		/*
4212 		 * If not the first data phase, and io->sec_hio has been allocated, then issue
4213 		 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE
4214 		 */
4215 		if (io->xbusy && (io->sec_hio != NULL)) {
4216 			if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4217 						   iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator,
4218 						   io->reqtag, SLI4_CQ_DEFAULT,
4219 						   iparam->fcp_tgt.ox_id, rpi, rnode,
4220 						   flags,
4221 						   iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4222 						   iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4223 				ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4224 				rc = OCS_HW_RTN_ERROR;
4225 			}
4226 		} else {
4227 			if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4228 						   iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4229 						   SLI4_CQ_DEFAULT,
4230 						   iparam->fcp_tgt.ox_id, rpi, rnode,
4231 						   flags,
4232 						   iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4233 						   iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4234 				ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4235 				rc = OCS_HW_RTN_ERROR;
4236 			}
4237 		}
4238 		break;
4239 	}
4240 	case OCS_HW_IO_TARGET_READ: {
4241 		uint16_t flags = iparam->fcp_tgt.flags;
4242 
4243 		if (io->xbusy) {
4244 			flags |= SLI4_IO_CONTINUATION;
4245 		} else {
4246 			flags &= ~SLI4_IO_CONTINUATION;
4247 		}
4248 
4249 		io->wqe_timeout = iparam->fcp_tgt.timeout;
4250 		if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4251 					iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4252 					SLI4_CQ_DEFAULT,
4253 					iparam->fcp_tgt.ox_id, rpi, rnode,
4254 					flags,
4255 					iparam->fcp_tgt.dif_oper,
4256 					iparam->fcp_tgt.blk_size,
4257 					iparam->fcp_tgt.cs_ctl,
4258 					iparam->fcp_tgt.app_id)) {
4259 			ocs_log_err(hw->os, "TSEND WQE error\n");
4260 			rc = OCS_HW_RTN_ERROR;
4261 		} else if (hw->workaround.retain_tsend_io_length) {
4262 			io->length = len;
4263 		}
4264 		break;
4265 	}
4266 	case OCS_HW_IO_TARGET_RSP: {
4267 		uint16_t flags = iparam->fcp_tgt.flags;
4268 
4269 		if (io->xbusy) {
4270 			flags |= SLI4_IO_CONTINUATION;
4271 		} else {
4272 			flags &= ~SLI4_IO_CONTINUATION;
4273 		}
4274 
4275 		/* post a new auto xfer ready buffer */
4276 		if (hw->auto_xfer_rdy_enabled && io->is_port_owned) {
4277 			if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) {
4278 				flags |= SLI4_IO_DNRX;
4279 			}
4280 		}
4281 
4282 		io->wqe_timeout = iparam->fcp_tgt.timeout;
4283 		if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size,
4284 					&io->def_sgl,
4285 					len,
4286 					io->indicator, io->reqtag,
4287 					SLI4_CQ_DEFAULT,
4288 					iparam->fcp_tgt.ox_id,
4289 					rpi, rnode,
4290 					flags, iparam->fcp_tgt.cs_ctl,
4291 					io->is_port_owned,
4292 					iparam->fcp_tgt.app_id)) {
4293 			ocs_log_err(hw->os, "TRSP WQE error\n");
4294 			rc = OCS_HW_RTN_ERROR;
4295 		}
4296 
4297 		break;
4298 	}
4299 	default:
4300 		ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4301 		rc = OCS_HW_RTN_ERROR;
4302 	}
4303 
4304 	if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) {
4305 		if (io->wq == NULL) {
4306 			io->wq = ocs_hw_queue_next_wq(hw, io);
4307 			ocs_hw_assert(io->wq != NULL);
4308 		}
4309 
4310 		io->xbusy = TRUE;
4311 
4312 		/*
4313 		 * Add IO to active io wqe list before submitting, in case the
4314 		 * wcqe processing preempts this thread.
4315 		 */
4316 		OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
4317 		OCS_STAT(io->wq->use_count++);
4318 		ocs_hw_add_io_timed_wqe(hw, io);
4319 		rc = hw_wq_write(io->wq, &io->wqe);
4320 		if (rc >= 0) {
4321 			/* non-negative return is success */
4322 			rc = 0;
4323 		} else {
4324 			/* failed to write wqe, remove from active wqe list */
4325 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4326 			io->xbusy = FALSE;
4327 			ocs_hw_remove_io_timed_wqe(hw, io);
4328 		}
4329 	}
4330 
4331 	return rc;
4332 }
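
/**
 * @par Example
 * A minimal sketch of sending an FCP response to complete a target
 * exchange (illustrative; the helper is hypothetical). The response
 * payload is assumed to already be loaded into the IO's SGL, and ox_id
 * is assumed to have been captured from the received command.
 *
 * @code
 * static ocs_hw_rtn_e
 * example_send_trsp(ocs_hw_t *hw, ocs_hw_io_t *io, uint32_t rsp_len,
 * 		     uint16_t ox_id, ocs_remote_node_t *rnode)
 * {
 * 	ocs_hw_io_param_t iparam;
 *
 * 	ocs_memset(&iparam, 0, sizeof(iparam));
 * 	iparam.fcp_tgt.ox_id = ox_id;
 * 	iparam.fcp_tgt.timeout = 0;	// no WQE timeout; illustrative
 *
 * 	return ocs_hw_io_send(hw, OCS_HW_IO_TARGET_RSP, io, rsp_len,
 * 			      &iparam, rnode, NULL, NULL);
 * }
 * @endcode
 */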
4333 
4334 /**
4335  * @brief Send a raw frame
4336  *
4337  * @par Description
4338  * Using the SEND_FRAME_WQE, a frame consisting of header and payload is sent.
4339  *
4340  * @param hw Pointer to HW object.
4341  * @param hdr Pointer to a little endian formatted FC header.
4342  * @param sof Value to use as the frame SOF.
4343  * @param eof Value to use as the frame EOF.
4344  * @param payload Pointer to payload DMA buffer.
4345  * @param ctx Pointer to caller provided send frame context.
4346  * @param callback Callback function.
4347  * @param arg Callback function argument.
4348  *
4349  * @return Returns 0 on success, or a negative error code value on failure.
4350  */
4351 ocs_hw_rtn_e
4352 ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
4353 		   ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
4354 {
4355 	int32_t rc;
4356 	ocs_hw_wqe_t *wqe;
4357 	uint32_t xri;
4358 	hw_wq_t *wq;
4359 
4360 	wqe = &ctx->wqe;
4361 
4362 	/* populate the callback object */
4363 	ctx->hw = hw;
4364 
4365 	/* Fetch and populate request tag */
4366 	ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg);
4367 	if (ctx->wqcb == NULL) {
4368 		ocs_log_err(hw->os, "can't allocate request tag\n");
4369 		return OCS_HW_RTN_NO_RESOURCES;
4370 	}
4371 
4372 	/* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */
4373 	wq = ocs_varray_iter_next(hw->wq_class_array[1]);
4374 	if (wq == NULL) {
4375 		wq = hw->hw_wq[0];
4376 	}
4377 
4378 	/* Set XRI and RX_ID in the header based on which WQ, and which send_frame_io we are using */
4379 	xri = wq->send_frame_io->indicator;
4380 
4381 	/* Build the send frame WQE */
4382 	rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload,
4383 				payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index);
4384 	if (rc) {
4385 		ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
4386 		return OCS_HW_RTN_ERROR;
4387 	}
4388 
4389 	/* Write to WQ */
4390 	rc = hw_wq_write(wq, wqe);
4391 	if (rc) {
4392 		ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc);
4393 		return OCS_HW_RTN_ERROR;
4394 	}
4395 
4396 	OCS_STAT(wq->use_count++);
4397 
4398 	return OCS_HW_RTN_SUCCESS;
4399 }
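
/**
 * @par Example
 * A minimal sketch of sending a raw frame (illustrative; both helpers are
 * hypothetical). The caller is assumed to keep ctx, the header, and the
 * payload valid until the completion callback runs.
 *
 * @code
 * static void
 * example_frame_done(void *arg, uint8_t *cqe, int32_t status)
 * {
 * 	if (status) {
 * 		// frame was not sent
 * 	}
 * }
 *
 * static ocs_hw_rtn_e
 * example_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof,
 * 		      uint8_t eof, ocs_dma_t *payload,
 * 		      ocs_hw_send_frame_context_t *ctx)
 * {
 * 	return ocs_hw_send_frame(hw, hdr, sof, eof, payload, ctx,
 * 				 example_frame_done, NULL);
 * }
 * @endcode
 */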
4400 
4401 ocs_hw_rtn_e
4402 ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count)
4403 {
4404 	if (sli_get_sgl_preregister(&hw->sli)) {
4405 		ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n");
4406 		return OCS_HW_RTN_ERROR;
4407 	}
4408 	io->ovfl_sgl = sgl;
4409 	io->ovfl_sgl_count = sgl_count;
4410 	io->ovfl_io = NULL;
4411 
4412 	return OCS_HW_RTN_SUCCESS;
4413 }
4414 
4415 static void
4416 ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4417 {
4418 	/* Restore the default */
4419 	io->sgl = &io->def_sgl;
4420 	io->sgl_count = io->def_sgl_count;
4421 
4422 	/*
4423 	 * For skyhawk, we need to free the IO allocated for the chained
4424 	 * SGL. For all devices, clear the overflow fields on the IO.
4425 	 *
4426 	 * Note: For DIF IOs, we may be using the same XRI for the sec_hio and
4427 	 *       the chained SGLs. If so, then we clear the ovfl_io field
4428 	 *       when the sec_hio is freed.
4429 	 */
4430 	if (io->ovfl_io != NULL) {
4431 		ocs_hw_io_free(hw, io->ovfl_io);
4432 		io->ovfl_io = NULL;
4433 	}
4434 
4435 	/* Clear the overflow SGL */
4436 	io->ovfl_sgl = NULL;
4437 	io->ovfl_sgl_count = 0;
4438 	io->ovfl_lsp = NULL;
4439 }
4440 
4441 /**
4442  * @ingroup io
4443  * @brief Initialize the scatter gather list entries of an IO.
4444  *
4445  * @param hw Hardware context.
4446  * @param io Previously-allocated HW IO object.
4447  * @param type Type of IO (target read, target response, and so on).
4448  *
4449  * @return Returns 0 on success, or a non-zero value on failure.
4450  */
4451 ocs_hw_rtn_e
4452 ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type)
4453 {
4454 	sli4_sge_t	*data = NULL;
4455 	uint32_t	i = 0;
4456 	uint32_t	skips = 0;
4457 
4458 	if (!hw || !io) {
4459 		ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n",
4460 			    hw, io);
4461 		return OCS_HW_RTN_ERROR;
4462 	}
4463 
4464 	/* Clear / reset the scatter-gather list */
4465 	io->sgl = &io->def_sgl;
4466 	io->sgl_count = io->def_sgl_count;
4467 	io->first_data_sge = 0;
4468 
4469 	ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
4470 	io->n_sge = 0;
4471 	io->sge_offset = 0;
4472 
4473 	io->type = type;
4474 
4475 	data = io->sgl->virt;
4476 
4477 	/*
4478 	 * Some IO types have underlying hardware requirements on the order
4479 	 * of SGEs. Process all special entries here.
4480 	 */
4481 	switch (type) {
4482 	case OCS_HW_IO_INITIATOR_READ:
4483 	case OCS_HW_IO_INITIATOR_WRITE:
4484 	case OCS_HW_IO_INITIATOR_NODATA:
4485 		/*
4486 		 * No skips; two special entries for initiator I/Os.
4487 		 * The addresses and lengths are written later.
4488 		 */
4489 		/* setup command pointer */
4490 		data->sge_type = SLI4_SGE_TYPE_DATA;
4491 		data++;
4492 
4493 		/* setup response pointer */
4494 		data->sge_type = SLI4_SGE_TYPE_DATA;
4495 
4496 		if (OCS_HW_IO_INITIATOR_NODATA == type) {
4497 			data->last = TRUE;
4498 		}
4499 		data++;
4500 
4501 		io->n_sge = 2;
4502 		break;
4503 	case OCS_HW_IO_TARGET_WRITE:
4504 #define OCS_TARGET_WRITE_SKIPS	2
4505 		skips = OCS_TARGET_WRITE_SKIPS;
4506 
4507 		/* populate host resident XFER_RDY buffer */
4508 		data->sge_type = SLI4_SGE_TYPE_DATA;
4509 		data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys);
4510 		data->buffer_address_low  = ocs_addr32_lo(io->xfer_rdy.phys);
4511 		data->buffer_length = io->xfer_rdy.size;
4512 		data++;
4513 
4514 		skips--;
4515 
4516 		io->n_sge = 1;
4517 		break;
4518 	case OCS_HW_IO_TARGET_READ:
4519 		/*
4520 		 * For FCP_TSEND64, the first 2 entries are SKIP SGE's
4521 		 */
4522 #define OCS_TARGET_READ_SKIPS	2
4523 		skips = OCS_TARGET_READ_SKIPS;
4524 		break;
4525 	case OCS_HW_IO_TARGET_RSP:
4526 		/*
4527 		 * No skips, etc. for FCP_TRSP64
4528 		 */
4529 		break;
4530 	default:
4531 		ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4532 		return OCS_HW_RTN_ERROR;
4533 	}
4534 
4535 	/*
4536 	 * Write skip entries
4537 	 */
4538 	for (i = 0; i < skips; i++) {
4539 		data->sge_type = SLI4_SGE_TYPE_SKIP;
4540 		data++;
4541 	}
4542 
4543 	io->n_sge += skips;
4544 
4545 	/*
4546 	 * Set last
4547 	 */
4548 	data->last = TRUE;
4549 
4550 	return OCS_HW_RTN_SUCCESS;
4551 }
4552 
4553 /**
4554  * @ingroup io
4555  * @brief Add a T10 PI seed scatter gather list entry.
4556  *
4557  * @param hw Hardware context.
4558  * @param io Previously-allocated HW IO object.
4559  * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
4560  *
4561  * @return Returns 0 on success, or a non-zero value on failure.
4562  */
4563 ocs_hw_rtn_e
4564 ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info)
4565 {
4566 	sli4_sge_t	*data = NULL;
4567 	sli4_diseed_sge_t *dif_seed;
4568 
4569 	/* If no dif_info, or dif_oper is disabled, then just return success */
4570 	if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) {
4571 		return OCS_HW_RTN_SUCCESS;
4572 	}
4573 
4574 	if (!hw || !io) {
4575 		ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n",
4576 			    hw, io, dif_info);
4577 		return OCS_HW_RTN_ERROR;
4578 	}
4579 
4580 	data = io->sgl->virt;
4581 	data += io->n_sge;
4582 
4583 	/* If we are doing T10 DIF add the DIF Seed SGE */
4584 	ocs_memset(data, 0, sizeof(sli4_diseed_sge_t));
4585 	dif_seed = (sli4_diseed_sge_t *)data;
4586 	dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp;
4587 	dif_seed->ref_tag_repl = dif_info->ref_tag_repl;
4588 	dif_seed->app_tag_repl = dif_info->app_tag_repl;
4589 	dif_seed->repl_app_tag = dif_info->repl_app_tag;
4590 	if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) {
4591 		dif_seed->atrt = dif_info->disable_app_ref_ffff;
4592 		dif_seed->at = dif_info->disable_app_ffff;
4593 	}
4594 	dif_seed->sge_type = SLI4_SGE_TYPE_DISEED;
4595 	/* Workaround for SKH (BZ157233) */
4596 	if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4597 		(SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) {
4598 		dif_seed->sge_type = SLI4_SGE_TYPE_SKIP;
4599 	}
4600 
4601 	dif_seed->app_tag_cmp = dif_info->app_tag_cmp;
4602 	dif_seed->dif_blk_size = dif_info->blk_size;
4603 	dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag;
4604 	dif_seed->check_app_tag = dif_info->check_app_tag;
4605 	dif_seed->check_ref_tag = dif_info->check_ref_tag;
4606 	dif_seed->check_crc = dif_info->check_guard;
4607 	dif_seed->new_ref_tag = dif_info->repl_ref_tag;
4608 
4609 	switch(dif_info->dif_oper) {
4610 	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
4611 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4612 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4613 		break;
4614 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
4615 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4616 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4617 		break;
4618 	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM:
4619 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4620 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4621 		break;
4622 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF:
4623 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4624 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4625 		break;
4626 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
4627 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4628 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4629 		break;
4630 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM:
4631 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4632 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4633 		break;
4634 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
4635 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4636 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4637 		break;
4638 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
4639 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4640 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4641 		break;
4642 	case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW:
4643 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4644 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4645 		break;
4646 	default:
4647 		ocs_log_err(hw->os, "unsupported DIF operation %#x\n",
4648 			    dif_info->dif_oper);
4649 		return OCS_HW_RTN_ERROR;
4650 	}
4651 
4652 	/*
4653 	 * Set last, clear previous last
4654 	 */
4655 	data->last = TRUE;
4656 	if (io->n_sge) {
4657 		data[-1].last = FALSE;
4658 	}
4659 
4660 	io->n_sge++;
4661 
4662 	return OCS_HW_RTN_SUCCESS;
4663 }
4664 
4665 static ocs_hw_rtn_e
4666 ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4667 {
4668 	sli4_lsp_sge_t *lsp;
4669 
4670 	/* fail if we're already pointing to the overflow SGL */
4671 	if (io->sgl == io->ovfl_sgl) {
4672 		return OCS_HW_RTN_ERROR;
4673 	}
4674 
4675 	/*
4676 	 * For skyhawk, we can use another SGL to extend the SGL list. The
4677 	 * Chained entry must not be in the first 4 entries.
4678 	 *
4679 	 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
4680 	 */
4681 	if (sli_get_sgl_preregister(&hw->sli) &&
4682 	    io->def_sgl_count > 4 &&
4683 	    io->ovfl_io == NULL &&
4684 	    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4685 		(SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
4686 		io->ovfl_io = ocs_hw_io_alloc(hw);
4687 		if (io->ovfl_io != NULL) {
4688 			/*
4689 			 * Note: We can't call ocs_hw_io_register_sgl() here
4690 			 * because it checks that SGLs are not pre-registered
4691 			 * and for skyhawk, preregistered SGLs are required.
4692 			 */
4693 			io->ovfl_sgl = &io->ovfl_io->def_sgl;
4694 			io->ovfl_sgl_count = io->ovfl_io->def_sgl_count;
4695 		}
4696 	}
4697 
4698 	/* fail if we don't have an overflow SGL registered */
4699 	if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) {
4700 		return OCS_HW_RTN_ERROR;
4701 	}
4702 
4703 	/*
4704 	 * Overflow: we need to put a link SGE in the last location of the current SGL, after
4705 	 * copying the last SGE to the overflow SGL
4706 	 */
4707 
4708 	((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1];
4709 
4710 	lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1];
4711 	ocs_memset(lsp, 0, sizeof(*lsp));
4712 
4713 	if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4714 	    (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
4715 		sli_skh_chain_sge_build(&hw->sli,
4716 					(sli4_sge_t*)lsp,
4717 					io->ovfl_io->indicator,
4718 					0, /* frag_num */
4719 					0); /* offset */
4720 	} else {
4721 		lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys);
4722 		lsp->buffer_address_low  = ocs_addr32_lo(io->ovfl_sgl->phys);
4723 		lsp->sge_type = SLI4_SGE_TYPE_LSP;
4724 		lsp->last = 0;
4725 		io->ovfl_lsp = lsp;
4726 		io->ovfl_lsp->segment_length = sizeof(sli4_sge_t);
4727 	}
4728 
4729 	/* Update the current SGL pointer, and n_sgl */
4730 	io->sgl = io->ovfl_sgl;
4731 	io->sgl_count = io->ovfl_sgl_count;
4732 	io->n_sge = 1;
4733 
4734 	return OCS_HW_RTN_SUCCESS;
4735 }
4736 
4737 /**
4738  * @ingroup io
4739  * @brief Add a scatter gather list entry to an IO.
4740  *
4741  * @param hw Hardware context.
4742  * @param io Previously-allocated HW IO object.
4743  * @param addr Physical address.
4744  * @param length Length of memory pointed to by @c addr.
4745  *
4746  * @return Returns 0 on success, or a non-zero value on failure.
4747  */
4748 ocs_hw_rtn_e
4749 ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
4750 {
4751 	sli4_sge_t	*data = NULL;
4752 
4753 	if (!hw || !io || !addr || !length) {
4754 		ocs_log_err(hw ? hw->os : NULL,
4755 			    "bad parameter hw=%p io=%p addr=%lx length=%u\n",
4756 			    hw, io, addr, length);
4757 		return OCS_HW_RTN_ERROR;
4758 	}
4759 
4760 	if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
4761 		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4762 			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4763 			return OCS_HW_RTN_ERROR;
4764 		}
4765 	}
4766 
4767 	if (length > sli_get_max_sge(&hw->sli)) {
4768 		ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
4769 			    length, sli_get_max_sge(&hw->sli));
4770 		return OCS_HW_RTN_ERROR;
4771 	}
4772 
4773 	data = io->sgl->virt;
4774 	data += io->n_sge;
4775 
4776 	data->sge_type = SLI4_SGE_TYPE_DATA;
4777 	data->buffer_address_high = ocs_addr32_hi(addr);
4778 	data->buffer_address_low  = ocs_addr32_lo(addr);
4779 	data->buffer_length = length;
4780 	data->data_offset = io->sge_offset;
4781 	/*
4782 	 * Always assume this is the last entry and mark as such.
4783 	 * If this is not the first entry unset the "last SGE"
4784 	 * indication for the previous entry
4785 	 */
4786 	data->last = TRUE;
4787 	if (io->n_sge) {
4788 		data[-1].last = FALSE;
4789 	}
4790 
4791 	/* Set first_data_bde if not previously set */
4792 	if (io->first_data_sge == 0) {
4793 		io->first_data_sge = io->n_sge;
4794 	}
4795 
4796 	io->sge_offset += length;
4797 	io->n_sge++;
4798 
4799 	/* Update the linked segment length (only executed after overflow has begun) */
4800 	if (io->ovfl_lsp != NULL) {
4801 		io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
4802 	}
4803 
4804 	return OCS_HW_RTN_SUCCESS;
4805 }
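
/**
 * @par Example
 * A minimal sketch of building a two-segment data SGL for a target read
 * (illustrative; the helper is hypothetical and the buffer addresses are
 * assumed to be DMA-mapped physical addresses supplied by the caller).
 *
 * @code
 * static ocs_hw_rtn_e
 * example_build_sgl(ocs_hw_t *hw, ocs_hw_io_t *io,
 * 		     uintptr_t buf1, uint32_t len1,
 * 		     uintptr_t buf2, uint32_t len2)
 * {
 * 	// Writes the type-specific leading entries (two SKIPs for FCP_TSEND64)
 * 	if (ocs_hw_io_init_sges(hw, io, OCS_HW_IO_TARGET_READ)) {
 * 		return OCS_HW_RTN_ERROR;
 * 	}
 *
 * 	// Each call appends a DATA SGE and maintains the "last" flag
 * 	if (ocs_hw_io_add_sge(hw, io, buf1, len1) ||
 * 	    ocs_hw_io_add_sge(hw, io, buf2, len2)) {
 * 		return OCS_HW_RTN_ERROR;
 * 	}
 *
 * 	return OCS_HW_RTN_SUCCESS;
 * }
 * @endcode
 */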
4806 
4807 /**
4808  * @ingroup io
4809  * @brief Add a T10 DIF scatter gather list entry to an IO.
4810  *
4811  * @param hw Hardware context.
4812  * @param io Previously-allocated HW IO object.
4813  * @param addr DIF physical address.
4814  *
4815  * @return Returns 0 on success, or a non-zero value on failure.
4816  */
4817 ocs_hw_rtn_e
4818 ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
4819 {
4820 	sli4_dif_sge_t	*data = NULL;
4821 
4822 	if (!hw || !io || !addr) {
4823 		ocs_log_err(hw ? hw->os : NULL,
4824 			    "bad parameter hw=%p io=%p addr=%lx\n",
4825 			    hw, io, addr);
4826 		return OCS_HW_RTN_ERROR;
4827 	}
4828 
4829 	if ((io->n_sge + 1) > hw->config.n_sgl) {
4830 		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4831 			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4832 			return OCS_HW_RTN_ERROR;
4833 		}
4834 	}
4835 
4836 	data = io->sgl->virt;
4837 	data += io->n_sge;
4838 
4839 	data->sge_type = SLI4_SGE_TYPE_DIF;
4840 	/* Workaround for SKH (BZ157233) */
4841 	if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4842 		(SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) {
4843 		data->sge_type = SLI4_SGE_TYPE_SKIP;
4844 	}
4845 
4846 	data->buffer_address_high = ocs_addr32_hi(addr);
4847 	data->buffer_address_low  = ocs_addr32_lo(addr);
4848 
4849 	/*
4850 	 * Always assume this is the last entry and mark as such.
4851 	 * If this is not the first entry unset the "last SGE"
4852 	 * indication for the previous entry
4853 	 */
4854 	data->last = TRUE;
4855 	if (io->n_sge) {
4856 		data[-1].last = FALSE;
4857 	}
4858 
4859 	io->n_sge++;
4860 
4861 	return OCS_HW_RTN_SUCCESS;
4862 }
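
/**
 * @par Example
 * A minimal sketch of loading a DIF-separated SGL: one DISEED entry
 * followed by a data SGE and its protection SGE (illustrative; the helper
 * is hypothetical, dif_info is assumed to be filled in by the caller, and
 * the addresses are assumed to be DMA-mapped).
 *
 * @code
 * static ocs_hw_rtn_e
 * example_build_dif_sgl(ocs_hw_t *hw, ocs_hw_io_t *io,
 * 			 ocs_hw_dif_info_t *dif_info,
 * 			 uintptr_t data_addr, uint32_t data_len,
 * 			 uintptr_t dif_addr)
 * {
 * 	// The seed SGE carries the tag policy and per-block DIF operation
 * 	if (ocs_hw_io_add_seed_sge(hw, io, dif_info)) {
 * 		return OCS_HW_RTN_ERROR;
 * 	}
 *
 * 	if (ocs_hw_io_add_sge(hw, io, data_addr, data_len)) {
 * 		return OCS_HW_RTN_ERROR;
 * 	}
 *
 * 	// Protection data covering the preceding data SGE
 * 	if (ocs_hw_io_add_dif_sge(hw, io, dif_addr)) {
 * 		return OCS_HW_RTN_ERROR;
 * 	}
 *
 * 	return OCS_HW_RTN_SUCCESS;
 * }
 * @endcode
 */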
4863 
4864 /**
4865  * @ingroup io
4866  * @brief Abort a previously-started IO.
4867  *
4868  * @param hw Hardware context.
4869  * @param io_to_abort The IO to abort.
4870  * @param send_abts Boolean to have the hardware automatically
4871  * generate an ABTS.
4872  * @param cb Function call upon completion of the abort (may be NULL).
4873  * @param arg Argument to pass to abort completion function.
4874  *
4875  * @return Returns 0 on success, or a non-zero value on failure.
4876  */
4877 ocs_hw_rtn_e
4878 ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg)
4879 {
4880 	sli4_abort_type_e atype = SLI_ABORT_MAX;
4881 	uint32_t	id = 0, mask = 0;
4882 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
4883 	hw_wq_callback_t *wqcb;
4884 
4885 	if (!hw || !io_to_abort) {
4886 		ocs_log_err(hw ? hw->os : NULL,
4887 			    "bad parameter hw=%p io=%p\n",
4888 			    hw, io_to_abort);
4889 		return OCS_HW_RTN_ERROR;
4890 	}
4891 
4892 	if (hw->state != OCS_HW_STATE_ACTIVE) {
4893 		ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
4894 			    hw->state);
4895 		return OCS_HW_RTN_ERROR;
4896 	}
4897 
4898 	/* take a reference on IO being aborted */
4899 	if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) {
4900 		/* command no longer active */
4901 		ocs_log_test(hw ? hw->os : NULL,
4902 				"io not active xri=0x%x tag=0x%x\n",
4903 				io_to_abort->indicator, io_to_abort->reqtag);
4904 		return OCS_HW_RTN_IO_NOT_ACTIVE;
4905 	}
4906 
4907 	/* non-port owned XRI checks */
4908 	/* Must have a valid WQ reference */
4909 	if (io_to_abort->wq == NULL) {
4910 		ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
4911 				io_to_abort->indicator);
4912 		ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4913 		return OCS_HW_RTN_IO_NOT_ACTIVE;
4914 	}
4915 
4916 	/* Validation checks complete; now check to see if already being aborted */
4917 	ocs_lock(&hw->io_abort_lock);
4918 		if (io_to_abort->abort_in_progress) {
4919 			ocs_unlock(&hw->io_abort_lock);
4920 			ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4921 			ocs_log_debug(hw ? hw->os : NULL,
4922 				"io already being aborted xri=0x%x tag=0x%x\n",
4923 				io_to_abort->indicator, io_to_abort->reqtag);
4924 			return OCS_HW_RTN_IO_ABORT_IN_PROGRESS;
4925 		}
4926 
4927 		/*
4928 		 * This IO is not already being aborted. Set flag so we won't try to
4929 		 * abort it again. After all, we only have one abort_done callback.
4930 		 */
4931 		io_to_abort->abort_in_progress = 1;
4932 	ocs_unlock(&hw->io_abort_lock);
4933 
4934 	/*
4935 	 * If we got here, the possibilities are:
4936 	 * - host owned xri
4937 	 *	- io_to_abort->wq_index != UINT32_MAX
4938 	 *		- submit ABORT_WQE to same WQ
4939 	 * - port owned xri:
4940 	 *	- rxri: io_to_abort->wq_index == UINT32_MAX
4941 	 *		- submit ABORT_WQE to any WQ
4942 	 *	- non-rxri
4943 	 *		- io_to_abort->index != UINT32_MAX
4944 	 *			- submit ABORT_WQE to same WQ
4945 	 *		- io_to_abort->index == UINT32_MAX
4946 	 *			- submit ABORT_WQE to any WQ
4947 	 */
4948 	io_to_abort->abort_done = cb;
4949 	io_to_abort->abort_arg  = arg;
4950 
4951 	atype = SLI_ABORT_XRI;
4952 	id = io_to_abort->indicator;
4953 
4954 	/* Allocate a request tag for the abort portion of this IO */
4955 	wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort);
4956 	if (wqcb == NULL) {
4957 		ocs_log_err(hw->os, "can't allocate request tag\n");
4958 		return OCS_HW_RTN_NO_RESOURCES;
4959 	}
4960 	io_to_abort->abort_reqtag = wqcb->instance_index;
4961 
4962 	/*
4963 	 * If the wqe is on the pending list, then set this wqe to be
4964 	 * aborted when the IO's wqe is removed from the list.
4965 	 */
4966 	if (io_to_abort->wq != NULL) {
4967 		sli_queue_lock(io_to_abort->wq->queue);
4968 			if (ocs_list_on_list(&io_to_abort->wqe.link)) {
4969 				io_to_abort->wqe.abort_wqe_submit_needed = 1;
4970 				io_to_abort->wqe.send_abts = send_abts;
4971 				io_to_abort->wqe.id = id;
4972 				io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
4973 				sli_queue_unlock(io_to_abort->wq->queue);
4974 				return 0;
4975 		}
4976 		sli_queue_unlock(io_to_abort->wq->queue);
4977 	}
4978 
4979 	if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask,
4980 			  io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
4981 		ocs_log_err(hw->os, "ABORT WQE error\n");
4982 		io_to_abort->abort_reqtag = UINT32_MAX;
4983 		ocs_hw_reqtag_free(hw, wqcb);
4984 		rc = OCS_HW_RTN_ERROR;
4985 	}
4986 
4987 	if (OCS_HW_RTN_SUCCESS == rc) {
4988 		if (io_to_abort->wq == NULL) {
4989 			io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort);
4990 			ocs_hw_assert(io_to_abort->wq != NULL);
4991 		}
4992 		/* ABORT_WQE does not actually utilize an XRI on the Port,
4993 		 * therefore, keep xbusy as-is to track the exchange's state,
4994 		 * not the ABORT_WQE's state
4995 		 */
4996 		rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
4997 		if (rc > 0) {
4998 			/* non-negative return is success */
4999 			rc = 0;
5000 			/* can't abort an abort so skip adding to timed wqe list */
5001 		}
5002 	}
5003 
5004 	if (OCS_HW_RTN_SUCCESS != rc) {
5005 		ocs_lock(&hw->io_abort_lock);
5006 			io_to_abort->abort_in_progress = 0;
5007 		ocs_unlock(&hw->io_abort_lock);
5008 		ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
5009 	}
5010 	return rc;
5011 }
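/*
 * The abort_in_progress handshake above is a claim-once pattern: the first
 * caller to set the flag under io_abort_lock owns the single abort_done
 * callback, and later callers back off with OCS_HW_RTN_IO_ABORT_IN_PROGRESS.
 * A minimal standalone sketch of the same idea (illustrative userland C,
 * pthreads standing in for the ocs_lock wrappers):
 *
 * @code
 * #include <pthread.h>
 * #include <stdbool.h>
 *
 * static int claimed;                     // protected by claim_lock
 * static pthread_mutex_t claim_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 * static bool
 * try_claim(void)
 * {
 *         bool won;
 *
 *         pthread_mutex_lock(&claim_lock);
 *         won = (claimed == 0);
 *         if (won)
 *                 claimed = 1;            // only the first caller proceeds
 *         pthread_mutex_unlock(&claim_lock);
 *         return won;
 * }
 * @endcode
 */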
5012 
5013 /**
5014  * @ingroup io
5015  * @brief Return the OX_ID/RX_ID of the IO.
5016  *
5017  * @param hw Hardware context.
5018  * @param io HW IO object.
5019  *
5020  * @return Returns X_ID on success, or -1 on failure.
5021  */
5022 int32_t
5023 ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io)
5024 {
5025 	if (!hw || !io) {
5026 		ocs_log_err(hw ? hw->os : NULL,
5027 			    "bad parameter hw=%p io=%p\n", hw, io);
5028 		return -1;
5029 	}
5030 
5031 	return io->indicator;
5032 }
5033 
5034 typedef struct ocs_hw_fw_write_cb_arg {
5035 	ocs_hw_fw_cb_t cb;
5036 	void *arg;
5037 } ocs_hw_fw_write_cb_arg_t;
5038 
5039 typedef struct ocs_hw_sfp_cb_arg {
5040 	ocs_hw_sfp_cb_t cb;
5041 	void *arg;
5042 	ocs_dma_t payload;
5043 } ocs_hw_sfp_cb_arg_t;
5044 
5045 typedef struct ocs_hw_temp_cb_arg {
5046 	ocs_hw_temp_cb_t cb;
5047 	void *arg;
5048 } ocs_hw_temp_cb_arg_t;
5049 
5050 typedef struct ocs_hw_link_stat_cb_arg {
5051 	ocs_hw_link_stat_cb_t cb;
5052 	void *arg;
5053 } ocs_hw_link_stat_cb_arg_t;
5054 
5055 typedef struct ocs_hw_host_stat_cb_arg {
5056 	ocs_hw_host_stat_cb_t cb;
5057 	void *arg;
5058 } ocs_hw_host_stat_cb_arg_t;
5059 
5060 typedef struct ocs_hw_dump_get_cb_arg {
5061 	ocs_hw_dump_get_cb_t cb;
5062 	void *arg;
5063 	void *mbox_cmd;
5064 } ocs_hw_dump_get_cb_arg_t;
5065 
5066 typedef struct ocs_hw_dump_clear_cb_arg {
5067 	ocs_hw_dump_clear_cb_t cb;
5068 	void *arg;
5069 	void *mbox_cmd;
5070 } ocs_hw_dump_clear_cb_arg_t;
5071 
5072 /**
5073  * @brief Write a portion of a firmware image to the device.
5074  *
5075  * @par Description
5076  * Calls the correct firmware write function based on the device type.
5077  *
5078  * @param hw Hardware context.
5079  * @param dma DMA structure containing the firmware image chunk.
5080  * @param size Size of the firmware image chunk.
5081  * @param offset Offset, in bytes, from the beginning of the firmware image.
5082  * @param last True if this is the last chunk of the image.
5083  * Causes the image to be committed to flash.
5084  * @param cb Pointer to a callback function that is called when the command completes.
5085  * The callback function prototype is
5086  * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5087  * @param arg Pointer to be passed to the callback function.
5088  *
5089  * @return Returns 0 on success, or a non-zero value on failure.
5090  */
5091 ocs_hw_rtn_e
5092 ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5093 {
5094 	if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
5095 		return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
5096 	} else {
5097 		/* firmware_write is not supported for BE3/Skyhawk */
5098 		return -1;
5099 	}
5100 }
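/*
 * A hedged usage sketch for the write path above: a hypothetical caller
 * pushes one chunk at a time and commits on the last. Only the
 * ocs_hw_firmware_write() signature and the four-argument callback shape
 * (see ocs_hw_cb_fw_write() below) come from this file; fw_ctx_t,
 * fw_chunk_done(), and the semaphore usage are illustrative assumptions.
 *
 * @code
 * typedef struct fw_ctx {
 *         ocs_sem_t sem;                  // assumed OS wrapper
 *         int32_t   status;
 *         uint32_t  written;
 * } fw_ctx_t;
 *
 * static void
 * fw_chunk_done(int32_t status, uint32_t bytes_written,
 *               uint32_t change_status, void *arg)
 * {
 *         fw_ctx_t *ctx = arg;
 *
 *         ctx->status  = status;
 *         ctx->written = bytes_written;
 *         ocs_sem_v(&ctx->sem);           // wake the submitting thread
 * }
 *
 * // per chunk: last = (offset + size == image_len)
 * // rc = ocs_hw_firmware_write(hw, &dma, size, offset, last,
 * //                            fw_chunk_done, &ctx);
 * @endcode
 */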
5101 
5102 /**
5103  * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
5104  *
5105  * @par Description
5106  * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
5107  * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
5108  * the callback function ocs_hw_fw_write_cb() gets called to free the mailbox
5109  * and to signal the caller that the write has completed.
5110  *
5111  * @param hw Hardware context.
5112  * @param dma DMA structure containing the firmware image chunk.
5113  * @param size Size of the firmware image chunk.
5114  * @param offset Offset, in bytes, from the beginning of the firmware image.
5115  * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
5116  * @param cb Pointer to a callback function that is called when the command completes.
5117  * The callback function prototype is
5118  * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5119  * @param arg Pointer to be passed to the callback function.
5120  *
5121  * @return Returns 0 on success, or a non-zero value on failure.
5122  */
5123 ocs_hw_rtn_e
5124 ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5125 {
5126 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5127 	uint8_t *mbxdata;
5128 	ocs_hw_fw_write_cb_arg_t *cb_arg;
5129 	int noc = 0;	/* No Commit bit - set to 1 for testing */
5130 
5131 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
5132 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
5133 		return OCS_HW_RTN_ERROR;
5134 	}
5135 
5136 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5137 	if (mbxdata == NULL) {
5138 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5139 		return OCS_HW_RTN_NO_MEMORY;
5140 	}
5141 
5142 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
5143 	if (cb_arg == NULL) {
5144 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5145 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5146 		return OCS_HW_RTN_NO_MEMORY;
5147 	}
5148 
5149 	cb_arg->cb = cb;
5150 	cb_arg->arg = arg;
5151 
5152 	if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
5153 			size, offset, "/prg/", dma)) {
5154 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
5155 	}
5156 
5157 	if (rc != OCS_HW_RTN_SUCCESS) {
5158 		ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
5159 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5160 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5161 	}
5162 
5163 	return rc;
5164 
5165 }
5166 
5167 /**
5168  * @brief Called when the WRITE OBJECT command completes.
5169  *
5170  * @par Description
5171  * Get the number of bytes actually written out of the response, free the mailbox
5172  * that was malloc'd by ocs_hw_firmware_write_lancer(),
5173  * then call the callback and pass the status and bytes written.
5174  *
5175  * @param hw Hardware context.
5176  * @param status Status field from the mbox completion.
5177  * @param mqe Mailbox response structure.
5178  * @param arg Pointer to the callback argument (ocs_hw_fw_write_cb_arg_t).
5179  * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5180  *
5181  * @return Returns 0.
5182  */
5183 static int32_t
5184 ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5185 {
5186 
5187 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
5188 	sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed);
5189 	ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
5190 	uint32_t bytes_written;
5191 	uint16_t mbox_status;
5192 	uint32_t change_status;
5193 
5194 	bytes_written = wr_obj_rsp->actual_write_length;
5195 	mbox_status = mbox_rsp->hdr.status;
5196 	change_status = wr_obj_rsp->change_status;
5197 
5198 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5199 
5200 	if (cb_arg) {
5201 		if (cb_arg->cb) {
5202 			if ((status == 0) && mbox_status) {
5203 				status = mbox_status;
5204 			}
5205 			cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
5206 		}
5207 
5208 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5209 	}
5210 
5211 	return 0;
5212 
5213 }
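/*
 * The (status == 0) && mbox_status test above is a status fold repeated by
 * every completion handler in this file: a clean mailbox transport status is
 * overridden by a non-zero SLI status from the command header. Written as a
 * standalone helper (illustrative; the driver inlines the check):
 *
 * @code
 * static int32_t
 * fold_status(int32_t transport_status, uint16_t sli_status)
 * {
 *         if (transport_status == 0 && sli_status != 0)
 *                 return (int32_t)sli_status;
 *         return transport_status;
 * }
 * @endcode
 */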
5214 
5215 /**
5216  * @brief Called when the READ_TRANSCEIVER_DATA command completes.
5217  *
5218  * @par Description
5219  * Get the number of bytes read out of the response, free the mailbox that was malloc'd
5220  * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes written.
5221  *
5222  * @param hw Hardware context.
5223  * @param status Status field from the mbox completion.
5224  * @param mqe Mailbox response structure.
5225  * @param arg Pointer to the callback argument (ocs_hw_sfp_cb_arg_t).
5226  * The callback is invoked as
5227  * <tt>cb(hw->os, status, bytes_read, page_data, arg)</tt>.
5228  *
5229  * @return Returns 0.
5230  */
5231 static int32_t
5232 ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5233 {
5234 
5235 	ocs_hw_sfp_cb_arg_t *cb_arg = arg;
5236 	ocs_dma_t *payload = NULL;
5237 	sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL;
5238 	uint32_t bytes_written;
5239 
5240 	if (cb_arg) {
5241 		payload = &(cb_arg->payload);
5242 		if (cb_arg->cb) {
5243 			mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt;
5244 			bytes_written = mbox_rsp->hdr.response_length;
5245 			if ((status == 0) && mbox_rsp->hdr.status) {
5246 				status = mbox_rsp->hdr.status;
5247 			}
5248 			cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg);
5249 		}
5250 
5251 		ocs_dma_free(hw->os, &cb_arg->payload);
5252 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5253 	}
5254 
5255 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5256 	return 0;
5257 }
5258 
5259 /**
5260  * @ingroup io
5261  * @brief Function to retrieve the SFP information.
5262  *
5263  * @param hw Hardware context.
5264  * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
5265  * @param cb Function called upon completion of retrieving the data (may be NULL).
5266  * @param arg Argument to pass to IO completion function.
5267  *
5268  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5269  */
5270 ocs_hw_rtn_e
5271 ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg)
5272 {
5273 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5274 	ocs_hw_sfp_cb_arg_t *cb_arg;
5275 	uint8_t *mbxdata;
5276 
5277 	/* mbxdata holds the header of the command */
5278 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5279 	if (mbxdata == NULL) {
5280 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5281 		return OCS_HW_RTN_NO_MEMORY;
5282 	}
5283 
5284 	/* cb_arg holds the data that will be passed to the callback on completion */
5285 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT);
5286 	if (cb_arg == NULL) {
5287 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5288 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5289 		return OCS_HW_RTN_NO_MEMORY;
5290 	}
5291 
5292 	cb_arg->cb = cb;
5293 	cb_arg->arg = arg;
5294 
5295 	/* payload holds the non-embedded portion */
5296 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t),
5297 			  OCS_MIN_DMA_ALIGNMENT)) {
5298 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
5299 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5300 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5301 		return OCS_HW_RTN_NO_MEMORY;
5302 	}
5303 
5304 	/* Send the HW command */
5305 	if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page,
5306 	    &cb_arg->payload)) {
5307 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg);
5308 	}
5309 
5310 	if (rc != OCS_HW_RTN_SUCCESS) {
5311 		ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n",
5312 				rc);
5313 		ocs_dma_free(hw->os, &cb_arg->payload);
5314 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5315 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5316 	}
5317 
5318 	return rc;
5319 }
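/*
 * Hedged usage sketch: reading the base SFP identification page (0xa0).
 * The page values and the five-argument callback shape mirror the
 * documentation above and the invocation in ocs_hw_cb_sfp(); the handler
 * name and body are illustrative, and the first parameter is written as
 * void * to stand in for the OS handle passed as hw->os.
 *
 * @code
 * static void
 * sfp_done(void *os, int32_t status, uint32_t bytes_read,
 *          uint32_t *page_data, void *arg)
 * {
 *         if (status == 0) {
 *                 // byte 0 of page 0xa0 identifies the transceiver type
 *         }
 * }
 *
 * // rc = ocs_hw_get_sfp(hw, 0xa0, sfp_done, NULL);
 * @endcode
 */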
5320 
5321 /**
5322  * @brief Function to retrieve the temperature information.
5323  *
5324  * @param hw Hardware context.
5325  * @param cb Function called upon completion of retrieving the data (may be NULL).
5326  * @param arg Argument to pass to IO completion function.
5327  *
5328  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5329  */
5330 ocs_hw_rtn_e
5331 ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg)
5332 {
5333 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5334 	ocs_hw_temp_cb_arg_t *cb_arg;
5335 	uint8_t *mbxdata;
5336 
5337 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5338 	if (mbxdata == NULL) {
5339 		ocs_log_err(hw->os, "failed to malloc mbox");
5340 		return OCS_HW_RTN_NO_MEMORY;
5341 	}
5342 
5343 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT);
5344 	if (cb_arg == NULL) {
5345 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5346 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5347 		return OCS_HW_RTN_NO_MEMORY;
5348 	}
5349 
5350 	cb_arg->cb = cb;
5351 	cb_arg->arg = arg;
5352 
5353 	if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5354 				SLI4_WKI_TAG_SAT_TEM)) {
5355 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg);
5356 	}
5357 
5358 	if (rc != OCS_HW_RTN_SUCCESS) {
5359 		ocs_log_test(hw->os, "DUMP_TYPE4 failed\n");
5360 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5361 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5362 	}
5363 
5364 	return rc;
5365 }
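/*
 * Hedged usage sketch: the eight-argument callback shape mirrors the
 * invocation in ocs_hw_cb_temp() below; the handler name and body are
 * illustrative.
 *
 * @code
 * static void
 * temp_done(int32_t status, uint32_t curr_temp,
 *           uint32_t crit_thrshld, uint32_t warn_thrshld,
 *           uint32_t norm_thrshld, uint32_t fan_off, uint32_t fan_on,
 *           void *arg)
 * {
 *         if (status == 0 && curr_temp >= warn_thrshld) {
 *                 // e.g. log a warning when the ASIC runs hot
 *         }
 * }
 *
 * // rc = ocs_hw_get_temperature(hw, temp_done, NULL);
 * @endcode
 */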
5366 
5367 /**
5368  * @brief Called when the DUMP command completes.
5369  *
5370  * @par Description
5371  * Get the temperature data out of the response, free the mailbox that was malloc'd
5372  * by ocs_hw_get_temperature(), then call the callback and pass the status and data.
5373  *
5374  * @param hw Hardware context.
5375  * @param status Status field from the mbox completion.
5376  * @param mqe Mailbox response structure.
5377  * @param arg Pointer to the callback argument (ocs_hw_temp_cb_arg_t).
5378  * The callback function prototype is defined by ocs_hw_temp_cb_t.
5379  *
5380  * @return Returns 0.
5381  */
5382 static int32_t
5383 ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5384 {
5385 
5386 	sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe;
5387 	ocs_hw_temp_cb_arg_t *cb_arg = arg;
5388 	uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */
5389 	uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6 */
5390 	uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */
5391 	uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */
5392 	uint32_t fan_off_thrshld = mbox_rsp->resp_data[4];   /* word 9 */
5393 	uint32_t fan_on_thrshld = mbox_rsp->resp_data[5];    /* word 10 */
5394 
5395 	if (cb_arg) {
5396 		if (cb_arg->cb) {
5397 			if ((status == 0) && mbox_rsp->hdr.status) {
5398 				status = mbox_rsp->hdr.status;
5399 			}
5400 			cb_arg->cb(status,
5401 				   curr_temp,
5402 				   crit_temp_thrshld,
5403 				   warn_temp_thrshld,
5404 				   norm_temp_thrshld,
5405 				   fan_off_thrshld,
5406 				   fan_on_thrshld,
5407 				   cb_arg->arg);
5408 		}
5409 
5410 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5411 	}
5412 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5413 
5414 	return 0;
5415 }
5416 
5417 /**
5418  * @brief Function to retrieve the link statistics.
5419  *
5420  * @param hw Hardware context.
5421  * @param req_ext_counters If TRUE, then the extended counters will be requested.
5422  * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
5423  * @param clear_all_counters If TRUE, the counters will be cleared.
5424  * @param cb Function called upon completion of retrieving the data (may be NULL).
5425  * @param arg Argument to pass to IO completion function.
5426  *
5427  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5428  */
5429 ocs_hw_rtn_e
5430 ocs_hw_get_link_stats(ocs_hw_t *hw,
5431 			uint8_t req_ext_counters,
5432 			uint8_t clear_overflow_flags,
5433 			uint8_t clear_all_counters,
5434 			ocs_hw_link_stat_cb_t cb,
5435 			void *arg)
5436 {
5437 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5438 	ocs_hw_link_stat_cb_arg_t *cb_arg;
5439 	uint8_t *mbxdata;
5440 
5441 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5442 	if (mbxdata == NULL) {
5443 		ocs_log_err(hw->os, "failed to malloc mbox");
5444 		return OCS_HW_RTN_NO_MEMORY;
5445 	}
5446 
5447 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT);
5448 	if (cb_arg == NULL) {
5449 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5450 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5451 		return OCS_HW_RTN_NO_MEMORY;
5452 	}
5453 
5454 	cb_arg->cb = cb;
5455 	cb_arg->arg = arg;
5456 
5457 	if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5458 				    req_ext_counters,
5459 				    clear_overflow_flags,
5460 				    clear_all_counters)) {
5461 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg);
5462 	}
5463 
5464 	if (rc != OCS_HW_RTN_SUCCESS) {
5465 		ocs_log_test(hw->os, "READ_LINK_STATS failed\n");
5466 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5467 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5468 	}
5469 
5470 	return rc;
5471 }
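/*
 * Hedged usage sketch: request extended counters without clearing anything.
 * The (status, num_counters, counts, arg) callback shape mirrors the
 * invocation in ocs_hw_cb_link_stat() below; the handler name and body are
 * illustrative.
 *
 * @code
 * static void
 * link_stats_done(int32_t status, uint32_t num_counters,
 *                 ocs_hw_link_stat_counts_t *counts, void *arg)
 * {
 *         if (status == 0 && num_counters > OCS_HW_LINK_STAT_CRC_COUNT) {
 *                 // counts[OCS_HW_LINK_STAT_CRC_COUNT].counter holds the
 *                 // CRC error total; .overflow flags a saturated counter
 *         }
 * }
 *
 * // rc = ocs_hw_get_link_stats(hw, TRUE, FALSE, FALSE,
 * //                            link_stats_done, NULL);
 * @endcode
 */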
5472 
5473 /**
5474  * @brief Called when the READ_LINK_STAT command completes.
5475  *
5476  * @par Description
5477  * Get the counters out of the response, free the mailbox that was malloc'd
5478  * by ocs_hw_get_link_stats(), then call the callback and pass the status and data.
5479  *
5480  * @param hw Hardware context.
5481  * @param status Status field from the mbox completion.
5482  * @param mqe Mailbox response structure.
5483  * @param arg Pointer to the callback argument (ocs_hw_link_stat_cb_arg_t).
5484  * The callback function prototype is defined by ocs_hw_link_stat_cb_t.
5485  *
5486  * @return Returns 0.
5487  */
5488 static int32_t
5489 ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5490 {
5491 
5492 	sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe;
5493 	ocs_hw_link_stat_cb_arg_t *cb_arg = arg;
5494 	ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX];
5495 	uint32_t num_counters = (mbox_rsp->gec ? 20 : 13);
5496 
5497 	ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) *
5498 		   OCS_HW_LINK_STAT_MAX);
5499 
5500 	counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of;
5501 	counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of;
5502 	counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of;
5503 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of;
5504 	counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of;
5505 	counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of;
5506 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of;
5507 	counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of;
5508 	counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of;
5509 	counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of;
5510 	counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of;
5511 	counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of;
5512 	counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of;
5513 	counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of;
5514 	counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of;
5515 	counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of;
5516 	counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of;
5517 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of;
5518 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of;
5519 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of;
5520 
5521 	counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count;
5522 	counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count;
5523 	counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count;
5524 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count;
5525 	counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count;
5526 	counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count;
5527 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count;
5528 	counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count;
5529 	counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count;
5530 	counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit;
5531 	counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit;
5532 	counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit;
5533 	counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit;
5534 	counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count;
5535 	counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count;
5536 	counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count;
5537 	counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count;
5538 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count;
5539 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
5540 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;
5541 
5542 	if (cb_arg) {
5543 		if (cb_arg->cb) {
5544 			if ((status == 0) && mbox_rsp->hdr.status) {
5545 				status = mbox_rsp->hdr.status;
5546 			}
5547 			cb_arg->cb(status,
5548 				   num_counters,
5549 				   counts,
5550 				   cb_arg->arg);
5551 		}
5552 
5553 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5554 	}
5555 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5556 
5557 	return 0;
5558 }
5559 
5560 /**
5561  * @brief Function to retrieve the link and host statistics.
5562  *
5563  * @param hw Hardware context.
5564  * @param cc Clear counters; if TRUE, all counters are cleared.
5565  * @param cb Function called upon completion of receiving the data.
5566  * @param arg Argument to pass to the completion callback (a pointer to the FC host statistics structure).
5567  *
5568  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5569  */
5570 ocs_hw_rtn_e
5571 ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
5572 {
5573 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5574 	ocs_hw_host_stat_cb_arg_t *cb_arg;
5575 	uint8_t *mbxdata;
5576 
5577 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO);
5578 	if (mbxdata == NULL) {
5579 		ocs_log_err(hw->os, "failed to malloc mbox");
5580 		return OCS_HW_RTN_NO_MEMORY;
5581 	}
5582 
5583 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0);
5584 	if (cb_arg == NULL) {
5585 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5586 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5587 		return OCS_HW_RTN_NO_MEMORY;
5588 	}
5589 
5590 	cb_arg->cb = cb;
5591 	cb_arg->arg = arg;
5592 
5593 	/* Send the HW command to get the host stats */
5594 	if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
5595 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
5596 	}
5597 
5598 	if (rc != OCS_HW_RTN_SUCCESS) {
5599 		ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
5600 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5601 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5602 	}
5603 
5604 	return rc;
5605 }
5606 
5607 /**
5608  * @brief Called when the READ_STATUS command completes.
5609  *
5610  * @par Description
5611  * Get the counters out of the response, free the mailbox that was malloc'd
5612  * by ocs_hw_get_host_stats(), then call the callback and pass
5613  * the status and data.
5614  *
5615  * @param hw Hardware context.
5616  * @param status Status field from the mbox completion.
5617  * @param mqe Mailbox response structure.
5618  * @param arg Pointer to the callback argument (ocs_hw_host_stat_cb_arg_t).
5619  * The callback function prototype is defined by
5620  * ocs_hw_host_stat_cb_t.
5621  *
5622  * @return Returns 0.
5623  */
5624 static int32_t
5625 ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5626 {
5627 
5628 	sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe;
5629 	ocs_hw_host_stat_cb_arg_t *cb_arg = arg;
5630 	ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX];
5631 	uint32_t num_counters = OCS_HW_HOST_STAT_MAX;
5632 
5633 	ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) *
5634 		   OCS_HW_HOST_STAT_MAX);
5635 
5636 	counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count;
5637 	counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count;
5638 	counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count;
5639 	counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count;
5640 	counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count;
5641 	counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count;
5642 	counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator;
5643 	counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder;
5644 	counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count;
5645 	counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count;
5646 	counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count;
5647 	counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count;
5648 	counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count;
5649 	counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count;
5650 
5651 	if (cb_arg) {
5652 		if (cb_arg->cb) {
5653 			if ((status == 0) && mbox_rsp->hdr.status) {
5654 				status = mbox_rsp->hdr.status;
5655 			}
5656 			cb_arg->cb(status,
5657 				   num_counters,
5658 				   counts,
5659 				   cb_arg->arg);
5660 		}
5661 
5662 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5663 	}
5664 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5665 
5666 	return 0;
5667 }
5668 
5669 /**
5670  * @brief HW link configuration enum to the CLP string value mapping.
5671  *
5672  * This structure provides a mapping from the ocs_hw_linkcfg_e
5673  * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5674  * control) to the CLP string that is used
5675  * in the DMTF_CLP_CMD mailbox command.
5676  */
5677 typedef struct ocs_hw_linkcfg_map_s {
5678 	ocs_hw_linkcfg_e linkcfg;
5679 	const char *clp_str;
5680 } ocs_hw_linkcfg_map_t;
5681 
5682 /**
5683  * @brief Mapping from the HW linkcfg enum to the CLP command value
5684  * string.
5685  */
5686 static ocs_hw_linkcfg_map_t linkcfg_map[] = {
5687 	{OCS_HW_LINKCFG_4X10G, "ELX_4x10G"},
5688 	{OCS_HW_LINKCFG_1X40G, "ELX_1x40G"},
5689 	{OCS_HW_LINKCFG_2X16G, "ELX_2x16G"},
5690 	{OCS_HW_LINKCFG_4X8G, "ELX_4x8G"},
5691 	{OCS_HW_LINKCFG_4X1G, "ELX_4x1G"},
5692 	{OCS_HW_LINKCFG_2X10G, "ELX_2x10G"},
5693 	{OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}};
5694 
5695 /**
5696  * @brief HW link configuration enum to Skyhawk link config ID mapping.
5697  *
5698  * This structure provides a mapping from the ocs_hw_linkcfg_e
5699  * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5700  * control) to the link config ID numbers used by Skyhawk
5701  */
5702 typedef struct ocs_hw_skyhawk_linkcfg_map_s {
5703 	ocs_hw_linkcfg_e linkcfg;
5704 	uint32_t	config_id;
5705 } ocs_hw_skyhawk_linkcfg_map_t;
5706 
5707 /**
5708  * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs
5709  */
5710 static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = {
5711 	{OCS_HW_LINKCFG_4X10G, 0x0a},
5712 	{OCS_HW_LINKCFG_1X40G, 0x09},
5713 };
5714 
5715 /**
5716  * @brief Helper function for getting the HW linkcfg enum from the CLP
5717  * string value
5718  *
5719  * @param clp_str CLP string value from OEMELX_LinkConfig.
5720  *
5721  * @return Returns the HW linkcfg enum corresponding to clp_str.
5722  */
5723 static ocs_hw_linkcfg_e
5724 ocs_hw_linkcfg_from_clp(const char *clp_str)
5725 {
5726 	uint32_t i;
5727 	for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5728 		if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) {
5729 			return linkcfg_map[i].linkcfg;
5730 		}
5731 	}
5732 	return OCS_HW_LINKCFG_NA;
5733 }
5734 
5735 /**
5736  * @brief Helper function for getting the CLP string value from the HW
5737  * linkcfg enum.
5738  *
5739  * @param linkcfg HW linkcfg enum.
5740  *
5741  * @return Returns the OEMELX_LinkConfig CLP string value corresponding to
5742  * given linkcfg.
5743  */
5744 static const char *
5745 ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5746 {
5747 	uint32_t i;
5748 	for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5749 		if (linkcfg_map[i].linkcfg == linkcfg) {
5750 			return linkcfg_map[i].clp_str;
5751 		}
5752 	}
5753 	return NULL;
5754 }
5755 
5756 /**
5757  * @brief Helper function for getting a Skyhawk link config ID from the HW
5758  * linkcfg enum.
5759  *
5760  * @param linkcfg HW linkcfg enum.
5761  *
5762  * @return Returns the Skyhawk link config ID corresponding to
5763  * given linkcfg.
5764  */
5765 static uint32_t
5766 ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5767 {
5768 	uint32_t i;
5769 	for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5770 		if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) {
5771 			return skyhawk_linkcfg_map[i].config_id;
5772 		}
5773 	}
5774 	return 0;
5775 }
5776 
5777 /**
5778  * @brief Helper function for getting the HW linkcfg enum from a
5779  * Skyhawk config ID.
5780  *
5781  * @param config_id Skyhawk link config ID.
5782  *
5783  * @return Returns the HW linkcfg enum corresponding to config_id.
5784  */
5785 static ocs_hw_linkcfg_e
5786 ocs_hw_linkcfg_from_config_id(const uint32_t config_id)
5787 {
5788 	uint32_t i;
5789 	for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5790 		if (skyhawk_linkcfg_map[i].config_id == config_id) {
5791 			return skyhawk_linkcfg_map[i].linkcfg;
5792 		}
5793 	}
5794 	return OCS_HW_LINKCFG_NA;
5795 }
5796 
5797 /**
5798  * @brief Link configuration callback argument.
5799  */
5800 typedef struct ocs_hw_linkcfg_cb_arg_s {
5801 	ocs_hw_port_control_cb_t cb;
5802 	void *arg;
5803 	uint32_t opts;
5804 	int32_t status;
5805 	ocs_dma_t dma_cmd;
5806 	ocs_dma_t dma_resp;
5807 	uint32_t result_len;
5808 } ocs_hw_linkcfg_cb_arg_t;
5809 
5810 /**
5811  * @brief Set link configuration.
5812  *
5813  * @param hw Hardware context.
5814  * @param value Link configuration enum to which the link configuration is
5815  * set.
5816  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5817  * @param cb Callback function to invoke following mbx command.
5818  * @param arg Callback argument.
5819  *
5820  * @return Returns OCS_HW_RTN_SUCCESS on success.
5821  */
5822 static ocs_hw_rtn_e
5823 ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5824 {
5825 	if (!sli_link_is_configurable(&hw->sli)) {
5826 		ocs_log_debug(hw->os, "Function not supported\n");
5827 		return OCS_HW_RTN_ERROR;
5828 	}
5829 
5830 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
5831 		return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
5832 	} else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
5833 		   (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
5834 		return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
5835 	} else {
5836 		ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
5837 		return OCS_HW_RTN_ERROR;
5838 	}
5839 }
5840 
5841 /**
5842  * @brief Set link configuration for Lancer
5843  *
5844  * @param hw Hardware context.
5845  * @param value Link configuration enum to which the link configuration is
5846  * set.
5847  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5848  * @param cb Callback function to invoke following mbx command.
5849  * @param arg Callback argument.
5850  *
5851  * @return Returns OCS_HW_RTN_SUCCESS on success.
5852  */
5853 static ocs_hw_rtn_e
5854 ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5855 {
5856 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
5857 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
5858 	const char *value_str = NULL;
5859 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5860 
5861 	/* translate ocs_hw_linkcfg_e to CLP string */
5862 	value_str = ocs_hw_clp_from_linkcfg(value);
5863 
5864 	/* allocate memory for callback argument */
5865 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
5866 	if (cb_arg == NULL) {
5867 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5868 		return OCS_HW_RTN_NO_MEMORY;
5869 	}
5870 
5871 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
5872 	/* allocate DMA for command  */
5873 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
5874 		ocs_log_err(hw->os, "malloc failed\n");
5875 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5876 		return OCS_HW_RTN_NO_MEMORY;
5877 	}
5878 	ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
5879 	ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
5880 
5881 	/* allocate DMA for response */
5882 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
5883 		ocs_log_err(hw->os, "malloc failed\n");
5884 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5885 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5886 		return OCS_HW_RTN_NO_MEMORY;
5887 	}
5888 	cb_arg->cb = cb;
5889 	cb_arg->arg = arg;
5890 	cb_arg->opts = opts;
5891 
5892 	rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
5893 					opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
5894 
5895 	if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
5896 		/* if failed, or polling, free memory here; if success and not
5897 		 * polling, will free in callback function
5898 		 */
5899 		if (rc) {
5900 			ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
5901 					(char *)cb_arg->dma_cmd.virt);
5902 		}
5903 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5904 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
5905 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5906 	}
5907 	return rc;
5908 }
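/*
 * The POLL-versus-NOWAIT cleanup split above is the ownership rule used by
 * all of the CLP/linkcfg paths in this file: when polling, or when submit
 * fails, the submitter frees the DMA buffers and cb_arg (the completion
 * callback either already ran inline or will never run); on asynchronous
 * success the callback frees them instead (see the
 * cb_arg->opts != OCS_CMD_POLL checks elsewhere in this file). A
 * self-contained restatement of the rule (illustrative userland C):
 *
 * @code
 * #include <stdbool.h>
 * #include <stdlib.h>
 *
 * struct ctx { bool polled; };
 *
 * static void
 * done(struct ctx *c)
 * {
 *         if (!c->polled)
 *                 free(c);                // async path: completion frees
 * }
 *
 * static int
 * submit(bool polled, int rc)             // rc stands in for the submit result
 * {
 *         struct ctx *c = calloc(1, sizeof(*c));
 *
 *         if (c == NULL)
 *                 return -1;
 *         c->polled = polled;
 *         if (rc == 0 && polled)
 *                 done(c);                // inline completion while polling
 *         // (on the real async path, done() runs later from the completion)
 *         if (polled || rc != 0)
 *                 free(c);                // submitter-side cleanup
 *         return rc;
 * }
 * @endcode
 */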
5909 
5910 /**
5911  * @brief Callback for ocs_hw_set_linkcfg_skyhawk
5912  *
5913  * @param hw Hardware context.
5914  * @param status Status from the RECONFIG_GET_LINK_INFO command.
5915  * @param mqe Mailbox response structure.
5916  * @param arg Pointer to a callback argument.
5917  *
5918  * @return none
5919  */
5920 static void
5921 ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5922 {
5923 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
5924 
5925 	if (status) {
5926 		ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
5927 	}
5928 
5929 	/* invoke callback */
5930 	if (cb_arg->cb) {
5931 		cb_arg->cb(status, 0, cb_arg->arg);
5932 	}
5933 
5934 	/* if polling, will free memory in calling function */
5935 	if (cb_arg->opts != OCS_CMD_POLL) {
5936 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5937 	}
5938 }
5939 
5940 /**
5941  * @brief Set link configuration for a Skyhawk
5942  *
5943  * @param hw Hardware context.
5944  * @param value Link configuration enum to which the link configuration is
5945  * set.
5946  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5947  * @param cb Callback function to invoke following mbx command.
5948  * @param arg Callback argument.
5949  *
5950  * @return Returns OCS_HW_RTN_SUCCESS on success.
5951  */
5952 static ocs_hw_rtn_e
5953 ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5954 {
5955 	uint8_t *mbxdata;
5956 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
5957 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5958 	uint32_t config_id;
5959 
5960 	config_id = ocs_hw_config_id_from_linkcfg(value);
5961 
5962 	if (config_id == 0) {
5963 		ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
5964 		return OCS_HW_RTN_ERROR;
5965 	}
5966 
5967 	/* mbxdata holds the header of the command */
5968 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5969 	if (mbxdata == NULL) {
5970 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5971 		return OCS_HW_RTN_NO_MEMORY;
5972 	}
5973 
5974 	/* cb_arg holds the data that will be passed to the callback on completion */
5975 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
5976 	if (cb_arg == NULL) {
5977 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5978 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5979 		return OCS_HW_RTN_NO_MEMORY;
5980 	}
5981 
5982 	cb_arg->cb = cb;
5983 	cb_arg->arg = arg;
5984 
5985 	if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) {
5986 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
5987 	}
5988 
5989 	if (rc != OCS_HW_RTN_SUCCESS) {
5990 		ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
5991 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5992 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5993 	} else if (opts == OCS_CMD_POLL) {
5994 		/* if we're polling we have to call the callback here. */
5995 		ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
5996 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5997 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5998 	} else {
5999 		/* We weren't polling, so the callback has already been called */
6000 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6001 	}
6002 
6003 	return rc;
6004 }
6005 
6006 /**
6007  * @brief Get link configuration.
6008  *
6009  * @param hw Hardware context.
6010  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6011  * @param cb Callback function to invoke following mbx command.
6012  * @param arg Callback argument.
6013  *
6014  * @return Returns OCS_HW_RTN_SUCCESS on success.
6015  */
6016 static ocs_hw_rtn_e
6017 ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6018 {
6019 	if (!sli_link_is_configurable(&hw->sli)) {
6020 		ocs_log_debug(hw->os, "Function not supported\n");
6021 		return OCS_HW_RTN_ERROR;
6022 	}
6023 
6024 	if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) ||
6025 	    (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))){
6026 		return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg);
6027 	} else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
6028 		   (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
6029 		return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg);
6030 	} else {
6031 		ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
6032 		return OCS_HW_RTN_ERROR;
6033 	}
6034 }
6035 
6036 /**
6037  * @brief Get link configuration for a Lancer
6038  *
6039  * @param hw Hardware context.
6040  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6041  * @param cb Callback function to invoke following mbx command.
6042  * @param arg Callback argument.
6043  *
6044  * @return Returns OCS_HW_RTN_SUCCESS on success.
6045  */
6046 static ocs_hw_rtn_e
6047 ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6048 {
6049 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6050 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
6051 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6052 
6053 	/* allocate memory for callback argument */
6054 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6055 	if (cb_arg == NULL) {
6056 		ocs_log_err(hw->os, "failed to malloc cb_arg");
6057 		return OCS_HW_RTN_NO_MEMORY;
6058 	}
6059 
6060 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig");
6061 
6062 	/* allocate DMA for command  */
6063 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6064 		ocs_log_err(hw->os, "malloc failed\n");
6065 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6066 		return OCS_HW_RTN_NO_MEMORY;
6067 	}
6068 
6069 	/* copy CLP command to DMA command */
6070 	ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6071 	ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
6072 
6073 	/* allocate DMA for response */
6074 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6075 		ocs_log_err(hw->os, "malloc failed\n");
6076 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6077 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6078 		return OCS_HW_RTN_NO_MEMORY;
6079 	}
6080 	cb_arg->cb = cb;
6081 	cb_arg->arg = arg;
6082 	cb_arg->opts = opts;
6083 
6084 	rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
6085 					opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
6086 
6087 	if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6088 		/* if failed or polling, free memory here; if not polling and success,
6089 		 * will free in callback function
6090 		 */
6091 		if (rc) {
6092 			ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
6093 					(char *)cb_arg->dma_cmd.virt);
6094 		}
6095 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6096 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
6097 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6098 	}
6099 	return rc;
6100 }
6101 
6102 /**
6103  * @brief Get the link configuration callback.
6104  *
6105  * @param hw Hardware context.
6106  * @param status Status from the RECONFIG_GET_LINK_INFO command.
6107  * @param mqe Mailbox response structure.
6108  * @param arg Pointer to a callback argument.
6109  *
6110  * @return none
6111  */
6112 static void
6113 ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6114 {
6115 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6116 	sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
6117 	ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;
6118 
6119 	if (status) {
6120 		ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
6121 	} else {
6122 		/* Call was successful */
6123 		value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
6124 	}
6125 
6126 	/* invoke callback */
6127 	if (cb_arg->cb) {
6128 		cb_arg->cb(status, value, cb_arg->arg);
6129 	}
6130 
6131 	/* if polling, will free memory in calling function */
6132 	if (cb_arg->opts != OCS_CMD_POLL) {
6133 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6134 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6135 	}
6136 }
6137 
6138 /**
6139  * @brief Get link configuration for a Skyhawk.
6140  *
6141  * @param hw Hardware context.
6142  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6143  * @param cb Callback function to invoke following mbx command.
6144  * @param arg Callback argument.
6145  *
6146  * @return Returns OCS_HW_RTN_SUCCESS on success.
6147  */
6148 static ocs_hw_rtn_e
6149 ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6150 {
6151 	uint8_t *mbxdata;
6152 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
6153 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6154 
6155 	/* mbxdata holds the header of the command */
6156 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6157 	if (mbxdata == NULL) {
6158 		ocs_log_err(hw->os, "failed to malloc mbox\n");
6159 		return OCS_HW_RTN_NO_MEMORY;
6160 	}
6161 
6162 	/* cb_arg holds the data that will be passed to the callback on completion */
6163 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
6164 	if (cb_arg == NULL) {
6165 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6166 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6167 		return OCS_HW_RTN_NO_MEMORY;
6168 	}
6169 
6170 	cb_arg->cb = cb;
6171 	cb_arg->arg = arg;
6172 	cb_arg->opts = opts;
6173 
6174 	/* dma_mem holds the non-embedded portion */
6175 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
6176 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
6177 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6178 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6179 		return OCS_HW_RTN_NO_MEMORY;
6180 	}
6181 
6182 	if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
6183 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
6184 	}
6185 
6186 	if (rc != OCS_HW_RTN_SUCCESS) {
6187 		ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
6188 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6189 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6190 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6191 	} else if (opts == OCS_CMD_POLL) {
6192 		/* if we're polling we have to call the callback here. */
6193 		ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
6194 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6195 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6196 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6197 	} else {
6198 		/* We weren't polling, so the callback has already been called */
6199 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6200 	}
6201 
6202 	return rc;
6203 }
6204 
6205 /**
6206  * @brief Sets the DIF seed value.
6207  *
6208  * @param hw Hardware context.
6209  *
6210  * @return Returns OCS_HW_RTN_SUCCESS on success.
6211  */
6212 static ocs_hw_rtn_e
6213 ocs_hw_set_dif_seed(ocs_hw_t *hw)
6214 {
6215 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6216 	uint8_t buf[SLI4_BMBX_SIZE];
6217 	sli4_req_common_set_features_dif_seed_t seed_param;
6218 
6219 	ocs_memset(&seed_param, 0, sizeof(seed_param));
6220 	seed_param.seed = hw->config.dif_seed;
6221 
6222 	/* send set_features command */
6223 	if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6224 					SLI4_SET_FEATURES_DIF_SEED,
6225 					4,
6226 					(uint32_t*)&seed_param)) {
6227 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6228 		if (rc) {
6229 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6230 		} else {
6231 			ocs_log_debug(hw->os, "DIF seed set to 0x%x\n",
6232 					hw->config.dif_seed);
6233 		}
6234 	} else {
6235 		ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6236 		rc = OCS_HW_RTN_ERROR;
6237 	}
6238 	return rc;
6239 }
6240 
6241 /**
6242  * @brief Sets the DIF mode value.
6243  *
6244  * @param hw Hardware context.
6245  *
6246  * @return Returns OCS_HW_RTN_SUCCESS on success.
6247  */
6248 static ocs_hw_rtn_e
6249 ocs_hw_set_dif_mode(ocs_hw_t *hw)
6250 {
6251 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6252 	uint8_t buf[SLI4_BMBX_SIZE];
6253 	sli4_req_common_set_features_t10_pi_mem_model_t mode_param;
6254 
6255 	ocs_memset(&mode_param, 0, sizeof(mode_param));
6256 	mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1);
6257 
6258 	/* send set_features command */
6259 	if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6260 					SLI4_SET_FEATURES_DIF_MEMORY_MODE,
6261 					sizeof(mode_param),
6262 					(uint32_t*)&mode_param)) {
6263 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6264 		if (rc) {
6265 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6266 		} else {
6267 			ocs_log_test(hw->os, "DIF mode set to %s\n",
6268 				(hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate"));
6269 		}
6270 	} else {
6271 		ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6272 		rc = OCS_HW_RTN_ERROR;
6273 	}
6274 	return rc;
6275 }
6276 
6277 static void
6278 ocs_hw_watchdog_timer_cb(void *arg)
6279 {
6280 	ocs_hw_t *hw = (ocs_hw_t *)arg;
6281 
6282 	ocs_hw_config_watchdog_timer(hw);
6283 	return;
6284 }
6285 
6286 static void
6287 ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6288 {
6289 	uint16_t timeout = hw->watchdog_timeout;
6290 
6291 	if (status != 0) {
6292 		ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status);
6293 	} else {
6294 		if (timeout != 0) {
6295 			/* re-arm the timer 500 ms before the timeout expires to keep the heartbeat alive */
6296 			ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500));
6297 		} else {
6298 			ocs_del_timer(&hw->watchdog_timer);
6299 		}
6300 	}
6301 
6302 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6303 	return;
6304 }
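/*
 * The re-arm arithmetic above keeps the heartbeat ahead of the watchdog: the
 * timer fires 500 ms before the timeout would expire, so with
 * hw->watchdog_timeout = 10 seconds the period is 10*1000 - 500 = 9500 ms.
 * As a helper (illustrative only):
 *
 * @code
 * static uint32_t
 * heartbeat_period_ms(uint16_t timeout_seconds)
 * {
 *         // assumes timeout_seconds >= 1, as guaranteed by the
 *         // timeout != 0 check above
 *         return ((uint32_t)timeout_seconds * 1000) - 500;
 * }
 * @endcode
 */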
6305 
6306 /**
6307  * @brief Set configuration parameters for watchdog timer feature.
6308  *
6309  * @param hw Hardware context.
6310  * The timeout, in seconds, is taken from hw->watchdog_timeout.
6311  *
6312  * @return Returns OCS_HW_RTN_SUCCESS on success.
6313  */
6314 static ocs_hw_rtn_e
6315 ocs_hw_config_watchdog_timer(ocs_hw_t *hw)
6316 {
6317 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6318 	uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
6319 
6320 	if (!buf) {
6321 		ocs_log_err(hw->os, "no buffer for command\n");
6322 		return OCS_HW_RTN_NO_MEMORY;
6323 	}
6324 
6325 	sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout);
6326 	rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL);
6327 	if (rc) {
6328 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
6329 		ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc);
6330 	}
6331 	return rc;
6332 }
6333 
6334 /**
6335  * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature.
6336  *
6337  * @param hw Hardware context.
6338  * @param buf Pointer to a mailbox buffer area.
6339  *
6340  * @return Returns OCS_HW_RTN_SUCCESS on success.
6341  */
6342 static ocs_hw_rtn_e
6343 ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf)
6344 {
6345 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6346 	sli4_req_common_set_features_xfer_rdy_t10pi_t param;
6347 
6348 	ocs_memset(&param, 0, sizeof(param));
6349 	param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1);
6350 	param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0);
6351 	param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1);
6352 	param.app_tag = hw->config.auto_xfer_rdy_app_tag_value;
6353 	param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
6354 
6355 	switch (hw->config.auto_xfer_rdy_p_type) {
6356 	case 1:
6357 		param.p_type = 0;
6358 		break;
6359 	case 3:
6360 		param.p_type = 2;
6361 		break;
6362 	default:
6363 		ocs_log_err(hw->os, "unsupported p_type %d\n",
6364 			hw->config.auto_xfer_rdy_p_type);
6365 		return OCS_HW_RTN_ERROR;
6366 	}
6367 
6368 	/* build the set_features command */
6369 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6370 				    SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI,
6371 				    sizeof(param),
6372 				    &param);
6373 
6374 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6375 	if (rc) {
6376 		ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6377 	} else {
6378 		ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n",
6379 				param.rtc, param.atv, param.p_type,
6380 				param.app_tag, param.blk_size);
6381 	}
6382 
6383 	return rc;
6384 }
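/*
 * The switch above encodes the T10 DIF protection-type mapping for this
 * feature: host p_type 1 maps to SLI p_type 0, host p_type 3 maps to SLI
 * p_type 2, and everything else (including type 2) is rejected. The same
 * mapping as a standalone helper (illustrative only):
 *
 * @code
 * static int
 * sli_p_type_from_dif(uint32_t dif_type, uint8_t *sli_p_type)
 * {
 *         switch (dif_type) {
 *         case 1:
 *                 *sli_p_type = 0;
 *                 return 0;
 *         case 3:
 *                 *sli_p_type = 2;
 *                 return 0;
 *         default:
 *                 return -1;              // unsupported protection type
 *         }
 * }
 * @endcode
 */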
6385 
6386 /**
6387  * @brief Enable the SLI port health check.
6388  *
6389  * @param hw Hardware context.
6390  * @param query If 1, query the current enabled/disabled state of the
6391  * health check feature rather than changing it.
6392  * @param enable If 1, enable the SLI port health check; if 0, disable
6393  * it.
6394  *
6395  * @return Returns OCS_HW_RTN_SUCCESS on success.
6396  */
6397 static ocs_hw_rtn_e
6398 ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable)
6399 {
6400 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6401 	uint8_t buf[SLI4_BMBX_SIZE];
6402 	sli4_req_common_set_features_health_check_t param;
6403 
6404 	ocs_memset(&param, 0, sizeof(param));
6405 	param.hck = enable;
6406 	param.qry = query;
6407 
6408 	/* build the set_features command */
6409 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6410 				    SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK,
6411 				    sizeof(param),
6412 				    &param);
6413 
6414 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6415 	if (rc) {
6416 		ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6417 	} else {
6418 		ocs_log_test(hw->os, "SLI Port Health Check is enabled\n");
6419 	}
6420 
6421 	return rc;
6422 }
6423 
6424 /**
6425  * @brief Set FTD transfer hint feature
6426  *
6427  * @param hw Hardware context.
6428  * @param fdt_xfer_hint Size, in bytes, at which read requests are segmented.
6429  *
6430  * @return Returns OCS_HW_RTN_SUCCESS on success.
6431  */
6432 static ocs_hw_rtn_e
6433 ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint)
6434 {
6435 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6436 	uint8_t buf[SLI4_BMBX_SIZE];
6437 	sli4_req_common_set_features_set_fdt_xfer_hint_t param;
6438 
6439 	ocs_memset(&param, 0, sizeof(param));
6440 	param.fdt_xfer_hint = fdt_xfer_hint;
6441 	/* build the set_features command */
6442 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6443 				    SLI4_SET_FEATURES_SET_FTD_XFER_HINT,
6444 				    sizeof(param),
6445 				    &param);
6446 
6447 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6448 	if (rc) {
6449 		ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc);
6450 	} else {
6451 		ocs_log_debug(hw->os, "Set FTD transfer hint to %d\n", param.fdt_xfer_hint);
6452 	}
6453 
6454 	return rc;
6455 }
6456 
6457 /**
6458  * @brief Get the link configuration callback.
6459  *
6460  * @param hw Hardware context.
6461  * @param status Status from the DMTF CLP command.
6462  * @param result_len Length, in bytes, of the DMTF CLP result.
6463  * @param arg Pointer to a callback argument.
6464  *
6465  * @return None.
6466  */
6467 static void
6468 ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
6469 {
6470 	int32_t rval;
6471 	char retdata_str[64];
6472 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6473 	ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;
6474 
6475 	if (status) {
6476 		ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
6477 	} else {
6478 		/* parse CLP response to get return data */
6479 		rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
6480 						  sizeof(retdata_str),
6481 						  cb_arg->dma_resp.virt,
6482 						  result_len);
6483 
6484 		if (rval <= 0) {
6485 			ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
6486 		} else {
6487 			/* translate string into hw enum */
6488 			linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
6489 		}
6490 	}
6491 
6492 	/* invoke callback */
6493 	if (cb_arg->cb) {
6494 		cb_arg->cb(status, linkcfg, cb_arg->arg);
6495 	}
6496 
6497 	/* if polling, will free memory in calling function */
6498 	/* if polling, the calling function will free this memory; otherwise free it here */
6499 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6500 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
6501 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6502 	}
6503 }
6504 
6505 /**
6506  * @brief Set the Lancer dump location
6507  * @par Description
6508  * This function tells a Lancer chip to use a specific DMA
6509  * buffer as a dump location rather than the internal flash.
6510  *
6511  * @param hw Hardware context.
6512  * @param num_buffers The number of DMA buffers to hold the dump (1..n).
6513  * @param dump_buffers DMA buffers to hold the dump.
 * @param fdb FDB flag passed through to the SET_DUMP_LOCATION command;
 * when zero, the command is valid only on PCI function 0.
6514  *
6515  * @return Returns OCS_HW_RTN_SUCCESS on success.
6516  */
6517 ocs_hw_rtn_e
6518 ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
6519 {
6520 	uint8_t bus, dev, func;
6521 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6522 	uint8_t	buf[SLI4_BMBX_SIZE];
6523 
6524 	/*
6525 	 * Make sure the FW is new enough to support this command. If the FW
6526 	 * is too old, the FW will UE.
6527 	 */
6528 	if (hw->workaround.disable_dump_loc) {
6529 		ocs_log_test(hw->os, "FW version is too old for this feature\n");
6530 		return OCS_HW_RTN_ERROR;
6531 	}
6532 
6533 	/* This command is only valid for physical port 0 */
6534 	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
6535 	if (fdb == 0 && func != 0) {
6536 		ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
6537 			     func);
6538 		return OCS_HW_RTN_ERROR;
6539 	}
6540 
6541 	/*
6542 	 * If a single buffer is used, it may be passed as-is to the chip. For multiple buffers,
6543 	 * we must allocate an SGL list and then pass the address of the list to the chip.
6544 	 */
6545 	if (num_buffers > 1) {
6546 		uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
6547 		sli4_sge_t *sge;
6548 		uint32_t i;
6549 
6550 		if (hw->dump_sges.size < sge_size) {
6551 			ocs_dma_free(hw->os, &hw->dump_sges);
6552 			if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
6553 				ocs_log_err(hw->os, "SGE DMA allocation failed\n");
6554 				return OCS_HW_RTN_NO_MEMORY;
6555 			}
6556 		}
6557 		/* build the SGE list */
6558 		ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
6559 		hw->dump_sges.len = sge_size;
6560 		sge = hw->dump_sges.virt;
6561 		for (i = 0; i < num_buffers; i++) {
6562 			sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
6563 			sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
6564 			sge[i].last = (i == num_buffers - 1 ? 1 : 0);
6565 			sge[i].buffer_length = dump_buffers[i].size;
6566 		}
6567 		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6568 						      SLI4_BMBX_SIZE, FALSE, TRUE,
6569 						      &hw->dump_sges, fdb);
6570 	} else {
6571 		dump_buffers->len = dump_buffers->size;
6572 		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6573 						      SLI4_BMBX_SIZE, FALSE, FALSE,
6574 						      dump_buffers, fdb);
6575 	}
6576 
6577 	if (rc) {
6578 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL,
6579 				     NULL, NULL);
6580 		if (rc) {
6581 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n",
6582 				rc);
6583 		}
6584 	} else {
6585 		ocs_log_err(hw->os,
6586 			"sli_cmd_common_set_dump_location failed\n");
6587 		rc = OCS_HW_RTN_ERROR;
6588 	}
6589 
6590 	return rc;
6591 }
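
/**
 * Illustrative usage sketch (not part of the driver): register two DMA
 * buffers as the dump location on PCI function 0. The buffer count and
 * sizes are arbitrary assumptions.
 *
 * @code
 * ocs_dma_t bufs[2];
 *
 * if (ocs_dma_alloc(hw->os, &bufs[0], 64 * 1024, OCS_MIN_DMA_ALIGNMENT) == 0 &&
 *     ocs_dma_alloc(hw->os, &bufs[1], 64 * 1024, OCS_MIN_DMA_ALIGNMENT) == 0) {
 *         if (ocs_hw_set_dump_location(hw, 2, bufs, 0) != OCS_HW_RTN_SUCCESS) {
 *                 ocs_log_err(hw->os, "failed to set dump location\n");
 *         }
 * }
 * @endcode
 */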
6592 
6593 /**
6594  * @brief Set the Ethernet license.
6595  *
6596  * @par Description
6597  * This function sends the appropriate mailbox command (DMTF
6598  * CLP) to set the Ethernet license to the given license value.
6599  * Since it is used during the time of ocs_hw_init(), the mailbox
6600  * command is sent via polling (the BMBX route).
6601  *
6602  * @param hw Hardware context.
6603  * @param license 32-bit license value.
6604  *
6605  * @return Returns OCS_HW_RTN_SUCCESS on success.
6606  */
6607 static ocs_hw_rtn_e
6608 ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
6609 {
6610 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6611 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6612 	ocs_dma_t dma_cmd;
6613 	ocs_dma_t dma_resp;
6614 
6615 	/* only for lancer right now */
6616 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6617 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6618 		return OCS_HW_RTN_ERROR;
6619 	}
6620 
6621 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
6622 	/* allocate DMA for command  */
6623 	if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6624 		ocs_log_err(hw->os, "malloc failed\n");
6625 		return OCS_HW_RTN_NO_MEMORY;
6626 	}
6627 	ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6628 	ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));
6629 
6630 	/* allocate DMA for response */
6631 	if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6632 		ocs_log_err(hw->os, "malloc failed\n");
6633 		ocs_dma_free(hw->os, &dma_cmd);
6634 		return OCS_HW_RTN_NO_MEMORY;
6635 	}
6636 
6637 	/* send DMTF CLP command mbx and poll */
6638 	if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
6639 		ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
6640 		rc = OCS_HW_RTN_ERROR;
6641 	}
6642 
6643 	ocs_dma_free(hw->os, &dma_cmd);
6644 	ocs_dma_free(hw->os, &dma_resp);
6645 	return rc;
6646 }
6647 
6648 /**
6649  * @brief Callback argument structure for the DMTF CLP commands.
6650  */
6651 typedef struct ocs_hw_clp_cb_arg_s {
6652 	ocs_hw_dmtf_clp_cb_t cb;
6653 	ocs_dma_t *dma_resp;
6654 	int32_t status;
6655 	uint32_t opts;
6656 	void *arg;
6657 } ocs_hw_clp_cb_arg_t;
6658 
6659 /**
6660  * @brief Execute the DMTF CLP command.
6661  *
6662  * @param hw Hardware context.
6663  * @param dma_cmd DMA buffer containing the CLP command.
6664  * @param dma_resp DMA buffer that will contain the response (if successful).
6665  * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
6666  * @param cb Callback function.
6667  * @param arg Callback argument.
6668  *
6669  * @return Returns OCS_HW_RTN_SUCCESS on success, or a non-zero failure code.
6671  */
6672 static ocs_hw_rtn_e
6673 ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg)
6674 {
6675 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6676 	ocs_hw_clp_cb_arg_t *cb_arg;
6677 	uint8_t *mbxdata;
6678 
6679 	/* allocate DMA for mailbox */
6680 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6681 	if (mbxdata == NULL) {
6682 		ocs_log_err(hw->os, "failed to malloc mbox\n");
6683 		return OCS_HW_RTN_NO_MEMORY;
6684 	}
6685 
6686 	/* allocate memory for callback argument */
6687 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6688 	if (cb_arg == NULL) {
6689 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6690 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6691 		return OCS_HW_RTN_NO_MEMORY;
6692 	}
6693 
6694 	cb_arg->cb = cb;
6695 	cb_arg->arg = arg;
6696 	cb_arg->dma_resp = dma_resp;
6697 	cb_arg->opts = opts;
6698 
6699 	/* Send the HW command */
6700 	if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
6701 				      dma_cmd, dma_resp)) {
6702 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg);
6703 
6704 		if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) {
6705 			/* if we're polling, copy response and invoke callback to
6706 			 * parse result */
6707 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
6708 			ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg);
6709 
6710 			/* set rc to resulting or "parsed" status */
6711 			rc = cb_arg->status;
6712 		}
6713 
6714 		/* if failed, or polling, free memory here */
6715 		if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6716 			if (rc != OCS_HW_RTN_SUCCESS) {
6717 				ocs_log_test(hw->os, "ocs_hw_command failed\n");
6718 			}
6719 			ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6720 			ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6721 		}
6722 	} else {
6723 		ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n");
6724 		rc = OCS_HW_RTN_ERROR;
6725 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6726 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6727 	}
6728 
6729 	return rc;
6730 }
6731 
6732 /**
6733  * @brief Called when the DMTF CLP command completes.
6734  *
6735  * @param hw Hardware context.
6736  * @param status Status field from the mbox completion.
6737  * @param mqe Mailbox response structure.
6738  * @param arg Pointer to a callback argument.
6739  *
6740  * @return None.
6741  *
6742  */
6743 static void
6744 ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6745 {
6746 	int32_t cb_status = 0;
6747 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6748 	sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
6749 	ocs_hw_clp_cb_arg_t *cb_arg = arg;
6750 	uint32_t result_len = 0;
6751 	int32_t stat_len;
6752 	char stat_str[8];
6753 
6754 	/* there are several status codes here, check them all and condense
6755 	 * into a single callback status
6756 	 */
6757 	if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
6758 		ocs_log_debug(hw->os, "status=x%x/x%x/x%x  addl=x%x clp=x%x detail=x%x\n",
6759 			status,
6760 			mbox_rsp->hdr.status,
6761 			clp_rsp->hdr.status,
6762 			clp_rsp->hdr.additional_status,
6763 			clp_rsp->clp_status,
6764 			clp_rsp->clp_detailed_status);
6765 		if (status) {
6766 			cb_status = status;
6767 		} else if (mbox_rsp->hdr.status) {
6768 			cb_status = mbox_rsp->hdr.status;
6769 		} else {
6770 			cb_status = clp_rsp->clp_status;
6771 		}
6772 	} else {
6773 		result_len = clp_rsp->resp_length;
6774 	}
6775 
6776 	if (cb_status) {
6777 		goto ocs_hw_cb_dmtf_clp_done;
6778 	}
6779 
6780 	if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
6781 		ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
6782 			     cb_arg->dma_resp->size, result_len);
6783 		cb_status = -1;
6784 		goto ocs_hw_cb_dmtf_clp_done;
6785 	}
6786 
6787 	/* parse CLP response to get status */
6788 	stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
6789 					      sizeof(stat_str),
6790 					      cb_arg->dma_resp->virt,
6791 					      result_len);
6792 
6793 	if (stat_len <= 0) {
6794 		ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
6795 		cb_status = -1;
6796 		goto ocs_hw_cb_dmtf_clp_done;
6797 	}
6798 
6799 	if (ocs_strcmp(stat_str, "0") != 0) {
6800 		ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
6801 		cb_status = -1;
6802 		goto ocs_hw_cb_dmtf_clp_done;
6803 	}
6804 
6805 ocs_hw_cb_dmtf_clp_done:
6806 
6807 	/* save status in cb_arg for callers with NULL cb's + polling */
6808 	cb_arg->status = cb_status;
6809 	if (cb_arg->cb) {
6810 		cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
6811 	}
6812 	/* if polling, caller will free memory */
6813 	if (cb_arg->opts != OCS_CMD_POLL) {
6814 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6815 		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6816 	}
6817 }
6818 
6819 /**
6820  * @brief Parse the CLP result and get the value corresponding to the given
6821  * keyword.
6822  *
6823  * @param hw Hardware context.
6824  * @param keyword CLP keyword for which the value is returned.
6825  * @param value Location to which the resulting value is copied.
6826  * @param value_len Length of the value parameter.
6827  * @param resp Pointer to the response buffer that is searched
6828  * for the keyword and value.
6829  * @param resp_len Length of response buffer passed in.
6830  *
6831  * @return Returns the number of bytes written to the value
6832  * buffer on success, or a negative value on failure.
6833  */
6834 static int32_t
6835 ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
6836 {
6837 	char *start = NULL;
6838 	char *end = NULL;
6839 
6840 	/* look for specified keyword in string */
6841 	start = ocs_strstr(resp, keyword);
6842 	if (start == NULL) {
6843 		ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
6844 			     keyword);
6845 		return -1;
6846 	}
6847 
6848 	/* now look for '=' and go one past */
6849 	start = ocs_strchr(start, '=');
6850 	if (start == NULL) {
6851 		ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
6852 			     keyword);
6853 		return -1;
6854 	}
6855 	start++;
6856 
6857 	/* \r\n terminates value */
6858 	end = ocs_strstr(start, "\r\n");
6859 	if (end == NULL) {
6860 		ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
6861 			     keyword);
6862 		return -1;
6863 	}
6864 
6865 	/* make sure given result array is big enough */
6866 	if ((end - start + 1) > value_len) {
6867 		ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
6868 			     value_len, (end-start));
6869 		return -1;
6870 	}
6871 
6872 	ocs_strncpy(value, start, (end - start));
6873 	value[end-start] = '\0';
6874 	return (end-start+1);
6875 }
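
/**
 * Illustrative sketch of the "keyword=value\r\n" format this parser
 * expects. The response text below is hypothetical; a real response is
 * produced by the chip in the CLP response DMA buffer.
 *
 * @code
 * const char resp[] = "status=0\r\nretdata=ETH_4x10G\r\n";
 * char val[64];
 * int32_t n;
 *
 * n = ocs_hw_clp_resp_get_value(hw, "retdata", val, sizeof(val),
 *                               resp, sizeof(resp) - 1);
 * @endcode
 *
 * On success, n is 10 (the value length plus one) and val contains
 * "ETH_4x10G".
 */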
6876 
6877 /**
6878  * @brief Cause chip to enter an unrecoverable error state.
6879  *
6880  * @par Description
6881  * Cause chip to enter an unrecoverable error state. This is
6882  * used when detecting unexpected FW behavior so that the FW can be
6883  * hwted from the driver as soon as the error is detected.
6884  *
6885  * @param hw Hardware context.
6886  * @param dump Generate dump as part of reset.
6887  *
6888  * @return Returns 0 on success, or a non-zero value on failure.
6889  *
6890  */
6891 ocs_hw_rtn_e
6892 ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
6893 {
6894 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6895 
6896 	if (sli_raise_ue(&hw->sli, dump) != 0) {
6897 		rc = OCS_HW_RTN_ERROR;
6898 	} else {
6899 		if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
6900 			hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
6901 		}
6902 	}
6903 
6904 	return rc;
6905 }
6906 
6907 /**
6908  * @brief Called when the OBJECT_GET command completes.
6909  *
6910  * @par Description
6911  * Get the number of bytes actually written out of the response, free the mailbox
6912  * that was malloc'd by ocs_hw_dump_get(), then call the callback
6913  * and pass the status and bytes read.
6914  *
6915  * @param hw Hardware context.
6916  * @param status Status field from the mbox completion.
6917  * @param mqe Mailbox response structure.
6918  * @param arg Pointer to a callback function that signals the caller that the command is done.
6919  * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_read)</tt>.
6920  *
6921  * @return Returns 0.
6922  */
6923 static int32_t
6924 ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6925 {
6926 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6927 	sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed;
6928 	ocs_hw_dump_get_cb_arg_t *cb_arg = arg;
6929 	uint32_t bytes_read;
6930 	uint8_t eof;
6931 
6932 	bytes_read = rd_obj_rsp->actual_read_length;
6933 	eof = rd_obj_rsp->eof;
6934 
6935 	if (cb_arg) {
6936 		if (cb_arg->cb) {
6937 			if ((status == 0) && mbox_rsp->hdr.status) {
6938 				status = mbox_rsp->hdr.status;
6939 			}
6940 			cb_arg->cb(status, bytes_read, eof, cb_arg->arg);
6941 		}
6942 
6943 		ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
6944 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
6945 	}
6946 
6947 	return 0;
6948 }
6949 
6950 /**
6951  * @brief Read a dump image to the host.
6952  *
6953  * @par Description
6954  * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a
6955  * dump image chunk, then sends the command with the ocs_hw_command(). On completion,
6956  * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox
6957  * and signal the caller that the read has completed.
6958  *
6959  * @param hw Hardware context.
6960  * @param dma DMA structure to transfer the dump chunk into.
6961  * @param size Size of the dump chunk.
6962  * @param offset Offset, in bytes, from the beginning of the dump.
6963  * @param cb Pointer to a callback function that is called when the command completes.
6964  * The callback function prototype is
6965  * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>.
6966  * @param arg Pointer to be passed to the callback function.
6967  *
6968  * @return Returns 0 on success, or a non-zero value on failure.
6969  */
6970 ocs_hw_rtn_e
6971 ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg)
6972 {
6973 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6974 	uint8_t *mbxdata;
6975 	ocs_hw_dump_get_cb_arg_t *cb_arg;
6976 	uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
6977 
6978 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6979 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6980 		return OCS_HW_RTN_ERROR;
6981 	}
6982 
6983 	if (1 != sli_dump_is_present(&hw->sli)) {
6984 		ocs_log_test(hw->os, "No dump is present\n");
6985 		return OCS_HW_RTN_ERROR;
6986 	}
6987 
6988 	if (1 == sli_reset_required(&hw->sli)) {
6989 		ocs_log_test(hw->os, "device reset required\n");
6990 		return OCS_HW_RTN_ERROR;
6991 	}
6992 
6993 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6994 	if (mbxdata == NULL) {
6995 		ocs_log_err(hw->os, "failed to malloc mbox\n");
6996 		return OCS_HW_RTN_NO_MEMORY;
6997 	}
6998 
6999 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT);
7000 	if (cb_arg == NULL) {
7001 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7002 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7003 		return OCS_HW_RTN_NO_MEMORY;
7004 	}
7005 
7006 	cb_arg->cb = cb;
7007 	cb_arg->arg = arg;
7008 	cb_arg->mbox_cmd = mbxdata;
7009 
7010 	if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7011 			size, offset, "/dbg/dump.bin", dma)) {
7012 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg);
7013 		if (rc == 0 && opts == OCS_CMD_POLL) {
7014 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7015 			rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg);
7016 		}
7017 	}
7018 
7019 	if (rc != OCS_HW_RTN_SUCCESS) {
7020 		ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n");
7021 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7022 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
7023 	}
7024 
7025 	return rc;
7026 }
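
/**
 * Illustrative usage sketch (not part of the driver): read the first 4 KB
 * chunk of a dump. The callback name and its accounting are hypothetical;
 * a real caller would re-issue ocs_hw_dump_get() with an advanced offset
 * until eof is set, and then call ocs_hw_dump_clear().
 *
 * @code
 * static void
 * dump_chunk_cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)
 * {
 *         uint32_t *total = arg;
 *
 *         if (status == 0) {
 *                 *total += bytes_read;
 *         }
 * }
 *
 * ...
 *
 * uint32_t total = 0;
 * ocs_dma_t chunk;
 *
 * if (ocs_dma_alloc(hw->os, &chunk, 4096, OCS_MIN_DMA_ALIGNMENT) == 0) {
 *         if (ocs_hw_dump_get(hw, &chunk, chunk.size, 0,
 *                             dump_chunk_cb, &total) != OCS_HW_RTN_SUCCESS) {
 *                 ocs_dma_free(hw->os, &chunk);
 *         }
 * }
 * @endcode
 */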
7027 
7028 /**
7029  * @brief Called when the OBJECT_DELETE command completes.
7030  *
7031  * @par Description
7032  * Free the mailbox that was malloc'd
7033  * by ocs_hw_dump_clear(), then call the callback and pass the status.
7034  *
7035  * @param hw Hardware context.
7036  * @param status Status field from the mbox completion.
7037  * @param mqe Mailbox response structure.
7038  * @param arg Pointer to a callback function that signals the caller that the command is done.
7039  * The callback function prototype is <tt>void cb(int32_t status, void *arg)</tt>.
7040  *
7041  * @return Returns 0.
7042  */
7043 static int32_t
7044 ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7045 {
7046 	ocs_hw_dump_clear_cb_arg_t *cb_arg = arg;
7047 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7048 
7049 	if (cb_arg) {
7050 		if (cb_arg->cb) {
7051 			if ((status == 0) && mbox_rsp->hdr.status) {
7052 				status = mbox_rsp->hdr.status;
7053 			}
7054 			cb_arg->cb(status, cb_arg->arg);
7055 		}
7056 
7057 		ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
7058 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7059 	}
7060 
7061 	return 0;
7062 }
7063 
7064 /**
7065  * @brief Clear a dump image from the device.
7066  *
7067  * @par Description
7068  * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear
7069  * the dump, then sends the command with ocs_hw_command(). On completion,
7070  * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox
7071  * and to signal the caller that the write has completed.
7072  *
7073  * @param hw Hardware context.
7074  * @param cb Pointer to a callback function that is called when the command completes.
7075  * The callback function prototype is
7076  * <tt>void cb(int32_t status, void *arg)</tt>.
7077  * @param arg Pointer to be passed to the callback function.
7078  *
7079  * @return Returns 0 on success, or a non-zero value on failure.
7080  */
7081 ocs_hw_rtn_e
7082 ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg)
7083 {
7084 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7085 	uint8_t *mbxdata;
7086 	ocs_hw_dump_clear_cb_arg_t *cb_arg;
7087 	uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
7088 
7089 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
7090 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
7091 		return OCS_HW_RTN_ERROR;
7092 	}
7093 
7094 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7095 	if (mbxdata == NULL) {
7096 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7097 		return OCS_HW_RTN_NO_MEMORY;
7098 	}
7099 
7100 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT);
7101 	if (cb_arg == NULL) {
7102 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7103 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7104 		return OCS_HW_RTN_NO_MEMORY;
7105 	}
7106 
7107 	cb_arg->cb = cb;
7108 	cb_arg->arg = arg;
7109 	cb_arg->mbox_cmd = mbxdata;
7110 
7111 	if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7112 			"/dbg/dump.bin")) {
7113 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg);
7114 		if (rc == 0 && opts == OCS_CMD_POLL) {
7115 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7116 			rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg);
7117 		}
7118 	}
7119 
7120 	if (rc != OCS_HW_RTN_SUCCESS) {
7121 		ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n");
7122 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7123 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7124 	}
7125 
7126 	return rc;
7127 }
7128 
7129 typedef struct ocs_hw_get_port_protocol_cb_arg_s {
7130 	ocs_get_port_protocol_cb_t cb;
7131 	void *arg;
7132 	uint32_t pci_func;
7133 	ocs_dma_t payload;
7134 } ocs_hw_get_port_protocol_cb_arg_t;
7135 
7136 /**
7137  * @brief Called for the completion of get_port_profile for a
7138  *        user request.
7139  *
7140  * @param hw Hardware context.
7141  * @param status The status from the MQE.
7142  * @param mqe Pointer to mailbox command buffer.
7143  * @param arg Pointer to a callback argument.
7144  *
7145  * @return Returns 0 on success, or a non-zero value on failure.
7146  */
7147 static int32_t
7148 ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status,
7149 			    uint8_t *mqe, void *arg)
7150 {
7151 	ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg;
7152 	ocs_dma_t *payload = &(cb_arg->payload);
7153 	sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7154 	ocs_hw_port_protocol_e port_protocol;
7155 	int num_descriptors;
7156 	sli4_resource_descriptor_v1_t *desc_p;
7157 	sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7158 	int i;
7159 
7160 	port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7161 
7162 	num_descriptors = response->desc_count;
7163 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7164 	for (i=0; i<num_descriptors; i++) {
7165 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7166 			pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7167 			if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7168 				switch(pcie_desc_p->pf_type) {
7169 				case 0x02:
7170 					port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI;
7171 					break;
7172 				case 0x04:
7173 					port_protocol = OCS_HW_PORT_PROTOCOL_FCOE;
7174 					break;
7175 				case 0x10:
7176 					port_protocol = OCS_HW_PORT_PROTOCOL_FC;
7177 					break;
7178 				default:
7179 					port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7180 					break;
7181 				}
7182 			}
7183 		}
7184 
7185 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7186 	}
7187 
7188 	if (cb_arg->cb) {
7189 		cb_arg->cb(status, port_protocol, cb_arg->arg);
7190 	}
7191 
7192 	ocs_dma_free(hw->os, &cb_arg->payload);
7193 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7194 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7195 
7196 	return 0;
7197 }
7198 
7199 /**
7200  * @ingroup io
7201  * @brief  Get the current port protocol.
7202  * @par Description
7203  * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox.  When the
7204  * command completes the provided mgmt callback function is
7205  * called.
7206  *
7207  * @param hw Hardware context.
7208  * @param pci_func PCI function to query for current protocol.
7209  * @param cb Callback function to be called when the command completes.
7210  * @param ul_arg An argument that is passed to the callback function.
7211  *
7212  * @return
7213  * - OCS_HW_RTN_SUCCESS on success.
7214  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7215  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7216  *   context.
7217  * - OCS_HW_RTN_ERROR on any other error.
7218  */
7219 ocs_hw_rtn_e
7220 ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func,
7221 	ocs_get_port_protocol_cb_t cb, void* ul_arg)
7222 {
7223 	uint8_t *mbxdata;
7224 	ocs_hw_get_port_protocol_cb_arg_t *cb_arg;
7225 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7226 
7227 	/* Only supported on Skyhawk */
7228 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7229 		return OCS_HW_RTN_ERROR;
7230 	}
7231 
7232 	/* mbxdata holds the header of the command */
7233 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7234 	if (mbxdata == NULL) {
7235 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7236 		return OCS_HW_RTN_NO_MEMORY;
7237 	}
7238 
7239 	/* cb_arg holds the data that will be passed to the callback on completion */
7240 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7241 	if (cb_arg == NULL) {
7242 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7243 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7244 		return OCS_HW_RTN_NO_MEMORY;
7245 	}
7246 
7247 	cb_arg->cb = cb;
7248 	cb_arg->arg = ul_arg;
7249 	cb_arg->pci_func = pci_func;
7250 
7251 	/* dma_mem holds the non-embedded portion */
7252 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7253 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7254 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7255 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7256 		return OCS_HW_RTN_NO_MEMORY;
7257 	}
7258 
7259 	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7260 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg);
7261 	}
7262 
7263 	if (rc != OCS_HW_RTN_SUCCESS) {
7264 		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7265 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7266 		ocs_dma_free(hw->os, &cb_arg->payload);
7267 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7268 	}
7269 
7270 	return rc;
7271 
7272 }
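
/**
 * Illustrative usage sketch (not part of the driver): query the protocol
 * of PCI function 0. The callback name is hypothetical; its signature is
 * inferred from how ocs_hw_get_port_protocol_cb() invokes it.
 *
 * @code
 * static void
 * port_protocol_cb(int32_t status, ocs_hw_port_protocol_e prot, void *arg)
 * {
 *         ocs_hw_t *hw = arg;
 *
 *         if (status == 0 && prot == OCS_HW_PORT_PROTOCOL_FC) {
 *                 ocs_log_debug(hw->os, "function 0 is configured for FC\n");
 *         }
 * }
 *
 * ...
 *
 * (void)ocs_hw_get_port_protocol(hw, 0, port_protocol_cb, hw);
 * @endcode
 */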
7273 
7274 typedef struct ocs_hw_set_port_protocol_cb_arg_s {
7275 	ocs_set_port_protocol_cb_t cb;
7276 	void *arg;
7277 	ocs_dma_t payload;
7278 	uint32_t new_protocol;
7279 	uint32_t pci_func;
7280 } ocs_hw_set_port_protocol_cb_arg_t;
7281 
7282 /**
7283  * @brief Called for the completion of set_port_profile for a
7284  *        user request.
7285  *
7286  * @par Description
7287  * This is the second of two callbacks for the set_port_protocol
7288  * function. The set operation is a read-modify-write. This
7289  * callback is called when the write (SET_PROFILE_CONFIG)
7290  * completes.
7291  *
7292  * @param hw Hardware context.
7293  * @param status The status from the MQE.
7294  * @param mqe Pointer to mailbox command buffer.
7295  * @param arg Pointer to a callback argument.
7296  *
7297  * @return 0 on success, non-zero otherwise
7298  */
7299 static int32_t
7300 ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7301 {
7302 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7303 
7304 	if (cb_arg->cb) {
7305 		cb_arg->cb(status, cb_arg->arg);
7306 	}
7307 
7308 	ocs_dma_free(hw->os, &(cb_arg->payload));
7309 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7310 	ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7311 
7312 	return 0;
7313 }
7314 
7315 /**
7316  * @brief Called for the completion of set_port_profile for a
7317  *        user request.
7318  *
7319  * @par Description
7320  * This is the first of two callbacks for the set_port_protocol
7321  * function.  The set operation is a read-modify-write.  This
7322  * callback is called when the read completes
7323  * (GET_PROFILE_CONFIG). It updates the resource
7324  * descriptors, then queues the write (SET_PROFILE_CONFIG).
7325  *
7326  * On entry there are three memory areas that were allocated by
7327  * ocs_hw_set_port_protocol.  If a failure is detected in this
7328  * function those need to be freed.  If this function succeeds
7329  * it allocates three more areas.
7330  *
7331  * @param hw Hardware context.
7332  * @param status The status from the MQE
7333  * @param mqe Pointer to mailbox command buffer.
7334  * @param arg Pointer to a callback argument.
7335  *
7336  * @return Returns 0 on success, or a non-zero value otherwise.
7337  */
7338 static int32_t
7339 ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7340 {
7341 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7342 	ocs_dma_t *payload = &(cb_arg->payload);
7343 	sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7344 	int num_descriptors;
7345 	sli4_resource_descriptor_v1_t *desc_p;
7346 	sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7347 	int i;
7348 	ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg;
7349 	ocs_hw_port_protocol_e new_protocol;
7350 	uint8_t *dst;
7351 	sli4_isap_resouce_descriptor_v1_t *isap_desc_p;
7352 	uint8_t *mbxdata;
7353 	int pci_descriptor_count;
7354 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7355 	int num_fcoe_ports = 0;
7356 	int num_iscsi_ports = 0;
7357 
7358 	new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol;
7359 
7360 	num_descriptors = response->desc_count;
7361 
7362 	/* Count PCI descriptors */
7363 	pci_descriptor_count = 0;
7364 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7365 	for (i=0; i<num_descriptors; i++) {
7366 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7367 			++pci_descriptor_count;
7368 		}
7369 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7370 	}
7371 
7372 	/* mbxdata holds the header of the command */
7373 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7374 	if (mbxdata == NULL) {
7375 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7376 		return OCS_HW_RTN_NO_MEMORY;
7377 	}
7378 
7379 	/* cb_arg holds the data that will be passed to the callback on completion */
7380 	new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7381 	if (new_cb_arg == NULL) {
7382 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7383 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7384 		return OCS_HW_RTN_NO_MEMORY;
7385 	}
7386 
7387 	new_cb_arg->cb = cb_arg->cb;
7388 	new_cb_arg->arg = cb_arg->arg;
7389 
7390 	/* Allocate memory for the descriptors we're going to send.  This is
7391 	 * one for each PCI descriptor plus one ISAP descriptor. */
7392 	if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) +
7393 			  (pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) +
7394 			  sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) {
7395 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7396 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7397 		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7398 		return OCS_HW_RTN_NO_MEMORY;
7399 	}
7400 
7401 	sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7402 						   &new_cb_arg->payload,
7403 						   0, pci_descriptor_count+1, 1);
7404 
7405 	/* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */
7406 	dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc);
7407 
7408 	/* Loop over all descriptors.  If the descriptor is a PCIe descriptor, copy it
7409 	 * to the SET_PROFILE_CONFIG command to be written back.  If it's the descriptor
7410 	 * that we're trying to change also set its pf_type.
7411 	 */
7412 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7413 	for (i=0; i<num_descriptors; i++) {
7414 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7415 			pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7416 			if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7417 				/* This is the PCIe descriptor for this OCS instance.
7418 				 * Update it with the new pf_type */
7419 				switch(new_protocol) {
7420 				case OCS_HW_PORT_PROTOCOL_FC:
7421 					pcie_desc_p->pf_type = SLI4_PROTOCOL_FC;
7422 					break;
7423 				case OCS_HW_PORT_PROTOCOL_FCOE:
7424 					pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE;
7425 					break;
7426 				case OCS_HW_PORT_PROTOCOL_ISCSI:
7427 					pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI;
7428 					break;
7429 				default:
7430 					pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT;
7431 					break;
7432 				}
7433 			}
7434 
7435 			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) {
7436 				++num_fcoe_ports;
7437 			}
7438 			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) {
7439 				++num_iscsi_ports;
7440 			}
7441 			ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t));
7442 			dst += sizeof(sli4_pcie_resource_descriptor_v1_t);
7443 		}
7444 
7445 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7446 	}
7447 
7448 	/* Create an ISAP resource descriptor */
7449 	isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst;
7450 	isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP;
7451 	isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t);
7452 	if (num_iscsi_ports > 0) {
7453 		isap_desc_p->iscsi_tgt = 1;
7454 		isap_desc_p->iscsi_ini = 1;
7455 		isap_desc_p->iscsi_dif = 1;
7456 	}
7457 	if (num_fcoe_ports > 0) {
7458 		isap_desc_p->fcoe_tgt = 1;
7459 		isap_desc_p->fcoe_ini = 1;
7460 		isap_desc_p->fcoe_dif = 1;
7461 	}
7462 
7463 	/* At this point we're done with the memory allocated by ocs_hw_set_port_protocol */
7464 	ocs_dma_free(hw->os, &cb_arg->payload);
7465 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7466 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7467 
7468 	/* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */
7469 	rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg);
7470 	if (rc) {
7471 		ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n");
7472 		/* Call the upper level callback to report a failure */
7473 		if (new_cb_arg->cb) {
7474 			new_cb_arg->cb(rc, new_cb_arg->arg);
7475 		}
7476 
7477 		/* Free the memory allocated by this function */
7478 		ocs_dma_free(hw->os, &new_cb_arg->payload);
7479 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7480 		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7481 	}
7482 
7483 	return rc;
7484 }
7485 
7486 /**
7487  * @ingroup io
7488  * @brief  Set the port protocol.
7489  * @par Description
7490  * Setting the port protocol is a read-modify-write operation.
7491  * This function submits a GET_PROFILE_CONFIG command to read
7492  * the current settings.  The callback function will modify the
7493  * settings and issue the write.
7494  *
7495  * On successful completion this function will have allocated
7496  * two regular memory areas and one dma area which will need to
7497  * get freed later in the callbacks.
7498  *
7499  * @param hw Hardware context.
7500  * @param new_protocol New protocol to use.
7501  * @param pci_func PCI function to configure.
7502  * @param cb Callback function to be called when the command completes.
7503  * @param ul_arg An argument that is passed to the callback function.
7504  *
7505  * @return
7506  * - OCS_HW_RTN_SUCCESS on success.
7507  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7508  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7509  *   context.
7510  * - OCS_HW_RTN_ERROR on any other error.
7511  */
7512 ocs_hw_rtn_e
7513 ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol,
7514 		uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg)
7515 {
7516 	uint8_t *mbxdata;
7517 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg;
7518 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7519 
7520 	/* Only supported on Skyhawk */
7521 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7522 		return OCS_HW_RTN_ERROR;
7523 	}
7524 
7525 	/* mbxdata holds the header of the command */
7526 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7527 	if (mbxdata == NULL) {
7528 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7529 		return OCS_HW_RTN_NO_MEMORY;
7530 	}
7531 
7532 	/* cb_arg holds the data that will be passed to the callback on completion */
7533 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7534 	if (cb_arg == NULL) {
7535 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7536 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7537 		return OCS_HW_RTN_NO_MEMORY;
7538 	}
7539 
7540 	cb_arg->cb = cb;
7541 	cb_arg->arg = ul_arg;
7542 	cb_arg->new_protocol = new_protocol;
7543 	cb_arg->pci_func = pci_func;
7544 
7545 	/* dma_mem holds the non-embedded portion */
7546 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7547 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7548 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7549 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7550 		return OCS_HW_RTN_NO_MEMORY;
7551 	}
7552 
7553 	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7554 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg);
7555 	}
7556 
7557 	if (rc != OCS_HW_RTN_SUCCESS) {
7558 		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7559 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7560 		ocs_dma_free(hw->os, &cb_arg->payload);
7561 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7562 	}
7563 
7564 	return rc;
7565 }
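
/**
 * Illustrative usage sketch (not part of the driver): switch PCI function
 * 0 to FC and learn the outcome in the completion callback. The callback
 * name is hypothetical.
 *
 * @code
 * static void
 * set_protocol_cb(int32_t status, void *arg)
 * {
 *         ocs_hw_t *hw = arg;
 *
 *         if (status != 0) {
 *                 ocs_log_err(hw->os, "protocol change failed: %d\n", status);
 *         }
 * }
 *
 * ...
 *
 * (void)ocs_hw_set_port_protocol(hw, OCS_HW_PORT_PROTOCOL_FC, 0,
 *                                set_protocol_cb, hw);
 * @endcode
 */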
7566 
7567 typedef struct ocs_hw_get_profile_list_cb_arg_s {
7568 	ocs_get_profile_list_cb_t cb;
7569 	void *arg;
7570 	ocs_dma_t payload;
7571 } ocs_hw_get_profile_list_cb_arg_t;
7572 
7573 /**
7574  * @brief Called for the completion of get_profile_list for a
7575  *        user request.
7576  * @par Description
7577  * This function is called when the COMMON_GET_PROFILE_LIST
7578  * mailbox completes.  The response will be in
7579  * ctx->non_embedded_mem.virt.  This function parses the
7580  * response and creates an ocs_hw_profile_list, then calls the
7581  * mgmt_cb callback function and passes that list to it.
7582  *
7583  * @param hw Hardware context.
7584  * @param status The status from the MQE
7585  * @param mqe Pointer to mailbox command buffer.
7586  * @param arg Pointer to a callback argument.
7587  *
7588  * @return Returns 0 on success, or a non-zero value on failure.
7589  */
7590 static int32_t
7591 ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7592 {
7593 	ocs_hw_profile_list_t *list;
7594 	ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg;
7595 	ocs_dma_t *payload = &(cb_arg->payload);
7596 	sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt;
7597 	int i;
7598 	int num_descriptors;
7599 
7600 	list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO);
7601 	if (list == NULL) {
7602 		ocs_log_err(hw->os, "failed to malloc list\n");
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7603 		return OCS_HW_RTN_NO_MEMORY;
7604 	}
7605 
7606 	list->num_descriptors = response->profile_descriptor_count;
7607 	/* clamp so consumers cannot index past the entries filled in below */
7608 	if (list->num_descriptors > OCS_HW_MAX_PROFILES) {
7609 		list->num_descriptors = OCS_HW_MAX_PROFILES;
7610 	}
7611 	num_descriptors = list->num_descriptors;
7612 
7613 	for (i=0; i<num_descriptors; i++) {
7614 		list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id;
7615 		list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index;
7616 		ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description);
7617 	}
7618 
7619 	if (cb_arg->cb) {
7620 		cb_arg->cb(status, list, cb_arg->arg);
7621 	} else {
7622 		ocs_free(hw->os, list, sizeof(*list));
7623 	}
7624 
7625 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7626 	ocs_dma_free(hw->os, &cb_arg->payload);
7627 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7628 
7629 	return 0;
7630 }
7631 
7632 /**
7633  * @ingroup io
7634  * @brief  Get a list of available profiles.
7635  * @par Description
7636  * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox.  When the
7637  * command completes the provided mgmt callback function is
7638  * called.
7639  *
7640  * @param hw Hardware context.
7641  * @param cb Callback function to be called when the
7642  *      	  command completes.
7643  * @param ul_arg An argument that is passed to the callback
7644  *      	 function.
7645  *
7646  * @return
7647  * - OCS_HW_RTN_SUCCESS on success.
7648  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7649  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7650  *   context.
7651  * - OCS_HW_RTN_ERROR on any other error.
7652  */
7653 ocs_hw_rtn_e
7654 ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg)
7655 {
7656 	uint8_t *mbxdata;
7657 	ocs_hw_get_profile_list_cb_arg_t *cb_arg;
7658 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7659 
7660 	/* Only supported on Skyhawk */
7661 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7662 		return OCS_HW_RTN_ERROR;
7663 	}
7664 
7665 	/* mbxdata holds the header of the command */
7666 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7667 	if (mbxdata == NULL) {
7668 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7669 		return OCS_HW_RTN_NO_MEMORY;
7670 	}
7671 
7672 	/* cb_arg holds the data that will be passed to the callback on completion */
7673 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT);
7674 	if (cb_arg == NULL) {
7675 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7676 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7677 		return OCS_HW_RTN_NO_MEMORY;
7678 	}
7679 
7680 	cb_arg->cb = cb;
7681 	cb_arg->arg = ul_arg;
7682 
7683 	/* dma_mem holds the non-embedded portion */
7684 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) {
7685 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7686 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7687 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7688 		return OCS_HW_RTN_NO_MEMORY;
7689 	}
7690 
7691 	if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) {
7692 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg);
7693 	}
7694 
7695 	if (rc != OCS_HW_RTN_SUCCESS) {
7696 		ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n");
7697 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7698 		ocs_dma_free(hw->os, &cb_arg->payload);
7699 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7700 	}
7701 
7702 	return rc;
7703 }
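
/**
 * Illustrative usage sketch (not part of the driver): enumerate the
 * available profiles. Note that the completion callback takes ownership
 * of the list and must free it. The callback name is hypothetical.
 *
 * @code
 * static void
 * profile_list_cb(int32_t status, ocs_hw_profile_list_t *list, void *arg)
 * {
 *         ocs_hw_t *hw = arg;
 *         uint32_t i;
 *
 *         if (status == 0) {
 *                 for (i = 0; i < list->num_descriptors; i++) {
 *                         ocs_log_debug(hw->os, "profile %d: %s\n",
 *                                       list->descriptors[i].profile_id,
 *                                       list->descriptors[i].profile_description);
 *                 }
 *         }
 *         ocs_free(hw->os, list, sizeof(*list));
 * }
 *
 * ...
 *
 * (void)ocs_hw_get_profile_list(hw, profile_list_cb, hw);
 * @endcode
 */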
7704 
7705 typedef struct ocs_hw_get_active_profile_cb_arg_s {
7706 	ocs_get_active_profile_cb_t cb;
7707 	void *arg;
7708 } ocs_hw_get_active_profile_cb_arg_t;
7709 
7710 /**
7711  * @brief Called for the completion of get_active_profile for a
7712  *        user request.
7713  *
7714  * @param hw Hardware context.
7715  * @param status The status from the MQE
7716  * @param mqe Pointer to mailbox command buffer.
7717  * @param arg Pointer to a callback argument.
7718  *
7719  * @return Returns 0 on success, or a non-zero value on failure.
7720  */
7721 static int32_t
7722 ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7723 {
7724 	ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg;
7725 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7726 	sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed;
7727 	uint32_t active_profile;
7728 
7729 	active_profile = response->active_profile_id;
7730 
7731 	if (cb_arg->cb) {
7732 		cb_arg->cb(status, active_profile, cb_arg->arg);
7733 	}
7734 
7735 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7736 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7737 
7738 	return 0;
7739 }
7740 
7741 /**
7742  * @ingroup io
7743  * @brief  Get the currently active profile.
7744  * @par Description
7745  * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the
7746  * command completes the provided mgmt callback function is
7747  * called.
7748  *
7749  * @param hw Hardware context.
7750  * @param cb Callback function to be called when the
7751  *	     command completes.
7752  * @param ul_arg An argument that is passed to the callback
7753  *      	 function.
7754  *
7755  * @return
7756  * - OCS_HW_RTN_SUCCESS on success.
7757  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7758  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7759  *   context.
7760  * - OCS_HW_RTN_ERROR on any other error.
7761  */
7762 ocs_hw_rtn_e
7763 ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg)
7764 {
7765 	uint8_t *mbxdata;
7766 	ocs_hw_get_active_profile_cb_arg_t *cb_arg;
7767 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7768 
7769 	/* Only supported on Skyhawk */
7770 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7771 		return OCS_HW_RTN_ERROR;
7772 	}
7773 
7774 	/* mbxdata holds the header of the command */
7775 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7776 	if (mbxdata == NULL) {
7777 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7778 		return OCS_HW_RTN_NO_MEMORY;
7779 	}
7780 
7781 	/* cb_arg holds the data that will be passed to the callback on completion */
7782 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT);
7783 	if (cb_arg == NULL) {
7784 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7785 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7786 		return OCS_HW_RTN_NO_MEMORY;
7787 	}
7788 
7789 	cb_arg->cb = cb;
7790 	cb_arg->arg = ul_arg;
7791 
7792 	if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7793 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg);
7794 	}
7795 
7796 	if (rc != OCS_HW_RTN_SUCCESS) {
7797 		ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n");
7798 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7799 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7800 	}
7801 
7802 	return rc;
7803 }
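
/**
 * Illustrative usage sketch (not part of the driver): fetch the active
 * profile ID. The callback name is hypothetical.
 *
 * @code
 * static void
 * active_profile_cb(int32_t status, uint32_t profile_id, void *arg)
 * {
 *         ocs_hw_t *hw = arg;
 *
 *         if (status == 0) {
 *                 ocs_log_debug(hw->os, "active profile: %d\n", profile_id);
 *         }
 * }
 *
 * ...
 *
 * (void)ocs_hw_get_active_profile(hw, active_profile_cb, hw);
 * @endcode
 */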
7804 
7805 typedef struct ocs_hw_get_nvparms_cb_arg_s {
7806 	ocs_get_nvparms_cb_t cb;
7807 	void *arg;
7808 } ocs_hw_get_nvparms_cb_arg_t;
7809 
7810 /**
7811  * @brief Called for the completion of get_nvparms for a
7812  *        user request.
7813  *
7814  * @param hw Hardware context.
7815  * @param status The status from the MQE.
7816  * @param mqe Pointer to mailbox command buffer.
7817  * @param arg Pointer to a callback argument.
7818  *
7819  * @return 0 on success, non-zero otherwise
7820  */
7821 static int32_t
7822 ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7823 {
7824 	ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg;
7825 	sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe;
7826 
7827 	if (cb_arg->cb) {
7828 		cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa,
7829 				mbox_rsp->preferred_d_id, cb_arg->arg);
7830 	}
7831 
7832 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7833 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7834 
7835 	return 0;
7836 }
7837 
7838 /**
7839  * @ingroup io
7840  * @brief  Read non-volatile parms.
7841  * @par Description
7842  * Issues a SLI-4 READ_NVPARMS mailbox. When the
7843  * command completes the provided mgmt callback function is
7844  * called.
7845  *
7846  * @param hw Hardware context.
7847  * @param cb Callback function to be called when the
7848  *	  command completes.
7849  * @param ul_arg An argument that is passed to the callback
7850  *	  function.
7851  *
7852  * @return
7853  * - OCS_HW_RTN_SUCCESS on success.
7854  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7855  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7856  *   context.
7857  * - OCS_HW_RTN_ERROR on any other error.
7858  */
7859 ocs_hw_rtn_e
7860 ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg)
7861 {
7862 	uint8_t *mbxdata;
7863 	ocs_hw_get_nvparms_cb_arg_t *cb_arg;
7864 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7865 
7866 	/* mbxdata holds the header of the command */
7867 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7868 	if (mbxdata == NULL) {
7869 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7870 		return OCS_HW_RTN_NO_MEMORY;
7871 	}
7872 
7873 	/* cb_arg holds the data that will be passed to the callback on completion */
7874 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT);
7875 	if (cb_arg == NULL) {
7876 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7877 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7878 		return OCS_HW_RTN_NO_MEMORY;
7879 	}
7880 
7881 	cb_arg->cb = cb;
7882 	cb_arg->arg = ul_arg;
7883 
7884 	if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7885 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg);
7886 	}
7887 
7888 	if (rc != OCS_HW_RTN_SUCCESS) {
7889 		ocs_log_test(hw->os, "READ_NVPARMS failed\n");
7890 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7891 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7892 	}
7893 
7894 	return rc;
7895 }
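
/**
 * Illustrative usage sketch (not part of the driver): read the
 * non-volatile WWPN. The callback name is hypothetical, and the WWN
 * buffers are assumed to be 8 bytes in the big-endian layout described
 * for ocs_hw_set_nvparms().
 *
 * @code
 * static void
 * nvparms_cb(int32_t status, uint8_t *wwpn, uint8_t *wwnn,
 *            uint8_t hard_alpa, uint32_t preferred_d_id, void *arg)
 * {
 *         uint8_t *wwpn_out = arg;
 *
 *         if (status == 0) {
 *                 ocs_memcpy(wwpn_out, wwpn, 8);
 *         }
 * }
 *
 * ...
 *
 * uint8_t wwpn[8];
 *
 * (void)ocs_hw_get_nvparms(hw, nvparms_cb, wwpn);
 * @endcode
 */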
7896 
7897 typedef struct ocs_hw_set_nvparms_cb_arg_s {
7898 	ocs_set_nvparms_cb_t cb;
7899 	void *arg;
7900 } ocs_hw_set_nvparms_cb_arg_t;
7901 
7902 /**
7903  * @brief Called for the completion of set_nvparms for a
7904  *        user request.
7905  *
7906  * @param hw Hardware context.
7907  * @param status The status from the MQE.
7908  * @param mqe Pointer to mailbox command buffer.
7909  * @param arg Pointer to a callback argument.
7910  *
7911  * @return Returns 0 on success, or a non-zero value on failure.
7912  */
7913 static int32_t
7914 ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7915 {
7916 	ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg;
7917 
7918 	if (cb_arg->cb) {
7919 		cb_arg->cb(status, cb_arg->arg);
7920 	}
7921 
7922 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7923 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7924 
7925 	return 0;
7926 }
7927 
7928 /**
7929  * @ingroup io
7930  * @brief  Write non-volatile parms.
7931  * @par Description
7932  * Issues a SLI-4 WRITE_NVPARMS mailbox. When the
7933  * command completes the provided mgmt callback function is
7934  * called.
7935  *
7936  * @param hw Hardware context.
7937  * @param cb Callback function to be called when the
7938  *	  command completes.
7939  * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
7940  * @param wwnn Port's WWNN in big-endian order, or NULL to use default.
7941  * @param hard_alpa A hard AL_PA address setting used during loop
7942  * initialization. If no hard AL_PA is required, set to 0.
7943  * @param preferred_d_id A preferred D_ID address setting
7944  * that may be overridden with the CONFIG_LINK mailbox command.
7945  * If there is no preference, set to 0.
7946  * @param ul_arg An argument that is passed to the callback
7947  *	  function.
7948  *
7949  * @return
7950  * - OCS_HW_RTN_SUCCESS on success.
7951  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7952  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7953  *   context.
7954  * - OCS_HW_RTN_ERROR on any other error.
7955  */
7956 ocs_hw_rtn_e
7957 ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn,
7958 		uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg)
7959 {
7960 	uint8_t *mbxdata;
7961 	ocs_hw_set_nvparms_cb_arg_t *cb_arg;
7962 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7963 
7964 	/* mbxdata holds the header of the command */
7965 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7966 	if (mbxdata == NULL) {
7967 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7968 		return OCS_HW_RTN_NO_MEMORY;
7969 	}
7970 
7971 	/* cb_arg holds the data that will be passed to the callback on completion */
7972 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT);
7973 	if (cb_arg == NULL) {
7974 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7975 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7976 		return OCS_HW_RTN_NO_MEMORY;
7977 	}
7978 
7979 	cb_arg->cb = cb;
7980 	cb_arg->arg = ul_arg;
7981 
7982 	if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) {
7983 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg);
7984 	}
7985 
7986 	if (rc != OCS_HW_RTN_SUCCESS) {
7987 		ocs_log_test(hw->os, "SET_NVPARMS failed\n");
7988 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7989 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7990 	}
7991 
7992 	return rc;
7993 }
7994 
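/*
 * A minimal usage sketch for ocs_hw_set_nvparms(), assuming the
 * (status, arg) callback signature implied by ocs_hw_set_nvparms_cb()
 * above; the WWPN/WWNN bytes below are placeholders, not defaults read
 * from any real adapter.
 *
 * @code
 * static void
 * my_nvparms_done(int32_t status, void *arg)
 * {
 * 	// non-zero status means the WRITE_NVPARMS mailbox failed
 * }
 *
 * uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x94, 0x00, 0x01 };
 * uint8_t wwnn[8] = { 0x20, 0x00, 0x00, 0x90, 0xfa, 0x94, 0x00, 0x01 };
 *
 * // hard_alpa = 0 (none), preferred_d_id = 0 (no preference)
 * if (ocs_hw_set_nvparms(hw, my_nvparms_done, wwpn, wwnn, 0, 0, NULL) !=
 *     OCS_HW_RTN_SUCCESS) {
 * 	// not queued; the callback will never be invoked
 * }
 * @endcode
 */
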
7995 /**
7996  * @brief Called to obtain the count for the specified type.
7997  *
7998  * @param hw Hardware context.
7999  * @param io_count_type IO count type (inuse, free, wait_free).
8000  *
8001  * @return Returns the number of IOs on the specified list type.
8002  */
8003 uint32_t
8004 ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type)
8005 {
8006 	ocs_hw_io_t *io = NULL;
8007 	uint32_t count = 0;
8008 
8009 	ocs_lock(&hw->io_lock);
8010 
8011 	switch (io_count_type) {
8012 	case OCS_HW_IO_INUSE_COUNT:
8013 		ocs_list_foreach(&hw->io_inuse, io) {
8014 			count++;
8015 		}
8016 		break;
8017 	case OCS_HW_IO_FREE_COUNT:
8018 		ocs_list_foreach(&hw->io_free, io) {
8019 			count++;
8020 		}
8021 		break;
8022 	case OCS_HW_IO_WAIT_FREE_COUNT:
8023 		ocs_list_foreach(&hw->io_wait_free, io) {
8024 			count++;
8025 		}
8026 		break;
8027 	case OCS_HW_IO_PORT_OWNED_COUNT:
8028 		ocs_list_foreach(&hw->io_port_owned, io) {
8029 			count++;
8030 		}
8031 		break;
8032 	case OCS_HW_IO_N_TOTAL_IO_COUNT:
8033 		count = hw->config.n_io;
8034 		break;
8035 	}
8036 
8037 	ocs_unlock(&hw->io_lock);
8038 
8039 	return count;
8040 }
8041 
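/*
 * A short usage sketch for ocs_hw_io_get_count(). Each call takes and
 * drops io_lock independently, so the values below are individually
 * accurate but only an approximate snapshot relative to each other
 * while IOs are in flight.
 *
 * @code
 * uint32_t inuse = ocs_hw_io_get_count(hw, OCS_HW_IO_INUSE_COUNT);
 * uint32_t nfree = ocs_hw_io_get_count(hw, OCS_HW_IO_FREE_COUNT);
 * uint32_t total = ocs_hw_io_get_count(hw, OCS_HW_IO_N_TOTAL_IO_COUNT);
 *
 * ocs_log_debug(hw->os, "IOs: %u in use, %u free, %u total\n",
 * 	      inuse, nfree, total);
 * @endcode
 */
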
8042 /**
8043  * @brief Called to obtain the count of produced RQEs.
8044  *
8045  * @param hw Hardware context.
8046  *
8047  * @return Returns the number of RQEs produced.
8048  */
8049 uint32_t
8050 ocs_hw_get_rqes_produced_count(ocs_hw_t *hw)
8051 {
8052 	uint32_t count = 0;
8053 	uint32_t i;
8054 	uint32_t j;
8055 
8056 	for (i = 0; i < hw->hw_rq_count; i++) {
8057 		hw_rq_t *rq = hw->hw_rq[i];
8058 		if (rq->rq_tracker != NULL) {
8059 			for (j = 0; j < rq->entry_count; j++) {
8060 				if (rq->rq_tracker[j] != NULL) {
8061 					count++;
8062 				}
8063 			}
8064 		}
8065 	}
8066 
8067 	return count;
8068 }
8069 
8070 typedef struct ocs_hw_set_active_profile_cb_arg_s {
8071 	ocs_set_active_profile_cb_t cb;
8072 	void *arg;
8073 } ocs_hw_set_active_profile_cb_arg_t;
8074 
8075 /**
8076  * @brief Called for the completion of set_active_profile for a
8077  *        user request.
8078  *
8079  * @param hw Hardware context.
8080  * @param status The status from the MQE
8081  * @param mqe Pointer to mailbox command buffer.
8082  * @param arg Pointer to a callback argument.
8083  *
8084  * @return Returns 0 on success, or a non-zero value on failure.
8085  */
8086 static int32_t
8087 ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
8088 {
8089 	ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg;
8090 
8091 	if (cb_arg->cb) {
8092 		cb_arg->cb(status, cb_arg->arg);
8093 	}
8094 
8095 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
8096 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8097 
8098 	return 0;
8099 }
8100 
8101 /**
8102  * @ingroup io
8103  * @brief  Set the currently active profile.
8104  * @par Description
8105  * Issues a SLI4 COMMON_SET_ACTIVE_PROFILE mailbox. When the
8106  * command completes the provided mgmt callback function is
8107  * called.
8108  *
8109  * @param hw Hardware context.
8110  * @param profile_id Profile ID to activate.
8111  * @param cb Callback function to be called when the command completes.
8112  * @param ul_arg An argument that is passed to the callback function.
8113  *
8114  * @return
8115  * - OCS_HW_RTN_SUCCESS on success.
8116  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
8117  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
8118  *   context.
8119  * - OCS_HW_RTN_ERROR on any other error.
8120  */
8121 ocs_hw_rtn_e
8122 ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg)
8123 {
8124 	uint8_t *mbxdata;
8125 	ocs_hw_set_active_profile_cb_arg_t *cb_arg;
8126 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
8127 
8128 	/* Only supported on Skyhawk */
8129 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
8130 		return OCS_HW_RTN_ERROR;
8131 	}
8132 
8133 	/* mbxdata holds the header of the command */
8134 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
8135 	if (mbxdata == NULL) {
8136 		ocs_log_err(hw->os, "failed to malloc mbox\n");
8137 		return OCS_HW_RTN_NO_MEMORY;
8138 	}
8139 
8140 	/* cb_arg holds the data that will be passed to the callback on completion */
8141 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT);
8142 	if (cb_arg == NULL) {
8143 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
8144 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8145 		return OCS_HW_RTN_NO_MEMORY;
8146 	}
8147 
8148 	cb_arg->cb = cb;
8149 	cb_arg->arg = ul_arg;
8150 
8151 	if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) {
8152 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
8153 	}
8154 
8155 	if (rc != OCS_HW_RTN_SUCCESS) {
8156 		ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
8157 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8158 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8159 	}
8160 
8161 	return rc;
8162 }
8163 
8164 /*
8165  * Private functions
8166  */
8167 
8168 /**
8169  * @brief Update the queue hash with the ID and index.
8170  *
8171  * @param hash Pointer to hash table.
8172  * @param id ID that was created.
8173  * @param index The index into the hash object.
8174  */
8175 static void
8176 ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
8177 {
8178 	uint32_t	hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);
8179 
8180 	/*
8181 	 * Since the hash is always bigger than the number of queues, we
8182 	 * never have to worry about an infinite loop.
8183 	 */
8184 	while (hash[hash_index].in_use) {
8185 		hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8186 	}
8187 
8188 	/* not used, claim the entry */
8189 	hash[hash_index].id = id;
8190 	hash[hash_index].in_use = 1;
8191 	hash[hash_index].index = index;
8192 }
8193 
8194 /**
8195  * @brief Find index given queue ID.
8196  *
8197  * @param hash Pointer to hash table.
8198  * @param id ID to find.
8199  *
8200  * @return Returns the index into the HW cq array or -1 if not found.
8201  */
8202 int32_t
8203 ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id)
8204 {
8205 	int32_t	rc = -1;
8206 	int32_t	index = id & (OCS_HW_Q_HASH_SIZE - 1);
8207 
8208 	/*
8209 	 * Since the hash is always bigger than the maximum number of Qs, we
8210 	 * never have to worry about an infinite loop. We will always find an
8211 	 * unused entry.
8212 	 */
8213 	do {
8214 		if (hash[index].in_use &&
8215 		    hash[index].id == id) {
8216 			rc = hash[index].index;
8217 		} else {
8218 			index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8219 		}
8220 	} while (rc == -1 && hash[index].in_use);
8221 
8222 	return rc;
8223 }
8224 
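/*
 * Worked example of the linear probing used by the two routines above,
 * assuming OCS_HW_Q_HASH_SIZE is 8 purely for illustration: queue IDs 3
 * and 11 both hash to slot 3 (id & 7). Adding ID 3 claims slot 3; adding
 * ID 11 finds slot 3 in use and claims slot 4. A find for ID 11 starts
 * at slot 3, sees the ID mismatch, and probes forward to slot 4. Because
 * the table is larger than the number of queues, a lookup for a missing
 * ID always terminates at an unused slot.
 */
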
8225 static int32_t
8226 ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain)
8227 {
8228 	int32_t		rc = OCS_HW_RTN_ERROR;
8229 	uint16_t	fcfi = UINT16_MAX;
8230 
8231 	if ((hw == NULL) || (domain == NULL)) {
8232 		ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8233 				hw, domain);
8234 		return OCS_HW_RTN_ERROR;
8235 	}
8236 
8237 	fcfi = domain->fcf_indicator;
8238 
8239 	if (fcfi < SLI4_MAX_FCFI) {
8240 		uint16_t	fcf_index = UINT16_MAX;
8241 
8242 		ocs_log_debug(hw->os, "adding domain %p @ %#x\n",
8243 				domain, fcfi);
8244 		hw->domains[fcfi] = domain;
8245 
8246 		/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8247 		if (hw->workaround.override_fcfi) {
8248 			if (hw->first_domain_idx < 0) {
8249 				hw->first_domain_idx = fcfi;
8250 			}
8251 		}
8252 
8253 		fcf_index = domain->fcf;
8254 
8255 		if (fcf_index < SLI4_MAX_FCF_INDEX) {
8256 			ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n",
8257 				      fcf_index, fcfi);
8258 			hw->fcf_index_fcfi[fcf_index] = fcfi;
8259 			rc = OCS_HW_RTN_SUCCESS;
8260 		} else {
8261 			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8262 				     fcf_index, SLI4_MAX_FCF_INDEX);
8263 			hw->domains[fcfi] = NULL;
8264 		}
8265 	} else {
8266 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8267 				fcfi, SLI4_MAX_FCFI);
8268 	}
8269 
8270 	return rc;
8271 }
8272 
8273 static int32_t
8274 ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain)
8275 {
8276 	int32_t		rc = OCS_HW_RTN_ERROR;
8277 	uint16_t	fcfi = UINT16_MAX;
8278 
8279 	if ((hw == NULL) || (domain == NULL)) {
8280 		ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8281 				hw, domain);
8282 		return OCS_HW_RTN_ERROR;
8283 	}
8284 
8285 	fcfi = domain->fcf_indicator;
8286 
8287 	if (fcfi < SLI4_MAX_FCFI) {
8288 		uint16_t	fcf_index = UINT16_MAX;
8289 
8290 		ocs_log_debug(hw->os, "deleting domain %p @ %#x\n",
8291 				domain, fcfi);
8292 
8293 		if (domain != hw->domains[fcfi]) {
8294 			ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n",
8295 				     domain, hw->domains[fcfi]);
8296 			return OCS_HW_RTN_ERROR;
8297 		}
8298 
8299 		hw->domains[fcfi] = NULL;
8300 
8301 		/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8302 		if (hw->workaround.override_fcfi) {
8303 			if (hw->first_domain_idx == fcfi) {
8304 				hw->first_domain_idx = -1;
8305 			}
8306 		}
8307 
8308 		fcf_index = domain->fcf;
8309 
8310 		if (fcf_index < SLI4_MAX_FCF_INDEX) {
8311 			if (hw->fcf_index_fcfi[fcf_index] == fcfi) {
8312 				hw->fcf_index_fcfi[fcf_index] = 0;
8313 				rc = OCS_HW_RTN_SUCCESS;
8314 			} else {
8315 				ocs_log_test(hw->os, "indexed FCFI %#x doesn't match provided %#x @ %d\n",
8316 					     hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index);
8317 			}
8318 		} else {
8319 			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8320 				     fcf_index, SLI4_MAX_FCF_INDEX);
8321 		}
8322 	} else {
8323 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8324 				fcfi, SLI4_MAX_FCFI);
8325 	}
8326 
8327 	return rc;
8328 }
8329 
8330 ocs_domain_t *
8331 ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi)
8332 {
8333 
8334 	if (hw == NULL) {
8335 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8336 		return NULL;
8337 	}
8338 
8339 	if (fcfi < SLI4_MAX_FCFI) {
8340 		return hw->domains[fcfi];
8341 	} else {
8342 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8343 				fcfi, SLI4_MAX_FCFI);
8344 		return NULL;
8345 	}
8346 }
8347 
8348 static ocs_domain_t *
8349 ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index)
8350 {
8351 
8352 	if (hw == NULL) {
8353 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8354 		return NULL;
8355 	}
8356 
8357 	if (fcf_index < SLI4_MAX_FCF_INDEX) {
8358 		return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]);
8359 	} else {
8360 		ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8361 			     fcf_index, SLI4_MAX_FCF_INDEX);
8362 		return NULL;
8363 	}
8364 }
8365 
8366 /**
8367  * @brief Quarantine an IO by taking a reference count and adding it to the
8368  *        quarantine list. When the IO is popped from the list, the
8369  *        count is released and the IO MAY be freed, depending on whether
8370  *        it is still referenced.
8371  *
8372  *        @n @b Note: BZ 160124 - If this is a target write or an initiator read using
8373  *        DIF, then we must add the XRI to a quarantine list until we receive
8374  *        4 more completions of this same type.
8375  *
8376  * @param hw Hardware context.
8377  * @param wq Pointer to the WQ associated with the IO object to quarantine.
8378  * @param io Pointer to the io object to quarantine.
8379  */
8380 static void
8381 ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io)
8382 {
8383 	ocs_quarantine_info_t *q_info = &wq->quarantine_info;
8384 	uint32_t	index;
8385 	ocs_hw_io_t	*free_io = NULL;
8386 
8387 	/* return if the QX bit was clear */
8388 	if (!io->quarantine) {
8389 		return;
8390 	}
8391 
8392 	/* increment the IO refcount to prevent it from being freed before the quarantine is over */
8393 	if (ocs_ref_get_unless_zero(&io->ref) == 0) {
8394 		/* command no longer active */
8395 		ocs_log_debug(hw ? hw->os : NULL,
8396 			      "io not active xri=0x%x tag=0x%x\n",
8397 			      io->indicator, io->reqtag);
8398 		return;
8399 	}
8400 
8401 	sli_queue_lock(wq->queue);
8402 		index = q_info->quarantine_index;
8403 		free_io = q_info->quarantine_ios[index];
8404 		q_info->quarantine_ios[index] = io;
8405 		q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH;
8406 	sli_queue_unlock(wq->queue);
8407 
8408 	if (free_io != NULL) {
8409 		ocs_ref_put(&free_io->ref); /* ocs_ref_get(): same function */
8410 	}
8411 }
8412 
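/*
 * Worked example of the quarantine ring above, assuming an
 * OCS_HW_QUARANTINE_QUEUE_DEPTH of 4 to match the "4 more completions"
 * note: completions A, B, C and D land in slots 0-3, after which
 * quarantine_index wraps to 0. Completion E then displaces A from slot
 * 0, and only at that point is A's reference dropped; each quarantined
 * XRI is therefore held across four subsequent completions of the same
 * type.
 */
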
8413 /**
8414  * @brief Process entries on the given completion queue.
8415  *
8416  * @param hw Hardware context.
8417  * @param cq Pointer to the HW completion queue object.
8418  *
8419  * @return None.
8420  */
8421 void
8422 ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq)
8423 {
8424 	uint8_t		cqe[sizeof(sli4_mcqe_t)];
8425 	uint16_t	rid = UINT16_MAX;
8426 	sli4_qentry_e	ctype;		/* completion type */
8427 	int32_t		status;
8428 	uint32_t	n_processed = 0;
8429 	time_t		tstart;
8430 	time_t		telapsed;
8431 
8432 	tstart = ocs_msectime();
8433 
8434 	while (!sli_queue_read(&hw->sli, cq->queue, cqe)) {
8435 		status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
8436 		/*
8437 		 * The sign of status is significant. If status is:
8438 		 * == 0 : call completed correctly and the CQE indicated success
8439 		 *  > 0 : call completed correctly and the CQE indicated an error
8440 		 *  < 0 : call failed and no information is available about the CQE
8441 		 */
8442 		if (status < 0) {
8443 			if (status == -2) {
8444 				/* Notification that an entry was consumed, but not completed */
8445 				continue;
8446 			}
8447 
8448 			break;
8449 		}
8450 
8451 		switch (ctype) {
8452 		case SLI_QENTRY_ASYNC:
8453 			CPUTRACE("async");
8454 			sli_cqe_async(&hw->sli, cqe);
8455 			break;
8456 		case SLI_QENTRY_MQ:
8457 			/*
8458 			 * Process MQ entry. Note there is no way to determine
8459 			 * the MQ_ID from the completion entry.
8460 			 */
8461 			CPUTRACE("mq");
8462 			ocs_hw_mq_process(hw, status, hw->mq);
8463 			break;
8464 		case SLI_QENTRY_OPT_WRITE_CMD:
8465 			ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe);
8466 			break;
8467 		case SLI_QENTRY_OPT_WRITE_DATA:
8468 			ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe);
8469 			break;
8470 		case SLI_QENTRY_WQ:
8471 			CPUTRACE("wq");
8472 			ocs_hw_wq_process(hw, cq, cqe, status, rid);
8473 			break;
8474 		case SLI_QENTRY_WQ_RELEASE: {
8475 			uint32_t wq_id = rid;
8476 			int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id);
8477 
8478 			if (unlikely(index < 0)) {
8479 				ocs_log_err(hw->os, "unknown idx=%#x rid=%#x\n",
8480 					    index, rid);
8481 				break;
8482 			}
8483 
8484 			hw_wq_t *wq = hw->hw_wq[index];
8485 
8486 			/* Submit any HW IOs that are on the WQ pending list */
8487 			hw_wq_submit_pending(wq, wq->wqec_set_count);
8488 
8489 			break;
8490 		}
8491 
8492 		case SLI_QENTRY_RQ:
8493 			CPUTRACE("rq");
8494 			ocs_hw_rqpair_process_rq(hw, cq, cqe);
8495 			break;
8496 		case SLI_QENTRY_XABT: {
8497 			CPUTRACE("xabt");
8498 			ocs_hw_xabt_process(hw, cq, cqe, rid);
8499 			break;
8500 		}
8501 		default:
8502 			ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid);
8503 			break;
8504 		}
8505 
8506 		n_processed++;
8507 		if (n_processed == cq->queue->proc_limit) {
8508 			break;
8509 		}
8510 
8511 		if (cq->queue->n_posted >= (cq->queue->posted_limit)) {
8512 			sli_queue_arm(&hw->sli, cq->queue, FALSE);
8513 		}
8514 	}
8515 
8516 	sli_queue_arm(&hw->sli, cq->queue, TRUE);
8517 
8518 	if (n_processed > cq->queue->max_num_processed) {
8519 		cq->queue->max_num_processed = n_processed;
8520 	}
8521 	telapsed = ocs_msectime() - tstart;
8522 	if (telapsed > cq->queue->max_process_time) {
8523 		cq->queue->max_process_time = telapsed;
8524 	}
8525 }
8526 
8527 /**
8528  * @brief Process WQ completion queue entries.
8529  *
8530  * @param hw Hardware context.
8531  * @param cq Pointer to the HW completion queue object.
8532  * @param cqe Pointer to WQ completion queue entry.
8533  * @param status Completion status.
8534  * @param rid Resource ID (IO tag).
8535  *
8536  * @return none
8537  */
8538 void
8539 ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid)
8540 {
8541 	hw_wq_callback_t *wqcb;
8542 
8543 	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id,
8544 			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
8545 
8546 	if (rid == OCS_HW_REQUE_XRI_REGTAG) {
8547 		if (status) {
8548 			ocs_log_err(hw->os, "reque xri failed, status = %d\n", status);
8549 		}
8550 		return;
8551 	}
8552 
8553 	wqcb = ocs_hw_reqtag_get_instance(hw, rid);
8554 	if (wqcb == NULL) {
8555 		ocs_log_err(hw->os, "invalid request tag: x%x\n", rid);
8556 		return;
8557 	}
8558 
8559 	if (wqcb->callback == NULL) {
8560 		ocs_log_err(hw->os, "wqcb callback is NULL\n");
8561 		return;
8562 	}
8563 
8564 	(*wqcb->callback)(wqcb->arg, cqe, status);
8565 }
8566 
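/*
 * The dispatch above pairs with registration through
 * ocs_hw_reqtag_alloc(), as ocs_hw_setup_io() does for each IO later in
 * this file. A sketch of the pattern, where my_wqe_done and my_arg are
 * hypothetical names:
 *
 * @code
 * static void
 * my_wqe_done(void *arg, uint8_t *cqe, int32_t status)
 * {
 * 	// runs from ocs_hw_wq_process() when a WCQE carries the
 * 	// request tag assigned below
 * }
 *
 * void *my_arg = NULL;	// hypothetical per-request context
 * hw_wq_callback_t *wqcb = ocs_hw_reqtag_alloc(hw, my_wqe_done, my_arg);
 * if (wqcb != NULL) {
 * 	// place wqcb->instance_index in the WQE request_tag field so
 * 	// the completion routes back to my_wqe_done
 * }
 * @endcode
 */
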
8567 /**
8568  * @brief Process WQ completions for IO requests
8569  *
8570  * @param arg Generic callback argument
8571  * @param cqe Pointer to completion queue entry
8572  * @param status Completion status
8573  *
8574  * @par Description
8575  * @n @b Note:  Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized
8576  * in ocs_hw_setup_io(), and doesn't need to be returned to the hw->wq_reqtag_pool.
8577  *
8578  * @return None.
8579  */
8580 static void
8581 ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status)
8582 {
8583 	ocs_hw_io_t *io = arg;
8584 	ocs_hw_t *hw = io->hw;
8585 	sli4_fc_wcqe_t *wcqe = (void *)cqe;
8586 	uint32_t	len = 0;
8587 	uint32_t ext = 0;
8588 	uint8_t out_of_order_axr_cmd = 0;
8589 	uint8_t out_of_order_axr_data = 0;
8590 	uint8_t lock_taken = 0;
8591 #if defined(OCS_DISC_SPIN_DELAY)
8592 	uint32_t delay = 0;
8593 	char prop_buf[32];
8594 #endif
8595 
8596 	/*
8597 	 * For the primary IO, this will also be used for the
8598 	 * response. So it is important to only set/clear this
8599 	 * flag on the first data phase of the IO because
8600 	 * subsequent phases will be done on the secondary XRI.
8601 	 */
8602 	if (io->quarantine && io->quarantine_first_phase) {
8603 		io->quarantine = (wcqe->qx == 1);
8604 		ocs_hw_io_quarantine(hw, io->wq, io);
8605 	}
8606 	io->quarantine_first_phase = FALSE;
8607 
8608 	/* BZ 161832 - free secondary HW IO */
8609 	if (io->sec_hio != NULL &&
8610 	    io->sec_hio->quarantine) {
8611 		/*
8612 		 * If the quarantine flag is set on the
8613 		 * IO, then set it on the secondary IO
8614 		 * based on the quarantine XRI (QX) bit
8615 		 * sent by the FW.
8616 		 */
8617 		io->sec_hio->quarantine = (wcqe->qx == 1);
8618 		/* use the primary io->wq because it is not set on the secondary IO. */
8619 		ocs_hw_io_quarantine(hw, io->wq, io->sec_hio);
8620 	}
8621 
8622 	ocs_hw_remove_io_timed_wqe(hw, io);
8623 
8624 	/* clear xbusy flag if WCQE[XB] is clear */
8625 	if (io->xbusy && wcqe->xb == 0) {
8626 		io->xbusy = FALSE;
8627 	}
8628 
8629 	/* get extended CQE status */
8630 	switch (io->type) {
8631 	case OCS_HW_BLS_ACC:
8632 	case OCS_HW_BLS_ACC_SID:
8633 		break;
8634 	case OCS_HW_ELS_REQ:
8635 		sli_fc_els_did(&hw->sli, cqe, &ext);
8636 		len = sli_fc_response_length(&hw->sli, cqe);
8637 		break;
8638 	case OCS_HW_ELS_RSP:
8639 	case OCS_HW_ELS_RSP_SID:
8640 	case OCS_HW_FC_CT_RSP:
8641 		break;
8642 	case OCS_HW_FC_CT:
8643 		len = sli_fc_response_length(&hw->sli, cqe);
8644 		break;
8645 	case OCS_HW_IO_TARGET_WRITE:
8646 		len = sli_fc_io_length(&hw->sli, cqe);
8647 #if defined(OCS_DISC_SPIN_DELAY)
8648 		if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
8649 			delay = ocs_strtoul(prop_buf, 0, 0);
8650 			ocs_udelay(delay);
8651 		}
8652 #endif
8653 		break;
8654 	case OCS_HW_IO_TARGET_READ:
8655 		len = sli_fc_io_length(&hw->sli, cqe);
8656 		/*
8657 		 * if_type == 2 seems to return 0 "total length placed" on
8658 		 * FCP_TSEND64_WQE completions. If this appears to happen,
8659 		 * use the CTIO data transfer length instead.
8660 		 */
8661 		if (hw->workaround.retain_tsend_io_length && !len && !status) {
8662 			len = io->length;
8663 		}
8664 
8665 		break;
8666 	case OCS_HW_IO_TARGET_RSP:
8667 	if (io->is_port_owned) {
8668 			ocs_lock(&io->axr_lock);
8669 			lock_taken = 1;
8670 			if (io->axr_buf->call_axr_cmd) {
8671 				out_of_order_axr_cmd = 1;
8672 			}
8673 			if (io->axr_buf->call_axr_data) {
8674 				out_of_order_axr_data = 1;
8675 			}
8676 		}
8677 		break;
8678 	case OCS_HW_IO_INITIATOR_READ:
8679 		len = sli_fc_io_length(&hw->sli, cqe);
8680 		break;
8681 	case OCS_HW_IO_INITIATOR_WRITE:
8682 		len = sli_fc_io_length(&hw->sli, cqe);
8683 		break;
8684 	case OCS_HW_IO_INITIATOR_NODATA:
8685 		break;
8686 	case OCS_HW_IO_DNRX_REQUEUE:
8687 		/* release the count for re-posting the buffer */
8688 		//ocs_hw_io_free(hw, io);
8689 		break;
8690 	default:
8691 		ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n",
8692 			     io->type, io->indicator);
8693 		break;
8694 	}
8695 	if (status) {
8696 		ext = sli_fc_ext_status(&hw->sli, cqe);
8697 		/* Emulate IAAB=0 for initiator WQEs only; i.e. automatically
8698 		 * abort exchange if an error occurred and exchange is still busy.
8699 		 */
8700 		if (hw->config.i_only_aab &&
8701 		    (ocs_hw_iotype_is_originator(io->type)) &&
8702 		    (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) {
8703 			ocs_hw_rtn_e rc;
8704 
8705 			ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
8706 				      io->indicator, io->reqtag);
8707 			/*
8708 			 * Because the initiator will not issue another IO phase, then it is OK to issue the
8709 			 * callback on the abort completion, but for consistency with the target, wait for the
8710 			 * XRI_ABORTED CQE to issue the IO callback.
8711 			 */
8712 			rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL);
8713 
8714 			if (rc == OCS_HW_RTN_SUCCESS) {
8715 				/* latch status to return after abort is complete */
8716 				io->status_saved = 1;
8717 				io->saved_status = status;
8718 				io->saved_ext = ext;
8719 				io->saved_len = len;
8720 				goto exit_ocs_hw_wq_process_io;
8721 			} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8722 				/*
8723 				 * Already being aborted by someone else (ABTS
8724 				 * perhaps). Just fall through and return original
8725 				 * error.
8726 				 */
8727 				ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8728 					      io->indicator, io->reqtag);
8729 
8730 			} else {
8731 				/* Failed to abort for some other reason, log error */
8732 				ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8733 					     io->indicator, io->reqtag, rc);
8734 			}
8735 		}
8736 
8737 		/*
8738 		 * If we're not an originator IO, and XB is set, then issue abort for the IO from within the HW
8739 		 */
8740 		if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) {
8741 			ocs_hw_rtn_e rc;
8742 
8743 			ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag);
8744 
8745 			/*
8746 			 * Because targets may send a response when the IO completes using the same XRI, we must
8747 			 * wait for the XRI_ABORTED CQE to issue the IO callback
8748 			 */
8749 			rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
8750 			if (rc == OCS_HW_RTN_SUCCESS) {
8751 				/* latch status to return after abort is complete */
8752 				io->status_saved = 1;
8753 				io->saved_status = status;
8754 				io->saved_ext = ext;
8755 				io->saved_len = len;
8756 				goto exit_ocs_hw_wq_process_io;
8757 			} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8758 				/*
8759 				 * Already being aborted by someone else (ABTS
8760 				 * perhaps). Just fall through and return original
8761 				 * error.
8762 				 */
8763 				ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8764 					      io->indicator, io->reqtag);
8765 
8766 			} else {
8767 				/* Failed to abort for some other reason, log error */
8768 				ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8769 					     io->indicator, io->reqtag, rc);
8770 			}
8771 		}
8772 	}
8773 	/* BZ 161832 - free secondary HW IO */
8774 	if (io->sec_hio != NULL) {
8775 		ocs_hw_io_free(hw, io->sec_hio);
8776 		io->sec_hio = NULL;
8777 	}
8778 
8779 	if (io->done != NULL) {
8780 		ocs_hw_done_t  done = io->done;
8781 		void		*arg = io->arg;
8782 
8783 		io->done = NULL;
8784 
8785 		if (io->status_saved) {
8786 			/* use latched status if exists */
8787 			status = io->saved_status;
8788 			len = io->saved_len;
8789 			ext = io->saved_ext;
8790 			io->status_saved = 0;
8791 		}
8792 
8793 		/* Restore default SGL */
8794 		ocs_hw_io_restore_sgl(hw, io);
8795 		done(io, io->rnode, len, status, ext, arg);
8796 	}
8797 
8798 	if (out_of_order_axr_cmd) {
8799 		/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
8800 		if (hw->config.bounce) {
8801 			fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt;
8802 			uint32_t s_id = fc_be24toh(hdr->s_id);
8803 			uint32_t d_id = fc_be24toh(hdr->d_id);
8804 			uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
8805 			if (hw->callback.bounce != NULL) {
8806 				(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id);
8807 			}
8808 		} else {
8809 			hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq);
8810 		}
8811 
8812 		if (out_of_order_axr_data) {
8813 			/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
8814 			if (hw->config.bounce) {
8815 				fc_header_t *hdr = io->axr_buf->seq.header->dma.virt;
8816 				uint32_t s_id = fc_be24toh(hdr->s_id);
8817 				uint32_t d_id = fc_be24toh(hdr->d_id);
8818 				uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
8819 				if (hw->callback.bounce != NULL) {
8820 					(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id);
8821 				}
8822 			} else {
8823 				hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq);
8824 			}
8825 		}
8826 	}
8827 
8828 exit_ocs_hw_wq_process_io:
8829 	if (lock_taken) {
8830 		ocs_unlock(&io->axr_lock);
8831 	}
8832 }
8833 
8834 /**
8835  * @brief Process WQ completions for abort requests.
8836  *
8837  * @param arg Generic callback argument.
8838  * @param cqe Pointer to completion queue entry.
8839  * @param status Completion status.
8840  *
8841  * @return None.
8842  */
8843 static void
8844 ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
8845 {
8846 	ocs_hw_io_t *io = arg;
8847 	ocs_hw_t *hw = io->hw;
8848 	uint32_t ext = 0;
8849 	uint32_t len = 0;
8850 	hw_wq_callback_t *wqcb;
8851 
8852 	/*
8853 	 * For IOs that were aborted internally, we may need to issue the callback here depending
8854 	 * on whether an XRI_ABORTED CQE is expected or not. If the status is Local Reject/No XRI, then
8855 	 * issue the callback now.
8856 	 */
8857 	ext = sli_fc_ext_status(&hw->sli, cqe);
8858 	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
8859 	    ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
8860 		io->done != NULL) {
8861 		ocs_hw_done_t  done = io->done;
8862 		void		*arg = io->arg;
8863 
8864 		io->done = NULL;
8865 
8866 		/*
8867 		 * Use latched status as this is always saved for an internal abort
8868 		 *
8869 	 * Note: We won't have both a done and abort_done function, so don't worry about
8870 		 *       clobbering the len, status and ext fields.
8871 		 */
8872 		status = io->saved_status;
8873 		len = io->saved_len;
8874 		ext = io->saved_ext;
8875 		io->status_saved = 0;
8876 		done(io, io->rnode, len, status, ext, arg);
8877 	}
8878 
8879 	if (io->abort_done != NULL) {
8880 		ocs_hw_done_t  done = io->abort_done;
8881 		void		*arg = io->abort_arg;
8882 
8883 		io->abort_done = NULL;
8884 
8885 		done(io, io->rnode, len, status, ext, arg);
8886 	}
8887 	ocs_lock(&hw->io_abort_lock);
8888 		/* clear abort bit to indicate abort is complete */
8889 		io->abort_in_progress = 0;
8890 	ocs_unlock(&hw->io_abort_lock);
8891 
8892 	/* Free the WQ callback */
8893 	ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
8894 	wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
8895 	ocs_hw_reqtag_free(hw, wqcb);
8896 
8897 	/*
8898 	 * Call ocs_hw_io_free() because this releases the WQ reservation as
8899 	 * well as doing the refcount put. Don't duplicate the code here.
8900 	 */
8901 	(void)ocs_hw_io_free(hw, io);
8902 }
8903 
8904 /**
8905  * @brief Process XABT completions
8906  *
8907  * @param hw Hardware context.
8908  * @param cq Pointer to the HW completion queue object.
8909  * @param cqe Pointer to WQ completion queue entry.
8910  * @param rid Resource ID (IO tag).
8911  *
8912  *
8913  * @return None.
8914  */
8915 void
8916 ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid)
8917 {
8918 	/* search IOs wait free list */
8919 	ocs_hw_io_t *io = NULL;
8920 
8921 	io = ocs_hw_io_lookup(hw, rid);
8922 
8923 	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
8924 			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
8925 	if (io == NULL) {
8926 		/* IO lookup failure should never happen */
8927 		ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid);
8928 		return;
8929 	}
8930 
8931 	if (!io->xbusy) {
8932 		ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
8933 	} else {
8934 		/* mark IO as no longer busy */
8935 		io->xbusy = FALSE;
8936 	}
8937 
8938 	if (io->is_port_owned) {
8939 		ocs_lock(&hw->io_lock);
8940 		/* Take a reference so the callback below does not free the io before the requeue */
8941 		ocs_ref_get(&io->ref);
8942 		ocs_unlock(&hw->io_lock);
8943 	}
8944 
8945 	/* For IOs that were aborted internally, we need to issue any pending callback here. */
8946 	if (io->done != NULL) {
8947 		ocs_hw_done_t  done = io->done;
8948 		void		*arg = io->arg;
8949 
8950 		/* Use latched status as this is always saved for an internal abort */
8951 		int32_t status = io->saved_status;
8952 		uint32_t len = io->saved_len;
8953 		uint32_t ext = io->saved_ext;
8954 
8955 		io->done = NULL;
8956 		io->status_saved = 0;
8957 
8958 		done(io, io->rnode, len, status, ext, arg);
8959 	}
8960 
8961 	/* Check to see if this is a port owned XRI */
8962 	if (io->is_port_owned) {
8963 		ocs_lock(&hw->io_lock);
8964 		ocs_hw_reque_xri(hw, io);
8965 		ocs_unlock(&hw->io_lock);
8966 		/* Not handling reque xri completion, free io */
8967 		ocs_hw_io_free(hw, io);
8968 		return;
8969 	}
8970 
8971 	ocs_lock(&hw->io_lock);
8972 		if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) {
8973 			/* if on wait_free list, caller has already freed IO;
8974 			 * remove from wait_free list and add to free list.
8975 			 * if on in-use list, already marked as no longer busy;
8976 			 * just leave there and wait for caller to free.
8977 			 */
8978 			if (io->state == OCS_HW_IO_STATE_WAIT_FREE) {
8979 				io->state = OCS_HW_IO_STATE_FREE;
8980 				ocs_list_remove(&hw->io_wait_free, io);
8981 				ocs_hw_io_free_move_correct_list(hw, io);
8982 			}
8983 		}
8984 	ocs_unlock(&hw->io_lock);
8985 }
8986 
8987 /**
8988  * @brief Adjust the number of WQs and CQs within the HW.
8989  *
8990  * @par Description
8991  * Calculates the number of WQs and associated CQs needed in the HW based on
8992  * the number of IOs. Calculates the starting CQ index for each WQ, RQ and
8993  * MQ.
8994  *
8995  * @param hw Hardware context allocated by the caller.
8996  */
8997 static void
8998 ocs_hw_adjust_wqs(ocs_hw_t *hw)
8999 {
9000 	uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ);
9001 	uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ];
9002 	uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ];
9003 
9004 	/*
9005 	 * possibly adjust the size of the WQs so that the CQ is twice as
9006 	 * big as the WQ to allow for 2 completions per IO. This allows us to
9007 	 * handle multi-phase as well as aborts.
9008 	 */
9009 	if (max_cq_entries < max_wq_entries * 2) {
9010 		max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2;
9011 	}
9012 
9013 	/*
9014 	 * Calculate the number of WQs to use base on the number of IOs.
9015 	 *
9016 	 * Note: We need to reserve room for aborts which must be sent down
9017 	 *       the same WQ as the IO. So we allocate enough WQ space to
9018 	 *       handle 2 times the number of IOs. Half of the space will be
9019 	 *       used for normal IOs and the other half is reserved for aborts.
9020 	 */
9021 	hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries;
9022 
9023 	/*
9024 	 * For performance reasons, it is best to use a minimum of 4 WQs
9025 	 * for BE3 and Skyhawk.
9026 	 */
9027 	if (hw->config.n_wq < 4 &&
9028 	    SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
9029 		hw->config.n_wq = 4;
9030 	}
9031 
9032 	/*
9033 	 * For dual-chute support, we need to have at least one WQ per chute.
9034 	 */
9035 	if (hw->config.n_wq < 2 &&
9036 	    ocs_hw_get_num_chutes(hw) > 1) {
9037 		hw->config.n_wq = 2;
9038 	}
9039 
9040 	/* make sure we haven't exceeded the max supported in the HW */
9041 	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
9042 		hw->config.n_wq = OCS_HW_MAX_NUM_WQ;
9043 	}
9044 
9045 	/* make sure we haven't exceeded the chip maximum */
9046 	if (hw->config.n_wq > max_wq_num) {
9047 		hw->config.n_wq = max_wq_num;
9048 	}
9049 
9050 	/*
9051 	 * Using Queue Topology string, we divide by number of chutes
9052 	 */
9053 	hw->config.n_wq /= ocs_hw_get_num_chutes(hw);
9054 }
9055 
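/*
 * Worked example of the sizing above, assuming n_io = 1024 and
 * max_wq_entries = max_cq_entries = 2048: the WQ size is first halved to
 * 1024 so the CQ can absorb two completions per WQE. Then
 * n_wq = ((1024 * 2) + 1023) / 1024 = 2, raised to 4 on BE3/Skyhawk,
 * capped by OCS_HW_MAX_NUM_WQ and the chip maximum, and finally divided
 * by the number of chutes from the queue topology string.
 */
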
9056 static int32_t
9057 ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size)
9058 {
9059 	ocs_command_ctx_t *ctx = NULL;
9060 
9061 	ocs_lock(&hw->cmd_lock);
9062 		if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) {
9063 			ocs_log_err(hw->os, "XXX no command context?!?\n");
9064 			ocs_unlock(&hw->cmd_lock);
9065 			return -1;
9066 		}
9067 
9068 		hw->cmd_head_count--;
9069 
9070 		/* Post any pending requests */
9071 		ocs_hw_cmd_submit_pending(hw);
9072 
9073 	ocs_unlock(&hw->cmd_lock);
9074 
9075 	if (ctx->cb) {
9076 		if (ctx->buf) {
9077 			ocs_memcpy(ctx->buf, mqe, size);
9078 		}
9079 		ctx->cb(hw, status, ctx->buf, ctx->arg);
9080 	}
9081 
9082 	ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t));
9083 	ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t));
9084 
9085 	return 0;
9086 }
9087 
9088 /**
9089  * @brief Process entries on the given mailbox queue.
9090  *
9091  * @param hw Hardware context.
9092  * @param status CQE status.
9093  * @param mq Pointer to the mailbox queue object.
9094  *
9095  * @return Returns 0 on success, or a non-zero value on failure.
9096  */
9097 static int32_t
9098 ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq)
9099 {
9100 	uint8_t		mqe[SLI4_BMBX_SIZE];
9101 
9102 	if (!sli_queue_read(&hw->sli, mq, mqe)) {
9103 		ocs_hw_command_process(hw, status, mqe, mq->size);
9104 	}
9105 
9106 	return 0;
9107 }
9108 
9109 /**
9110  * @brief Read a FCF table entry.
9111  *
9112  * @param hw Hardware context.
9113  * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first
9114  * read and the next_index field from the FCOE_READ_FCF_TABLE command
9115  * for subsequent reads.
9116  *
9117  * @return Returns 0 on success, or a non-zero value on failure.
9118  */
9119 static ocs_hw_rtn_e
9120 ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index)
9121 {
9122 	uint8_t		*buf = NULL;
9123 	int32_t		rc = OCS_HW_RTN_ERROR;
9124 
9125 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9126 	if (!buf) {
9127 		ocs_log_err(hw->os, "no buffer for command\n");
9128 		return OCS_HW_RTN_NO_MEMORY;
9129 	}
9130 
9131 	if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem,
9132 			index)) {
9133 		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem);
9134 	}
9135 
9136 	if (rc != OCS_HW_RTN_SUCCESS) {
9137 		ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n");
9138 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9139 	}
9140 
9141 	return rc;
9142 }
9143 
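/*
 * The FCF table is consumed as a chain: a sketch of priming the walk,
 * with the continuation driven from ocs_hw_cb_read_fcf() below.
 *
 * @code
 * // The callback re-issues the command with read_fcf->next_index
 * // until it equals SLI4_FCOE_FCF_TABLE_LAST.
 * if (ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST) != OCS_HW_RTN_SUCCESS) {
 * 	// walk never started; no domain events will be generated
 * }
 * @endcode
 */
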
9144 /**
9145  * @brief Callback function for the FCOE_READ_FCF_TABLE command.
9146  *
9147  * @par Description
9148  * Note that the caller has allocated:
9149  *  - DMA memory to hold the table contents
9150  *  - DMA memory structure
9151  *  - Command/results buffer
9152  *  .
9153  * Each of these must be freed here.
9154  *
9155  * @param hw Hardware context.
9156  * @param status Hardware status.
9157  * @param mqe Pointer to the mailbox command/results buffer.
9158  * @param arg Pointer to the DMA memory structure.
9159  *
9160  * @return Returns 0 on success, or a non-zero value on failure.
9161  */
9162 static int32_t
9163 ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9164 {
9165 	ocs_dma_t	*dma = arg;
9166 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9167 
9168 	if (status || hdr->status) {
9169 		ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n",
9170 				status, hdr->status);
9171 	} else if (dma->virt) {
9172 		sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt;
9173 
9174 		/* if FC or FCOE and FCF entry valid, process it */
9175 		if (read_fcf->fcf_entry.fc ||
9176 				(read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) {
9177 			if (hw->callback.domain != NULL) {
9178 				ocs_domain_record_t drec = {0};
9179 
9180 				if (read_fcf->fcf_entry.fc) {
9181 					/*
9182 					 * This is a pseudo FCF entry. Create a domain
9183 					 * record based on the read topology information
9184 					 */
9185 					drec.speed = hw->link.speed;
9186 					drec.fc_id = hw->link.fc_id;
9187 					drec.is_fc = TRUE;
9188 					if (SLI_LINK_TOPO_LOOP == hw->link.topology) {
9189 						drec.is_loop = TRUE;
9190 						ocs_memcpy(drec.map.loop, hw->link.loop_map,
9191 							   sizeof(drec.map.loop));
9192 					} else if (SLI_LINK_TOPO_NPORT == hw->link.topology) {
9193 						drec.is_nport = TRUE;
9194 					}
9195 				} else {
9196 					drec.index = read_fcf->fcf_entry.fcf_index;
9197 					drec.priority = read_fcf->fcf_entry.fip_priority;
9198 
9199 					/* copy address, wwn and vlan_bitmap */
9200 					ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address,
9201 						   sizeof(drec.address));
9202 					ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id,
9203 						   sizeof(drec.wwn));
9204 					ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap,
9205 						   sizeof(drec.map.vlan));
9206 
9207 					drec.is_ethernet = TRUE;
9208 					drec.is_nport = TRUE;
9209 				}
9210 
9211 				hw->callback.domain(hw->args.domain,
9212 						OCS_HW_DOMAIN_FOUND,
9213 						&drec);
9214 			}
9215 		} else {
9216 			/* if FCOE and FCF is not valid, ignore it */
9217 			ocs_log_test(hw->os, "ignore invalid FCF entry\n");
9218 		}
9219 
9220 		if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) {
9221 			ocs_hw_read_fcf(hw, read_fcf->next_index);
9222 		}
9223 	}
9224 
9225 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9226 	//ocs_dma_free(hw->os, dma);
9227 	//ocs_free(hw->os, dma, sizeof(ocs_dma_t));
9228 
9229 	return 0;
9230 }
9231 
9232 /**
9233  * @brief Callback function for the SLI link events.
9234  *
9235  * @par Description
9236  * This function allocates memory which must be freed in its callback.
9237  *
9238  * @param ctx Hardware context pointer (that is, ocs_hw_t *).
9239  * @param e Event structure pointer (that is, sli4_link_event_t *).
9240  *
9241  * @return Returns 0 on success, or a non-zero value on failure.
9242  */
9243 static int32_t
9244 ocs_hw_cb_link(void *ctx, void *e)
9245 {
9246 	ocs_hw_t	*hw = ctx;
9247 	sli4_link_event_t *event = e;
9248 	ocs_domain_t	*d = NULL;
9249 	uint32_t	i = 0;
9250 	int32_t		rc = OCS_HW_RTN_ERROR;
9251 	ocs_t 		*ocs = hw->os;
9252 
9253 	ocs_hw_link_event_init(hw);
9254 
9255 	switch (event->status) {
9256 	case SLI_LINK_STATUS_UP:
9257 
9258 		hw->link = *event;
9259 
9260 		if (SLI_LINK_TOPO_NPORT == event->topology) {
9261 			device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed);
9262 			ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9263 		} else if (SLI_LINK_TOPO_LOOP == event->topology) {
9264 			uint8_t	*buf = NULL;
9265 			device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed);
9266 
9267 			buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9268 			if (!buf) {
9269 				ocs_log_err(hw->os, "no buffer for command\n");
9270 				break;
9271 			}
9272 
9273 			if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) {
9274 				rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL);
9275 			}
9276 
9277 			if (rc != OCS_HW_RTN_SUCCESS) {
9278 				ocs_log_test(hw->os, "READ_TOPOLOGY failed\n");
9279 				ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9280 			}
9281 		} else {
9282 			device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n",
9283 					event->topology, event->speed);
9284 		}
9285 		break;
9286 	case SLI_LINK_STATUS_DOWN:
9287 		device_printf(ocs->dev, "Link Down\n");
9288 
9289 		hw->link.status = event->status;
9290 
9291 		for (i = 0; i < SLI4_MAX_FCFI; i++) {
9292 			d = hw->domains[i];
9293 			if (d != NULL &&
9294 			    hw->callback.domain != NULL) {
9295 				hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d);
9296 			}
9297 		}
9298 		break;
9299 	default:
9300 		ocs_log_test(hw->os, "unhandled link status %#x\n", event->status);
9301 		break;
9302 	}
9303 
9304 	return 0;
9305 }
9306 
9307 static int32_t
9308 ocs_hw_cb_fip(void *ctx, void *e)
9309 {
9310 	ocs_hw_t	*hw = ctx;
9311 	ocs_domain_t	*domain = NULL;
9312 	sli4_fip_event_t *event = e;
9313 
9314 	ocs_hw_assert(event);
9315 	ocs_hw_assert(hw);
9316 
9317 	/* Find the associated domain object */
9318 	if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) {
9319 		ocs_domain_t *d = NULL;
9320 		uint32_t	i = 0;
9321 
9322 		/* Clear VLINK is different from the other FIP events as it passes back
9323 		 * a VPI instead of an FCF index. Check all attached SLI ports for a
9324 		 * matching VPI */
9325 		for (i = 0; i < SLI4_MAX_FCFI; i++) {
9326 			d = hw->domains[i];
9327 			if (d != NULL) {
9328 				ocs_sport_t	*sport = NULL;
9329 
9330 				ocs_list_foreach(&d->sport_list, sport) {
9331 					if (sport->indicator == event->index) {
9332 						domain = d;
9333 						break;
9334 					}
9335 				}
9336 
9337 				if (domain != NULL) {
9338 					break;
9339 				}
9340 			}
9341 		}
9342 	} else {
9343 		domain = ocs_hw_domain_get_indexed(hw, event->index);
9344 	}
9345 
9346 	switch (event->type) {
9347 	case SLI4_FCOE_FIP_FCF_DISCOVERED:
9348 		ocs_hw_read_fcf(hw, event->index);
9349 		break;
9350 	case SLI4_FCOE_FIP_FCF_DEAD:
9351 		if (domain != NULL &&
9352 		    hw->callback.domain != NULL) {
9353 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9354 		}
9355 		break;
9356 	case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
9357 		if (domain != NULL &&
9358 		    hw->callback.domain != NULL) {
9359 			/*
9360 			 * We will want to issue rediscover FCF when this domain is freed in order
9361 			 * to invalidate the FCF table
9362 			 */
9363 			domain->req_rediscover_fcf = TRUE;
9364 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9365 		}
9366 		break;
9367 	case SLI4_FCOE_FIP_FCF_MODIFIED:
9368 		if (domain != NULL &&
9369 		    hw->callback.domain != NULL) {
9370 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9371 		}
9372 
9373 		ocs_hw_read_fcf(hw, event->index);
9374 		break;
9375 	default:
9376 		ocs_log_test(hw->os, "unsupported event %#x\n", event->type);
9377 	}
9378 
9379 	return 0;
9380 }
9381 
9382 static int32_t
9383 ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9384 {
9385 	ocs_remote_node_t *rnode = arg;
9386 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9387 	ocs_hw_remote_node_event_e	evt = 0;
9388 
9389 	if (status || hdr->status) {
9390 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9391 				hdr->status);
9392 		ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
9393 		rnode->attached = FALSE;
9394 		ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9395 		evt = OCS_HW_NODE_ATTACH_FAIL;
9396 	} else {
9397 		rnode->attached = TRUE;
9398 		ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1);
9399 		evt = OCS_HW_NODE_ATTACH_OK;
9400 	}
9401 
9402 	if (hw->callback.rnode != NULL) {
9403 		hw->callback.rnode(hw->args.rnode, evt, rnode);
9404 	}
9405 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9406 
9407 	return 0;
9408 }
9409 
9410 static int32_t
9411 ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9412 {
9413 	ocs_remote_node_t *rnode = arg;
9414 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9415 	ocs_hw_remote_node_event_e	evt = OCS_HW_NODE_FREE_FAIL;
9416 	int32_t		rc = 0;
9417 
9418 	if (status || hdr->status) {
9419 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9420 				hdr->status);
9421 
9422 		/*
9423 		 * In certain cases, a non-zero MQE status is OK (all must be true):
9424 		 *   - node is attached
9425 		 *   - if High Login Mode is enabled, node is part of a node group
9426 		 *   - status is 0x1400
9427 		 */
9428 		if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) ||
9429 				(hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) {
9430 			rc = -1;
9431 		}
9432 	}
9433 
9434 	if (rc == 0) {
9435 		rnode->node_group = FALSE;
9436 		rnode->attached = FALSE;
9437 
9438 		if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) {
9439 			ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9440 		}
9441 
9442 		evt = OCS_HW_NODE_FREE_OK;
9443 	}
9444 
9445 	if (hw->callback.rnode != NULL) {
9446 		hw->callback.rnode(hw->args.rnode, evt, rnode);
9447 	}
9448 
9449 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9450 
9451 	return rc;
9452 }
9453 
9454 static int32_t
9455 ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9456 {
9457 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9458 	ocs_hw_remote_node_event_e	evt = OCS_HW_NODE_FREE_FAIL;
9459 	int32_t		rc = 0;
9460 	uint32_t	i;
9461 
9462 	if (status || hdr->status) {
9463 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9464 				hdr->status);
9465 	} else {
9466 		evt = OCS_HW_NODE_FREE_ALL_OK;
9467 	}
9468 
9469 	if (evt == OCS_HW_NODE_FREE_ALL_OK) {
9470 		for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) {
9471 			ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0);
9472 		}
9473 
9474 		if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) {
9475 			ocs_log_test(hw->os, "FCOE_RPI free all failure\n");
9476 			rc = -1;
9477 		}
9478 	}
9479 
9480 	if (hw->callback.rnode != NULL) {
9481 		hw->callback.rnode(hw->args.rnode, evt, NULL);
9482 	}
9483 
9484 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9485 
9486 	return rc;
9487 }
9488 
9489 /**
9490  * @brief Initialize the pool of HW IO objects.
9491  *
9492  * @param hw Hardware context.
9493  *
9494  * @return Returns 0 on success, or a non-zero value on failure.
9495  */
9496 static ocs_hw_rtn_e
9497 ocs_hw_setup_io(ocs_hw_t *hw)
9498 {
9499 	uint32_t	i = 0;
9500 	ocs_hw_io_t	*io = NULL;
9501 	uintptr_t	xfer_virt = 0;
9502 	uintptr_t	xfer_phys = 0;
9503 	uint32_t	index;
9504 	uint8_t		new_alloc = TRUE;
9505 
9506 	if (NULL == hw->io) {
9507 		hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT);
9508 
9509 		if (NULL == hw->io) {
9510 			ocs_log_err(hw->os, "IO pointer memory allocation failed, %d Ios at size %zu\n",
9511 				    hw->config.n_io,
9512 				    sizeof(ocs_hw_io_t *));
9513 			return OCS_HW_RTN_NO_MEMORY;
9514 		}
9515 		for (i = 0; i < hw->config.n_io; i++) {
9516 			hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t),
9517 						OCS_M_ZERO | OCS_M_NOWAIT);
9518 			if (hw->io[i] == NULL) {
9519 				ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i);
9520 				goto error;
9521 			}
9522 		}
9523 
9524 		/* Create WQE buffs for IO */
9525 		hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size,
9526 				OCS_M_ZERO | OCS_M_NOWAIT);
9527 		if (NULL == hw->wqe_buffs) {
9528 			ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
9529 			ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d Ios at size %zu\n",
9530 					__func__, hw->config.n_io, hw->sli.config.wqe_size);
9531 			return OCS_HW_RTN_NO_MEMORY;
9532 		}
9533 
9534 	} else {
9535 		/* re-use existing IOs, including SGLs */
9536 		new_alloc = FALSE;
9537 	}
9538 
9539 	if (new_alloc) {
9540 		if (ocs_dma_alloc(hw->os, &hw->xfer_rdy,
9541 					sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io,
9542 					4/*XXX what does this need to be? */)) {
9543 			ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n");
9544 			return OCS_HW_RTN_NO_MEMORY;
9545 		}
9546 	}
9547 	xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
9548 	xfer_phys = hw->xfer_rdy.phys;
9549 
9550 	for (i = 0; i < hw->config.n_io; i++) {
9551 		hw_wq_callback_t *wqcb;
9552 
9553 		io = hw->io[i];
9554 
9555 		/* initialize IO fields */
9556 		io->hw = hw;
9557 
9558 		/* Assign a WQE buff */
9559 		io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size];
9560 
9561 		/* Allocate the request tag for this IO */
9562 		wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io);
9563 		if (wqcb == NULL) {
9564 			ocs_log_err(hw->os, "can't allocate request tag\n");
9565 			return OCS_HW_RTN_NO_RESOURCES;
9566 		}
9567 		io->reqtag = wqcb->instance_index;
9568 
9569 		/* Now for the fields that are initialized on each free */
9570 		ocs_hw_init_free_io(io);
9571 
9572 		/* The XB flag isn't cleared on IO free, so initialize it to zero here */
9573 		io->xbusy = 0;
9574 
9575 		if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) {
9576 			ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i);
9577 			return OCS_HW_RTN_NO_MEMORY;
9578 		}
9579 
9580 		if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) {
9581 			ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i);
9582 			ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t));
9583 			return OCS_HW_RTN_NO_MEMORY;
9584 		}
9585 		io->def_sgl_count = hw->config.n_sgl;
9586 		io->sgl = &io->def_sgl;
9587 		io->sgl_count = io->def_sgl_count;
9588 
9589 		if (hw->xfer_rdy.size) {
9590 			io->xfer_rdy.virt = (void *)xfer_virt;
9591 			io->xfer_rdy.phys = xfer_phys;
9592 			io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t);
9593 
9594 			xfer_virt += sizeof(fcp_xfer_rdy_iu_t);
9595 			xfer_phys += sizeof(fcp_xfer_rdy_iu_t);
9596 		}
9597 	}
9598 
9599 	return OCS_HW_RTN_SUCCESS;
9600 error:
9601 	for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
9602 		ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
9603 		hw->io[i] = NULL;
9604 	}
9605 
9606 	return OCS_HW_RTN_NO_MEMORY;
9607 }
9608 
9609 static ocs_hw_rtn_e
9610 ocs_hw_init_io(ocs_hw_t *hw)
9611 {
9612 	uint32_t        i = 0, io_index = 0;
9613 	uint32_t        prereg = 0;
9614 	ocs_hw_io_t	*io = NULL;
9615 	uint8_t		cmd[SLI4_BMBX_SIZE];
9616 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
9617 	uint32_t	nremaining;
9618 	uint32_t	n = 0;
9619 	uint32_t	sgls_per_request = 256;
9620 	ocs_dma_t	**sgls = NULL;
9621 	ocs_dma_t	reqbuf = { 0 };
9622 
9623 	prereg = sli_get_sgl_preregister(&hw->sli);
9624 
9625 	if (prereg) {
9626 		sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT);
9627 		if (sgls == NULL) {
9628 			ocs_log_err(hw->os, "ocs_malloc sgls failed\n");
9629 			return OCS_HW_RTN_NO_MEMORY;
9630 		}
9631 
9632 		rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT);
9633 		if (rc) {
9634 			ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n");
9635 			ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9636 			return OCS_HW_RTN_NO_MEMORY;
9637 		}
9638 	}
9639 
9640 	io = hw->io[io_index];
9641 	for (nremaining = hw->config.n_io; nremaining; nremaining -= n) {
9642 		if (prereg) {
9643 			/* Copy address of SGL's into local sgls[] array, break out if the xri
9644 			 * is not contiguous.
9645 			 */
9646 			for (n = 0; n < MIN(sgls_per_request, nremaining); n++) {
9647 				/* Check that we have contiguous xri values */
9648 				if (n > 0) {
9649 					if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) {
9650 						break;
9651 					}
9652 				}
9653 				sgls[n] = hw->io[io_index + n]->sgl;
9654 			}
9655 
9656 			if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd),
9657 						io->indicator, n, sgls, NULL, &reqbuf)) {
9658 				if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) {
9659 					rc = OCS_HW_RTN_ERROR;
9660 					ocs_log_err(hw->os, "SGL post failed\n");
9661 					break;
9662 				}
9663 			}
9664 		} else {
9665 			n = nremaining;
9666 		}
9667 
9668 		/* Add to tail if successful */
9669 		for (i = 0; i < n; i ++) {
9670 			io->is_port_owned = 0;
9671 			io->state = OCS_HW_IO_STATE_FREE;
9672 			ocs_list_add_tail(&hw->io_free, io);
9673 			io_index++;
9674 			io = (io_index < hw->config.n_io) ? hw->io[io_index] : NULL;
9675 		}
9676 	}
9677 
9678 	if (prereg) {
9679 		ocs_dma_free(hw->os, &reqbuf);
9680 		ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9681 	}
9682 
9683 	return rc;
9684 }
9685 
9686 static int32_t
9687 ocs_hw_flush(ocs_hw_t *hw)
9688 {
9689 	uint32_t	i = 0;
9690 
9691 	/* Process any remaining completions */
9692 	for (i = 0; i < hw->eq_count; i++) {
9693 		ocs_hw_process(hw, i, ~0);
9694 	}
9695 
9696 	return 0;
9697 }
9698 
9699 static int32_t
9700 ocs_hw_command_cancel(ocs_hw_t *hw)
9701 {
9702 
9703 	ocs_lock(&hw->cmd_lock);
9704 
9705 	/*
9706 	 * Manually clean up remaining commands. Note: since this calls
9707 	 * ocs_hw_command_process(), we'll also process the cmd_pending
9708 	 * list, so no need to manually clean that out.
9709 	 */
9710 	while (!ocs_list_empty(&hw->cmd_head)) {
9711 		uint8_t		mqe[SLI4_BMBX_SIZE] = { 0 };
9712 		ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head);
9713 
9714 		ocs_log_test(hw->os, "hung command %08x\n",
9715 				NULL == ctx ? UINT32_MAX :
9716 				(NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf)));
9717 		ocs_unlock(&hw->cmd_lock);
9718 		ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE);
9719 		ocs_lock(&hw->cmd_lock);
9720 	}
9721 
9722 	ocs_unlock(&hw->cmd_lock);
9723 
9724 	return 0;
9725 }
9726 
9727 /**
9728  * @brief Find IO given indicator (xri).
9729  *
9730  * @param hw Hardware context.
9731  * @param xri Indicator (XRI) to look for.
9732  *
9733  * @return Returns the IO if found; otherwise NULL.
9734  */
9735 ocs_hw_io_t *
9736 ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri)
9737 {
9738 	uint32_t ioindex;
9739 	ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0];
9740 	return hw->io[ioindex];
9741 }
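
/*
 * Editor's sketch (not part of the driver): XRIs come from one contiguous
 * SLI-4 extent, so the lookup above is plain arithmetic. With a
 * hypothetical extent base of 0x180, XRI 0x183 resolves as:
 *
 *	ioindex = 0x183 - 0x180;	// == 3
 *	io      = hw->io[3];
 *
 * Note that no bounds check is performed; callers must pass an XRI that
 * belongs to this extent.
 */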
9742 
9743 /**
9744  * @brief Issue any pending callbacks for an IO and remove it from the timer and pending lists.
9745  *
9746  * @param hw Hardware context.
9747  * @param io Pointer to the IO to clean up.
9748  */
9749 static void
9750 ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io)
9751 {
9752 	ocs_hw_done_t  done = io->done;
9753 	ocs_hw_done_t  abort_done = io->abort_done;
9754 
9755 	/* first check the timed WQE list and remove the IO if present */
9756 	if (ocs_list_on_list(&io->wqe_link)) {
9757 		ocs_list_remove(&hw->io_timed_wqe, io);
9758 	}
9759 
9760 	/* Remove from WQ pending list */
9761 	if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) {
9762 		ocs_list_remove(&io->wq->pending_list, io);
9763 	}
9764 
9765 	if (io->done) {
9766 		void		*arg = io->arg;
9767 
9768 		io->done = NULL;
9769 		ocs_unlock(&hw->io_lock);
9770 		done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg);
9771 		ocs_lock(&hw->io_lock);
9772 	}
9773 
9774 	if (io->abort_done != NULL) {
9775 		void		*abort_arg = io->abort_arg;
9776 
9777 		io->abort_done = NULL;
9778 		ocs_unlock(&hw->io_lock);
9779 		abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg);
9780 		ocs_lock(&hw->io_lock);
9781 	}
9782 }
9783 
9784 static int32_t
9785 ocs_hw_io_cancel(ocs_hw_t *hw)
9786 {
9787 	ocs_hw_io_t	*io = NULL;
9788 	ocs_hw_io_t	*tmp_io = NULL;
9789 	uint32_t	iters = 100; /* One second limit */
9790 
9791 	/*
9792 	 * Manually clean up outstanding IO.
9793 	 * Only walk through the list once: the backend will clean up any IOs when done/abort_done is called.
9794 	 */
9795 	ocs_lock(&hw->io_lock);
9796 	ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) {
9797 		ocs_hw_done_t  done = io->done;
9798 		ocs_hw_done_t  abort_done = io->abort_done;
9799 
9800 		ocs_hw_io_cancel_cleanup(hw, io);
9801 
9802 		/*
9803 		 * Since this is called in a reset/shutdown
9804 		 * case, if there is no callback, then just
9805 		 * free the IO.
9806 		 *
9807 		 * Note: A port-owned XRI cannot be on
9808 		 *       the in-use list. We cannot call
9809 		 *       ocs_hw_io_free() because we already
9810 		 *       hold the io_lock.
9811 		 */
9812 		if (done == NULL &&
9813 		    abort_done == NULL) {
9814 			/*
9815 			 * No callback registered: release the IO
9816 			 * directly (see the note above on why
9817 			 * ocs_hw_io_free() cannot be called here).
9818 			 */
9819 			ocs_hw_io_free_common(hw, io);
9820 			ocs_list_remove(&hw->io_inuse, io);
9821 			ocs_hw_io_free_move_correct_list(hw, io);
9822 		}
9823 	}
9824 
9825 	/*
9826 	 * For port owned XRIs, they are not on the in use list, so
9827 	 * walk though XRIs and issue any callbacks.
9828 	 */
9829 	ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) {
9830 		/* check the dnrx list and remove the IO if present */
9831 		if (ocs_list_on_list(&io->dnrx_link)) {
9832 			ocs_list_remove(&hw->io_port_dnrx, io);
9833 			ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
9834 		}
9835 		ocs_hw_io_cancel_cleanup(hw, io);
9836 		ocs_list_remove(&hw->io_port_owned, io);
9837 		ocs_hw_io_free_common(hw, io);
9838 	}
9839 	ocs_unlock(&hw->io_lock);
9840 
9841 	/* Give time for the callbacks to complete */
9842 	do {
9843 		ocs_udelay(10000);
9844 		iters--;
9845 	} while (!ocs_list_empty(&hw->io_inuse) && iters);
9846 
9847 	/* Leave a breadcrumb that cleanup is not yet complete. */
9848 	if (!ocs_list_empty(&hw->io_inuse)) {
9849 		ocs_log_test(hw->os, "io_inuse list is not empty\n");
9850 	}
9851 
9852 	return 0;
9853 }
9854 
9855 static int32_t
9856 ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size,
9857 		ocs_dma_t *rsp)
9858 {
9859 	sli4_sge_t	*data = NULL;
9860 
9861 	if (!hw || !io) {
9862 		ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io);
9863 		return OCS_HW_RTN_ERROR;
9864 	}
9865 
9866 	data = io->def_sgl.virt;
9867 
9868 	/* setup command pointer */
9869 	data->buffer_address_high = ocs_addr32_hi(cmnd->phys);
9870 	data->buffer_address_low  = ocs_addr32_lo(cmnd->phys);
9871 	data->buffer_length = cmnd_size;
9872 	data++;
9873 
9874 	/* setup response pointer */
9875 	data->buffer_address_high = ocs_addr32_hi(rsp->phys);
9876 	data->buffer_address_low  = ocs_addr32_lo(rsp->phys);
9877 	data->buffer_length = rsp->size;
9878 
9879 	return 0;
9880 }
9881 
9882 static int32_t
9883 __ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9884 {
9885 	sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe;
9886 
9887 	if (status || read_topo->hdr.status) {
9888 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n",
9889 				status, read_topo->hdr.status);
9890 		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9891 		return -1;
9892 	}
9893 
9894 	switch (read_topo->attention_type) {
9895 	case SLI4_READ_TOPOLOGY_LINK_UP:
9896 		hw->link.status = SLI_LINK_STATUS_UP;
9897 		break;
9898 	case SLI4_READ_TOPOLOGY_LINK_DOWN:
9899 		hw->link.status = SLI_LINK_STATUS_DOWN;
9900 		break;
9901 	case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
9902 		hw->link.status = SLI_LINK_STATUS_NO_ALPA;
9903 		break;
9904 	default:
9905 		hw->link.status = SLI_LINK_STATUS_MAX;
9906 		break;
9907 	}
9908 
9909 	switch (read_topo->topology) {
9910 	case SLI4_READ_TOPOLOGY_NPORT:
9911 		hw->link.topology = SLI_LINK_TOPO_NPORT;
9912 		break;
9913 	case SLI4_READ_TOPOLOGY_FC_AL:
9914 		hw->link.topology = SLI_LINK_TOPO_LOOP;
9915 		if (SLI_LINK_STATUS_UP == hw->link.status) {
9916 			hw->link.loop_map = hw->loop_map.virt;
9917 		}
9918 		hw->link.fc_id = read_topo->acquired_al_pa;
9919 		break;
9920 	default:
9921 		hw->link.topology = SLI_LINK_TOPO_MAX;
9922 		break;
9923 	}
9924 
9925 	hw->link.medium = SLI_LINK_MEDIUM_FC;
9926 
9927 	switch (read_topo->link_current.link_speed) {
9928 	case SLI4_READ_TOPOLOGY_SPEED_1G:
9929 		hw->link.speed =  1 * 1000;
9930 		break;
9931 	case SLI4_READ_TOPOLOGY_SPEED_2G:
9932 		hw->link.speed =  2 * 1000;
9933 		break;
9934 	case SLI4_READ_TOPOLOGY_SPEED_4G:
9935 		hw->link.speed =  4 * 1000;
9936 		break;
9937 	case SLI4_READ_TOPOLOGY_SPEED_8G:
9938 		hw->link.speed =  8 * 1000;
9939 		break;
9940 	case SLI4_READ_TOPOLOGY_SPEED_16G:
9941 		hw->link.speed = 16 * 1000;
9942 		hw->link.loop_map = NULL;
9943 		break;
9944 	case SLI4_READ_TOPOLOGY_SPEED_32G:
9945 		hw->link.speed = 32 * 1000;
9946 		hw->link.loop_map = NULL;
9947 		break;
9948 	}
9949 
9950 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9951 
9952 	ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9953 
9954 	return 0;
9955 }
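
/*
 * Editor's note (not part of the driver): hw->link.speed is kept in
 * megabits per second, so the READ_TOPOLOGY speed codes map as
 * 1G -> 1000, 2G -> 2000, ..., 32G -> 32000. loop_map is cleared for the
 * 16G and 32G cases because arbitrated loop is not supported at those
 * speeds, so any previously captured loop map would be stale.
 */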
9956 
9957 static int32_t
9958 __ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9959 {
9960 	ocs_sli_port_t	*sport = ctx->app;
9961 	ocs_hw_t	*hw = sport->hw;
9962 
9963 	smtrace("port");
9964 
9965 	switch (evt) {
9966 	case OCS_EVT_EXIT:
9967 		/* ignore */
9968 		break;
9969 
9970 	case OCS_EVT_HW_PORT_REQ_FREE:
9971 	case OCS_EVT_HW_PORT_REQ_ATTACH:
9972 		if (data != NULL) {
9973 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9974 		}
9975 		/* fall through */
9976 	default:
9977 		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
9978 		break;
9979 	}
9980 
9981 	return 0;
9982 }
9983 
9984 static void *
9985 __ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9986 {
9987 	ocs_sli_port_t	*sport = ctx->app;
9988 	ocs_hw_t	*hw = sport->hw;
9989 
9990 	smtrace("port");
9991 
9992 	switch (evt) {
9993 	case OCS_EVT_ENTER:
9994 		if (data != NULL) {
9995 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9996 		}
9997 		if (hw->callback.port != NULL) {
9998 			hw->callback.port(hw->args.port,
9999 					OCS_HW_PORT_FREE_FAIL, sport);
10000 		}
10001 		break;
10002 	default:
10003 		break;
10004 	}
10005 
10006 	return NULL;
10007 }
10008 
10009 static void *
10010 __ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10011 {
10012 	ocs_sli_port_t	*sport = ctx->app;
10013 	ocs_hw_t	*hw = sport->hw;
10014 
10015 	smtrace("port");
10016 
10017 	switch (evt) {
10018 	case OCS_EVT_ENTER:
10019 		/* free SLI resource */
10020 		if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) {
10021 			ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id);
10022 		}
10023 
10024 		/* free mailbox buffer */
10025 		if (data != NULL) {
10026 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10027 		}
10028 		if (hw->callback.port != NULL) {
10029 			hw->callback.port(hw->args.port,
10030 					OCS_HW_PORT_FREE_OK, sport);
10031 		}
10032 		break;
10033 	default:
10034 		break;
10035 	}
10036 
10037 	return NULL;
10038 }
10039 
10040 static void *
10041 __ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10042 {
10043 	ocs_sli_port_t	*sport = ctx->app;
10044 	ocs_hw_t	*hw = sport->hw;
10045 
10046 	smtrace("port");
10047 
10048 	switch (evt) {
10049 	case OCS_EVT_ENTER:
10050 		/* free SLI resource */
10051 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10052 
10053 		/* free mailbox buffer */
10054 		if (data != NULL) {
10055 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10056 		}
10057 
10058 		if (hw->callback.port != NULL) {
10059 			hw->callback.port(hw->args.port,
10060 					OCS_HW_PORT_ATTACH_FAIL, sport);
10061 		}
10062 		if (sport->sm_free_req_pending) {
10063 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10064 		}
10065 		break;
10066 	default:
10067 		__ocs_hw_port_common(__func__, ctx, evt, data);
10068 		break;
10069 	}
10070 
10071 	return NULL;
10072 }
10073 
10074 static void *
10075 __ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10076 {
10077 	ocs_sli_port_t	*sport = ctx->app;
10078 	ocs_hw_t	*hw = sport->hw;
10079 	uint8_t		*cmd = NULL;
10080 
10081 	smtrace("port");
10082 
10083 	switch (evt) {
10084 	case OCS_EVT_ENTER:
10085 		/* allocate memory and send unreg_vpi */
10086 		cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10087 		if (!cmd) {
10088 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10089 			break;
10090 		}
10091 
10092 		if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator,
10093 					   SLI4_UNREG_TYPE_PORT)) {
10094 			ocs_log_err(hw->os, "UNREG_VPI format failure\n");
10095 			ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10096 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10097 			break;
10098 		}
10099 
10100 		if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10101 			ocs_log_err(hw->os, "UNREG_VPI command failure\n");
10102 			ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10103 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10104 			break;
10105 		}
10106 		break;
10107 	case OCS_EVT_RESPONSE:
10108 		ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10109 		break;
10110 	case OCS_EVT_ERROR:
10111 		ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10112 		break;
10113 	default:
10114 		__ocs_hw_port_common(__func__, ctx, evt, data);
10115 		break;
10116 	}
10117 
10118 	return NULL;
10119 }
10120 
10121 static void *
10122 __ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10123 {
10124 	ocs_sli_port_t	*sport = ctx->app;
10125 	ocs_hw_t	*hw = sport->hw;
10126 
10127 	smtrace("port");
10128 
10129 	switch (evt) {
10130 	case OCS_EVT_ENTER:
10131 		/* Forward to execute in mailbox completion processing context */
10132 		if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) {
10133 			ocs_log_err(hw->os, "ocs_hw_async_call failed\n");
10134 		}
10135 		break;
10136 	case OCS_EVT_RESPONSE:
10137 		ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10138 		break;
10139 	case OCS_EVT_ERROR:
10140 		ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10141 		break;
10142 	default:
10143 		break;
10144 	}
10145 
10146 	return NULL;
10147 }
10148 
10149 static void *
10150 __ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10151 {
10152 	ocs_sli_port_t	*sport = ctx->app;
10153 	ocs_hw_t	*hw = sport->hw;
10154 
10155 	smtrace("port");
10156 
10157 	switch (evt) {
10158 	case OCS_EVT_ENTER:
10159 		if (data != NULL) {
10160 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10161 		}
10162 		if (hw->callback.port != NULL) {
10163 			hw->callback.port(hw->args.port,
10164 					OCS_HW_PORT_ATTACH_OK, sport);
10165 		}
10166 		if (sport->sm_free_req_pending) {
10167 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10168 		}
10169 		break;
10170 	case OCS_EVT_HW_PORT_REQ_FREE:
10171 		/* virtual/physical port request free */
10172 		ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10173 		break;
10174 	default:
10175 		__ocs_hw_port_common(__func__, ctx, evt, data);
10176 		break;
10177 	}
10178 
10179 	return NULL;
10180 }
10181 
10182 static void *
10183 __ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10184 {
10185 	ocs_sli_port_t	*sport = ctx->app;
10186 	ocs_hw_t	*hw = sport->hw;
10187 
10188 	smtrace("port");
10189 
10190 	switch (evt) {
10191 	case OCS_EVT_ENTER:
10192 		if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) {
10193 			ocs_log_err(hw->os, "REG_VPI format failure\n");
10194 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10195 			break;
10196 		}
10197 
10198 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10199 			ocs_log_err(hw->os, "REG_VPI command failure\n");
10200 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10201 			break;
10202 		}
10203 		break;
10204 	case OCS_EVT_RESPONSE:
10205 		ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10206 		break;
10207 	case OCS_EVT_ERROR:
10208 		ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data);
10209 		break;
10210 	case OCS_EVT_HW_PORT_REQ_FREE:
10211 		/* Wait for attach response and then free */
10212 		sport->sm_free_req_pending = 1;
10213 		break;
10214 	default:
10215 		__ocs_hw_port_common(__func__, ctx, evt, data);
10216 		break;
10217 	}
10218 
10219 	return NULL;
10220 }
10221 
10222 static void *
10223 __ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10224 {
10225 	ocs_sli_port_t	*sport = ctx->app;
10226 	ocs_hw_t	*hw = sport->hw;
10227 
10228 	smtrace("port");
10229 
10230 	switch (evt) {
10231 	case OCS_EVT_ENTER:
10232 		/* free SLI resource */
10233 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10234 
10235 		/* free mailbox buffer */
10236 		if (data != NULL) {
10237 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10238 		}
10239 		break;
10240 	default:
10241 		__ocs_hw_port_common(__func__, ctx, evt, data);
10242 		break;
10243 	}
10244 
10245 	return NULL;
10246 }
10247 
10248 static void *
10249 __ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10250 {
10251 	ocs_sli_port_t	*sport = ctx->app;
10252 	ocs_hw_t	*hw = sport->hw;
10253 
10254 	smtrace("port");
10255 
10256 	switch (evt) {
10257 	case OCS_EVT_ENTER:
10258 		if (data != NULL) {
10259 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10260 		}
10261 		if (hw->callback.port != NULL) {
10262 			hw->callback.port(hw->args.port,
10263 					OCS_HW_PORT_ALLOC_OK, sport);
10264 		}
10265 		/* If there is a pending free request, then handle it now */
10266 		if (sport->sm_free_req_pending) {
10267 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10268 		}
10269 		break;
10270 	case OCS_EVT_HW_PORT_REQ_ATTACH:
10271 		/* virtual port requests attach */
10272 		ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data);
10273 		break;
10274 	case OCS_EVT_HW_PORT_ATTACH_OK:
10275 		/* physical port attached (as part of attaching domain) */
10276 		ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10277 		break;
10278 	case OCS_EVT_HW_PORT_REQ_FREE:
10279 		/* virtual port request free */
10280 		if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
10281 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10282 		} else {
10283 			/*
10284 			 * Note: BE3/Skyhawk will respond with a status of 0x20
10285 			 *       unless the reg_vpi has been issued, so we can
10286 			 *       skip the unreg_vpi for these adapters.
10287 			 *
10288 			 * Send a NOP to make sure that the free doesn't occur
10289 			 * in the same context.
10290 			 */
10291 			ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL);
10292 		}
10293 		break;
10294 	default:
10295 		__ocs_hw_port_common(__func__, ctx, evt, data);
10296 		break;
10297 	}
10298 
10299 	return NULL;
10300 }
10301 
10302 static void *
10303 __ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10304 {
10305 	ocs_sli_port_t	*sport = ctx->app;
10306 	ocs_hw_t	*hw = sport->hw;
10307 
10308 	smtrace("port");
10309 
10310 	switch (evt) {
10311 	case OCS_EVT_ENTER:
10312 		/* free SLI resource */
10313 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10314 
10315 		/* free mailbox buffer */
10316 		if (data != NULL) {
10317 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10318 		}
10319 
10320 		if (hw->callback.port != NULL) {
10321 			hw->callback.port(hw->args.port,
10322 					OCS_HW_PORT_ALLOC_FAIL, sport);
10323 		}
10324 
10325 		/* If there is a pending free request, then handle it now */
10326 		if (sport->sm_free_req_pending) {
10327 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10328 		}
10329 		break;
10330 	default:
10331 		__ocs_hw_port_common(__func__, ctx, evt, data);
10332 		break;
10333 	}
10334 
10335 	return NULL;
10336 }
10337 
10338 static void *
10339 __ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10340 {
10341 	ocs_sli_port_t	*sport = ctx->app;
10342 	ocs_hw_t	*hw = sport->hw;
10343 	uint8_t		*payload = NULL;
10344 
10345 	smtrace("port");
10346 
10347 	switch (evt) {
10348 	case OCS_EVT_ENTER:
10349 		/* allocate memory for the service parameters */
10350 		if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) {
10351 			ocs_log_err(hw->os, "Failed to allocate DMA memory\n");
10352 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10353 			break;
10354 		}
10355 
10356 		if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10357 					&sport->dma, sport->indicator)) {
10358 			ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
10359 			ocs_dma_free(hw->os, &sport->dma);
10360 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10361 			break;
10362 		}
10363 
10364 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10365 			ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10366 			ocs_dma_free(hw->os, &sport->dma);
10367 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10368 			break;
10369 		}
10370 		break;
10371 	case OCS_EVT_RESPONSE:
10372 		payload = sport->dma.virt;
10373 
10374 		ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload);
10375 
10376 		ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
10377 				sizeof(sport->sli_wwpn));
10378 		ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
10379 				sizeof(sport->sli_wwnn));
10380 
10381 		ocs_dma_free(hw->os, &sport->dma);
10382 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data);
10383 		break;
10384 	case OCS_EVT_ERROR:
10385 		ocs_dma_free(hw->os, &sport->dma);
10386 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10387 		break;
10388 	case OCS_EVT_HW_PORT_REQ_FREE:
10389 		/* Wait for attach response and then free */
10390 		sport->sm_free_req_pending = 1;
10391 		break;
10392 	case OCS_EVT_EXIT:
10393 		break;
10394 	default:
10395 		__ocs_hw_port_common(__func__, ctx, evt, data);
10396 		break;
10397 	}
10398 
10399 	return NULL;
10400 }
10401 
10402 static void *
10403 __ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10404 {
10405 	ocs_sli_port_t	*sport = ctx->app;
10406 
10407 	smtrace("port");
10408 
10409 	switch (evt) {
10410 	case OCS_EVT_ENTER:
10411 		/* no-op */
10412 		break;
10413 	case OCS_EVT_HW_PORT_ALLOC_OK:
10414 		ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL);
10415 		break;
10416 	case OCS_EVT_HW_PORT_ALLOC_FAIL:
10417 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL);
10418 		break;
10419 	case OCS_EVT_HW_PORT_REQ_FREE:
10420 		/* Wait for attach response and then free */
10421 		sport->sm_free_req_pending = 1;
10422 		break;
10423 	default:
10424 		__ocs_hw_port_common(__func__, ctx, evt, data);
10425 		break;
10426 	}
10427 
10428 	return NULL;
10429 }
10430 
10431 static void *
10432 __ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10433 {
10434 	ocs_sli_port_t	*sport = ctx->app;
10435 	ocs_hw_t	*hw = sport->hw;
10436 
10437 	smtrace("port");
10438 
10439 	switch (evt) {
10440 	case OCS_EVT_ENTER:
10441 		/* If there is a pending free request, then handle it now */
10442 		if (sport->sm_free_req_pending) {
10443 			ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL);
10444 			return NULL;
10445 		}
10446 
10447 		/* TODO XXX transitioning to done only works if this is called
10448 		 * directly from ocs_hw_port_alloc BUT not if called from
10449 		 * read_sparm64. In the latter case, we actually want to go
10450 		 * through report_ok/fail.
10451 		 */
10452 		if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE,
10453 					sport->indicator, sport->domain->indicator)) {
10454 			ocs_log_err(hw->os, "INIT_VPI format failure\n");
10455 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10456 			break;
10457 		}
10458 
10459 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10460 			ocs_log_err(hw->os, "INIT_VPI command failure\n");
10461 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10462 			break;
10463 		}
10464 		break;
10465 	case OCS_EVT_RESPONSE:
10466 		ocs_sm_transition(ctx, __ocs_hw_port_allocated, data);
10467 		break;
10468 	case OCS_EVT_ERROR:
10469 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10470 		break;
10471 	case OCS_EVT_HW_PORT_REQ_FREE:
10472 		/* Wait for attach response and then free */
10473 		sport->sm_free_req_pending = 1;
10474 		break;
10475 	case OCS_EVT_EXIT:
10476 		break;
10477 	default:
10478 		__ocs_hw_port_common(__func__, ctx, evt, data);
10479 		break;
10480 	}
10481 
10482 	return NULL;
10483 }
10484 
10485 static int32_t
10486 __ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10487 {
10488 	ocs_sli_port_t *sport = arg;
10489 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
10490 	ocs_sm_event_t	evt;
10491 
10492 	if (status || hdr->status) {
10493 		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10494 			      sport->indicator, status, hdr->status);
10495 		evt = OCS_EVT_ERROR;
10496 	} else {
10497 		evt = OCS_EVT_RESPONSE;
10498 	}
10499 
10500 	ocs_sm_post_event(&sport->ctx, evt, mqe);
10501 
10502 	return 0;
10503 }
10504 
10505 static int32_t
10506 __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10507 {
10508 	ocs_sli_port_t *sport = arg;
10509 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
10510 	ocs_sm_event_t	evt;
10511 	uint8_t *mqecpy;
10512 
10513 	if (status || hdr->status) {
10514 		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10515 			      sport->indicator, status, hdr->status);
10516 		evt = OCS_EVT_ERROR;
10517 	} else {
10518 		evt = OCS_EVT_RESPONSE;
10519 	}
10520 
10521 	/*
10522 	 * In this case we have to malloc a mailbox command buffer, as it is reused
10523 	 * in the state machine post event call, and eventually freed
10524 	 */
10525 	mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10526 	if (mqecpy == NULL) {
10527 		ocs_log_err(hw->os, "malloc mqecpy failed\n");
10528 		return -1;
10529 	}
10530 	ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE);
10531 
10532 	ocs_sm_post_event(&sport->ctx, evt, mqecpy);
10533 
10534 	return 0;
10535 }
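
/*
 * Editor's sketch (not part of the driver): the copy above exists because
 * the mailbox layer reclaims the mqe buffer once this callback returns,
 * while the port state machine frees whatever buffer arrives with the
 * event. The general shape of the hand-off:
 *
 *	copy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
 *	if (copy != NULL) {
 *		ocs_memcpy(copy, mqe, SLI4_BMBX_SIZE);
 *		ocs_sm_post_event(&sport->ctx, evt, copy);	// SM frees copy
 *	}
 */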
10536 
10537 /***************************************************************************
10538  * Domain state machine
10539  */
10540 
10541 static int32_t
10542 __ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10543 {
10544 	ocs_domain_t	*domain = ctx->app;
10545 	ocs_hw_t	*hw = domain->hw;
10546 
10547 	smtrace("domain");
10548 
10549 	switch (evt) {
10550 	case OCS_EVT_EXIT:
10551 		/* ignore */
10552 		break;
10553 
10554 	default:
10555 		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
10556 		break;
10557 	}
10558 
10559 	return 0;
10560 }
10561 
10562 static void *
10563 __ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10564 {
10565 	ocs_domain_t	*domain = ctx->app;
10566 	ocs_hw_t	*hw = domain->hw;
10567 
10568 	smtrace("domain");
10569 
10570 	switch (evt) {
10571 	case OCS_EVT_ENTER:
10572 		/* free command buffer */
10573 		if (data != NULL) {
10574 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10575 		}
10576 		/* free SLI resources */
10577 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10578 		/* TODO how to free FCFI (or do we at all)? */
10579 
10580 		if (hw->callback.domain != NULL) {
10581 			hw->callback.domain(hw->args.domain,
10582 					OCS_HW_DOMAIN_ALLOC_FAIL,
10583 					domain);
10584 		}
10585 		break;
10586 	default:
10587 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10588 		break;
10589 	}
10590 
10591 	return NULL;
10592 }
10593 
10594 static void *
10595 __ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10596 {
10597 	ocs_domain_t	*domain = ctx->app;
10598 	ocs_hw_t	*hw = domain->hw;
10599 
10600 	smtrace("domain");
10601 
10602 	switch (evt) {
10603 	case OCS_EVT_ENTER:
10604 		/* free mailbox buffer and send alloc ok to physical sport */
10605 		ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10606 		ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL);
10607 
10608 		/* now inform registered callbacks */
10609 		if (hw->callback.domain != NULL) {
10610 			hw->callback.domain(hw->args.domain,
10611 					OCS_HW_DOMAIN_ATTACH_OK,
10612 					domain);
10613 		}
10614 		break;
10615 	case OCS_EVT_HW_DOMAIN_REQ_FREE:
10616 		ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10617 		break;
10618 	default:
10619 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10620 		break;
10621 	}
10622 
10623 	return NULL;
10624 }
10625 
10626 static void *
10627 __ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10628 {
10629 	ocs_domain_t	*domain = ctx->app;
10630 	ocs_hw_t	*hw = domain->hw;
10631 
10632 	smtrace("domain");
10633 
10634 	switch (evt) {
10635 	case OCS_EVT_ENTER:
10636 		if (data != NULL) {
10637 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10638 		}
10639 		/* free SLI resources */
10640 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10641 		/* TODO how to free FCFI (or do we at all)? */
10642 
10643 		if (hw->callback.domain != NULL) {
10644 			hw->callback.domain(hw->args.domain,
10645 					OCS_HW_DOMAIN_ATTACH_FAIL,
10646 					domain);
10647 		}
10648 		break;
10649 	case OCS_EVT_EXIT:
10650 		break;
10651 	default:
10652 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10653 		break;
10654 	}
10655 
10656 	return NULL;
10657 }
10658 
10659 static void *
10660 __ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10661 {
10662 	ocs_domain_t	*domain = ctx->app;
10663 	ocs_hw_t	*hw = domain->hw;
10664 
10665 	smtrace("domain");
10666 
10667 	switch (evt) {
10668 	case OCS_EVT_ENTER:
10669 
10670 		ocs_display_sparams("", "reg vfi", 0, NULL, domain->dma.virt);
10671 
10672 		if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) {
10673 			ocs_log_err(hw->os, "REG_VFI format failure\n");
10674 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10675 			break;
10676 		}
10677 
10678 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10679 			ocs_log_err(hw->os, "REG_VFI command failure\n");
10680 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10681 			break;
10682 		}
10683 		break;
10684 	case OCS_EVT_RESPONSE:
10685 		ocs_sm_transition(ctx, __ocs_hw_domain_attached, data);
10686 		break;
10687 	case OCS_EVT_ERROR:
10688 		ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data);
10689 		break;
10690 	default:
10691 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10692 		break;
10693 	}
10694 
10695 	return NULL;
10696 }
10697 
10698 static void *
10699 __ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10700 {
10701 	ocs_domain_t	*domain = ctx->app;
10702 	ocs_hw_t	*hw = domain->hw;
10703 
10704 	smtrace("domain");
10705 
10706 	switch (evt) {
10707 	case OCS_EVT_ENTER:
10708 		/* free mailbox buffer and send alloc ok to physical sport */
10709 		ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10710 		ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL);
10711 
10712 		ocs_hw_domain_add(hw, domain);
10713 
10714 		/* now inform registered callbacks */
10715 		if (hw->callback.domain != NULL) {
10716 			hw->callback.domain(hw->args.domain,
10717 					OCS_HW_DOMAIN_ALLOC_OK,
10718 					domain);
10719 		}
10720 		break;
10721 	case OCS_EVT_HW_DOMAIN_REQ_ATTACH:
10722 		ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data);
10723 		break;
10724 	case OCS_EVT_HW_DOMAIN_REQ_FREE:
10725 		/* unreg_fcfi/vfi */
10726 		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10727 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL);
10728 		} else {
10729 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10730 		}
10731 		break;
10732 	default:
10733 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10734 		break;
10735 	}
10736 
10737 	return NULL;
10738 }
10739 
10740 static void *
10741 __ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10742 {
10743 	ocs_domain_t	*domain = ctx->app;
10744 	ocs_hw_t	*hw = domain->hw;
10745 
10746 	smtrace("domain");
10747 
10748 	switch (evt) {
10749 	case OCS_EVT_ENTER:
10750 		if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10751 					&domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) {
10752 			ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
10753 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10754 			break;
10755 		}
10756 
10757 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10758 			ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10759 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10760 			break;
10761 		}
10762 		break;
10763 	case OCS_EVT_EXIT:
10764 		break;
10765 	case OCS_EVT_RESPONSE:
10766 		ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt);
10767 
10768 		ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data);
10769 		break;
10770 	case OCS_EVT_ERROR:
10771 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10772 		break;
10773 	default:
10774 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10775 		break;
10776 	}
10777 
10778 	return NULL;
10779 }
10780 
10781 static void *
10782 __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10783 {
10784 	ocs_domain_t	*domain = ctx->app;
10785 	ocs_sli_port_t	*sport = domain->sport;
10786 	ocs_hw_t	*hw = domain->hw;
10787 
10788 	smtrace("domain");
10789 
10790 	switch (evt) {
10791 	case OCS_EVT_ENTER:
10792 		if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator,
10793 					domain->fcf_indicator, sport->indicator)) {
10794 			ocs_log_err(hw->os, "INIT_VFI format failure\n");
10795 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10796 			break;
10797 		}
10798 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10799 			ocs_log_err(hw->os, "INIT_VFI command failure\n");
10800 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10801 			break;
10802 		}
10803 		break;
10804 	case OCS_EVT_EXIT:
10805 		break;
10806 	case OCS_EVT_RESPONSE:
10807 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10808 		break;
10809 	case OCS_EVT_ERROR:
10810 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10811 		break;
10812 	default:
10813 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10814 		break;
10815 	}
10816 
10817 	return NULL;
10818 }
10819 
10820 static void *
10821 __ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10822 {
10823 	ocs_domain_t	*domain = ctx->app;
10824 	ocs_hw_t	*hw = domain->hw;
10825 
10826 	smtrace("domain");
10827 
10828 	switch (evt) {
10829 	case OCS_EVT_ENTER: {
10830 		sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
10831 		uint32_t i;
10832 
10833 		/* Set the filter match/mask values from hw's filter_def values */
10834 		for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
10835 			rq_cfg[i].rq_id = 0xffff;
10836 			rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
10837 			rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
10838 			rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
10839 			rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
10840 		}
10841 
10842 		/* Set the rq_id for each, in order of RQ definition */
10843 		for (i = 0; i < hw->hw_rq_count; i++) {
10844 			if (i >= ARRAY_SIZE(rq_cfg)) {
10845 				ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n");
10846 				break;
10847 			}
10848 			rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id;
10849 		}
10850 
10851 		if (!data) {
10852 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10853 			break;
10854 		}
10855 
10856 		if (hw->hw_mrq_count) {
10857 			if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE,
10858 				 domain->vlan_id, domain->fcf)) {
10859 				ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n");
10860 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10861 				break;
10862 			}
10863 
10864 		} else {
10865 			if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf,
10866 						rq_cfg, domain->vlan_id)) {
10867 				ocs_log_err(hw->os, "REG_FCFI format failure\n");
10868 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10869 				break;
10870 			}
10871 		}
10872 
10873 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10874 			ocs_log_err(hw->os, "REG_FCFI command failure\n");
10875 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10876 			break;
10877 		}
10878 		break;
10879 	}
10880 	case OCS_EVT_EXIT:
10881 		break;
10882 	case OCS_EVT_RESPONSE:
10883 		if (!data) {
10884 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10885 			break;
10886 		}
10887 
10888 		domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi;
10889 
10890 		/*
10891 		 * IF_TYPE 0 devices do not support explicit VFI and VPI initialization
10892 		 * and instead rely on implicit initialization during VFI registration.
10893 		 * Short circuit normal processing here for those devices.
10894 		 */
10895 		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10896 			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10897 		} else {
10898 			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data);
10899 		}
10900 		break;
10901 	case OCS_EVT_ERROR:
10902 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10903 		break;
10904 	default:
10905 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10906 		break;
10907 	}
10908 
10909 	return NULL;
10910 }
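
/*
 * Editor's note (not part of the driver): each 32-bit filter_def word used
 * above packs four 8-bit REG_FCFI filter fields, least-significant byte
 * first:
 *
 *	bits  7..0  -> r_ctl_mask
 *	bits 15..8  -> r_ctl_match
 *	bits 23..16 -> type_mask
 *	bits 31..24 -> type_match
 *
 * A hypothetical filter_def of 0x08FF0600 therefore unpacks as
 * r_ctl_mask=0x00, r_ctl_match=0x06, type_mask=0xFF, type_match=0x08.
 */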
10911 
10912 static void *
10913 __ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10914 {
10915 	ocs_domain_t	*domain = ctx->app;
10916 	ocs_hw_t	*hw = domain->hw;
10917 
10918 	smtrace("domain");
10919 
10920 	switch (evt) {
10921 	case OCS_EVT_ENTER:
10922 		if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
10923 			/*
10924 			 * For FC, the HW already registered an FCFI.
10925 			 * Copy the FCF information into the domain and jump to INIT_VFI.
10926 			 */
10927 			domain->fcf_indicator = hw->fcf_indicator;
10928 			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data);
10929 		} else {
10930 			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data);
10931 		}
10932 		break;
10933 	default:
10934 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10935 		break;
10936 	}
10937 
10938 	return NULL;
10939 }
10940 
10941 static void *
10942 __ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10943 {
10944 	ocs_domain_t	*domain = ctx->app;
10945 
10946 	smtrace("domain");
10947 
10948 	switch (evt) {
10949 	case OCS_EVT_ENTER:
10950 		if (domain != NULL) {
10951 			ocs_hw_t	*hw = domain->hw;
10952 
10953 			ocs_hw_domain_del(hw, domain);
10954 
10955 			if (hw->callback.domain != NULL) {
10956 				hw->callback.domain(hw->args.domain,
10957 						     OCS_HW_DOMAIN_FREE_FAIL,
10958 						     domain);
10959 			}
10960 		}
10961 
10962 		/* free command buffer */
10963 		if (data != NULL) {
10964 			ocs_free(domain != NULL ? domain->hw->os : NULL, data, SLI4_BMBX_SIZE);
10965 		}
10966 		break;
10967 	case OCS_EVT_EXIT:
10968 		break;
10969 	default:
10970 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10971 		break;
10972 	}
10973 
10974 	return NULL;
10975 }
10976 
10977 static void *
10978 __ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10979 {
10980 	ocs_domain_t	*domain = ctx->app;
10981 
10982 	smtrace("domain");
10983 
10984 	switch (evt) {
10985 	case OCS_EVT_ENTER:
10986 		/* Free DMA and mailbox buffer */
10987 		if (domain != NULL) {
10988 			ocs_hw_t *hw = domain->hw;
10989 
10990 			/* free VFI resource */
10991 			sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI,
10992 					  domain->indicator);
10993 
10994 			ocs_hw_domain_del(hw, domain);
10995 
10996 			/* inform registered callbacks */
10997 			if (hw->callback.domain != NULL) {
10998 				hw->callback.domain(hw->args.domain,
10999 						     OCS_HW_DOMAIN_FREE_OK,
11000 						     domain);
11001 			}
11002 		}
11003 		if (data != NULL) {
11004 			ocs_free(NULL, data, SLI4_BMBX_SIZE);
11005 		}
11006 		break;
11007 	case OCS_EVT_EXIT:
11008 		break;
11009 	default:
11010 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11011 		break;
11012 	}
11013 
11014 	return NULL;
11015 }
11016 
11017 static void *
11018 __ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11019 {
11020 	ocs_domain_t	*domain = ctx->app;
11021 	ocs_hw_t	*hw = domain->hw;
11022 
11023 	smtrace("domain");
11024 
11025 	switch (evt) {
11026 	case OCS_EVT_ENTER:
11027 		/* if we're in the middle of a teardown, skip sending rediscover */
11028 		if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) {
11029 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11030 			break;
11031 		}
11032 		if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) {
11033 			ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n");
11034 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11035 			break;
11036 		}
11037 
11038 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11039 			ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n");
11040 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11041 		}
11042 		break;
11043 	case OCS_EVT_RESPONSE:
11044 	case OCS_EVT_ERROR:
11045 		/* REDISCOVER_FCF can fail if none exist */
11046 		ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11047 		break;
11048 	case OCS_EVT_EXIT:
11049 		break;
11050 	default:
11051 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11052 		break;
11053 	}
11054 
11055 	return NULL;
11056 }
11057 
11058 static void *
11059 __ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11060 {
11061 	ocs_domain_t	*domain = ctx->app;
11062 	ocs_hw_t	*hw = domain->hw;
11063 
11064 	smtrace("domain");
11065 
11066 	switch (evt) {
11067 	case OCS_EVT_ENTER:
11068 		if (data == NULL) {
11069 			data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11070 			if (!data) {
11071 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11072 				break;
11073 			}
11074 		}
11075 
11076 		if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) {
11077 			ocs_log_err(hw->os, "UNREG_FCFI format failure\n");
11078 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11079 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11080 			break;
11081 		}
11082 
11083 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11084 			ocs_log_err(hw->os, "UNREG_FCFI command failure\n");
11085 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11086 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11087 			break;
11088 		}
11089 		break;
11090 	case OCS_EVT_RESPONSE:
11091 		if (domain->req_rediscover_fcf) {
11092 			domain->req_rediscover_fcf = FALSE;
11093 			ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data);
11094 		} else {
11095 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11096 		}
11097 		break;
11098 	case OCS_EVT_ERROR:
11099 		ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11100 		break;
11101 	case OCS_EVT_EXIT:
11102 		break;
11103 	default:
11104 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11105 		break;
11106 	}
11107 
11108 	return NULL;
11109 }
11110 
11111 static void *
11112 __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11113 {
11114 	ocs_domain_t	*domain = ctx->app;
11115 	ocs_hw_t	*hw = domain->hw;
11116 	uint8_t		is_fc = FALSE;
11117 
11118 	smtrace("domain");
11119 
11120 	is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC);
11121 
11122 	switch (evt) {
11123 	case OCS_EVT_ENTER:
11124 		if (data == NULL) {
11125 			data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11126 			if (!data) {
11127 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11128 				break;
11129 			}
11130 		}
11131 
11132 		if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain,
11133 					SLI4_UNREG_TYPE_DOMAIN)) {
11134 			ocs_log_err(hw->os, "UNREG_VFI format failure\n");
11135 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11136 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11137 			break;
11138 		}
11139 
11140 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11141 			ocs_log_err(hw->os, "UNREG_VFI command failure\n");
11142 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11143 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11144 			break;
11145 		}
11146 		break;
11147 	case OCS_EVT_ERROR:
11148 		if (is_fc) {
11149 			ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11150 		} else {
11151 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11152 		}
11153 		break;
11154 	case OCS_EVT_RESPONSE:
11155 		if (is_fc) {
11156 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11157 		} else {
11158 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11159 		}
11160 		break;
11161 	default:
11162 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11163 		break;
11164 	}
11165 
11166 	return NULL;
11167 }
11168 
11169 /* callback for domain alloc/attach/free */
11170 static int32_t
11171 __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11172 {
11173 	ocs_domain_t	*domain = arg;
11174 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
11175 	ocs_sm_event_t	evt;
11176 
11177 	if (status || hdr->status) {
11178 		ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n",
11179 			      domain->indicator, status, hdr->status);
11180 		evt = OCS_EVT_ERROR;
11181 	} else {
11182 		evt = OCS_EVT_RESPONSE;
11183 	}
11184 
11185 	ocs_sm_post_event(&domain->sm, evt, mqe);
11186 
11187 	return 0;
11188 }
11189 
11190 static int32_t
11191 target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11192 {
11193 	ocs_hw_io_t *io = NULL;
11194 	ocs_hw_io_t *io_next = NULL;
11195 	ocs_hw_rtn_e rc;
11196 	struct timeval cur_time;
11197 
11198 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
11199 
11200 	if (status || hdr->status) {
11201 		ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n",
11202 			      status, hdr->status);
11203 		/* go ahead and proceed with wqe timer checks... */
11204 	}
11205 
11206 	/* loop through active WQE list and check for timeouts */
11207 	ocs_lock(&hw->io_lock);
11208 	ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) {
11209 
11210 		/*
11211 		 * If elapsed time > timeout, abort it. No need to check type since
11212 		 * it wouldn't be on this list unless it was a target WQE
11213 		 */
11214 		getmicrouptime(&cur_time);
11215 		timevalsub(&cur_time, &io->submit_time);
11216 		if (cur_time.tv_sec > io->wqe_timeout) {
11217 			ocs_log_info(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d elapsed time:%u\n",
11218 				     io->indicator, io->reqtag, io->type, cur_time.tv_sec);
11219 
11220 			/* remove from the timed WQE list so we won't try to abort it again */
11221 			ocs_list_remove(&hw->io_timed_wqe, io);
11222 
11223 			/* save status of "timed out" for when abort completes */
11224 			io->status_saved = 1;
11225 			io->saved_status = SLI4_FC_WCQE_STATUS_WQE_TIMEOUT;
11226 			io->saved_ext = 0;
11227 			io->saved_len = 0;
11228 
11229 			/* now abort outstanding IO */
11230 			rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL);
11231 			if (rc) {
11232 				ocs_log_test(hw->os,
11233 					"abort failed xri=%#x tag=%#x rc=%d\n",
11234 					io->indicator, io->reqtag, rc);
11235 			}
11236 		}
11237 		/*
11238 		 * need to go through entire list since each IO could have a
11239 		 * different timeout value
11240 		 */
11241 	}
11242 	ocs_unlock(&hw->io_lock);
11243 
11244 	/* if we're not in the middle of shutting down, schedule next timer */
11245 	if (!hw->active_wqe_timer_shutdown) {
11246 		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS);
11247 	}
11248 	hw->in_active_wqe_timer = FALSE;
11249 	return 0;
11250 }
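
/*
 * Editor's sketch (not part of the driver): the timeout test above is a
 * per-IO elapsed-seconds comparison, which is why the walk cannot stop at
 * the first unexpired entry:
 *
 *	getmicrouptime(&now);			// monotonic uptime
 *	timevalsub(&now, &io->submit_time);	// now -= submit_time
 *	if (now.tv_sec > io->wqe_timeout)
 *		abort_the_io(io);		// hypothetical helper
 */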
11251 
11252 static void
11253 target_wqe_timer_cb(void *arg)
11254 {
11255 	ocs_hw_t *hw = (ocs_hw_t *)arg;
11256 
11257 	/* mark the timer handler active; a new timer is armed after the WQE timeout check completes */
11258 	hw->in_active_wqe_timer = TRUE;
11259 
11260 	/* Forward timer callback to execute in the mailbox completion processing context */
11261 	if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) {
11262 		ocs_log_test(hw->os, "ocs_hw_async_call failed\n");
11263 	}
11264 }
11265 
11266 static void
11267 shutdown_target_wqe_timer(ocs_hw_t *hw)
11268 {
11269 	uint32_t	iters = 100;
11270 
11271 	if (hw->config.emulate_wqe_timeout) {
11272 		/* request active wqe timer shutdown, then wait for it to complete */
11273 		hw->active_wqe_timer_shutdown = TRUE;
11274 
11275 		/* delete the WQE timer */
11276 		ocs_del_timer(&hw->wqe_timer);
11277 
11278 		/* now wait for timer handler to complete (if necessary) */
11279 		while (hw->in_active_wqe_timer && iters) {
11280 			/*
11281 			 * if we happen to have just sent NOP mailbox command, make sure
11282 			 * completions are being processed
11283 			 */
11284 			ocs_hw_flush(hw);
11285 			iters--;
11286 		}
11287 
11288 		if (iters == 0) {
11289 			ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n");
11290 		}
11291 	}
11292 }
11293 
11294 /**
11295  * @brief Determine if HW IO is owned by the port.
11296  *
11297  * @par Description
11298  * Determines if the given HW IO has been posted to the chip.
11299  *
11300  * @param hw Hardware context allocated by the caller.
11301  * @param io HW IO.
11302  *
11303  * @return Returns TRUE if the given HW IO is port-owned.
11304  */
11305 uint8_t
11306 ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
11307 {
11308 	/* Check to see if this is a port owned XRI */
11309 	return io->is_port_owned;
11310 }
11311 
11312 /**
11313  * @brief Return TRUE if exchange is port-owned.
11314  *
11315  * @par Description
11316  * Tests whether the XRI is a port-owned XRI.
11317  *
11318  * @param hw Hardware context.
11319  * @param xri Exchange indicator.
11320  *
11321  * @return Returns TRUE if the XRI is a port-owned XRI.
11322  */
11323 
11324 uint8_t
11325 ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri)
11326 {
11327 	ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri);
11328 	return (io == NULL ? FALSE : io->is_port_owned);
11329 }
11330 
11331 /**
11332  * @brief Returns an XRI from the port owned list to the host.
11333  *
11334  * @par Description
11335  * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes.
11336  *
11337  * @param hw Hardware context.
11338  * @param xri_base The starting XRI number.
11339  * @param xri_count The number of XRIs to free from the base.
11340  */
11341 static void
11342 ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count)
11343 {
11344 	ocs_hw_io_t	*io;
11345 	uint32_t i;
11346 
11347 	for (i = 0; i < xri_count; i++) {
11348 		io = ocs_hw_io_lookup(hw, xri_base + i);
11349 
11350 		/*
11351 		 * if this is an auto xfer rdy XRI, then we need to release any
11352 		 * buffer attached to the XRI before moving the XRI back to the free pool.
11353 		 */
11354 		if (hw->auto_xfer_rdy_enabled) {
11355 			ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
11356 		}
11357 
11358 		ocs_lock(&hw->io_lock);
11359 			ocs_list_remove(&hw->io_port_owned, io);
11360 			io->is_port_owned = 0;
11361 			ocs_list_add_tail(&hw->io_free, io);
11362 		ocs_unlock(&hw->io_lock);
11363 	}
11364 }
11365 
11366 /**
11367  * @brief Called when the POST_XRI command completes.
11368  *
11369  * @par Description
11370  * Free the mailbox command buffer and reclaim the XRIs on failure.
11371  *
11372  * @param hw Hardware context.
11373  * @param status Status field from the mbox completion.
11374  * @param mqe Mailbox response structure.
11375  * @param arg Pointer to a callback function that signals the caller that the command is done.
11376  *
11377  * @return Returns 0.
11378  */
11379 static int32_t
11380 ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
11381 {
11382 	sli4_cmd_post_xri_t	*post_xri = (sli4_cmd_post_xri_t*)mqe;
11383 
11384 	/* Reclaim the XRIs as host owned if the command fails */
11385 	if (status != 0) {
11386 		ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt=0x%x\n",
11387 			      status, post_xri->xri_base, post_xri->xri_count);
11388 		ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count);
11389 	}
11390 
11391 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11392 	return 0;
11393 }
11394 
11395 /**
11396  * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port.
11397  *
11398  * @param hw Hardware context.
11399  * @param xri_start The starting XRI to post.
11400  * @param num_to_post The number of XRIs to post.
11401  *
11402  * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS.
11403  */
11404 
11405 static ocs_hw_rtn_e
11406 ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post)
11407 {
11408 	uint8_t	*post_xri;
11409 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11410 
11411 	/* The mailbox queue requires a non-local buffer, so always allocate one */
11412 	post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11413 	if (post_xri == NULL) {
11414 		ocs_log_err(hw->os, "no buffer for command\n");
11415 		return OCS_HW_RTN_NO_MEMORY;
11416 	}
11417 
11418 	/* Register the XRIs */
11419 	if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE,
11420 			     xri_start, num_to_post)) {
11421 		rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL);
11422 		if (rc != OCS_HW_RTN_SUCCESS) {
11423 			ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE);
11424 			ocs_log_err(hw->os, "post_xri failed\n");
11425 		}
11426 	}
11427 	return rc;
11428 }
11429 
11430 /**
11431  * @brief Move XRIs from the host-controlled pool to the port.
11432  *
11433  * @par Description
11434  * Removes IOs from the free list and moves them to the port.
11435  *
11436  * @param hw Hardware context.
11437  * @param num_xri The number of XRIs being requested to move to the chip.
11438  *
11439  * @return Returns the number of XRIs that were moved.
11440  */
11441 
11442 uint32_t
11443 ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri)
11444 {
11445 	ocs_hw_io_t	*io;
11446 	uint32_t i;
11447 	uint32_t num_posted = 0;
11448 
11449 	/*
11450 	 * Note: We cannot use ocs_hw_io_alloc() because that would place the
11451 	 *       IO on the io_inuse list. We need to move from the io_free to
11452 	 *       the io_port_owned list.
11453 	 */
11454 	ocs_lock(&hw->io_lock);
11455 
11456 	for (i = 0; i < num_xri; i++) {
11457 		if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
11458 			ocs_hw_rtn_e rc;
11459 
11460 			/*
11461 			 * if this is an auto xfer rdy XRI, then we need to attach a
11462 			 * buffer to the XRI before submitting it to the chip. If a
11463 			 * buffer is unavailable, then we cannot post it, so return it
11464 			 * to the free pool.
11465 			 */
11466 			if (hw->auto_xfer_rdy_enabled) {
11467 				/* Note: uses the IO lock to get the auto xfer rdy buffer */
11468 				ocs_unlock(&hw->io_lock);
11469 				rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io);
11470 				ocs_lock(&hw->io_lock);
11471 				if (rc != OCS_HW_RTN_SUCCESS) {
11472 					ocs_list_add_head(&hw->io_free, io);
11473 					break;
11474 				}
11475 			}
11476 			ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator);
11477 			io->is_port_owned = 1;
11478 			ocs_list_add_tail(&hw->io_port_owned, io);
11479 
11480 			/* Post XRI */
			if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS) {
11482 				ocs_hw_reclaim_xri(hw, io->indicator, i);
11483 				break;
11484 			}
11485 			num_posted++;
11486 		} else {
11487 			/* no more free XRIs */
11488 			break;
11489 		}
11490 	}
11491 	ocs_unlock(&hw->io_lock);
11492 
11493 	return num_posted;
11494 }
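
/*
 * Usage sketch (hypothetical count): request that 32 XRIs be migrated to
 * the port and check how many actually moved; the loop above stops early
 * if the free list or the auto xfer rdy buffer pool is exhausted.
 *
 *	uint32_t moved = ocs_hw_xri_move_to_port_owned(hw, 32);
 *	if (moved < 32) {
 *		ocs_log_debug(hw->os, "only %d of 32 XRIs moved\n", moved);
 *	}
 */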
11495 
11496 /**
11497  * @brief Called when the RELEASE_XRI command completes.
11498  *
11499  * @par Description
11500  * Move the IOs back to the free pool on success.
11501  *
11502  * @param hw Hardware context.
11503  * @param status Status field from the mbox completion.
11504  * @param mqe Mailbox response structure.
 * @param arg Caller-provided argument (unused; the command is submitted with a NULL argument).
11506  *
11507  * @return Returns 0.
11508  */
11509 static int32_t
ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11511 {
11512 	sli4_cmd_release_xri_t	*release_xri = (sli4_cmd_release_xri_t*)mqe;
11513 	uint8_t i;
11514 
	/* On success, reclaim the released XRIs as host owned; on failure, just log */
11516 	if (status != 0) {
11517 		ocs_log_err(hw->os, "Status 0x%x\n", status);
11518 	} else {
11519 		for (i = 0; i < release_xri->released_xri_count; i++) {
11520 			uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 :
11521 					release_xri->xri_tbl[i/2].xri_tag1);
11522 			ocs_hw_reclaim_xri(hw, xri, 1);
11523 		}
11524 	}
11525 
11526 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11527 	return 0;
11528 }
11529 
11530 /**
11531  * @brief Move XRIs from the port-controlled pool to the host.
11532  *
11533  * Requests XRIs from the FW to return to the host-owned pool.
11534  *
11535  * @param hw Hardware context.
 * @param num_xri The number of XRIs requested to be moved from the chip.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or a non-zero OCS_HW_RTN_* value on failure.
11539  */
11540 
11541 ocs_hw_rtn_e
ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri)
11543 {
11544 	uint8_t	*release_xri;
11545 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11546 
11547 	/* non-local buffer required for mailbox queue */
11548 	release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11549 	if (release_xri == NULL) {
11550 		ocs_log_err(hw->os, "no buffer for command\n");
11551 		return OCS_HW_RTN_NO_MEMORY;
11552 	}
11553 
11554 	/* release the XRIs */
11555 	if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) {
11556 		rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL);
11557 		if (rc != OCS_HW_RTN_SUCCESS) {
11558 			ocs_log_err(hw->os, "release_xri failed\n");
11559 		}
11560 	}
	/* On failure the completion callback never runs, so free the mailbox buffer here */
	if (rc != OCS_HW_RTN_SUCCESS) {
11563 		ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE);
11564 	}
11565 	return rc;
11566 }
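
/*
 * Sketch of the reverse migration (hypothetical count): ask the firmware to
 * release XRIs back to the host; the actual reclaim happens later, in
 * ocs_hw_cb_release_xri(), when the RELEASE_XRI mailbox command completes.
 *
 *	if (ocs_hw_xri_move_to_host_owned(hw, 16) != OCS_HW_RTN_SUCCESS) {
 *		ocs_log_err(hw->os, "RELEASE_XRI submission failed\n");
 *	}
 */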
11567 
11568 /**
 * @brief Allocate an ocs_hw_rq_buffer_t array.
 *
 * @par Description
 * An ocs_hw_rq_buffer_t array is allocated, along with the required DMA memory.
11573  *
11574  * @param hw Pointer to HW object.
11575  * @param rqindex RQ index for this buffer.
11576  * @param count Count of buffers in array.
11577  * @param size Size of buffer.
11578  *
11579  * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array.
11580  */
11581 static ocs_hw_rq_buffer_t *
ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size)
11583 {
11584 	ocs_t *ocs = hw->os;
11585 	ocs_hw_rq_buffer_t *rq_buf = NULL;
11586 	ocs_hw_rq_buffer_t *prq;
11587 	uint32_t i;
11588 
11589 	if (count != 0) {
11590 		rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO);
11591 		if (rq_buf == NULL) {
11592 			ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n");
11593 			return NULL;
11594 		}
11595 
11596 		for (i = 0, prq = rq_buf; i < count; i ++, prq++) {
11597 			prq->rqindex = rqindex;
			if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) {
				uint32_t j;

				ocs_log_err(hw->os, "DMA allocation failed\n");
				/* Unwind: free the DMA buffers already allocated before releasing the array */
				for (j = 0; j < i; j++) {
					ocs_dma_free(ocs, &rq_buf[j].dma);
				}
				ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
				rq_buf = NULL;
				break;
			}
		}
11605 	}
11606 	return rq_buf;
11607 }
11608 
11609 /**
 * @brief Free an ocs_hw_rq_buffer_t array.
 *
 * @par Description
 * The ocs_hw_rq_buffer_t array is freed, along with its allocated DMA memory.
 *
 * @param hw Pointer to HW object.
 * @param rq_buf Pointer to the ocs_hw_rq_buffer_t array.
11617  * @param count Count of buffers in array.
11618  *
11619  * @return None.
11620  */
11621 static void
ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count)
11623 {
11624 	ocs_t *ocs = hw->os;
11625 	uint32_t i;
11626 	ocs_hw_rq_buffer_t *prq;
11627 
11628 	if (rq_buf != NULL) {
11629 		for (i = 0, prq = rq_buf; i < count; i++, prq++) {
11630 			ocs_dma_free(ocs, &prq->dma);
11631 		}
11632 		ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11633 	}
11634 }
11635 
11636 /**
11637  * @brief Allocate the RQ data buffers.
11638  *
11639  * @param hw Pointer to HW object.
11640  *
11641  * @return Returns 0 on success, or a non-zero value on failure.
11642  */
11643 ocs_hw_rtn_e
ocs_hw_rx_allocate(ocs_hw_t *hw)
11645 {
11646 	ocs_t *ocs = hw->os;
11647 	uint32_t i;
11648 	int32_t rc = OCS_HW_RTN_SUCCESS;
11649 	uint32_t rqindex = 0;
11650 	hw_rq_t *rq;
11651 	uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR;
11652 	uint32_t payload_size = hw->config.rq_default_buffer_size;
11653 
11654 	rqindex = 0;
11655 
11656 	for (i = 0; i < hw->hw_rq_count; i++) {
11657 		rq = hw->hw_rq[i];
11658 
11659 		/* Allocate header buffers */
11660 		rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size);
11661 		if (rq->hdr_buf == NULL) {
11662 			ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n");
11663 			rc = OCS_HW_RTN_ERROR;
11664 			break;
11665 		}
11666 
11667 		ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header  %4d by %4d bytes\n", i, rq->hdr->id,
11668 			      rq->entry_count, hdr_size);
11669 
11670 		rqindex++;
11671 
11672 		/* Allocate payload buffers */
11673 		rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size);
11674 		if (rq->payload_buf == NULL) {
			ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc payload_buf failed\n");
11676 			rc = OCS_HW_RTN_ERROR;
11677 			break;
11678 		}
11679 		ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id,
11680 			      rq->entry_count, payload_size);
11681 		rqindex++;
11682 	}
11683 
11684 	return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
11685 }
11686 
11687 /**
11688  * @brief Post the RQ data buffers to the chip.
11689  *
11690  * @param hw Pointer to HW object.
11691  *
11692  * @return Returns 0 on success, or a non-zero value on failure.
11693  */
11694 ocs_hw_rtn_e
ocs_hw_rx_post(ocs_hw_t *hw)
11696 {
11697 	uint32_t i;
11698 	uint32_t idx;
11699 	uint32_t rq_idx;
11700 	int32_t rc = 0;
11701 
11702 	/*
11703 	 * In RQ pair mode, we MUST post the header and payload buffer at the
11704 	 * same time.
11705 	 */
11706 	for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
11707 		hw_rq_t *rq = hw->hw_rq[rq_idx];
11708 
11709 		for (i = 0; i < rq->entry_count-1; i++) {
11710 			ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++);
11711 			ocs_hw_assert(seq != NULL);
11712 
11713 			seq->header = &rq->hdr_buf[i];
11714 
11715 			seq->payload = &rq->payload_buf[i];
11716 
11717 			rc = ocs_hw_sequence_free(hw, seq);
11718 			if (rc) {
11719 				break;
11720 			}
11721 		}
11722 		if (rc) {
11723 			break;
11724 		}
11725 	}
11726 
11727 	return rc;
11728 }
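
/*
 * RX bring-up order, as a sketch: allocate the header and payload DMA
 * buffers, then post the header/payload pairs to the RQs; on teardown,
 * ocs_hw_rx_free() releases everything again.
 *
 *	if (ocs_hw_rx_allocate(hw) == OCS_HW_RTN_SUCCESS &&
 *	    ocs_hw_rx_post(hw) == 0) {
 *		... the RQs are now armed with buffers ...
 *	}
 *	...
 *	ocs_hw_rx_free(hw);
 */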
11729 
11730 /**
11731  * @brief Free the RQ data buffers.
11732  *
11733  * @param hw Pointer to HW object.
11734  *
11735  */
11736 void
ocs_hw_rx_free(ocs_hw_t *hw)
11738 {
11739 	hw_rq_t *rq;
11740 	uint32_t i;
11741 
11742 	/* Free hw_rq buffers */
11743 	for (i = 0; i < hw->hw_rq_count; i++) {
11744 		rq = hw->hw_rq[i];
11745 		if (rq != NULL) {
11746 			ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count);
11747 			rq->hdr_buf = NULL;
11748 			ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count);
11749 			rq->payload_buf = NULL;
11750 		}
11751 	}
11752 }
11753 
11754 /**
11755  * @brief HW async call context structure.
11756  */
11757 typedef struct {
11758 	ocs_hw_async_cb_t callback;
11759 	void *arg;
11760 	uint8_t cmd[SLI4_BMBX_SIZE];
11761 } ocs_hw_async_call_ctx_t;
11762 
11763 /**
11764  * @brief HW async callback handler
11765  *
11766  * @par Description
11767  * This function is called when the NOP mailbox command completes.  The callback stored
11768  * in the requesting context is invoked.
11769  *
11770  * @param hw Pointer to HW object.
11771  * @param status Completion status.
11772  * @param mqe Pointer to mailbox completion queue entry.
11773  * @param arg Caller-provided argument.
11774  *
11775  * @return None.
11776  */
11777 static void
ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11779 {
11780 	ocs_hw_async_call_ctx_t *ctx = arg;
11781 
11782 	if (ctx != NULL) {
11783 		if (ctx->callback != NULL) {
11784 			(*ctx->callback)(hw, status, mqe, ctx->arg);
11785 		}
11786 		ocs_free(hw->os, ctx, sizeof(*ctx));
11787 	}
11788 }
11789 
11790 /**
11791  * @brief Make an async callback using NOP mailbox command
11792  *
11793  * @par Description
11794  * Post a NOP mailbox command; the callback with argument is invoked upon completion
11795  * while in the event processing context.
11796  *
11797  * @param hw Pointer to HW object.
11798  * @param callback Pointer to callback function.
 * @param arg Caller-provided callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or a non-zero OCS_HW_RTN_* value on failure.
11802  */
11803 int32_t
ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg)
11805 {
11806 	ocs_hw_async_call_ctx_t *ctx;
11807 
11808 	/*
	 * Allocate a callback context (which includes the mailbox command buffer); it must
	 * be persistent, as the mailbox command submission may be queued and executed later.
11812 	 */
11813 	ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT);
11814 	if (ctx == NULL) {
11815 		ocs_log_err(hw->os, "failed to malloc async call context\n");
11816 		return OCS_HW_RTN_NO_MEMORY;
11817 	}
11818 	ctx->callback = callback;
11819 	ctx->arg = arg;
11820 
11821 	/* Build and send a NOP mailbox command */
11822 	if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) {
11823 		ocs_log_err(hw->os, "COMMON_NOP format failure\n");
11824 		ocs_free(hw->os, ctx, sizeof(*ctx));
11825 		return OCS_HW_RTN_ERROR;
11826 	}
11827 
11828 	if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) {
11829 		ocs_log_err(hw->os, "COMMON_NOP command failure\n");
11830 		ocs_free(hw->os, ctx, sizeof(*ctx));
11831 		return OCS_HW_RTN_ERROR;
11832 	}
11833 	return OCS_HW_RTN_SUCCESS;
11834 }
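
/*
 * Usage sketch: defer work into the mailbox completion (event processing)
 * context. "my_deferred_work" and "my_arg" are hypothetical; the callback
 * receives the same (hw, status, mqe, arg) arguments that ocs_hw_async_cb()
 * passes through.
 *
 *	if (ocs_hw_async_call(hw, my_deferred_work, my_arg) != OCS_HW_RTN_SUCCESS) {
 *		ocs_log_err(hw->os, "failed to schedule async work\n");
 *	}
 */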
11835 
11836 /**
11837  * @brief Initialize the reqtag pool.
11838  *
11839  * @par Description
11840  * The WQ request tag pool is initialized.
11841  *
11842  * @param hw Pointer to HW object.
11843  *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or OCS_HW_RTN_NO_MEMORY on allocation failure.
11845  */
11846 ocs_hw_rtn_e
ocs_hw_reqtag_init(ocs_hw_t *hw)
11848 {
11849 	if (hw->wq_reqtag_pool == NULL) {
11850 		hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE);
11851 		if (hw->wq_reqtag_pool == NULL) {
11852 			ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n");
11853 			return OCS_HW_RTN_NO_MEMORY;
11854 		}
11855 	}
11856 	ocs_hw_reqtag_reset(hw);
11857 	return OCS_HW_RTN_SUCCESS;
11858 }
11859 
11860 /**
11861  * @brief Allocate a WQ request tag.
11862  *
11863  * Allocate and populate a WQ request tag from the WQ request tag pool.
11864  *
11865  * @param hw Pointer to HW object.
11866  * @param callback Callback function.
11867  * @param arg Pointer to callback argument.
11868  *
11869  * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated.
11870  */
11871 hw_wq_callback_t *
ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
11873 {
11874 	hw_wq_callback_t *wqcb;
11875 
11876 	ocs_hw_assert(callback != NULL);
11877 
11878 	wqcb = ocs_pool_get(hw->wq_reqtag_pool);
11879 	if (wqcb != NULL) {
11880 		ocs_hw_assert(wqcb->callback == NULL);
11881 		wqcb->callback = callback;
11882 		wqcb->arg = arg;
11883 	}
11884 	return wqcb;
11885 }
11886 
11887 /**
11888  * @brief Free a WQ request tag.
11889  *
11890  * Free the passed in WQ request tag.
11891  *
11892  * @param hw Pointer to HW object.
11893  * @param wqcb Pointer to WQ request tag object to free.
11894  *
11895  * @return None.
11896  */
11897 void
ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb)
11899 {
11900 	ocs_hw_assert(wqcb->callback != NULL);
11901 	wqcb->callback = NULL;
11902 	wqcb->arg = NULL;
11903 	ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11904 }
11905 
11906 /**
11907  * @brief Return WQ request tag by index.
11908  *
11909  * @par Description
11910  * Return pointer to WQ request tag object given an index.
11911  *
11912  * @param hw Pointer to HW object.
11913  * @param instance_index Index of WQ request tag to return.
11914  *
11915  * @return Pointer to WQ request tag, or NULL.
11916  */
11917 hw_wq_callback_t *
ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index)
11919 {
11920 	hw_wq_callback_t *wqcb;
11921 
11922 	wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index);
11923 	if (wqcb == NULL) {
11924 		ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index);
11925 	}
11926 	return wqcb;
11927 }
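
/*
 * Request tag lifecycle, as a sketch: a WQE submitter allocates a tag that
 * carries its completion callback, encodes wqcb->instance_index in the WQE
 * request tag field, and the CQE handler later recovers the callback by
 * that index. "my_wqe_done", "my_arg", "cqe", and "status" are hypothetical.
 *
 *	hw_wq_callback_t *wqcb = ocs_hw_reqtag_alloc(hw, my_wqe_done, my_arg);
 *	... place wqcb->instance_index in the WQE request tag ...
 *
 *	then, on completion:
 *	wqcb = ocs_hw_reqtag_get_instance(hw, request_tag);
 *	wqcb->callback(wqcb->arg, cqe, status);
 *	ocs_hw_reqtag_free(hw, wqcb);
 */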
11928 
11929 /**
11930  * @brief Reset the WQ request tag pool.
11931  *
11932  * @par Description
11933  * Reset the WQ request tag pool, returning all to the free list.
11934  *
 * @param hw Pointer to HW object.
11936  *
11937  * @return None.
11938  */
11939 void
ocs_hw_reqtag_reset(ocs_hw_t *hw)
11941 {
11942 	hw_wq_callback_t *wqcb;
11943 	uint32_t i;
11944 
11945 	/* Remove all from freelist */
11946 	while(ocs_pool_get(hw->wq_reqtag_pool) != NULL) {
11947 		;
11948 	}
11949 
11950 	/* Put them all back */
11951 	for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) {
11952 		wqcb->instance_index = i;
11953 		wqcb->callback = NULL;
11954 		wqcb->arg = NULL;
11955 		ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11956 	}
11957 }
11958 
11959 /**
11960  * @brief Handle HW assertion
11961  *
11962  * HW assert, display diagnostic message, and abort.
11963  *
11964  * @param cond string describing failing assertion condition
11965  * @param filename file name
11966  * @param linenum line number
11967  *
11968  * @return none
11969  */
11970 void
_ocs_hw_assert(const char *cond, const char *filename, int linenum)
11972 {
11973 	ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond);
11974 	ocs_abort();
	/* no return */
11976 }
11977 
11978 /**
11979  * @brief Handle HW verify
11980  *
11981  * HW verify, display diagnostic message, dump stack and return.
11982  *
11983  * @param cond string describing failing verify condition
11984  * @param filename file name
11985  * @param linenum line number
11986  *
11987  * @return none
11988  */
11989 void
_ocs_hw_verify(const char *cond, const char *filename, int linenum)
11991 {
11992 	ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond);
11993 	ocs_print_stack();
11994 }
11995 
11996 /**
 * @brief Requeue an XRI.
 *
 * @par Description
 * Attaches a fresh auto xfer rdy buffer to the XRI and submits a
 * REQUEUE_XRI WQE to hand it back to the chip.
 *
 * @param hw Pointer to HW object.
 * @param io Pointer to HW IO.
 *
 * @return Returns 0 on success, or -1 on failure.
12006  */
12007 int32_t
ocs_hw_reque_xri(ocs_hw_t *hw, ocs_hw_io_t *io)
12009 {
12010 	int32_t rc = 0;
12011 
12012 	rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1);
12013 	if (rc) {
12014 		ocs_list_add_tail(&hw->io_port_dnrx, io);
12015 		rc = -1;
12016 		goto exit_ocs_hw_reque_xri;
12017 	}
12018 
12019 	io->auto_xfer_rdy_dnrx = 0;
12020 	io->type = OCS_HW_IO_DNRX_REQUEUE;
12021 	if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) {
12022 		/* Clear buffer from XRI */
12023 		ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
12024 		io->axr_buf = NULL;
12025 
12026 		ocs_log_err(hw->os, "requeue_xri WQE error\n");
12027 		ocs_list_add_tail(&hw->io_port_dnrx, io);
12028 
12029 		rc = -1;
12030 		goto exit_ocs_hw_reque_xri;
12031 	}
12032 
12033 	if (io->wq == NULL) {
12034 		io->wq = ocs_hw_queue_next_wq(hw, io);
12035 		ocs_hw_assert(io->wq != NULL);
12036 	}
12037 
12038 	/*
12039 	 * Add IO to active io wqe list before submitting, in case the
12040 	 * wcqe processing preempts this thread.
12041 	 */
12042 	OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
12043 	OCS_STAT(io->wq->use_count++);
12044 
12045 	rc = hw_wq_write(io->wq, &io->wqe);
	if (rc < 0) {
		ocs_log_err(hw->os, "sli_queue_write reque xri failed: %d\n", rc);
		rc = -1;
	} else {
		rc = 0;
	}

exit_ocs_hw_reque_xri:
	return rc;
12053 }
12054 
12055 uint32_t
ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn)
12057 {
12058 	sli4_t *sli4 = &ocs->hw.sli;
12059 	ocs_dma_t       dma;
12060 	uint8_t		*payload = NULL;
12061 
12062 	int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan;
12063 
12064 	/* allocate memory for the service parameters */
12065 	if (ocs_dma_alloc(ocs, &dma, 112, 4)) {
12066 		ocs_log_err(ocs, "Failed to allocate DMA memory\n");
12067 		return 1;
12068 	}
12069 
12070 	if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
12071 				&dma, indicator)) {
		ocs_log_err(ocs, "READ_SPARM64 format failure\n");
12073 		ocs_dma_free(ocs, &dma);
12074 		return 1;
12075 	}
12076 
12077 	if (sli_bmbx_command(sli4)) {
12078 		ocs_log_err(ocs, "READ_SPARM64 command failure\n");
12079 		ocs_dma_free(ocs, &dma);
12080 		return 1;
12081 	}
12082 
12083 	payload = dma.virt;
12084 	ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn));
12085 	ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn));
12086 	ocs_dma_free(ocs, &dma);
12087 	return 0;
12088 }
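
/*
 * Usage sketch: read the default WWPN/WWNN for channel 0 via READ_SPARM64;
 * the function returns 0 on success and 1 on failure.
 *
 *	uint64_t wwpn, wwnn;
 *
 *	if (ocs_hw_get_def_wwn(ocs, 0, &wwpn, &wwnn) == 0) {
 *		... wwpn and wwnn now hold the default port/node names ...
 *	}
 */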
12089 
12090 uint32_t
ocs_hw_get_config_persistent_topology(ocs_hw_t *hw)
12092 {
	uint32_t topology = OCS_HW_TOPOLOGY_AUTO;
	sli4_t *sli = &hw->sli;

	if (!sli_persist_topology_enabled(sli))
		return topology;

	switch (sli->config.pt) {
	case SLI4_INIT_LINK_F_P2P_ONLY:
		topology = OCS_HW_TOPOLOGY_NPORT;
		break;
	case SLI4_INIT_LINK_F_FCAL_ONLY:
		topology = OCS_HW_TOPOLOGY_LOOP;
		break;
	default:
		break;
	}

	return topology;
12111 }
12112 
/**
12114  * @brief Persistent topology configuration callback argument.
12115  */
12116 typedef struct ocs_hw_persistent_topo_cb_arg {
12117 	ocs_sem_t semaphore;
12118 	int32_t status;
12119 } ocs_hw_persistent_topo_cb_arg_t;
12120 
/**
 * @brief Called after the completion of a set persistent topology request.
 *
 * @par Description
 * This is the completion callback for ocs_hw_set_persistent_topology(). It is
 * invoked when the COMMON_SET_FEATURES mailbox command completes.
12128  *
12129  * @param hw Hardware context.
12130  * @param status The status from the MQE.
12131  * @param mqe Pointer to mailbox command buffer.
12132  * @param arg Pointer to a callback argument.
12133  *
 * @return Returns 0.
12135  */
12136 static int32_t
ocs_hw_set_persistent_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
12138 {
12139 	ocs_hw_persistent_topo_cb_arg_t *req = (ocs_hw_persistent_topo_cb_arg_t *)arg;
12140 
12141 	req->status = status;
12142 
12143 	ocs_sem_v(&req->semaphore);
12144 
12145 	return 0;
12146 }
12147 
12148 /**
12149  * @brief Set persistent topology
12150  *
 * Sets the persistent topology (PT) feature using the COMMON_SET_FEATURES
 * command. If the mailbox command succeeds, the topology is recorded in the
 * SLI config. PT holds the value to be placed in the link_flags of the
 * INIT_LINK command when bringing up the link.
 *
 * The SLI specification defines the following for PT:
 *     When TF is set to 0:
 *       0 Reserved
 *       1 Attempt point-to-point initialization (direct attach or Fabric topology).
 *       2 Attempt FC-AL loop initialization.
 *       3 Reserved
 *
 *     When TF is set to 1:
 *       0 Attempt FC-AL loop initialization; if it fails, attempt point-to-point initialization.
 *       1 Attempt point-to-point initialization; if it fails, attempt FC-AL loop initialization.
 *       2 Reserved
 *       3 Reserved
 *
 *     Note: Topology failover is only available on Lancer G5. This command will fail
 *     if TF is set to 1 on any other ASIC.
12171  *
 * @param hw Pointer to HW object.
 * @param topology Topology value to be set, provided through the
 *        elxsdkutil set-topology command.
12175  *
12176  * @return Returns 0 on success, or a non-zero value on failure.
12177  */
12178 ocs_hw_rtn_e
ocs_hw_set_persistent_topology(ocs_hw_t *hw, uint32_t topology, uint32_t opts)
12180 {
12181 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
12182 	uint8_t buf[SLI4_BMBX_SIZE];
12183 	sli4_req_common_set_features_persistent_topo_param_t param;
12184 	ocs_hw_persistent_topo_cb_arg_t request;
12185 
12186 	ocs_memset(&param, 0, sizeof(param));
12187 	param.persistent_topo = topology;
12188 
12189 	switch (topology) {
12190 	case OCS_HW_TOPOLOGY_AUTO:
12191 		if (sli_get_asic_type(&hw->sli) == SLI4_ASIC_TYPE_LANCER) {
12192 			param.persistent_topo = SLI4_INIT_LINK_F_P2P_FAIL_OVER;
12193 			param.topo_failover = 1;
12194 		} else {
12195 			param.persistent_topo = SLI4_INIT_LINK_F_P2P_ONLY;
12196 			param.topo_failover = 0;
12197 		}
12198 		break;
12199 
12200 	case OCS_HW_TOPOLOGY_NPORT:
12201 		param.persistent_topo = SLI4_INIT_LINK_F_P2P_ONLY;
12202 		param.topo_failover = 0;
12203 		break;
12204 
12205 	case OCS_HW_TOPOLOGY_LOOP:
12206 		param.persistent_topo = SLI4_INIT_LINK_F_FCAL_ONLY;
12207 		param.topo_failover = 0;
12208 		break;
12209 
12210 	default:
12211 		ocs_log_err(hw->os, "unsupported topology %#x\n", topology);
12212 		return -1;
12213 	}
12214 
12215 	ocs_sem_init(&request.semaphore, 0, "set_persistent_topo");
12216 
12217 	/* build the set_features command */
12218 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
12219 		SLI4_SET_FEATURES_PERSISTENT_TOPOLOGY, sizeof(param), &param);
12220 
12221 	if (opts == OCS_CMD_POLL) {
12222 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
12223 		if (rc) {
12224 			ocs_log_err(hw->os, "Failed to set persistent topology, rc: %#x\n", rc);
12225 			return rc;
12226 		}
	} else {
		/* there is no response data for this feature command */
		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_set_persistent_topology_cb, &request);
12231 		if (rc) {
12232 			ocs_log_err(hw->os, "Failed to set persistent topology, rc: %#x\n", rc);
12233 			return rc;
12234 		}
12235 
12236 		if (ocs_sem_p(&request.semaphore, OCS_SEM_FOREVER)) {
12237 			ocs_log_err(hw->os, "ocs_sem_p failed\n");
12238 			return -ENXIO;
12239 		}
12240 
12241 		if (request.status) {
12242 			ocs_log_err(hw->os, "set persistent topology failed; status: %d\n", request.status);
12243 			return -EFAULT;
12244 		}
12245 	}
12246 
12247 	sli_config_persistent_topology(&hw->sli, &param);
12248 
12249 	return rc;
12250 }
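
/*
 * Usage sketch: persist an N_Port-only topology using the polled path, then
 * read the resulting configuration back:
 *
 *	if (ocs_hw_set_persistent_topology(hw, OCS_HW_TOPOLOGY_NPORT,
 *					   OCS_CMD_POLL) == OCS_HW_RTN_SUCCESS) {
 *		uint32_t topo = ocs_hw_get_config_persistent_topology(hw);
 *		... topo is OCS_HW_TOPOLOGY_NPORT if PT is enabled ...
 *	}
 */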
12251 
12252 /**
12253  * @page fc_hw_api_overview HW APIs
12254  * - @ref devInitShutdown
12255  * - @ref domain
12256  * - @ref port
12257  * - @ref node
12258  * - @ref io
12259  * - @ref interrupt
12260  *
12261  * <div class="overview">
12262  * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
12263  * message details, but the higher level code must still manage domains, ports,
12264  * IT nexuses, and IOs. The HW API is designed to help the higher level manage
12265  * these objects.<br><br>
12266  *
12267  * The HW uses function callbacks to notify the higher-level code of events
12268  * that are received from the chip. There are currently three types of
12269  * functions that may be registered:
12270  *
 * <ul><li>domain – This function is called whenever a domain event is generated
 * within the HW. Examples include the discovery of a new FCF, the disruption of a
 * connection to the domain, and allocation callbacks.</li>
12274  * <li>unsolicited – This function is called whenever new data is received in
12275  * the SLI-4 receive queue.</li>
12276  * <li>rnode – This function is called for remote node events, such as attach status
12277  * and  allocation callbacks.</li></ul>
12278  *
12279  * Upper layer functions may be registered by using the ocs_hw_callback() function.
12280  *
12281  * <img src="elx_fc_hw.jpg" alt="FC/FCoE HW" title="FC/FCoE HW" align="right"/>
12282  * <h2>FC/FCoE HW API</h2>
12283  * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
12284  * interface for creating the necessary common objects and sending I/Os. It may be used
12285  * “as is” in customer implementations or it can serve as an example of typical interactions
12286  * between a driver and the SLI-4 hardware. The broad categories of functionality include:
12287  *
12288  * <ul><li>Setting-up and tearing-down of the HW.</li>
12289  * <li>Allocating and using the common objects (SLI Port, domain, remote node).</li>
12290  * <li>Sending and receiving I/Os.</li></ul>
12291  *
12292  * <h3>HW Setup</h3>
12293  * To set up the HW:
12294  *
12295  * <ol>
12296  * <li>Set up the HW object using ocs_hw_setup().<br>
12297  * This step performs a basic configuration of the SLI-4 component and the HW to
12298  * enable querying the hardware for its capabilities. At this stage, the HW is not
12299  * capable of general operations (such as, receiving events or sending I/Os).</li><br><br>
12300  * <li>Configure the HW according to the driver requirements.<br>
12301  * The HW provides functions to discover hardware capabilities (ocs_hw_get()), as
12302  * well as configures the amount of resources required (ocs_hw_set()). The driver
12303  * must also register callback functions (ocs_hw_callback()) to receive notification of
12304  * various asynchronous events.<br><br>
12305  * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
12306  * step creates the underlying queues, commits resources to the hardware, and
12307  * prepares the hardware for operation. While the hardware is operational, the
12308  * port is not online, and cannot send or receive data.</li><br><br>
12309  * <br><br>
12310  * <li>Finally, the driver can bring the port online (ocs_hw_port_control()).<br>
12311  * When the link comes up, the HW determines if a domain is present and notifies the
12312  * driver using the domain callback function. This is the starting point of the driver's
12313  * interaction with the common objects.<br><br>
12314  * @b Note: For FCoE, there may be more than one domain available and, therefore,
12315  * more than one callback.</li>
12316  * </ol>
12317  *
12318  * <h3>Allocating and Using Common Objects</h3>
12319  * Common objects provide a mechanism through which the various OneCore Storage
12320  * driver components share and track information. These data structures are primarily
12321  * used to track SLI component information but can be extended by other components, if
12322  * needed. The main objects are:
12323  *
12324  * <ul><li>DMA – the ocs_dma_t object describes a memory region suitable for direct
12325  * memory access (DMA) transactions.</li>
12326  * <li>SCSI domain – the ocs_domain_t object represents the SCSI domain, including
12327  * any infrastructure devices such as FC switches and FC forwarders. The domain
12328  * object contains both an FCFI and a VFI.</li>
12329  * <li>SLI Port (sport) – the ocs_sli_port_t object represents the connection between
12330  * the driver and the SCSI domain. The SLI Port object contains a VPI.</li>
12331  * <li>Remote node – the ocs_remote_node_t represents a connection between the SLI
12332  * Port and another device in the SCSI domain. The node object contains an RPI.</li></ul>
12333  *
12334  * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote
12335  * node common objects and establish the connections between them. The goal is to
12336  * connect the driver to the SCSI domain to exchange I/Os with other devices. These
12337  * common object connections are shown in the following figure, FC Driver Common Objects:
12338  * <img src="elx_fc_common_objects.jpg"
12339  * alt="FC Driver Common Objects" title="FC Driver Common Objects" align="center"/>
12340  *
12341  * The first step is to create a connection to the domain by allocating an SLI Port object.
12342  * The SLI Port object represents a particular FC ID and must be initialized with one. With
12343  * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying
12344  * a domain, the driver allocates a domain object and attaches to it using the previous SLI
12345  * port object.<br><br>
12346  *
12347  * @b Note: In some cases, the driver may need to negotiate service parameters (that is,
12348  * FLOGI) with the domain before attaching.<br><br>
12349  *
12350  * Once attached to the domain, the driver can discover and attach to other devices
12351  * (remote nodes). The exact discovery method depends on the driver, but it typically
12352  * includes using a position map, querying the fabric name server, or an out-of-band
12353  * method. In most cases, it is necessary to log in with devices before performing I/Os.
12354  * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must
12355  * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is
12356  * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port
12357  * before exchanging FCP I/O.<br><br>
12358  *
12359  * @b Note: The HW manages both the well known fabric address and the name server as
12360  * nodes in the domain. Therefore, the driver must allocate node objects prior to
12361  * communicating with either of these entities.
12362  *
12363  * <h3>Sending and Receiving I/Os</h3>
12364  * The HW provides separate interfaces for sending BLS/ ELS/ FC-CT and FCP, but the
12365  * commands are conceptually similar. Since the commands complete asynchronously,
12366  * the caller must provide a HW I/O object that maintains the I/O state, as well as
12367  * provide a callback function. The driver may use the same callback function for all I/O
12368  * operations, but each operation must use a unique HW I/O object. In the SLI-4
12369  * architecture, there is a direct association between the HW I/O object and the SGL used
12370  * to describe the data. Therefore, a driver typically performs the following operations:
12371  *
12372  * <ul><li>Allocates a HW I/O object (ocs_hw_io_alloc()).</li>
12373  * <li>Formats the SGL, specifying both the HW I/O object and the SGL.
12374  * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).</li>
12375  * <li>Sends the HW I/O (ocs_hw_io_send()).</li></ul>
12376  *
12377  * <h3>HW Tear Down</h3>
12378  * To tear-down the HW:
12379  *
12380  * <ol><li>Take the port offline (ocs_hw_port_control()) to prevent receiving further
 * data and events.</li>
12382  * <li>Destroy the HW object (ocs_hw_teardown()).</li>
12383  * <li>Free any memory used by the HW, such as buffers for unsolicited data.</li></ol>
12384  * <br>
12385  * </div><!-- overview -->
12386  *
12387  */
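
/*
 * The setup sequence described above, condensed into a sketch. Argument
 * lists are elided where the overview does not spell them out; see the
 * prototypes in ocs_hw.h for the actual signatures.
 *
 *	ocs_hw_setup(...);          - basic SLI-4/HW configuration
 *	ocs_hw_get(...);            - discover hardware capabilities
 *	ocs_hw_set(...);            - configure required resources
 *	ocs_hw_callback(...);       - register domain/unsolicited/rnode callbacks
 *	ocs_hw_init(...);           - create queues, commit resources
 *	ocs_hw_port_control(...);   - bring the port online
 */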
12388 
12389 /**
12390  * This contains all hw runtime workaround code.  Based on the asic type,
12391  * asic revision, and range of fw revisions, a particular workaround may be enabled.
12392  *
12393  * A workaround may consist of overriding a particular HW/SLI4 value that was initialized
12394  * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue
12395  * sizes). Or if required, elements of the ocs_hw_workaround_t structure may be set to
12396  * control specific runtime behavior.
12397  *
12398  * It is intended that the controls in ocs_hw_workaround_t be defined functionally.  So we
12399  * would have the driver look like:  "if (hw->workaround.enable_xxx) then ...", rather than
12400  * what we might previously see as "if this is a BE3, then do xxx"
12401  *
12402  */
12403 
12404 #define HW_FWREV_ZERO		(0ull)
12405 #define HW_FWREV_MAX		(~0ull)
12406 
12407 #define SLI4_ASIC_TYPE_ANY	0
12408 #define SLI4_ASIC_REV_ANY	0
12409 
12410 /**
12411  * @brief Internal definition of workarounds
12412  */
12413 
12414 typedef enum {
12415 	HW_WORKAROUND_TEST = 1,
12416 	HW_WORKAROUND_MAX_QUEUE,	/**< Limits all queues */
12417 	HW_WORKAROUND_MAX_RQ,		/**< Limits only the RQ */
12418 	HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH,
12419 	HW_WORKAROUND_WQE_COUNT_METHOD,
12420 	HW_WORKAROUND_RQE_COUNT_METHOD,
	HW_WORKAROUND_USE_UNREGISTERED_RPI,
	HW_WORKAROUND_DISABLE_AR_TGT_DIF, /**< Disable auto-response target DIF */
12423 	HW_WORKAROUND_DISABLE_SET_DUMP_LOC,
12424 	HW_WORKAROUND_USE_DIF_QUARANTINE,
12425 	HW_WORKAROUND_USE_DIF_SEC_XRI,		/**< Use secondary xri for multiple data phases */
12426 	HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB,	/**< FCFI reported in SRB not correct, use "first" registered domain */
12427 	HW_WORKAROUND_FW_VERSION_TOO_LOW,	/**< The FW version is not the min version supported by this driver */
12428 	HW_WORKAROUND_SGLC_MISREPORTED,	/**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */
12429 	HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE,	/**< Don't use SEND_FRAME capable if FW version is too old */
12430 } hw_workaround_e;
12431 
12432 /**
12433  * @brief Internal workaround structure instance
12434  */
12435 
12436 typedef struct {
12437 	sli4_asic_type_e asic_type;
12438 	sli4_asic_rev_e asic_rev;
12439 	uint64_t fwrev_low;
12440 	uint64_t fwrev_high;
12441 
12442 	hw_workaround_e workaround;
12443 	uint32_t value;
12444 } hw_workaround_t;
12445 
12446 static hw_workaround_t hw_workarounds[] = {
12447 	{SLI4_ASIC_TYPE_ANY,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12448 		HW_WORKAROUND_TEST, 999},
12449 
12450 	/* Bug: 127585: if_type == 2 returns 0 for total length placed on
12451 	 * FCP_TSEND64_WQE completions.   Note, original driver code enables this
12452 	 * workaround for all asic types
12453 	 */
12454 	{SLI4_ASIC_TYPE_ANY,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12455 		HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0},
12456 
12457 	/* Bug: unknown, Lancer A0 has mis-reported max queue depth */
12458 	{SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX,
12459 		HW_WORKAROUND_MAX_QUEUE, 2048},
12460 
12461 	/* Bug: 143399, BE3 has mis-reported max RQ queue depth */
12462 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0),
12463 		HW_WORKAROUND_MAX_RQ, 2048},
12464 
12465 	/* Bug: 143399, skyhawk has mis-reported max RQ queue depth */
12466 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0),
12467 		HW_WORKAROUND_MAX_RQ, 2048},
12468 
12469 	/* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */
12470 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12471 		HW_WORKAROUND_WQE_COUNT_METHOD, 1},
12472 
12473 	/* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */
12474 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12475 		HW_WORKAROUND_RQE_COUNT_METHOD, 1},
12476 
12477 	/* Bug: 142968, BE3 UE with RPI == 0xffff */
12478 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_USE_UNREGISTERED_RPI, 0},
12480 
12481 	/* Bug: unknown, Skyhawk won't support auto-response on target T10-PI  */
12482 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12483 		HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0},
12484 
12485 	{SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0),
12486 		HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0},
12487 
12488 	/* Bug: 160124, Skyhawk quarantine DIF XRIs  */
12489 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12490 		HW_WORKAROUND_USE_DIF_QUARANTINE, 0},
12491 
12492 	/* Bug: 161832, Skyhawk use secondary XRI for multiple data phase TRECV */
12493 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12494 		HW_WORKAROUND_USE_DIF_SEC_XRI, 0},
12495 
	/* Bug: xxxxxx, FCFI reported in SRB not correct */
12497 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12498 		HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0},
12499 #if 0
12500 	/* Bug: 165642, FW version check for driver */
12501 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER),
12502 		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12503 #endif
12504 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK),
12505 		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12506 
12507 	/* Bug 177061, Lancer FW does not set the SGLC bit */
12508 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12509 		HW_WORKAROUND_SGLC_MISREPORTED, 0},
12510 
12511 	/* BZ 181208/183914, enable this workaround for ALL revisions */
12512 	{SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12513 		HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0},
12514 };
12515 
12516 /**
12517  * @brief Function prototypes
12518  */
12519 
12520 static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w);
12521 
12522 /**
12523  * @brief Parse the firmware version (name)
12524  *
12525  * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined
12526  * by the HW_FWREV() macro
12527  *
12528  * @param fwrev_string pointer to the firmware string
12529  *
12530  * @return packed firmware revision value
12531  */
12532 
12533 static uint64_t
parse_fw_version(const char *fwrev_string)
12535 {
12536 	int v[4] = {0};
12537 	const char *p;
12538 	int i;
12539 
12540 	for (p = fwrev_string, i = 0; *p && (i < 4); i ++) {
12541 		v[i] = ocs_strtoul(p, 0, 0);
12542 		while(*p && *p != '.') {
12543 			p ++;
12544 		}
12545 		if (*p) {
12546 			p ++;
12547 		}
12548 	}
12549 
12550 	/* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */
12551 	if (v[2] == 9999) {
12552 		return HW_FWREV_MAX;
12553 	} else {
12554 		return HW_FWREV(v[0], v[1], v[2], v[3]);
12555 	}
12556 }
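
/*
 * Examples, per the parsing rules above:
 *
 *	parse_fw_version("4.2.314.0")  == HW_FWREV(4, 2, 314, 0)
 *	parse_fw_version("0.0.9999.0") == HW_FWREV_MAX  (bootleg special case)
 */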
12557 
12558 /**
12559  * @brief Test for a workaround match
12560  *
 * Looks at the asic type, asic revision, and fw revision, and returns TRUE if they match.
12562  *
12563  * @param hw Pointer to the HW structure
12564  * @param w Pointer to a workaround structure entry
12565  *
12566  * @return Return TRUE for a match
12567  */
12568 
12569 static int32_t
ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w)
12571 {
12572 	return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) &&
12573 		    ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) &&
12574 		    (w->fwrev_low <= hw->workaround.fwrev) &&
12575 		    ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high)));
12576 }
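
/*
 * Worked example against the table above: a BE3 (any revision) running
 * firmware 4.1.0.0 matches the HW_WORKAROUND_MAX_RQ entry, because
 * HW_FWREV_ZERO <= HW_FWREV(4,1,0,0) < HW_FWREV(4,6,293,0); the RQ depth
 * is then clamped to 2048 in ocs_hw_workaround_setup().
 */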
12577 
12578 /**
12579  * @brief Setup HW runtime workarounds
12580  *
 * The function is called at the end of ocs_hw_setup() to set up any runtime workarounds
12582  * based on the HW/SLI setup.
12583  *
12584  * @param hw Pointer to HW structure
12585  *
12586  * @return none
12587  */
12588 
12589 void
ocs_hw_workaround_setup(struct ocs_hw_s *hw)
12591 {
12592 	hw_workaround_t *w;
12593 	sli4_t *sli4 = &hw->sli;
12594 	uint32_t i;
12595 
12596 	/* Initialize the workaround settings */
12597 	ocs_memset(&hw->workaround, 0, sizeof(hw->workaround));
12598 
	/* If hw_war_version is non-null, then it's a value that was set by a module parameter
12600 	 * (sorry for the break in abstraction, but workarounds are ... well, workarounds)
12601 	 */
12602 
12603 	if (hw->hw_war_version) {
12604 		hw->workaround.fwrev = parse_fw_version(hw->hw_war_version);
12605 	} else {
12606 		hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]);
12607 	}
12608 
12609 	/* Walk the workaround list, if a match is found, then handle it */
12610 	for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) {
12611 		if (ocs_hw_workaround_match(hw, w)) {
12612 			switch(w->workaround) {
12613 			case HW_WORKAROUND_TEST: {
12614 				ocs_log_debug(hw->os, "Override: test: %d\n", w->value);
12615 				break;
12616 			}
12617 
12618 			case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: {
12619 				ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n");
12620 				hw->workaround.retain_tsend_io_length = 1;
12621 				break;
12622 			}
12623 			case HW_WORKAROUND_MAX_QUEUE: {
12624 				sli4_qtype_e q;
12625 
12626 				ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value);
12627 				for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
12628 					if (hw->num_qentries[q] > w->value) {
12629 						hw->num_qentries[q] = w->value;
12630 					}
12631 				}
12632 				break;
12633 			}
12634 			case HW_WORKAROUND_MAX_RQ: {
12635 				ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value);
12636 				if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) {
12637 					hw->num_qentries[SLI_QTYPE_RQ] = w->value;
12638 				}
12639 				break;
12640 			}
12641 			case HW_WORKAROUND_WQE_COUNT_METHOD: {
12642 				ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value);
12643 				sli4->config.count_method[SLI_QTYPE_WQ] = w->value;
12644 				sli_calc_max_qentries(sli4);
12645 				break;
12646 			}
12647 			case HW_WORKAROUND_RQE_COUNT_METHOD: {
12648 				ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value);
12649 				sli4->config.count_method[SLI_QTYPE_RQ] = w->value;
12650 				sli_calc_max_qentries(sli4);
12651 				break;
12652 			}
			case HW_WORKAROUND_USE_UNREGISTERED_RPI:
12654 				ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n");
12655 				hw->workaround.use_unregistered_rpi = TRUE;
12656 				/*
12657 				 * Allocate an RPI that is never registered, to be used in the case where
12658 				 * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF
12659 				 */
12660 				if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid,
12661 					&hw->workaround.unregistered_index)) {
12662 					ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n");
12663 					hw->workaround.use_unregistered_rpi = FALSE;
12664 				}
12665 				break;
12666 			case HW_WORKAROUND_DISABLE_AR_TGT_DIF:
12667 				ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n");
12668 				hw->workaround.disable_ar_tgt_dif = TRUE;
12669 				break;
12670 			case HW_WORKAROUND_DISABLE_SET_DUMP_LOC:
12671 				ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n");
12672 				hw->workaround.disable_dump_loc = TRUE;
12673 				break;
12674 			case HW_WORKAROUND_USE_DIF_QUARANTINE:
12675 				ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n");
12676 				hw->workaround.use_dif_quarantine = TRUE;
12677 				break;
12678 			case HW_WORKAROUND_USE_DIF_SEC_XRI:
12679 				ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n");
12680 				hw->workaround.use_dif_sec_xri = TRUE;
12681 				break;
12682 			case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB:
12683 				ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n");
12684 				hw->workaround.override_fcfi = TRUE;
12685 				break;
12686 
12687 			case HW_WORKAROUND_FW_VERSION_TOO_LOW:
12688 				ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n");
12689 				hw->workaround.fw_version_too_low = TRUE;
12690 				break;
12691 			case HW_WORKAROUND_SGLC_MISREPORTED:
12692 				ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n");
12693 				hw->workaround.sglc_misreported = TRUE;
12694 				break;
12695 			case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE:
12696 				ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n");
12697 				hw->workaround.ignore_send_frame = TRUE;
12698 				break;
12699 			} /* switch(w->workaround) */
12700 		}
12701 	}
12702 }
12703