/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/**
 * @file
 * Defines and implements the Hardware Abstraction Layer (HW).
 * All interaction with the hardware is performed through the HW, which abstracts
 * the details of the underlying SLI-4 implementation.
 */

/**
 * @defgroup devInitShutdown Device Initialization and Shutdown
 * @defgroup domain Domain Functions
 * @defgroup port Port Functions
 * @defgroup node Remote Node Functions
 * @defgroup io IO Functions
 * @defgroup interrupt Interrupt handling
 * @defgroup os OS Required Functions
 */
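/*
 * Illustrative sketch (not driver code): the typical HW lifecycle implied by
 * this file. ocs_hw_setup() binds the SLI-4 port and applies workarounds,
 * ocs_hw_init() allocates the queues/IOs and brings the port to
 * OCS_HW_STATE_ACTIVE, and ocs_hw_teardown() releases everything. The `ocs'
 * handle, the SLI4_PORT_TYPE_FC constant, and the abbreviated error handling
 * are assumptions for the sketch.
 *
 *	ocs_hw_t *hw = &ocs->hw;
 *
 *	if (ocs_hw_setup(hw, ocs, SLI4_PORT_TYPE_FC) != OCS_HW_RTN_SUCCESS)
 *		return -1;
 *	// ...adjust hw->config via ocs_hw_set()/ocs_hw_set_ptr() here...
 *	if (ocs_hw_init(hw) != OCS_HW_RTN_SUCCESS)
 *		return -1;
 *	// ...run IO; ocs_hw_reset()+ocs_hw_init() to recover from errors...
 *	ocs_hw_teardown(hw);
 */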
#include "ocs.h"
#include "ocs_os.h"
#include "ocs_hw.h"
#include "ocs_hw_queues.h"

#define OCS_HW_MQ_DEPTH			128
#define OCS_HW_READ_FCF_SIZE		4096
#define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS	256
#define OCS_HW_WQ_TIMER_PERIOD_MS	500

/* values used for setting the auto xfer rdy parameters */
#define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT		0 /* 512 bytes */
#define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT	TRUE
#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT	FALSE
#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT	0
#define OCS_HW_REQUE_XRI_REGTAG			65534
/* max command and response buffer lengths -- arbitrary at the moment */
#define OCS_HW_DMTF_CLP_CMD_MAX			256
#define OCS_HW_DMTF_CLP_RSP_MAX			256

/* HW global data */
ocs_hw_global_t hw_global;

static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
static int32_t ocs_hw_cb_link(void *, void *);
static int32_t ocs_hw_cb_fip(void *, void *);
static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
static int32_t ocs_hw_flush(ocs_hw_t *);
static int32_t ocs_hw_command_cancel(ocs_hw_t *);
static int32_t ocs_hw_io_cancel(ocs_hw_t *);
static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);

static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
static void ocs_hw_io_free_internal(void *arg);
static void ocs_hw_io_free_port_owned(void *arg);
static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);

/* HW domain database operations */
static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);

/* Port state machine */
static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);

/* Domain state machine */
static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);

/* BZ 161832 */
static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);

/* WQE timeouts */
static void target_wqe_timer_cb(void *arg);
static void shutdown_target_wqe_timer(ocs_hw_t *hw);
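/*
 * Illustrative sketch (not driver code): how the target WQE timeout emulation
 * below is expected to work. ocs_hw_add_io_timed_wqe() stamps the submit time
 * and queues the IO on hw->io_timed_wqe; the periodic timer
 * (target_wqe_timer_cb, every OCS_HW_WQ_TIMER_PERIOD_MS) can then age entries
 * roughly like this. The `ticks_to_secs' helper and the abort action are
 * assumptions for the sketch, not the exact implementation:
 *
 *	ocs_lock(&hw->io_lock);
 *	ocs_list_foreach_safe(&hw->io_timed_wqe, io, next) {
 *		if (ticks_to_secs(ocs_get_os_ticks() - io->submit_ticks) >
 *		    io->tgt_wqe_timeout) {
 *			// timed out: abort the WQE; the completion path
 *			// then calls ocs_hw_remove_io_timed_wqe()
 *		}
 *	}
 *	ocs_unlock(&hw->io_lock);
 */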
static inline void
ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
		/*
		 * Active WQE list currently only used for
		 * target WQE timeouts.
		 */
		ocs_lock(&hw->io_lock);
		ocs_list_add_tail(&hw->io_timed_wqe, io);
		io->submit_ticks = ocs_get_os_ticks();
		ocs_unlock(&hw->io_lock);
	}
}

static inline void
ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (hw->config.emulate_tgt_wqe_timeout) {
		/*
		 * If target wqe timeouts are enabled,
		 * remove from active wqe list.
		 */
		ocs_lock(&hw->io_lock);
		if (ocs_list_on_list(&io->wqe_link)) {
			ocs_list_remove(&hw->io_timed_wqe, io);
		}
		ocs_unlock(&hw->io_lock);
	}
}

static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type)
{
	switch (io_type) {
	case OCS_HW_IO_INITIATOR_READ:
	case OCS_HW_IO_INITIATOR_WRITE:
	case OCS_HW_IO_INITIATOR_NODATA:
	case OCS_HW_FC_CT:
	case OCS_HW_ELS_REQ:
		return 1;
	default:
		return 0;
	}
}

static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
{
	/* if exchange not active, nothing to abort */
	if (!xb) {
		return FALSE;
	}
	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
		switch (ext) {
		/* exceptions where abort is not needed */
		case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */
		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */
			return FALSE;
		default:
			break;
		}
	}
	return TRUE;
}

/**
 * @brief Determine the number of chutes on the device.
 *
 * @par Description
 * Some devices require queue resources allocated per protocol processor
 * (chute). This function returns the number of chutes on this device.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns the number of chutes on the device for protocol.
 */
static uint32_t
ocs_hw_get_num_chutes(ocs_hw_t *hw)
{
	uint32_t num_chutes = 1;

	if (sli_get_is_dual_ulp_capable(&hw->sli) &&
	    sli_get_is_ulp_enabled(&hw->sli, 0) &&
	    sli_get_is_ulp_enabled(&hw->sli, 1)) {
		num_chutes = 2;
	}
	return num_chutes;
}

static ocs_hw_rtn_e
ocs_hw_link_event_init(ocs_hw_t *hw)
{
	ocs_hw_assert(hw);

	hw->link.status = SLI_LINK_STATUS_MAX;
	hw->link.topology = SLI_LINK_TOPO_NONE;
	hw->link.medium = SLI_LINK_MEDIUM_MAX;
	hw->link.speed = 0;
	hw->link.loop_map = NULL;
	hw->link.fc_id = UINT32_MAX;

	return OCS_HW_RTN_SUCCESS;
}
/**
 * @ingroup devInitShutdown
 * @brief If this is physical port 0, then read the max dump size.
 *
 * @par Description
 * Queries the FW for the maximum dump size
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static ocs_hw_rtn_e
ocs_hw_read_max_dump_size(ocs_hw_t *hw)
{
	uint8_t buf[SLI4_BMBX_SIZE];
	uint8_t bus, dev, func;
	int rc;

	/* lancer only */
	if ((SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) &&
	    (SLI4_IF_TYPE_LANCER_G7 != sli_get_if_type(&hw->sli))) {
		ocs_log_debug(hw->os, "Function only supported for I/F type 2\n");
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Make sure the FW is new enough to support this command. If the FW
	 * is too old, the FW will UE.
	 */
	if (hw->workaround.disable_dump_loc) {
		ocs_log_test(hw->os, "FW version is too old for this feature\n");
		return OCS_HW_RTN_ERROR;
	}

	/* attempt to determine the dump size for function 0 only. */
	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
	if (func == 0) {
		if (sli_cmd_common_set_dump_location(&hw->sli, buf,
		    SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
			sli4_res_common_set_dump_location_t *rsp =
			    (sli4_res_common_set_dump_location_t *)
			    (buf + offsetof(sli4_cmd_sli_config_t,
			    payload.embed));

			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_test(hw->os, "set dump location command failed\n");
				return rc;
			} else {
				hw->dump_size = rsp->buffer_length;
				ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);
			}
		}
	}
	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup devInitShutdown
 * @brief Set up the Hardware Abstraction Layer module.
 *
 * @par Description
 * Calls set up to configure the hardware.
 *
 * @param hw Hardware context allocated by the caller.
 * @param os Device abstraction.
 * @param port_type Protocol type of port, such as FC and NIC.
 *
 * @todo Why is port_type a parameter?
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
{
	uint32_t i;
	char prop_buf[32];

	if (hw == NULL) {
		ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->hw_setup_called) {
		/* Setup run-time workarounds.
		 * Call for each setup, to allow for hw_war_version
		 */
		ocs_hw_workaround_setup(hw);
		return OCS_HW_RTN_SUCCESS;
	}

	/*
	 * ocs_hw_init() relies on NULL pointers indicating that a structure
	 * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
	 * free/realloc that memory
	 */
	ocs_memset(hw, 0, sizeof(ocs_hw_t));

	hw->hw_setup_called = TRUE;

	hw->os = os;

	ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
	ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
	ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
	hw->cmd_head_count = 0;

	ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
	ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));

	ocs_atomic_init(&hw->io_alloc_failed_count, 0);

	hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
	hw->config.dif_seed = 0;
	hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
	hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
	hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
	hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;

	if (sli_setup(&hw->sli, hw->os, port_type)) {
		ocs_log_err(hw->os, "SLI setup failed\n");
		return OCS_HW_RTN_ERROR;
	}

	ocs_memset(hw->domains, 0, sizeof(hw->domains));

	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

	ocs_hw_link_event_init(hw);

	sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
	sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);

	/*
	 * Set all the queue sizes to the maximum allowed. These values may
	 * be changed later by the adjust and workaround functions.
	 */
	for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
		hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);
	}

	/*
	 * The RQ assignment for RQ pair mode.
	 */
	hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
	hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
	if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
		hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);
	}

	/* by default, enable initiator-only auto-ABTS emulation */
	hw->config.i_only_aab = TRUE;

	/* Setup run-time workarounds */
	ocs_hw_workaround_setup(hw);

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (hw->workaround.override_fcfi) {
		hw->first_domain_idx = -1;
	}

	/* Must be done after the workaround setup */
	if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) ||
	    (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))) {
		(void)ocs_hw_read_max_dump_size(hw);
	}

	/* calculate the number of WQs required. */
	ocs_hw_adjust_wqs(hw);

	/* Set the default dif mode */
	if (! sli_is_dif_inline_capable(&hw->sli)) {
		ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
		hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;
	}
	/* Workaround: BZ 161832 */
	if (hw->workaround.use_dif_sec_xri) {
		ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
	}

	/*
	 * Figure out the starting and max ULP to spread the WQs across the
	 * ULPs.
	 */
	if (sli_get_is_dual_ulp_capable(&hw->sli)) {
		if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
		    sli_get_is_ulp_enabled(&hw->sli, 1)) {
			hw->ulp_start = 0;
			hw->ulp_max = 1;
		} else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
			hw->ulp_start = 0;
			hw->ulp_max = 0;
		} else {
			hw->ulp_start = 1;
			hw->ulp_max = 1;
		}
	} else {
		if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
			hw->ulp_start = 0;
			hw->ulp_max = 0;
		} else {
			hw->ulp_start = 1;
			hw->ulp_max = 1;
		}
	}
	ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
	    hw->ulp_start, hw->ulp_max);
	hw->config.queue_topology = hw_global.queue_topology_string;

	hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);

	hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
	hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
	hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
	hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
	hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];

	/* Verify qtop configuration against driver supported configuration */
	if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
		ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
		    OCE_HW_MAX_NUM_MRQ_PAIRS);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
		ocs_log_crit(hw->os, "Max supported EQs = %d\n",
		    OCS_HW_MAX_NUM_EQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
		ocs_log_crit(hw->os, "Max supported CQs = %d\n",
		    OCS_HW_MAX_NUM_CQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
		ocs_log_crit(hw->os, "Max supported WQs = %d\n",
		    OCS_HW_MAX_NUM_WQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
		ocs_log_crit(hw->os, "Max supported MQs = %d\n",
		    OCS_HW_MAX_NUM_MQ);
		return OCS_HW_RTN_ERROR;
	}

	return OCS_HW_RTN_SUCCESS;
}
/**
 * @ingroup devInitShutdown
 * @brief Allocate memory structures to prepare for the device operation.
 *
 * @par Description
 * Allocates memory structures needed by the device and prepares the device
 * for operation.
 * @n @n @b Note: This function may be called more than once (for example, at
 * initialization and then after a reset), but the size of the internal resources
 * may not be changed without tearing down the HW (ocs_hw_teardown()).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_init(ocs_hw_t *hw)
{
	ocs_hw_rtn_e rc;
	uint32_t i = 0;
	uint8_t buf[SLI4_BMBX_SIZE];
	uint32_t max_rpi;
	int rem_count;
	int written_size = 0;
	uint32_t count;
	char prop_buf[32];
	uint32_t ramdisc_blocksize = 512;
	uint32_t q_count = 0;

	/*
	 * Make sure the command lists are empty. If this is start-of-day,
	 * they'll be empty since they were just initialized in ocs_hw_setup.
	 * If we've just gone through a reset, the command and command pending
	 * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
	 */
	ocs_lock(&hw->cmd_lock);
	if (!ocs_list_empty(&hw->cmd_head)) {
		ocs_log_test(hw->os, "command found on cmd list\n");
		ocs_unlock(&hw->cmd_lock);
		return OCS_HW_RTN_ERROR;
	}
	if (!ocs_list_empty(&hw->cmd_pending)) {
		ocs_log_test(hw->os, "command found on pending list\n");
		ocs_unlock(&hw->cmd_lock);
		return OCS_HW_RTN_ERROR;
	}
	ocs_unlock(&hw->cmd_lock);

	/* Free RQ buffers if previously allocated */
	ocs_hw_rx_free(hw);

	/*
	 * The IO queues must be initialized here for the reset case. The
	 * ocs_hw_init_io() function will re-add the IOs to the free list.
	 * The cmd_head list should be OK since we free all entries in
	 * ocs_hw_command_cancel() that is called in the ocs_hw_reset().
	 */

	/* If we are in this function due to a reset, there may be stale items
	 * on lists that need to be removed. Clean them up.
	 */
	rem_count = 0;
	if (ocs_list_valid(&hw->io_wait_free)) {
		while ((!ocs_list_empty(&hw->io_wait_free))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_wait_free);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);
		}
	}
	rem_count = 0;
	if (ocs_list_valid(&hw->io_inuse)) {
		while ((!ocs_list_empty(&hw->io_inuse))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_inuse);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);
		}
	}
	rem_count = 0;
	if (ocs_list_valid(&hw->io_free)) {
		while ((!ocs_list_empty(&hw->io_free))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_free);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);
		}
	}
	if (ocs_list_valid(&hw->io_port_owned)) {
		while ((!ocs_list_empty(&hw->io_port_owned))) {
			ocs_list_remove_head(&hw->io_port_owned);
		}
	}
	ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
	ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);

	/* If MRQ is not required, make sure we don't request the feature. */
	if (hw->config.n_rq == 1) {
		hw->sli.config.features.flag.mrqp = FALSE;
	}

	if (sli_init(&hw->sli)) {
		ocs_log_err(hw->os, "SLI failed to initialize\n");
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Enable the auto xfer rdy feature if requested.
	 */
	hw->auto_xfer_rdy_enabled = FALSE;
	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
	    hw->config.auto_xfer_rdy_size > 0) {
		if (hw->config.esoc) {
			if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
				ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
			}
			written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
		} else {
			written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);
		}
		if (written_size) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "config auto xfer rdy failed\n");
				return rc;
			}
		}
		hw->auto_xfer_rdy_enabled = TRUE;

		if (hw->config.auto_xfer_rdy_t10_enable) {
			rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");
				return rc;
			}
		}
	}

	if (hw->sliport_healthcheck) {
		rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "Enabling Sliport Health check failed\n");
			return rc;
		}
	}

	/*
	 * Set FDT transfer hint, only works on Lancer
	 */
	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
		/*
		 * Non-fatal error. In particular, we can disregard failure to set
		 * OCS_HW_FDT_XFER_HINT on devices with legacy firmware that does not
		 * support the OCS_HW_FDT_XFER_HINT feature.
		 */
		ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
	}

	/*
	 * Verify that we have not exceeded any queue sizes
	 */
	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
	    OCS_HW_MAX_NUM_EQ);
	if (hw->config.n_eq > q_count) {
		ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
		    hw->config.n_eq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
	    OCS_HW_MAX_NUM_CQ);
	if (hw->config.n_cq > q_count) {
		ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
		    hw->config.n_cq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
	    OCS_HW_MAX_NUM_MQ);
	if (hw->config.n_mq > q_count) {
		ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
		    hw->config.n_mq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
	    OCS_HW_MAX_NUM_RQ);
	if (hw->config.n_rq > q_count) {
		ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
		    hw->config.n_rq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
	    OCS_HW_MAX_NUM_WQ);
	if (hw->config.n_wq > q_count) {
		ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
		    hw->config.n_wq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	/* zero the hashes */
	ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
	ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
	    OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);

	ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
	ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
	    OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);

	ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
	ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
	    OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);

	rc = ocs_hw_init_queues(hw, hw->qtop);
	if (rc != OCS_HW_RTN_SUCCESS) {
		return rc;
	}

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
	if (i) {
		ocs_dma_t payload_memory;

		rc = OCS_HW_RTN_ERROR;

		if (hw->rnode_mem.size) {
			ocs_dma_free(hw->os, &hw->rnode_mem);
		}

		if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
			ocs_log_err(hw->os, "remote node memory allocation fail\n");
			return OCS_HW_RTN_NO_MEMORY;
		}

		payload_memory.size = 0;
		if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
		    &hw->rnode_mem, UINT16_MAX, &payload_memory)) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

			if (payload_memory.size != 0) {
				/* The command was non-embedded - need to free the dma buffer */
				ocs_dma_free(hw->os, &payload_memory);
			}
		}

		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "header template registration failed\n");
			return rc;
		}
	}

	/* Allocate and post RQ buffers */
	rc = ocs_hw_rx_allocate(hw);
	if (rc) {
		ocs_log_err(hw->os, "rx_allocate failed\n");
		return rc;
	}

	/* Populate hw->seq_free_list */
	if (hw->seq_pool == NULL) {
		uint32_t count = 0;
		uint32_t i;

		/* Sum up the total number of RQ entries, to use to allocate the sequence object pool */
		for (i = 0; i < hw->hw_rq_count; i++) {
			count += hw->hw_rq[i]->entry_count;
		}

		hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
		if (hw->seq_pool == NULL) {
			ocs_log_err(hw->os, "malloc seq_pool failed\n");
			return OCS_HW_RTN_NO_MEMORY;
		}
	}
	if (ocs_hw_rx_post(hw)) {
		ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");
	}

	/* Allocate rpi_ref if not previously allocated */
	if (hw->rpi_ref == NULL) {
		hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
		    OCS_M_ZERO | OCS_M_NOWAIT);
		if (hw->rpi_ref == NULL) {
			ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
			return OCS_HW_RTN_NO_MEMORY;
		}
	}

	for (i = 0; i < max_rpi; i++) {
		ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
		ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
	}

	ocs_memset(hw->domains, 0, sizeof(hw->domains));

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (hw->workaround.override_fcfi) {
		hw->first_domain_idx = -1;
	}

	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

	/* Register a FCFI to allow unsolicited frames to be routed to the driver */
	if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
		if (hw->hw_mrq_count) {
			ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");

			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");
				return rc;
			}

			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");
				return rc;
			}
		} else {
			sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];

			ocs_log_debug(hw->os, "using REG_FCFI standard\n");

			/* Set the filter match/mask values from hw's filter_def values */
			for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
				rq_cfg[i].rq_id = 0xffff;
				rq_cfg[i].r_ctl_mask = (uint8_t)hw->config.filter_def[i];
				rq_cfg[i].r_ctl_match = (uint8_t)(hw->config.filter_def[i] >> 8);
				rq_cfg[i].type_mask = (uint8_t)(hw->config.filter_def[i] >> 16);
				rq_cfg[i].type_match = (uint8_t)(hw->config.filter_def[i] >> 24);
			}
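			/*
			 * Illustrative note (not driver code): each 32-bit
			 * filter_def word packs the REG_FCFI filter fields a
			 * byte at a time, as decoded by the loop above:
			 *
			 *	bits  7:0  -> r_ctl_mask
			 *	bits 15:8  -> r_ctl_match
			 *	bits 23:16 -> type_mask
			 *	bits 31:24 -> type_match
			 *
			 * For example, an assumed filter_def[i] of 0x01ff0800
			 * decodes to r_ctl_mask=0x00, r_ctl_match=0x08,
			 * type_mask=0xff, type_match=0x01 -- on the usual
			 * mask/match reading, frames whose TYPE equals 0x01,
			 * with R_CTL ignored (zero mask).
			 */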
			/*
			 * Update the rq_id's of the FCF configuration (don't update more than the number
			 * of rq_cfg elements)
			 */
			for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
				hw_rq_t *rq = hw->hw_rq[i];
				uint32_t j;
				for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
					uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
					if (mask & (1U << j)) {
						rq_cfg[j].rq_id = rq->hdr->id;
						ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
						    j, hw->config.filter_def[j], i, rq->hdr->id);
					}
				}
			}

			rc = OCS_HW_RTN_ERROR;

			if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
				rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			}

			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "FCFI registration failed\n");
				return rc;
			}
			hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;
		}
	}

	/*
	 * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
	 * thus the pool allocation size of 64k)
	 */
	rc = ocs_hw_reqtag_init(hw);
	if (rc) {
		ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);
		return rc;
	}

	rc = ocs_hw_setup_io(hw);
	if (rc) {
		ocs_log_err(hw->os, "IO allocation failure\n");
		return rc;
	}

	rc = ocs_hw_init_io(hw);
	if (rc) {
		ocs_log_err(hw->os, "IO initialization failure\n");
		return rc;
	}

	ocs_queue_history_init(hw->os, &hw->q_hist);

	/* get hw link config; polling, so callback will be called immediately */
	hw->linkcfg = OCS_HW_LINKCFG_NA;
	ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);

	/* if lancer ethernet, ethernet ports need to be enabled */
	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
	    (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
		if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
			/* log warning but continue */
			ocs_log_err(hw->os, "Failed to set ethernet license\n");
		}
	}

	/* Set the DIF seed - only for lancer right now */
	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) &&
	    ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(hw->os, "Failed to set DIF seed value\n");
		return rc;
	}

	/* Set the DIF mode - skyhawk only */
	if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
	    sli_get_dif_capable(&hw->sli)) {
		rc = ocs_hw_set_dif_mode(hw);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "Failed to set DIF mode value\n");
			return rc;
		}
	}

	/*
	 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries
	 */
	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);
	}

	/*
	 * Initialize RQ hash
	 */
	for (i = 0; i < hw->rq_count; i++) {
		ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
	}

	/*
	 * Initialize WQ hash
	 */
	for (i = 0; i < hw->wq_count; i++) {
		ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
	}
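	/*
	 * Illustrative note (not driver code): ocs_hw_queue_hash_add() lets
	 * completion handling map a hardware queue id back to its array index
	 * without a linear search. A lookup is expected to probe the same way
	 * the entries were added (open addressing); conceptually:
	 *
	 *	hash = id & (OCS_HW_Q_HASH_SIZE - 1);
	 *	while (hash_table[hash].in_use && hash_table[hash].id != id)
	 *		hash = (hash + 1) & (OCS_HW_Q_HASH_SIZE - 1);
	 *	index = hash_table[hash].index;
	 *
	 * The probe sequence and field names here are assumptions for the
	 * sketch, not the exact implementation.
	 */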
	/*
	 * Arming the CQ allows (e.g.) MQ completions to write CQ entries
	 */
	for (i = 0; i < hw->cq_count; i++) {
		ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
		sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
	}

	/* record the fact that the queues are functional */
	hw->state = OCS_HW_STATE_ACTIVE;

	/* Note: Must be after the IOs are setup and the state is active */
	if (ocs_hw_rqpair_init(hw)) {
		ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");
	}

	/* finally kick off periodic timer to check for timed out target WQEs */
	if (hw->config.emulate_tgt_wqe_timeout) {
		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
		    OCS_HW_WQ_TIMER_PERIOD_MS);
	}

	/*
	 * Allocate HW IOs for send frame. Allocate one for each Class 1 WQ, or if there
	 * are none of those, allocate one for WQ[0]
	 */
	if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
		for (i = 0; i < count; i++) {
			hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
			wq->send_frame_io = ocs_hw_io_alloc(hw);
			if (wq->send_frame_io == NULL) {
				ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
			}
		}
	} else {
		hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
		if (hw->hw_wq[0]->send_frame_io == NULL) {
			ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
		}
	}

	/* Initialize send frame sequence id */
	ocs_atomic_init(&hw->send_frame_seq_id, 0);

	/* Initialize watchdog timer if enabled by user */
	hw->expiration_logged = 0;
	if (hw->watchdog_timeout) {
		if ((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
			ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n");
		} else if (!ocs_hw_config_watchdog_timer(hw)) {
			ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds\n", hw->watchdog_timeout);
		}
	}

	if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
		ocs_log_err(hw->os, "domain node memory allocation fail\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
		ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map,
	    SLI4_MIN_LOOP_MAP_BYTES, 4)) {
		ocs_log_err(hw->os, "Loop dma alloc failed size:%d\n", hw->loop_map.size);
	}

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Configure Multi-RQ
 *
 * @param hw Hardware context allocated by the caller.
 * @param mode 1 to set MRQ filters and 0 to set FCFI index
 * @param vlanid valid in mode 0
 * @param fcf_index valid in mode 0
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static int32_t
ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
{
	uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
	hw_rq_t *rq;
	sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
	uint32_t i, j;
	sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
	int32_t rc;

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		goto issue_cmd;
	}

	/* Set the filter match/mask values from hw's filter_def values */
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		rq_filter[i].rq_id = 0xffff;
		rq_filter[i].r_ctl_mask = (uint8_t)hw->config.filter_def[i];
		rq_filter[i].r_ctl_match = (uint8_t)(hw->config.filter_def[i] >> 8);
		rq_filter[i].type_mask = (uint8_t)(hw->config.filter_def[i] >> 16);
		rq_filter[i].type_match = (uint8_t)(hw->config.filter_def[i] >> 24);
	}

	/* Accumulate counts for each filter type used, build rq_ids[] list */
	for (i = 0; i < hw->hw_rq_count; i++) {
		rq = hw->hw_rq[i];
		for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
			if (rq->filter_mask & (1U << j)) {
				if (rq_filter[j].rq_id != 0xffff) {
					/* Already used. Bail out if it's not the RQ set case */
					if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
						ocs_log_err(hw->os, "Wrong queue topology.\n");
						return OCS_HW_RTN_ERROR;
					}
					continue;
				}

				if (rq->is_mrq) {
					rq_filter[j].rq_id = rq->base_mrq_id;
					mrq_bitmask |= (1U << j);
				} else {
					rq_filter[j].rq_id = rq->hdr->id;
				}
			}
		}
	}

issue_cmd:
	/* Invoke REG_FCFI_MRQ */
	rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
	    buf,				/* buf */
	    SLI4_BMBX_SIZE,			/* size */
	    mode,				/* mode 1 */
	    fcf_index,				/* fcf_index */
	    vlanid,				/* vlan_id */
	    hw->config.rq_selection_policy,	/* RQ selection policy */
	    mrq_bitmask,			/* MRQ bitmask */
	    hw->hw_mrq_count,			/* num_mrqs */
	    rq_filter);				/* RQ filter */
	if (rc == 0) {
		ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
		return OCS_HW_RTN_ERROR;
	}

	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

	rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;

	if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
		ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
		    rsp->hdr.command, rsp->hdr.status);
		return OCS_HW_RTN_ERROR;
	}

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		hw->fcf_indicator = rsp->fcfi;
	}
	return 0;
}

/**
 * @brief Callback function for getting linkcfg during HW initialization.
 *
 * @param status Status of the linkcfg get operation.
 * @param value Link configuration enum to which the link configuration is set.
 * @param arg Callback argument (ocs_hw_t *).
 *
 * @return None.
 */
static void
ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
{
	ocs_hw_t *hw = (ocs_hw_t *)arg;
	if (status == 0) {
		hw->linkcfg = (ocs_hw_linkcfg_e)value;
	} else {
		hw->linkcfg = OCS_HW_LINKCFG_NA;
	}
	ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
}

/**
 * @ingroup devInitShutdown
 * @brief Tear down the Hardware Abstraction Layer module.
 *
 * @par Description
 * Frees memory structures needed by the device, and shuts down the device. Does
 * not free the HW context memory (which is done by the caller).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_teardown(ocs_hw_t *hw)
{
	uint32_t i = 0;
	uint32_t iters = 10; /*XXX*/
	uint32_t max_rpi;
	uint32_t destroy_queues;
	uint32_t free_memory;

	if (!hw) {
		ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;
	}

	destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
	free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);

	/* shutdown target wqe timer */
	shutdown_target_wqe_timer(hw);

	/* Cancel watchdog timer if enabled */
	if (hw->watchdog_timeout) {
		hw->watchdog_timeout = 0;
		ocs_hw_config_watchdog_timer(hw);
	}

	/* Cancel Sliport Healthcheck */
	if (hw->sliport_healthcheck) {
		hw->sliport_healthcheck = 0;
		ocs_hw_config_sli_port_health_check(hw, 0, 0);
	}

	if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;

		ocs_hw_flush(hw);

		/* If there are outstanding commands, wait for them to complete */
		while (!ocs_list_empty(&hw->cmd_head) && iters) {
			ocs_udelay(10000);
			ocs_hw_flush(hw);
			iters--;
		}

		if (ocs_list_empty(&hw->cmd_head)) {
			ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
		} else {
			ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
		}

		/* Cancel any remaining commands */
		ocs_hw_command_cancel(hw);
	} else {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
	}

	ocs_lock_free(&hw->cmd_lock);

	/* Free unregistered RPI if workaround is in force */
	if (hw->workaround.use_unregistered_rpi) {
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
	}

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	if (hw->rpi_ref) {
		for (i = 0; i < max_rpi; i++) {
			if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
				ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
				    i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
			}
		}
		ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
		hw->rpi_ref = NULL;
	}

	ocs_dma_free(hw->os, &hw->rnode_mem);

	if (hw->io) {
		for (i = 0; i < hw->config.n_io; i++) {
			if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
			    (hw->io[i]->sgl->virt != NULL)) {
				if (hw->io[i]->is_port_owned) {
					ocs_lock_free(&hw->io[i]->axr_lock);
				}
				ocs_dma_free(hw->os, hw->io[i]->sgl);
			}
			ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
			hw->io[i] = NULL;
		}
		ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
		hw->wqe_buffs = NULL;
		ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
		hw->io = NULL;
	}

	ocs_dma_free(hw->os, &hw->xfer_rdy);
	ocs_dma_free(hw->os, &hw->dump_sges);
	ocs_dma_free(hw->os, &hw->loop_map);

	ocs_lock_free(&hw->io_lock);
	ocs_lock_free(&hw->io_abort_lock);

	for (i = 0; i < hw->wq_count; i++) {
		sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->rq_count; i++) {
		sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->mq_count; i++) {
		sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->cq_count; i++) {
		sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
	}

	ocs_hw_qtop_free(hw->qtop);

	/* Free rq buffers */
	ocs_hw_rx_free(hw);

	hw_queue_teardown(hw);

	ocs_hw_rqpair_teardown(hw);

	if (sli_teardown(&hw->sli)) {
		ocs_log_err(hw->os, "SLI teardown failed\n");
	}

	ocs_queue_history_free(&hw->q_hist);

	/* record the fact that the queues are non-functional */
	hw->state = OCS_HW_STATE_UNINITIALIZED;

	/* free sequence free pool */
	ocs_array_free(hw->seq_pool);
	hw->seq_pool = NULL;

	/* free hw_wq_callback pool */
	ocs_pool_free(hw->wq_reqtag_pool);

	ocs_dma_free(hw->os, &hw->domain_dmem);
	ocs_dma_free(hw->os, &hw->fcf_dmem);
	/* Mark HW setup as not having been called */
	hw->hw_setup_called = FALSE;

	return OCS_HW_RTN_SUCCESS;
}

ocs_hw_rtn_e
ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
{
	uint32_t i;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint32_t iters;
	ocs_hw_state_e prev_state = hw->state;

	if (hw->state != OCS_HW_STATE_ACTIVE) {
		ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
	}

	hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;

	/* shutdown target wqe timer */
	shutdown_target_wqe_timer(hw);

	ocs_hw_flush(hw);

	/*
	 * If a mailbox command requiring a DMA is outstanding (i.e. SFP/DDM),
	 * then the FW will UE when the reset is issued. So attempt to complete
	 * all mailbox commands.
	 */
	iters = 10;
	while (!ocs_list_empty(&hw->cmd_head) && iters) {
		ocs_udelay(10000);
		ocs_hw_flush(hw);
		iters--;
	}

	if (ocs_list_empty(&hw->cmd_head)) {
		ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
	} else {
		ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
	}

	/* Reset the chip */
	switch (reset) {
	case OCS_HW_RESET_FUNCTION:
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_RESET_FIRMWARE:
		ocs_log_debug(hw->os, "issuing firmware reset\n");
		if (sli_fw_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_soft_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		/*
		 * Because the FW reset leaves the FW in a non-running state,
		 * follow that with a regular reset.
		 */
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	default:
		ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
		hw->state = prev_state;
		return OCS_HW_RTN_ERROR;
	}

	/* Not safe to walk command/io lists unless they've been initialized */
	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		ocs_hw_command_cancel(hw);

		/* Clean up the inuse list, the free list and the wait free list */
		ocs_hw_io_cancel(hw);

		ocs_memset(hw->domains, 0, sizeof(hw->domains));
		ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

		ocs_hw_link_event_init(hw);

		ocs_lock(&hw->io_lock);
		/* The io lists should be empty, but remove any that didn't get cleaned up. */
		while (!ocs_list_empty(&hw->io_timed_wqe)) {
			ocs_list_remove_head(&hw->io_timed_wqe);
		}
		/* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */

		while (!ocs_list_empty(&hw->io_free)) {
			ocs_list_remove_head(&hw->io_free);
		}
		while (!ocs_list_empty(&hw->io_wait_free)) {
			ocs_list_remove_head(&hw->io_wait_free);
		}

		/* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
		ocs_hw_reqtag_reset(hw);

		ocs_unlock(&hw->io_lock);
	}

	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		for (i = 0; i < hw->wq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->wq[i]);
		}

		for (i = 0; i < hw->rq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->rq[i]);
		}

		for (i = 0; i < hw->hw_rq_count; i++) {
			hw_rq_t *rq = hw->hw_rq[i];
			if (rq->rq_tracker != NULL) {
				uint32_t j;

				for (j = 0; j < rq->entry_count; j++) {
					rq->rq_tracker[j] = NULL;
				}
			}
		}

		for (i = 0; i < hw->mq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->mq[i]);
		}

		for (i = 0; i < hw->cq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->cq[i]);
		}

		for (i = 0; i < hw->eq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->eq[i]);
		}

		/* Free rq buffers */
		ocs_hw_rx_free(hw);

		/* Teardown the HW queue topology */
		hw_queue_teardown(hw);
	} else {
		/* Free rq buffers */
		ocs_hw_rx_free(hw);
	}

	/*
	 * Re-apply the run-time workarounds after clearing the SLI config
	 * fields in sli_reset.
	 */
	ocs_hw_workaround_setup(hw);
	hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;

	return rc;
}

int32_t
ocs_hw_get_num_eq(ocs_hw_t *hw)
{
	return hw->eq_count;
}

static int32_t
ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
{
	/* The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf.
	 * No further explanation is given in the document.
	 */
	return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
	    sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
}

ocs_hw_rtn_e
ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	int32_t tmp;

	if (!value) {
		return OCS_HW_RTN_ERROR;
	}

	*value = 0;

	switch (prop) {
	case OCS_HW_N_IO:
		*value = hw->config.n_io;
		break;
	case OCS_HW_N_SGL:
		*value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED);
		break;
	case OCS_HW_MAX_IO:
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
		break;
	case OCS_HW_MAX_NODES:
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
		break;
	case OCS_HW_MAX_RQ_ENTRIES:
		*value = hw->num_qentries[SLI_QTYPE_RQ];
		break;
	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
		*value = hw->config.rq_default_buffer_size;
		break;
	case OCS_HW_AUTO_XFER_RDY_CAPABLE:
		*value = sli_get_auto_xfer_rdy_capable(&hw->sli);
		break;
	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
		*value = hw->config.auto_xfer_rdy_xri_cnt;
		break;
	case OCS_HW_AUTO_XFER_RDY_SIZE:
		*value = hw->config.auto_xfer_rdy_size;
		break;
	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
		switch (hw->config.auto_xfer_rdy_blk_size_chip) {
		case 0:
			*value = 512;
			break;
		case 1:
			*value = 1024;
			break;
		case 2:
			*value = 2048;
			break;
		case 3:
			*value = 4096;
			break;
		case 4:
			*value = 520;
			break;
		default:
			*value = 0;
			rc = OCS_HW_RTN_ERROR;
			break;
		}
		break;
	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
		*value = hw->config.auto_xfer_rdy_t10_enable;
		break;
	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
		*value = hw->config.auto_xfer_rdy_p_type;
		break;
	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
		*value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
		break;
	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
		*value = hw->config.auto_xfer_rdy_app_tag_valid;
		break;
	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
		*value = hw->config.auto_xfer_rdy_app_tag_value;
		break;
	case OCS_HW_MAX_SGE:
		*value = sli_get_max_sge(&hw->sli);
		break;
	case OCS_HW_MAX_SGL:
		*value = sli_get_max_sgl(&hw->sli);
		break;
	case OCS_HW_TOPOLOGY:
		/*
		 * Infer link.status based on link.speed.
		 * Report OCS_HW_TOPOLOGY_NONE if the link is down.
		 */
		if (hw->link.speed == 0) {
			*value = OCS_HW_TOPOLOGY_NONE;
			break;
		}
		switch (hw->link.topology) {
		case SLI_LINK_TOPO_NPORT:
			*value = OCS_HW_TOPOLOGY_NPORT;
			break;
		case SLI_LINK_TOPO_LOOP:
			*value = OCS_HW_TOPOLOGY_LOOP;
			break;
		case SLI_LINK_TOPO_NONE:
			*value = OCS_HW_TOPOLOGY_NONE;
			break;
		default:
			ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
			rc = OCS_HW_RTN_ERROR;
			break;
		}
		break;
	case OCS_HW_CONFIG_TOPOLOGY:
		*value = hw->config.topology;
		break;
	case OCS_HW_LINK_SPEED:
		*value = hw->link.speed;
		break;
	case OCS_HW_LINK_CONFIG_SPEED:
		switch (hw->config.speed) {
		case FC_LINK_SPEED_10G:
			*value = 10000;
			break;
		case FC_LINK_SPEED_AUTO_16_8_4:
			*value = 0;
			break;
		case FC_LINK_SPEED_2G:
			*value = 2000;
			break;
		case FC_LINK_SPEED_4G:
			*value = 4000;
			break;
		case FC_LINK_SPEED_8G:
			*value = 8000;
			break;
		case FC_LINK_SPEED_16G:
			*value = 16000;
			break;
		case FC_LINK_SPEED_32G:
			*value = 32000;
			break;
		default:
			ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
			rc = OCS_HW_RTN_ERROR;
			break;
		}
		break;
	case OCS_HW_IF_TYPE:
		*value = sli_get_if_type(&hw->sli);
		break;
	case OCS_HW_SLI_REV:
		*value = sli_get_sli_rev(&hw->sli);
		break;
	case OCS_HW_SLI_FAMILY:
		*value = sli_get_sli_family(&hw->sli);
		break;
	case OCS_HW_DIF_CAPABLE:
		*value = sli_get_dif_capable(&hw->sli);
		break;
	case OCS_HW_DIF_SEED:
		*value = hw->config.dif_seed;
		break;
	case OCS_HW_DIF_MODE:
		*value = hw->config.dif_mode;
		break;
	case OCS_HW_DIF_MULTI_SEPARATE:
		/* Lancer supports multiple DIF separates */
		if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
			*value = TRUE;
		} else {
			*value = FALSE;
		}
		break;
	case OCS_HW_DUMP_MAX_SIZE:
		*value = hw->dump_size;
		break;
	case OCS_HW_DUMP_READY:
		*value = sli_dump_is_ready(&hw->sli);
		break;
	case OCS_HW_DUMP_PRESENT:
		*value = sli_dump_is_present(&hw->sli);
		break;
	case OCS_HW_RESET_REQUIRED:
		tmp = sli_reset_required(&hw->sli);
		if (tmp < 0) {
			rc = OCS_HW_RTN_ERROR;
		} else {
			*value = tmp;
		}
		break;
	case OCS_HW_FW_ERROR:
		*value = sli_fw_error_status(&hw->sli);
		break;
	case OCS_HW_FW_READY:
		*value = sli_fw_ready(&hw->sli);
		break;
	case OCS_HW_FW_TIMED_OUT:
		*value = ocs_hw_get_fw_timed_out(hw);
		break;
	case OCS_HW_HIGH_LOGIN_MODE:
		*value = sli_get_hlm_capable(&hw->sli);
		break;
	case OCS_HW_PREREGISTER_SGL:
		*value = sli_get_sgl_preregister_required(&hw->sli);
		break;
	case OCS_HW_HW_REV1:
		*value = sli_get_hw_revision(&hw->sli, 0);
		break;
	case OCS_HW_HW_REV2:
		*value = sli_get_hw_revision(&hw->sli, 1);
		break;
	case OCS_HW_HW_REV3:
		*value = sli_get_hw_revision(&hw->sli, 2);
		break;
	case OCS_HW_LINKCFG:
		*value = hw->linkcfg;
		break;
	case OCS_HW_ETH_LICENSE:
		*value = hw->eth_license;
		break;
	case OCS_HW_LINK_MODULE_TYPE:
		*value = sli_get_link_module_type(&hw->sli);
		break;
	case OCS_HW_NUM_CHUTES:
		*value = ocs_hw_get_num_chutes(hw);
		break;
	case OCS_HW_DISABLE_AR_TGT_DIF:
		*value = hw->workaround.disable_ar_tgt_dif;
		break;
	case OCS_HW_EMULATE_I_ONLY_AAB:
		*value = hw->config.i_only_aab;
		break;
	case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
		*value = hw->config.emulate_tgt_wqe_timeout;
		break;
	case OCS_HW_VPD_LEN:
		*value = sli_get_vpd_len(&hw->sli);
		break;
	case OCS_HW_SGL_CHAINING_CAPABLE:
		*value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
		break;
	case OCS_HW_SGL_CHAINING_ALLOWED:
		/*
		 * SGL Chaining is allowed in the following cases:
		 *   1. Lancer with host SGL Lists
		 *   2. Skyhawk with pre-registered SGL Lists
		 */
		*value = FALSE;
		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
		    !sli_get_sgl_preregister(&hw->sli) &&
		    SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
			*value = TRUE;
		}

		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
		    sli_get_sgl_preregister(&hw->sli) &&
		    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
		    (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
			*value = TRUE;
		}
		break;
	case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
		/* Only lancer supports host allocated SGL Chaining buffers. */
		*value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
		    (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)));
		break;
	case OCS_HW_SEND_FRAME_CAPABLE:
		if (hw->workaround.ignore_send_frame) {
			*value = 0;
		} else {
			/* Only lancer is capable */
			*value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
		}
		break;
	case OCS_HW_RQ_SELECTION_POLICY:
		*value = hw->config.rq_selection_policy;
		break;
	case OCS_HW_RR_QUANTA:
		*value = hw->config.rr_quanta;
		break;
	case OCS_HW_MAX_VPORTS:
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
		break;
	default:
		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
		rc = OCS_HW_RTN_ERROR;
	}

	return rc;
}

void *
ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
{
	void *rc = NULL;

	switch (prop) {
	case OCS_HW_WWN_NODE:
		rc = sli_get_wwn_node(&hw->sli);
		break;
	case OCS_HW_WWN_PORT:
		rc = sli_get_wwn_port(&hw->sli);
		break;
	case OCS_HW_VPD:
		/* make sure VPD length is non-zero */
		if (sli_get_vpd_len(&hw->sli)) {
			rc = sli_get_vpd(&hw->sli);
		}
		break;
	case OCS_HW_FW_REV:
		rc = sli_get_fw_name(&hw->sli, 0);
		break;
	case OCS_HW_FW_REV2:
		rc = sli_get_fw_name(&hw->sli, 1);
		break;
	case OCS_HW_IPL:
		rc = sli_get_ipl_name(&hw->sli);
		break;
	case OCS_HW_PORTNUM:
		rc = sli_get_portnum(&hw->sli);
		break;
	case OCS_HW_BIOS_VERSION_STRING:
		rc = sli_get_bios_version_string(&hw->sli);
		break;
	default:
		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
	}

	return rc;
}
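/*
 * Illustrative sketch (not driver code): querying the HW. Scalar properties
 * come back through ocs_hw_get(), pointer-valued ones through
 * ocs_hw_get_ptr(); both are defined above. The pointer type of the returned
 * WWN is an assumption here, hence the bare void *.
 *
 *	uint32_t max_io = 0;
 *	void *wwpn;
 *
 *	if (ocs_hw_get(hw, OCS_HW_MAX_IO, &max_io) == OCS_HW_RTN_SUCCESS)
 *		ocs_log_debug(hw->os, "device supports %d XRIs\n", max_io);
 *	wwpn = ocs_hw_get_ptr(hw, OCS_HW_WWN_PORT);	// NULL check assumed
 */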
OCS_HW_N_SGL: 1816 value += SLI4_SGE_MAX_RESERVED; 1817 if (value > sli_get_max_sgl(&hw->sli)) { 1818 ocs_log_test(hw->os, "SGL value out of range %d vs %d\n", 1819 value, sli_get_max_sgl(&hw->sli)); 1820 rc = OCS_HW_RTN_ERROR; 1821 } else { 1822 hw->config.n_sgl = value; 1823 } 1824 break; 1825 case OCS_HW_TOPOLOGY: 1826 if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) && 1827 (value != OCS_HW_TOPOLOGY_AUTO)) { 1828 ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n", 1829 value, sli_get_medium(&hw->sli)); 1830 rc = OCS_HW_RTN_ERROR; 1831 break; 1832 } 1833 1834 switch (value) { 1835 case OCS_HW_TOPOLOGY_AUTO: 1836 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) { 1837 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC); 1838 } else { 1839 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE); 1840 } 1841 break; 1842 case OCS_HW_TOPOLOGY_NPORT: 1843 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA); 1844 break; 1845 case OCS_HW_TOPOLOGY_LOOP: 1846 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL); 1847 break; 1848 default: 1849 ocs_log_test(hw->os, "unsupported topology %#x\n", value); 1850 rc = OCS_HW_RTN_ERROR; 1851 } 1852 hw->config.topology = value; 1853 break; 1854 case OCS_HW_LINK_SPEED: 1855 if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) { 1856 switch (value) { 1857 case 0: /* Auto-speed negotiation */ 1858 case 10000: /* FCoE speed */ 1859 hw->config.speed = FC_LINK_SPEED_10G; 1860 break; 1861 default: 1862 ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n", 1863 value, sli_get_medium(&hw->sli)); 1864 rc = OCS_HW_RTN_ERROR; 1865 } 1866 break; 1867 } 1868 1869 switch (value) { 1870 case 0: /* Auto-speed negotiation */ 1871 hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4; 1872 break; 1873 case 2000: /* FC speeds */ 1874 hw->config.speed = FC_LINK_SPEED_2G; 1875 break; 1876 case 4000: 1877 hw->config.speed = FC_LINK_SPEED_4G; 1878 break; 1879 case 8000: 1880 hw->config.speed = FC_LINK_SPEED_8G; 1881 break; 1882 case 16000: 1883 hw->config.speed = FC_LINK_SPEED_16G; 1884 break; 1885 case 32000: 1886 hw->config.speed = FC_LINK_SPEED_32G; 1887 break; 1888 default: 1889 ocs_log_test(hw->os, "unsupported speed %d\n", value); 1890 rc = OCS_HW_RTN_ERROR; 1891 } 1892 break; 1893 case OCS_HW_DIF_SEED: 1894 /* Set the DIF seed - only for lancer right now */ 1895 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { 1896 ocs_log_test(hw->os, "DIF seed not supported for this device\n"); 1897 rc = OCS_HW_RTN_ERROR; 1898 } else { 1899 hw->config.dif_seed = value; 1900 } 1901 break; 1902 case OCS_HW_DIF_MODE: 1903 switch (value) { 1904 case OCS_HW_DIF_MODE_INLINE: 1905 /* 1906 * Make sure we support inline DIF. 1907 * 1908 * Note: Having both bits clear means that we have old 1909 * FW that doesn't set the bits. 1910 */ 1911 if (sli_is_dif_inline_capable(&hw->sli)) { 1912 hw->config.dif_mode = value; 1913 } else { 1914 ocs_log_test(hw->os, "chip does not support DIF inline\n"); 1915 rc = OCS_HW_RTN_ERROR; 1916 } 1917 break; 1918 case OCS_HW_DIF_MODE_SEPARATE: 1919 /* Make sure we support DIF separates. 
*/ 1920 if (sli_is_dif_separate_capable(&hw->sli)) { 1921 hw->config.dif_mode = value; 1922 } else { 1923 ocs_log_test(hw->os, "chip does not support DIF separate\n"); 1924 rc = OCS_HW_RTN_ERROR; 1925 } 1926 } 1927 break; 1928 case OCS_HW_RQ_PROCESS_LIMIT: { 1929 hw_rq_t *rq; 1930 uint32_t i; 1931 1932 /* For each hw_rq object, set its parent CQ limit value */ 1933 for (i = 0; i < hw->hw_rq_count; i++) { 1934 rq = hw->hw_rq[i]; 1935 hw->cq[rq->cq->instance].proc_limit = value; 1936 } 1937 break; 1938 } 1939 case OCS_HW_RQ_DEFAULT_BUFFER_SIZE: 1940 hw->config.rq_default_buffer_size = value; 1941 break; 1942 case OCS_HW_AUTO_XFER_RDY_XRI_CNT: 1943 hw->config.auto_xfer_rdy_xri_cnt = value; 1944 break; 1945 case OCS_HW_AUTO_XFER_RDY_SIZE: 1946 hw->config.auto_xfer_rdy_size = value; 1947 break; 1948 case OCS_HW_AUTO_XFER_RDY_BLK_SIZE: 1949 switch (value) { 1950 case 512: 1951 hw->config.auto_xfer_rdy_blk_size_chip = 0; 1952 break; 1953 case 1024: 1954 hw->config.auto_xfer_rdy_blk_size_chip = 1; 1955 break; 1956 case 2048: 1957 hw->config.auto_xfer_rdy_blk_size_chip = 2; 1958 break; 1959 case 4096: 1960 hw->config.auto_xfer_rdy_blk_size_chip = 3; 1961 break; 1962 case 520: 1963 hw->config.auto_xfer_rdy_blk_size_chip = 4; 1964 break; 1965 default: 1966 ocs_log_err(hw->os, "Invalid block size %d\n", 1967 value); 1968 rc = OCS_HW_RTN_ERROR; 1969 } 1970 break; 1971 case OCS_HW_AUTO_XFER_RDY_T10_ENABLE: 1972 hw->config.auto_xfer_rdy_t10_enable = value; 1973 break; 1974 case OCS_HW_AUTO_XFER_RDY_P_TYPE: 1975 hw->config.auto_xfer_rdy_p_type = value; 1976 break; 1977 case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA: 1978 hw->config.auto_xfer_rdy_ref_tag_is_lba = value; 1979 break; 1980 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID: 1981 hw->config.auto_xfer_rdy_app_tag_valid = value; 1982 break; 1983 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE: 1984 hw->config.auto_xfer_rdy_app_tag_value = value; 1985 break; 1986 case OCS_ESOC: 1987 hw->config.esoc = value; 1988 break; 1989 case OCS_HW_HIGH_LOGIN_MODE: 1990 rc = sli_set_hlm(&hw->sli, value); 1991 break; 1992 case OCS_HW_PREREGISTER_SGL: 1993 rc = sli_set_sgl_preregister(&hw->sli, value); 1994 break; 1995 case OCS_HW_ETH_LICENSE: 1996 hw->eth_license = value; 1997 break; 1998 case OCS_HW_EMULATE_I_ONLY_AAB: 1999 hw->config.i_only_aab = value; 2000 break; 2001 case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT: 2002 hw->config.emulate_tgt_wqe_timeout = value; 2003 break; 2004 case OCS_HW_BOUNCE: 2005 hw->config.bounce = value; 2006 break; 2007 case OCS_HW_RQ_SELECTION_POLICY: 2008 hw->config.rq_selection_policy = value; 2009 break; 2010 case OCS_HW_RR_QUANTA: 2011 hw->config.rr_quanta = value; 2012 break; 2013 default: 2014 ocs_log_test(hw->os, "unsupported property %#x\n", prop); 2015 rc = OCS_HW_RTN_ERROR; 2016 } 2017 2018 return rc; 2019 } 2020 2021 ocs_hw_rtn_e 2022 ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value) 2023 { 2024 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2025 2026 switch (prop) { 2027 case OCS_HW_WAR_VERSION: 2028 hw->hw_war_version = value; 2029 break; 2030 case OCS_HW_FILTER_DEF: { 2031 char *p = value; 2032 uint32_t idx = 0; 2033 2034 for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) { 2035 hw->config.filter_def[idx] = 0; 2036 } 2037 2038 for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) { 2039 hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0); 2040 p = ocs_strchr(p, ','); 2041 if (p != NULL) { 2042 p++; 2043 } 2044 } 2045 2046 break; 2047 } 2048 default: 2049 ocs_log_test(hw->os, "unsupported property 
%#x\n", prop); 2050 rc = OCS_HW_RTN_ERROR; 2051 break; 2052 } 2053 return rc; 2054 } 2055 /** 2056 * @ingroup interrupt 2057 * @brief Check for the events associated with the interrupt vector. 2058 * 2059 * @param hw Hardware context. 2060 * @param vector Zero-based interrupt vector number. 2061 * 2062 * @return Returns 0 on success, or a non-zero value on failure. 2063 */ 2064 int32_t 2065 ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector) 2066 { 2067 int32_t rc = 0; 2068 2069 if (!hw) { 2070 ocs_log_err(NULL, "HW context NULL?!?\n"); 2071 return -1; 2072 } 2073 2074 if (vector > hw->eq_count) { 2075 ocs_log_err(hw->os, "vector %d. max %d\n", 2076 vector, hw->eq_count); 2077 return -1; 2078 } 2079 2080 /* 2081 * The caller should disable interrupts if they wish to prevent us 2082 * from processing during a shutdown. The following states are defined: 2083 * OCS_HW_STATE_UNINITIALIZED - No queues allocated 2084 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset, 2085 * queues are cleared. 2086 * OCS_HW_STATE_ACTIVE - Chip and queues are operational 2087 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions 2088 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox 2089 * completions. 2090 */ 2091 if (hw->state != OCS_HW_STATE_UNINITIALIZED) { 2092 rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]); 2093 2094 /* Re-arm queue if there are no entries */ 2095 if (rc != 0) { 2096 sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE); 2097 } 2098 } 2099 return rc; 2100 } 2101 2102 void 2103 ocs_hw_unsol_process_bounce(void *arg) 2104 { 2105 ocs_hw_sequence_t *seq = arg; 2106 ocs_hw_t *hw = seq->hw; 2107 2108 ocs_hw_assert(hw != NULL); 2109 ocs_hw_assert(hw->callback.unsolicited != NULL); 2110 2111 hw->callback.unsolicited(hw->args.unsolicited, seq); 2112 } 2113 2114 int32_t 2115 ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec) 2116 { 2117 hw_eq_t *eq; 2118 int32_t rc = 0; 2119 2120 CPUTRACE(""); 2121 2122 /* 2123 * The caller should disable interrupts if they wish to prevent us 2124 * from processing during a shutdown. The following states are defined: 2125 * OCS_HW_STATE_UNINITIALIZED - No queues allocated 2126 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset, 2127 * queues are cleared. 2128 * OCS_HW_STATE_ACTIVE - Chip and queues are operational 2129 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions 2130 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox 2131 * completions. 2132 */ 2133 if (hw->state == OCS_HW_STATE_UNINITIALIZED) { 2134 return 0; 2135 } 2136 2137 /* Get pointer to hw_eq_t */ 2138 eq = hw->hw_eq[vector]; 2139 2140 OCS_STAT(eq->use_count++); 2141 2142 rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec); 2143 2144 return rc; 2145 } 2146 2147 /** 2148 * @ingroup interrupt 2149 * @brief Process events associated with an EQ. 2150 * 2151 * @par Description 2152 * Loop termination: 2153 * @n @n Without a mechanism to terminate the completion processing loop, it 2154 * is possible under some workload conditions for the loop to never terminate 2155 * (or at least take longer than the OS is happy to have an interrupt handler 2156 * or kernel thread context hold a CPU without yielding). 2157 * @n @n The approach taken here is to periodically check how much time 2158 * we have been in this 2159 * processing loop, and if we exceed a predetermined time (multiple seconds), the 2160 * loop is terminated, and ocs_hw_process() returns. 2161 * 2162 * @param hw Hardware context. 
 * @param eq Pointer to HW EQ object.
 * @param max_isr_time_msec Maximum time in msec to stay in this function.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
{
	uint8_t eqe[sizeof(sli4_eqe_t)] = { 0 };
	uint32_t done = FALSE;
	uint32_t tcheck_count;
	time_t tstart;
	time_t telapsed;

	tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
	tstart = ocs_msectime();

	CPUTRACE("");

	while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
		uint16_t cq_id = 0;
		int32_t rc;

		rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
		if (unlikely(rc)) {
			if (rc > 0) {
				uint32_t i;

				/*
				 * Received a sentinel EQE indicating the EQ is full.
				 * Process all CQs
				 */
				for (i = 0; i < hw->cq_count; i++) {
					ocs_hw_cq_process(hw, hw->hw_cq[i]);
				}
				continue;
			} else {
				return rc;
			}
		} else {
			int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
			if (likely(index >= 0)) {
				ocs_hw_cq_process(hw, hw->hw_cq[index]);
			} else {
				ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
			}
		}

		if (eq->queue->n_posted > (eq->queue->posted_limit)) {
			sli_queue_arm(&hw->sli, eq->queue, FALSE);
		}

		if (tcheck_count && (--tcheck_count == 0)) {
			tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
			telapsed = ocs_msectime() - tstart;
			if (telapsed >= max_isr_time_msec) {
				done = TRUE;
			}
		}
	}
	sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);

	return 0;
}

/**
 * @brief Submit queued (pending) mbx commands.
 *
 * @par Description
 * Submit queued mailbox commands.
 * --- Assumes that hw->cmd_lock is held ---
 *
 * @param hw Hardware context.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
ocs_hw_cmd_submit_pending(ocs_hw_t *hw)
{
	ocs_command_ctx_t *ctx;
	int32_t rc = 0;

	/* Assumes lock held */

	/* Only submit MQE if there's room */
	while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) {
		ctx = ocs_list_remove_head(&hw->cmd_pending);
		if (ctx == NULL) {
			break;
		}
		ocs_list_add_tail(&hw->cmd_head, ctx);
		hw->cmd_head_count++;
		if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) {
			ocs_log_test(hw->os, "sli_queue_write failed\n");
			rc = -1;
			break;
		}
	}
	return rc;
}

/**
 * @ingroup io
 * @brief Issue a SLI command.
 *
 * @par Description
 * Send a mailbox command to the hardware, and either wait for a completion
 * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT).
 *
 * @param hw Hardware context.
 * @param cmd Buffer containing a formatted command and results.
 * @param opts Command options:
 * - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion.
 * - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback.
 * @param cb Function callback used for asynchronous mode. May be NULL.
 * @n Prototype is <tt>(*cb)(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)</tt>,
 * matching the ocs_hw_cb_* completion handlers in this file.
 * @n @n @b Note: If the callback function pointer is NULL, the results of the
 * command are silently discarded, allowing the command buffer to exist solely
 * on the stack.
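 * @n @n For example, a polled caller can use a stack buffer and no callback
 * (a minimal sketch; the command is assumed to have been formatted into
 * @c mbx beforehand by one of the sli_cmd_*() builders):
 * @code
 * uint8_t mbx[SLI4_BMBX_SIZE];
 *
 * // ... format a mailbox command into mbx ...
 * if (ocs_hw_command(hw, mbx, OCS_CMD_POLL, NULL, NULL) == OCS_HW_RTN_SUCCESS) {
 *         // mbx now holds the completed mailbox response
 * }
 * @endcode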
2282 * @param arg Argument passed to an asynchronous callback. 2283 * 2284 * @return Returns 0 on success, or a non-zero value on failure. 2285 */ 2286 ocs_hw_rtn_e 2287 ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg) 2288 { 2289 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 2290 2291 /* 2292 * If the chip is in an error state (UE'd) then reject this mailbox 2293 * command. 2294 */ 2295 if (sli_fw_error_status(&hw->sli) > 0) { 2296 uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1); 2297 uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2); 2298 if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) { 2299 hw->expiration_logged = 1; 2300 ocs_log_crit(hw->os,"Emulex: Heartbeat expired after %d seconds\n", 2301 hw->watchdog_timeout); 2302 } 2303 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2304 ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n", 2305 sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS), 2306 err1, err2); 2307 2308 return OCS_HW_RTN_ERROR; 2309 } 2310 2311 if (OCS_CMD_POLL == opts) { 2312 ocs_lock(&hw->cmd_lock); 2313 if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) { 2314 /* 2315 * Can't issue Boot-strap mailbox command with other 2316 * mail-queue commands pending as this interaction is 2317 * undefined 2318 */ 2319 rc = OCS_HW_RTN_ERROR; 2320 } else { 2321 void *bmbx = hw->sli.bmbx.virt; 2322 2323 ocs_memset(bmbx, 0, SLI4_BMBX_SIZE); 2324 ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE); 2325 2326 if (sli_bmbx_command(&hw->sli) == 0) { 2327 rc = OCS_HW_RTN_SUCCESS; 2328 ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE); 2329 } 2330 } 2331 ocs_unlock(&hw->cmd_lock); 2332 } else if (OCS_CMD_NOWAIT == opts) { 2333 ocs_command_ctx_t *ctx = NULL; 2334 2335 ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT); 2336 if (!ctx) { 2337 ocs_log_err(hw->os, "can't allocate command context\n"); 2338 return OCS_HW_RTN_NO_RESOURCES; 2339 } 2340 2341 if (hw->state != OCS_HW_STATE_ACTIVE) { 2342 ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state); 2343 ocs_free(hw->os, ctx, sizeof(*ctx)); 2344 return OCS_HW_RTN_ERROR; 2345 } 2346 2347 if (cb) { 2348 ctx->cb = cb; 2349 ctx->arg = arg; 2350 } 2351 ctx->buf = cmd; 2352 ctx->ctx = hw; 2353 2354 ocs_lock(&hw->cmd_lock); 2355 2356 /* Add to pending list */ 2357 ocs_list_add_tail(&hw->cmd_pending, ctx); 2358 2359 /* Submit as much of the pending list as we can */ 2360 if (ocs_hw_cmd_submit_pending(hw) == 0) { 2361 rc = OCS_HW_RTN_SUCCESS; 2362 } 2363 2364 ocs_unlock(&hw->cmd_lock); 2365 } 2366 2367 return rc; 2368 } 2369 2370 /** 2371 * @ingroup devInitShutdown 2372 * @brief Register a callback for the given event. 2373 * 2374 * @param hw Hardware context. 2375 * @param which Event of interest. 2376 * @param func Function to call when the event occurs. 2377 * @param arg Argument passed to the callback function. 2378 * 2379 * @return Returns 0 on success, or a non-zero value on failure. 
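 *
 * @par Example
 * A registration sketch (the handler name and argument are hypothetical; the
 * required handler prototype is defined by the corresponding member of
 * hw->callback):
 * @code
 * if (ocs_hw_callback(hw, OCS_HW_CB_DOMAIN, my_domain_handler, transport_ctx)
 *     != OCS_HW_RTN_SUCCESS) {
 *         // invalid arguments; nothing was registered
 * }
 * @endcode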
2380 */ 2381 ocs_hw_rtn_e 2382 ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg) 2383 { 2384 2385 if (!hw || !func || (which >= OCS_HW_CB_MAX)) { 2386 ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n", 2387 hw, which, func); 2388 return OCS_HW_RTN_ERROR; 2389 } 2390 2391 switch (which) { 2392 case OCS_HW_CB_DOMAIN: 2393 hw->callback.domain = func; 2394 hw->args.domain = arg; 2395 break; 2396 case OCS_HW_CB_PORT: 2397 hw->callback.port = func; 2398 hw->args.port = arg; 2399 break; 2400 case OCS_HW_CB_UNSOLICITED: 2401 hw->callback.unsolicited = func; 2402 hw->args.unsolicited = arg; 2403 break; 2404 case OCS_HW_CB_REMOTE_NODE: 2405 hw->callback.rnode = func; 2406 hw->args.rnode = arg; 2407 break; 2408 case OCS_HW_CB_BOUNCE: 2409 hw->callback.bounce = func; 2410 hw->args.bounce = arg; 2411 break; 2412 default: 2413 ocs_log_test(hw->os, "unknown callback %#x\n", which); 2414 return OCS_HW_RTN_ERROR; 2415 } 2416 2417 return OCS_HW_RTN_SUCCESS; 2418 } 2419 2420 /** 2421 * @ingroup port 2422 * @brief Allocate a port object. 2423 * 2424 * @par Description 2425 * This function allocates a VPI object for the port and stores it in the 2426 * indicator field of the port object. 2427 * 2428 * @param hw Hardware context. 2429 * @param sport SLI port object used to connect to the domain. 2430 * @param domain Domain object associated with this port (may be NULL). 2431 * @param wwpn Port's WWPN in big-endian order, or NULL to use default. 2432 * 2433 * @return Returns 0 on success, or a non-zero value on failure. 2434 */ 2435 ocs_hw_rtn_e 2436 ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain, 2437 uint8_t *wwpn) 2438 { 2439 uint8_t *cmd = NULL; 2440 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2441 uint32_t index; 2442 2443 sport->indicator = UINT32_MAX; 2444 sport->hw = hw; 2445 sport->ctx.app = sport; 2446 sport->sm_free_req_pending = 0; 2447 2448 /* 2449 * Check if the chip is in an error state (UE'd) before proceeding. 
2450 */ 2451 if (sli_fw_error_status(&hw->sli) > 0) { 2452 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2453 return OCS_HW_RTN_ERROR; 2454 } 2455 2456 if (wwpn) { 2457 ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn)); 2458 } 2459 2460 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) { 2461 ocs_log_err(hw->os, "FCOE_VPI allocation failure\n"); 2462 return OCS_HW_RTN_ERROR; 2463 } 2464 2465 if (domain != NULL) { 2466 ocs_sm_function_t next = NULL; 2467 2468 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 2469 if (!cmd) { 2470 ocs_log_err(hw->os, "command memory allocation failed\n"); 2471 rc = OCS_HW_RTN_NO_MEMORY; 2472 goto ocs_hw_port_alloc_out; 2473 } 2474 2475 /* If the WWPN is NULL, fetch the default WWPN and WWNN before 2476 * initializing the VPI 2477 */ 2478 if (!wwpn) { 2479 next = __ocs_hw_port_alloc_read_sparm64; 2480 } else { 2481 next = __ocs_hw_port_alloc_init_vpi; 2482 } 2483 2484 ocs_sm_transition(&sport->ctx, next, cmd); 2485 } else if (!wwpn) { 2486 /* This is the convention for the HW, not SLI */ 2487 ocs_log_test(hw->os, "need WWN for physical port\n"); 2488 rc = OCS_HW_RTN_ERROR; 2489 } else { 2490 /* domain NULL and wwpn non-NULL */ 2491 ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL); 2492 } 2493 2494 ocs_hw_port_alloc_out: 2495 if (rc != OCS_HW_RTN_SUCCESS) { 2496 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); 2497 2498 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); 2499 } 2500 2501 return rc; 2502 } 2503 2504 /** 2505 * @ingroup port 2506 * @brief Attach a physical/virtual SLI port to a domain. 2507 * 2508 * @par Description 2509 * This function registers a previously-allocated VPI with the 2510 * device. 2511 * 2512 * @param hw Hardware context. 2513 * @param sport Pointer to the SLI port object. 2514 * @param fc_id Fibre Channel ID to associate with this port. 2515 * 2516 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure. 2517 */ 2518 ocs_hw_rtn_e 2519 ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id) 2520 { 2521 uint8_t *buf = NULL; 2522 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2523 2524 if (!hw || !sport) { 2525 ocs_log_err(hw ? hw->os : NULL, 2526 "bad parameter(s) hw=%p sport=%p\n", hw, 2527 sport); 2528 return OCS_HW_RTN_ERROR; 2529 } 2530 2531 /* 2532 * Check if the chip is in an error state (UE'd) before proceeding. 2533 */ 2534 if (sli_fw_error_status(&hw->sli) > 0) { 2535 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2536 return OCS_HW_RTN_ERROR; 2537 } 2538 2539 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 2540 if (!buf) { 2541 ocs_log_err(hw->os, "no buffer for command\n"); 2542 return OCS_HW_RTN_NO_MEMORY; 2543 } 2544 2545 sport->fc_id = fc_id; 2546 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf); 2547 return rc; 2548 } 2549 2550 /** 2551 * @brief Called when the port control command completes. 2552 * 2553 * @par Description 2554 * We only need to free the mailbox command buffer. 2555 * 2556 * @param hw Hardware context. 2557 * @param status Status field from the mbox completion. 2558 * @param mqe Mailbox response structure. 2559 * @param arg Pointer to a callback function that signals the caller that the command is done. 2560 * 2561 * @return Returns 0. 
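 *
 * @n @b Note: This is the shared NOWAIT completion callback for the
 * fire-and-forget port commands below (CONFIG_LINK, INIT_LINK, and
 * DOWN_LINK); its only obligation is to free the SLI4_BMBX_SIZE command
 * buffer that was passed to ocs_hw_command().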
 */
static int32_t
ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	return 0;
}

/**
 * @ingroup port
 * @brief Control a port (initialize, shutdown, or set link configuration).
 *
 * @par Description
 * This function controls a port depending on the @c ctrl parameter:
 * - @b OCS_HW_PORT_INIT -
 * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
 * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
 * .
 * - @b OCS_HW_PORT_SHUTDOWN -
 * Issues the DOWN_LINK command for the specified port.
 * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
 * .
 * - @b OCS_HW_PORT_SET_LINK_CONFIG -
 * Sets the link configuration.
 *
 * @param hw Hardware context.
 * @param ctrl Specifies the operation:
 * - OCS_HW_PORT_INIT
 * - OCS_HW_PORT_SHUTDOWN
 * - OCS_HW_PORT_SET_LINK_CONFIG
 *
 * @param value Operation-specific value.
 * - OCS_HW_PORT_INIT - Selective reset AL_PA
 * - OCS_HW_PORT_SHUTDOWN - N/A
 * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
 *
 * @param cb Callback function, whose use depends on the operation:
 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
 * are handled by the OCS_HW_CB_DOMAIN callbacks).
 * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after the linkcfg mailbox command
 * completes.
 *
 * @param arg Argument passed to the callback function:
 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL.
 * - OCS_HW_PORT_SET_LINK_CONFIG - Passed to the linkcfg completion callback.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;

	switch (ctrl) {
	case OCS_HW_PORT_INIT:
	{
		uint8_t *init_link;
		uint32_t speed = 0;
		uint8_t reset_alpa = 0;

		if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
			uint8_t *cfg_link;

			cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
			if (cfg_link == NULL) {
				ocs_log_err(hw->os, "no buffer for command\n");
				return OCS_HW_RTN_NO_MEMORY;
			}

			if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
				rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
						ocs_hw_cb_port_control, NULL);
			}

			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
				ocs_log_err(hw->os, "CONFIG_LINK failed\n");
				break;
			}
			speed = hw->config.speed;
			reset_alpa = (uint8_t)(value & 0xff);
		} else {
			speed = FC_LINK_SPEED_10G;
		}

		/*
		 * Bring the link up, unless the FW version is unsupported
		 */
		if (hw->workaround.fw_version_too_low) {
			if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
				ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
						OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli, 0));
			} else {
				ocs_log_err(hw->os, "Cannot bring up link. 
Please update firmware to %s or later (current version is %s)\n", 2658 OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0)); 2659 } 2660 2661 return OCS_HW_RTN_ERROR; 2662 } 2663 2664 rc = OCS_HW_RTN_ERROR; 2665 2666 /* Allocate a new buffer for the init_link command */ 2667 init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 2668 if (init_link == NULL) { 2669 ocs_log_err(hw->os, "no buffer for command\n"); 2670 return OCS_HW_RTN_NO_MEMORY; 2671 } 2672 2673 if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) { 2674 rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT, 2675 ocs_hw_cb_port_control, NULL); 2676 } 2677 /* Free buffer on error, since no callback is coming */ 2678 if (rc != OCS_HW_RTN_SUCCESS) { 2679 ocs_free(hw->os, init_link, SLI4_BMBX_SIZE); 2680 ocs_log_err(hw->os, "INIT_LINK failed\n"); 2681 } 2682 break; 2683 } 2684 case OCS_HW_PORT_SHUTDOWN: 2685 { 2686 uint8_t *down_link; 2687 2688 down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 2689 if (down_link == NULL) { 2690 ocs_log_err(hw->os, "no buffer for command\n"); 2691 return OCS_HW_RTN_NO_MEMORY; 2692 } 2693 if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) { 2694 rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT, 2695 ocs_hw_cb_port_control, NULL); 2696 } 2697 /* Free buffer on error, since no callback is coming */ 2698 if (rc != OCS_HW_RTN_SUCCESS) { 2699 ocs_free(hw->os, down_link, SLI4_BMBX_SIZE); 2700 ocs_log_err(hw->os, "DOWN_LINK failed\n"); 2701 } 2702 break; 2703 } 2704 case OCS_HW_PORT_SET_LINK_CONFIG: 2705 rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg); 2706 break; 2707 default: 2708 ocs_log_test(hw->os, "unhandled control %#x\n", ctrl); 2709 break; 2710 } 2711 2712 return rc; 2713 } 2714 2715 /** 2716 * @ingroup port 2717 * @brief Free port resources. 2718 * 2719 * @par Description 2720 * Issue the UNREG_VPI command to free the assigned VPI context. 2721 * 2722 * @param hw Hardware context. 2723 * @param sport SLI port object used to connect to the domain. 2724 * 2725 * @return Returns 0 on success, or a non-zero value on failure. 2726 */ 2727 ocs_hw_rtn_e 2728 ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport) 2729 { 2730 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2731 2732 if (!hw || !sport) { 2733 ocs_log_err(hw ? hw->os : NULL, 2734 "bad parameter(s) hw=%p sport=%p\n", hw, 2735 sport); 2736 return OCS_HW_RTN_ERROR; 2737 } 2738 2739 /* 2740 * Check if the chip is in an error state (UE'd) before proceeding. 2741 */ 2742 if (sli_fw_error_status(&hw->sli) > 0) { 2743 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2744 return OCS_HW_RTN_ERROR; 2745 } 2746 2747 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL); 2748 return rc; 2749 } 2750 2751 /** 2752 * @ingroup domain 2753 * @brief Allocate a fabric domain object. 2754 * 2755 * @par Description 2756 * This function starts a series of commands needed to connect to the domain, including 2757 * - REG_FCFI 2758 * - INIT_VFI 2759 * - READ_SPARMS 2760 * . 2761 * @b Note: Not all SLI interface types use all of the above commands. 2762 * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK 2763 * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event. 2764 * 2765 * @param hw Hardware context. 2766 * @param domain Pointer to the domain object. 2767 * @param fcf FCF index. 2768 * @param vlan VLAN ID. 2769 * 2770 * @return Returns 0 on success, or a non-zero value on failure. 
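 *
 * @par Example
 * A minimal calling sketch (hypothetical caller; in this driver the domain
 * object and the FCF/VLAN values come from the unsolicited FCF handling):
 * @code
 * if (ocs_hw_domain_alloc(hw, domain, fcf_index, vlan_id) != OCS_HW_RTN_SUCCESS) {
 *         // no OCS_HW_DOMAIN_ALLOC_* event will follow; recover here
 * }
 * // otherwise the result arrives asynchronously as an OCS_HW_DOMAIN_ALLOC_OK
 * // or OCS_HW_DOMAIN_ALLOC_FAIL event on the callback registered with
 * // ocs_hw_callback(hw, OCS_HW_CB_DOMAIN, ...)
 * @endcode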
2771 */ 2772 ocs_hw_rtn_e 2773 ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan) 2774 { 2775 uint8_t *cmd = NULL; 2776 uint32_t index; 2777 2778 if (!hw || !domain || !domain->sport) { 2779 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n", 2780 hw, domain, domain ? domain->sport : NULL); 2781 return OCS_HW_RTN_ERROR; 2782 } 2783 2784 /* 2785 * Check if the chip is in an error state (UE'd) before proceeding. 2786 */ 2787 if (sli_fw_error_status(&hw->sli) > 0) { 2788 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2789 return OCS_HW_RTN_ERROR; 2790 } 2791 2792 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 2793 if (!cmd) { 2794 ocs_log_err(hw->os, "command memory allocation failed\n"); 2795 return OCS_HW_RTN_NO_MEMORY; 2796 } 2797 2798 domain->dma = hw->domain_dmem; 2799 2800 domain->hw = hw; 2801 domain->sm.app = domain; 2802 domain->fcf = fcf; 2803 domain->fcf_indicator = UINT32_MAX; 2804 domain->vlan_id = vlan; 2805 domain->indicator = UINT32_MAX; 2806 2807 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) { 2808 ocs_log_err(hw->os, "FCOE_VFI allocation failure\n"); 2809 2810 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); 2811 2812 return OCS_HW_RTN_ERROR; 2813 } 2814 2815 ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd); 2816 return OCS_HW_RTN_SUCCESS; 2817 } 2818 2819 /** 2820 * @ingroup domain 2821 * @brief Attach a SLI port to a domain. 2822 * 2823 * @param hw Hardware context. 2824 * @param domain Pointer to the domain object. 2825 * @param fc_id Fibre Channel ID to associate with this port. 2826 * 2827 * @return Returns 0 on success, or a non-zero value on failure. 2828 */ 2829 ocs_hw_rtn_e 2830 ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id) 2831 { 2832 uint8_t *buf = NULL; 2833 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2834 2835 if (!hw || !domain) { 2836 ocs_log_err(hw ? hw->os : NULL, 2837 "bad parameter(s) hw=%p domain=%p\n", 2838 hw, domain); 2839 return OCS_HW_RTN_ERROR; 2840 } 2841 2842 /* 2843 * Check if the chip is in an error state (UE'd) before proceeding. 2844 */ 2845 if (sli_fw_error_status(&hw->sli) > 0) { 2846 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2847 return OCS_HW_RTN_ERROR; 2848 } 2849 2850 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 2851 if (!buf) { 2852 ocs_log_err(hw->os, "no buffer for command\n"); 2853 return OCS_HW_RTN_NO_MEMORY; 2854 } 2855 2856 domain->sport->fc_id = fc_id; 2857 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf); 2858 return rc; 2859 } 2860 2861 /** 2862 * @ingroup domain 2863 * @brief Free a fabric domain object. 2864 * 2865 * @par Description 2866 * Free both the driver and SLI port resources associated with the domain. 2867 * 2868 * @param hw Hardware context. 2869 * @param domain Pointer to the domain object. 2870 * 2871 * @return Returns 0 on success, or a non-zero value on failure. 2872 */ 2873 ocs_hw_rtn_e 2874 ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain) 2875 { 2876 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2877 2878 if (!hw || !domain) { 2879 ocs_log_err(hw ? hw->os : NULL, 2880 "bad parameter(s) hw=%p domain=%p\n", 2881 hw, domain); 2882 return OCS_HW_RTN_ERROR; 2883 } 2884 2885 /* 2886 * Check if the chip is in an error state (UE'd) before proceeding. 
2887 */ 2888 if (sli_fw_error_status(&hw->sli) > 0) { 2889 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2890 return OCS_HW_RTN_ERROR; 2891 } 2892 2893 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL); 2894 return rc; 2895 } 2896 2897 /** 2898 * @ingroup domain 2899 * @brief Free a fabric domain object. 2900 * 2901 * @par Description 2902 * Free the driver resources associated with the domain. The difference between 2903 * this call and ocs_hw_domain_free() is that this call assumes resources no longer 2904 * exist on the SLI port, due to a reset or after some error conditions. 2905 * 2906 * @param hw Hardware context. 2907 * @param domain Pointer to the domain object. 2908 * 2909 * @return Returns 0 on success, or a non-zero value on failure. 2910 */ 2911 ocs_hw_rtn_e 2912 ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain) 2913 { 2914 if (!hw || !domain) { 2915 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain); 2916 return OCS_HW_RTN_ERROR; 2917 } 2918 2919 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator); 2920 2921 return OCS_HW_RTN_SUCCESS; 2922 } 2923 2924 /** 2925 * @ingroup node 2926 * @brief Allocate a remote node object. 2927 * 2928 * @param hw Hardware context. 2929 * @param rnode Allocated remote node object to initialize. 2930 * @param fc_addr FC address of the remote node. 2931 * @param sport SLI port used to connect to remote node. 2932 * 2933 * @return Returns 0 on success, or a non-zero value on failure. 2934 */ 2935 ocs_hw_rtn_e 2936 ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr, 2937 ocs_sli_port_t *sport) 2938 { 2939 /* Check for invalid indicator */ 2940 if (UINT32_MAX != rnode->indicator) { 2941 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n", 2942 fc_addr, rnode->indicator); 2943 return OCS_HW_RTN_ERROR; 2944 } 2945 2946 /* 2947 * Check if the chip is in an error state (UE'd) before proceeding. 2948 */ 2949 if (sli_fw_error_status(&hw->sli) > 0) { 2950 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2951 return OCS_HW_RTN_ERROR; 2952 } 2953 2954 /* NULL SLI port indicates an unallocated remote node */ 2955 rnode->sport = NULL; 2956 2957 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) { 2958 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n", 2959 fc_addr); 2960 return OCS_HW_RTN_ERROR; 2961 } 2962 2963 rnode->fc_id = fc_addr; 2964 rnode->sport = sport; 2965 2966 return OCS_HW_RTN_SUCCESS; 2967 } 2968 2969 /** 2970 * @ingroup node 2971 * @brief Update a remote node object with the remote port's service parameters. 2972 * 2973 * @param hw Hardware context. 2974 * @param rnode Allocated remote node object to initialize. 2975 * @param sparms DMA buffer containing the remote port's service parameters. 2976 * 2977 * @return Returns 0 on success, or a non-zero value on failure. 2978 */ 2979 ocs_hw_rtn_e 2980 ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms) 2981 { 2982 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 2983 uint8_t *buf = NULL; 2984 uint32_t count = 0; 2985 2986 if (!hw || !rnode || !sparms) { 2987 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n", 2988 hw, rnode, sparms); 2989 return OCS_HW_RTN_ERROR; 2990 } 2991 2992 /* 2993 * Check if the chip is in an error state (UE'd) before proceeding. 
2994 */ 2995 if (sli_fw_error_status(&hw->sli) > 0) { 2996 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2997 return OCS_HW_RTN_ERROR; 2998 } 2999 3000 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 3001 if (!buf) { 3002 ocs_log_err(hw->os, "no buffer for command\n"); 3003 return OCS_HW_RTN_NO_MEMORY; 3004 } 3005 3006 /* 3007 * If the attach count is non-zero, this RPI has already been registered. 3008 * Otherwise, register the RPI 3009 */ 3010 if (rnode->index == UINT32_MAX) { 3011 ocs_log_err(NULL, "bad parameter rnode->index invalid\n"); 3012 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 3013 return OCS_HW_RTN_ERROR; 3014 } 3015 count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1); 3016 if (count) { 3017 /* 3018 * Can't attach multiple FC_ID's to a node unless High Login 3019 * Mode is enabled 3020 */ 3021 if (sli_get_hlm(&hw->sli) == FALSE) { 3022 ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n", 3023 sli_get_hlm(&hw->sli), count); 3024 rc = OCS_HW_RTN_SUCCESS; 3025 } else { 3026 rnode->node_group = TRUE; 3027 rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached); 3028 rc = rnode->attached ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS; 3029 } 3030 } else { 3031 rnode->node_group = FALSE; 3032 3033 ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt); 3034 if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id, 3035 rnode->indicator, rnode->sport->indicator, 3036 sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) { 3037 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, 3038 ocs_hw_cb_node_attach, rnode); 3039 } 3040 } 3041 3042 if (count || rc) { 3043 if (rc < OCS_HW_RTN_SUCCESS) { 3044 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1); 3045 ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI"); 3046 } 3047 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 3048 } 3049 3050 return rc; 3051 } 3052 3053 /** 3054 * @ingroup node 3055 * @brief Free a remote node resource. 3056 * 3057 * @param hw Hardware context. 3058 * @param rnode Remote node object to free. 3059 * 3060 * @return Returns 0 on success, or a non-zero value on failure. 3061 */ 3062 ocs_hw_rtn_e 3063 ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode) 3064 { 3065 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 3066 3067 if (!hw || !rnode) { 3068 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n", 3069 hw, rnode); 3070 return OCS_HW_RTN_ERROR; 3071 } 3072 3073 if (rnode->sport) { 3074 if (!rnode->attached) { 3075 if (rnode->indicator != UINT32_MAX) { 3076 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) { 3077 ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n", 3078 rnode->indicator, rnode->fc_id); 3079 rc = OCS_HW_RTN_ERROR; 3080 } else { 3081 rnode->node_group = FALSE; 3082 rnode->indicator = UINT32_MAX; 3083 rnode->index = UINT32_MAX; 3084 rnode->free_group = FALSE; 3085 } 3086 } 3087 } else { 3088 ocs_log_err(hw->os, "Error: rnode is still attached\n"); 3089 rc = OCS_HW_RTN_ERROR; 3090 } 3091 } 3092 3093 return rc; 3094 } 3095 3096 /** 3097 * @ingroup node 3098 * @brief Free a remote node object. 3099 * 3100 * @param hw Hardware context. 3101 * @param rnode Remote node object to free. 3102 * 3103 * @return Returns 0 on success, or a non-zero value on failure. 
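 *
 * @par Example
 * A sketch of the two success paths a caller must handle (hypothetical
 * caller code; the surrounding transport logic is omitted):
 * @code
 * switch (ocs_hw_node_detach(hw, rnode)) {
 * case OCS_HW_RTN_SUCCESS_SYNC:
 *         // node was not attached; no UNREG_RPI was issued and the
 *         // operation is already complete
 *         break;
 * case OCS_HW_RTN_SUCCESS:
 *         // UNREG_RPI was posted; completion is reported back through
 *         // the callback registered with OCS_HW_CB_REMOTE_NODE
 *         break;
 * default:
 *         // error
 *         break;
 * }
 * @endcode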
3104 */ 3105 ocs_hw_rtn_e 3106 ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode) 3107 { 3108 uint8_t *buf = NULL; 3109 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS_SYNC; 3110 uint32_t index = UINT32_MAX; 3111 3112 if (!hw || !rnode) { 3113 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n", 3114 hw, rnode); 3115 return OCS_HW_RTN_ERROR; 3116 } 3117 3118 /* 3119 * Check if the chip is in an error state (UE'd) before proceeding. 3120 */ 3121 if (sli_fw_error_status(&hw->sli) > 0) { 3122 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 3123 return OCS_HW_RTN_ERROR; 3124 } 3125 3126 index = rnode->index; 3127 3128 if (rnode->sport) { 3129 uint32_t count = 0; 3130 uint32_t fc_id; 3131 3132 if (!rnode->attached) { 3133 return OCS_HW_RTN_SUCCESS_SYNC; 3134 } 3135 3136 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 3137 if (!buf) { 3138 ocs_log_err(hw->os, "no buffer for command\n"); 3139 return OCS_HW_RTN_NO_MEMORY; 3140 } 3141 3142 count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1); 3143 3144 if (count <= 1) { 3145 /* There are no other references to this RPI 3146 * so unregister it and free the resource. */ 3147 fc_id = UINT32_MAX; 3148 rnode->node_group = FALSE; 3149 rnode->free_group = TRUE; 3150 } else { 3151 if (sli_get_hlm(&hw->sli) == FALSE) { 3152 ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n", 3153 count); 3154 } 3155 fc_id = rnode->fc_id & 0x00ffffff; 3156 } 3157 3158 rc = OCS_HW_RTN_ERROR; 3159 3160 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator, 3161 SLI_RSRC_FCOE_RPI, fc_id)) { 3162 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode); 3163 } 3164 3165 if (rc != OCS_HW_RTN_SUCCESS) { 3166 ocs_log_err(hw->os, "UNREG_RPI failed\n"); 3167 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 3168 rc = OCS_HW_RTN_ERROR; 3169 } 3170 } 3171 3172 return rc; 3173 } 3174 3175 /** 3176 * @ingroup node 3177 * @brief Free all remote node objects. 3178 * 3179 * @param hw Hardware context. 3180 * 3181 * @return Returns 0 on success, or a non-zero value on failure. 3182 */ 3183 ocs_hw_rtn_e 3184 ocs_hw_node_free_all(ocs_hw_t *hw) 3185 { 3186 uint8_t *buf = NULL; 3187 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 3188 3189 if (!hw) { 3190 ocs_log_err(NULL, "bad parameter hw=%p\n", hw); 3191 return OCS_HW_RTN_ERROR; 3192 } 3193 3194 /* 3195 * Check if the chip is in an error state (UE'd) before proceeding. 
3196 */ 3197 if (sli_fw_error_status(&hw->sli) > 0) { 3198 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 3199 return OCS_HW_RTN_ERROR; 3200 } 3201 3202 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 3203 if (!buf) { 3204 ocs_log_err(hw->os, "no buffer for command\n"); 3205 return OCS_HW_RTN_NO_MEMORY; 3206 } 3207 3208 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff, 3209 SLI_RSRC_FCOE_FCFI, UINT32_MAX)) { 3210 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all, 3211 NULL); 3212 } 3213 3214 if (rc != OCS_HW_RTN_SUCCESS) { 3215 ocs_log_err(hw->os, "UNREG_RPI failed\n"); 3216 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 3217 rc = OCS_HW_RTN_ERROR; 3218 } 3219 3220 return rc; 3221 } 3222 3223 ocs_hw_rtn_e 3224 ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup) 3225 { 3226 3227 if (!hw || !ngroup) { 3228 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n", 3229 hw, ngroup); 3230 return OCS_HW_RTN_ERROR; 3231 } 3232 3233 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator, 3234 &ngroup->index)) { 3235 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n", 3236 ngroup->indicator); 3237 return OCS_HW_RTN_ERROR; 3238 } 3239 3240 return OCS_HW_RTN_SUCCESS; 3241 } 3242 3243 ocs_hw_rtn_e 3244 ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode) 3245 { 3246 3247 if (!hw || !ngroup || !rnode) { 3248 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n", 3249 hw, ngroup, rnode); 3250 return OCS_HW_RTN_ERROR; 3251 } 3252 3253 if (rnode->attached) { 3254 ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n", 3255 rnode->indicator, rnode->fc_id); 3256 return OCS_HW_RTN_ERROR; 3257 } 3258 3259 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) { 3260 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n", 3261 rnode->indicator); 3262 return OCS_HW_RTN_ERROR; 3263 } 3264 3265 rnode->indicator = ngroup->indicator; 3266 rnode->index = ngroup->index; 3267 3268 return OCS_HW_RTN_SUCCESS; 3269 } 3270 3271 ocs_hw_rtn_e 3272 ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup) 3273 { 3274 int ref; 3275 3276 if (!hw || !ngroup) { 3277 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n", 3278 hw, ngroup); 3279 return OCS_HW_RTN_ERROR; 3280 } 3281 3282 ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count); 3283 if (ref) { 3284 /* Hmmm, the reference count is non-zero */ 3285 ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n", 3286 ref, ngroup->indicator); 3287 3288 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) { 3289 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n", 3290 ngroup->indicator); 3291 return OCS_HW_RTN_ERROR; 3292 } 3293 3294 ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0); 3295 } 3296 3297 ngroup->indicator = UINT32_MAX; 3298 ngroup->index = UINT32_MAX; 3299 3300 return OCS_HW_RTN_SUCCESS; 3301 } 3302 3303 /** 3304 * @brief Initialize IO fields on each free call. 3305 * 3306 * @n @b Note: This is done on each free call (as opposed to each 3307 * alloc call) because port-owned XRIs are not 3308 * allocated with ocs_hw_io_alloc() but are freed with this 3309 * function. 3310 * 3311 * @param io Pointer to HW IO. 
 */
static inline void
ocs_hw_init_free_io(ocs_hw_io_t *io)
{
	/*
	 * Set io->done to NULL, to avoid any callbacks, should
	 * a completion be received for one of these IOs
	 */
	io->done = NULL;
	io->abort_done = NULL;
	io->status_saved = 0;
	io->abort_in_progress = FALSE;
	io->port_owned_abort_count = 0;
	io->rnode = NULL;
	io->type = 0xFFFF;
	io->wq = NULL;
	io->ul_io = NULL;
	io->tgt_wqe_timeout = 0;
}

/**
 * @ingroup io
 * @brief Lockless allocation of a HW IO object.
 *
 * @par Description
 * Assumes that hw->io_lock is held. Called via ocs_hw_io_alloc(), and
 * directly by ocs_hw_check_sec_hio_list() when the use_dif_sec_xri
 * workaround is in use.
 *
 * @param hw Hardware context.
 *
 * @return Returns a pointer to an object on success, or NULL on failure.
 */
static inline ocs_hw_io_t *
_ocs_hw_io_alloc(ocs_hw_t *hw)
{
	ocs_hw_io_t *io = NULL;

	if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
		ocs_list_add_tail(&hw->io_inuse, io);
		io->state = OCS_HW_IO_STATE_INUSE;
		io->quarantine = FALSE;
		io->quarantine_first_phase = TRUE;
		io->abort_reqtag = UINT32_MAX;
		ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
	} else {
		ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
	}

	return io;
}

/**
 * @ingroup io
 * @brief Allocate a HW IO object.
 *
 * @par Description
 * @n @b Note: This function applies to non-port owned XRIs
 * only.
 *
 * @param hw Hardware context.
 *
 * @return Returns a pointer to an object on success, or NULL on failure.
 */
ocs_hw_io_t *
ocs_hw_io_alloc(ocs_hw_t *hw)
{
	ocs_hw_io_t *io = NULL;

	ocs_lock(&hw->io_lock);
	io = _ocs_hw_io_alloc(hw);
	ocs_unlock(&hw->io_lock);

	return io;
}

/**
 * @ingroup io
 * @brief Allocate/Activate a port owned HW IO object.
 *
 * @par Description
 * This function is called by the transport layer when an XRI is
 * allocated by the SLI-Port. This will "activate" the HW IO
 * associated with the XRI received from the SLI-Port to mirror
 * the state of the XRI.
 * @n @n @b Note: This function applies to port owned XRIs only.
 *
 * @param hw Hardware context.
 * @param io Pointer to the HW IO to activate/allocate.
 *
 * @return Returns a pointer to an object on success, or NULL on failure.
 */
ocs_hw_io_t *
ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (ocs_ref_read_count(&io->ref) > 0) {
		ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
		return NULL;
	}

	if (io->wq != NULL) {
		ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
		return NULL;
	}

	ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
	io->xbusy = TRUE;

	return io;
}

/**
 * @ingroup io
 * @brief When an IO is freed, move it to the correct list depending on the
 * exchange busy flag and any active workarounds.
 *
 * @par Description
 * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
 * from the busy or wait_free list.
 *
 * @param hw Hardware context.
 * @param io Pointer to the IO object to move.
3432 */ 3433 static void 3434 ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io) 3435 { 3436 if (io->xbusy) { 3437 /* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */ 3438 ocs_list_add_tail(&hw->io_wait_free, io); 3439 io->state = OCS_HW_IO_STATE_WAIT_FREE; 3440 } else { 3441 /* IO not busy, add to free list */ 3442 ocs_list_add_tail(&hw->io_free, io); 3443 io->state = OCS_HW_IO_STATE_FREE; 3444 } 3445 3446 /* BZ 161832 workaround */ 3447 if (hw->workaround.use_dif_sec_xri) { 3448 ocs_hw_check_sec_hio_list(hw); 3449 } 3450 } 3451 3452 /** 3453 * @ingroup io 3454 * @brief Free a HW IO object. Perform cleanup common to 3455 * port and host-owned IOs. 3456 * 3457 * @param hw Hardware context. 3458 * @param io Pointer to the HW IO object. 3459 */ 3460 static inline void 3461 ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io) 3462 { 3463 /* initialize IO fields */ 3464 ocs_hw_init_free_io(io); 3465 3466 /* Restore default SGL */ 3467 ocs_hw_io_restore_sgl(hw, io); 3468 } 3469 3470 /** 3471 * @ingroup io 3472 * @brief Free a HW IO object associated with a port-owned XRI. 3473 * 3474 * @param arg Pointer to the HW IO object. 3475 */ 3476 static void 3477 ocs_hw_io_free_port_owned(void *arg) 3478 { 3479 ocs_hw_io_t *io = (ocs_hw_io_t *)arg; 3480 ocs_hw_t *hw = io->hw; 3481 3482 /* 3483 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs 3484 * waiting for buffers. 3485 */ 3486 if (io->auto_xfer_rdy_dnrx) { 3487 ocs_lock(&hw->io_lock); 3488 /* take a reference count because we still own the IO until the buffer is posted */ 3489 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io); 3490 ocs_list_add_tail(&hw->io_port_dnrx, io); 3491 ocs_unlock(&hw->io_lock); 3492 } 3493 3494 /* perform common cleanup */ 3495 ocs_hw_io_free_common(hw, io); 3496 } 3497 3498 /** 3499 * @ingroup io 3500 * @brief Free a previously-allocated HW IO object. Called when 3501 * IO refcount goes to zero (host-owned IOs only). 3502 * 3503 * @param arg Pointer to the HW IO object. 3504 */ 3505 static void 3506 ocs_hw_io_free_internal(void *arg) 3507 { 3508 ocs_hw_io_t *io = (ocs_hw_io_t *)arg; 3509 ocs_hw_t *hw = io->hw; 3510 3511 /* perform common cleanup */ 3512 ocs_hw_io_free_common(hw, io); 3513 3514 ocs_lock(&hw->io_lock); 3515 /* remove from in-use list */ 3516 ocs_list_remove(&hw->io_inuse, io); 3517 ocs_hw_io_free_move_correct_list(hw, io); 3518 ocs_unlock(&hw->io_lock); 3519 } 3520 3521 /** 3522 * @ingroup io 3523 * @brief Free a previously-allocated HW IO object. 3524 * 3525 * @par Description 3526 * @n @b Note: This function applies to port and host owned XRIs. 3527 * 3528 * @param hw Hardware context. 3529 * @param io Pointer to the HW IO object. 3530 * 3531 * @return Returns a non-zero value if HW IO was freed, 0 if references 3532 * on the IO still exist, or a negative value if an error occurred. 3533 */ 3534 int32_t 3535 ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io) 3536 { 3537 /* just put refcount */ 3538 if (ocs_ref_read_count(&io->ref) <= 0) { 3539 ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n", 3540 io->indicator, io->reqtag); 3541 return -1; 3542 } 3543 3544 return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */ 3545 } 3546 3547 /** 3548 * @ingroup io 3549 * @brief Check if given HW IO is in-use 3550 * 3551 * @par Description 3552 * This function returns TRUE if the given HW IO has been 3553 * allocated and is in-use, and FALSE otherwise. It applies to 3554 * port and host owned XRIs. 
 *
 * @param hw Hardware context.
 * @param io Pointer to the HW IO object.
 *
 * @return TRUE if an IO is in use, or FALSE otherwise.
 */
uint8_t
ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	return (ocs_ref_read_count(&io->ref) > 0);
}

/**
 * @brief Write a HW IO to a work queue.
 *
 * @par Description
 * A HW IO is written to a work queue.
 *
 * @param wq Pointer to work queue.
 * @param wqe Pointer to WQ entry.
 *
 * @n @b Note: Assumes the SLI-4 queue lock is held.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
static int32_t
_hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
{
	int32_t rc;
	int32_t queue_rc;

	/* Every so often, set the wqec bit to generate consumed completions */
	if (wq->wqec_count) {
		wq->wqec_count--;
	}
	if (wq->wqec_count == 0) {
		sli4_generic_wqe_t *genwqe = (void *)wqe->wqebuf;
		genwqe->wqec = 1;
		wq->wqec_count = wq->wqec_set_count;
	}

	/* Decrement WQ free count */
	wq->free_count--;

	queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);

	if (queue_rc < 0) {
		rc = -1;
	} else {
		rc = 0;
		ocs_queue_history_wq(&wq->hw->q_hist, (void *)wqe->wqebuf, wq->queue->id, queue_rc);
	}

	return rc;
}

/**
 * @brief Write a HW IO to a work queue.
 *
 * @par Description
 * A HW IO is written to a work queue.
 *
 * @param wq Pointer to work queue.
 * @param wqe Pointer to WQ entry.
 *
 * @n @b Note: Takes the SLI-4 queue lock.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
{
	int32_t rc = 0;

	sli_queue_lock(wq->queue);
	if (!ocs_list_empty(&wq->pending_list)) {
		ocs_list_add_tail(&wq->pending_list, wqe);
		OCS_STAT(wq->wq_pending_count++;)
		while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
			rc = _hw_wq_write(wq, wqe);
			if (rc < 0) {
				break;
			}
			if (wqe->abort_wqe_submit_needed) {
				wqe->abort_wqe_submit_needed = 0;
				sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
						wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
				ocs_list_add_tail(&wq->pending_list, wqe);
				OCS_STAT(wq->wq_pending_count++;)
			}
		}
	} else {
		if (wq->free_count > 0) {
			rc = _hw_wq_write(wq, wqe);
		} else {
			ocs_list_add_tail(&wq->pending_list, wqe);
			OCS_STAT(wq->wq_pending_count++;)
		}
	}

	sli_queue_unlock(wq->queue);

	return rc;
}

/**
 * @brief Update free count and submit any pending HW IOs
 *
 * @par Description
 * The WQ free count is updated, and any pending HW IOs are submitted that
 * will fit in the queue.
 *
 * @param wq Pointer to work queue.
 * @param update_free_count Value added to WQs free count.
 *
 * @return None.
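 *
 * @n @b Note: This pairs with the wqec accounting in _hw_wq_write(): the
 * periodically-set wqec bit causes the chip to report consumed WQEs, and the
 * completion path credits those entries back through @c update_free_count.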
3672 */ 3673 static void 3674 hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count) 3675 { 3676 ocs_hw_wqe_t *wqe; 3677 3678 sli_queue_lock(wq->queue); 3679 3680 /* Update free count with value passed in */ 3681 wq->free_count += update_free_count; 3682 3683 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) { 3684 _hw_wq_write(wq, wqe); 3685 3686 if (wqe->abort_wqe_submit_needed) { 3687 wqe->abort_wqe_submit_needed = 0; 3688 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI, 3689 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT); 3690 ocs_list_add_tail(&wq->pending_list, wqe); 3691 OCS_STAT(wq->wq_pending_count++;) 3692 } 3693 } 3694 3695 sli_queue_unlock(wq->queue); 3696 } 3697 3698 /** 3699 * @brief Check to see if there are any BZ 161832 workaround waiting IOs 3700 * 3701 * @par Description 3702 * Checks hw->sec_hio_wait_list, if an IO is waiting for a HW IO, then try 3703 * to allocate a secondary HW io, and dispatch it. 3704 * 3705 * @n @b Note: hw->io_lock MUST be taken when called. 3706 * 3707 * @param hw pointer to HW object 3708 * 3709 * @return none 3710 */ 3711 static void 3712 ocs_hw_check_sec_hio_list(ocs_hw_t *hw) 3713 { 3714 ocs_hw_io_t *io; 3715 ocs_hw_io_t *sec_io; 3716 int rc = 0; 3717 3718 while (!ocs_list_empty(&hw->sec_hio_wait_list)) { 3719 uint16_t flags; 3720 3721 sec_io = _ocs_hw_io_alloc(hw); 3722 if (sec_io == NULL) { 3723 break; 3724 } 3725 3726 io = ocs_list_remove_head(&hw->sec_hio_wait_list); 3727 ocs_list_add_tail(&hw->io_inuse, io); 3728 io->state = OCS_HW_IO_STATE_INUSE; 3729 io->sec_hio = sec_io; 3730 3731 /* mark secondary XRI for second and subsequent data phase as quarantine */ 3732 if (io->xbusy) { 3733 sec_io->quarantine = TRUE; 3734 } 3735 3736 flags = io->sec_iparam.fcp_tgt.flags; 3737 if (io->xbusy) { 3738 flags |= SLI4_IO_CONTINUATION; 3739 } else { 3740 flags &= ~SLI4_IO_CONTINUATION; 3741 } 3742 3743 io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout; 3744 3745 /* Complete (continue) TRECV IO */ 3746 if (io->xbusy) { 3747 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, 3748 io->first_data_sge, 3749 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator, 3750 io->reqtag, SLI4_CQ_DEFAULT, 3751 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode, 3752 flags, 3753 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) { 3754 ocs_log_test(hw->os, "TRECEIVE WQE error\n"); 3755 break; 3756 } 3757 } else { 3758 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, 3759 io->first_data_sge, 3760 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, 3761 io->reqtag, SLI4_CQ_DEFAULT, 3762 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode, 3763 flags, 3764 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, 3765 io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) { 3766 ocs_log_test(hw->os, "TRECEIVE WQE error\n"); 3767 break; 3768 } 3769 } 3770 3771 if (io->wq == NULL) { 3772 io->wq = ocs_hw_queue_next_wq(hw, io); 3773 ocs_hw_assert(io->wq != NULL); 3774 } 3775 io->xbusy = TRUE; 3776 3777 /* 3778 * Add IO to active io wqe list before submitting, in case the 3779 * wcqe processing preempts this thread. 
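		 * If the WQE write fails, the IO is removed from that list
		 * again below.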
3780 		 */
3781 		ocs_hw_add_io_timed_wqe(hw, io);
3782 		rc = hw_wq_write(io->wq, &io->wqe);
3783 		if (rc >= 0) {
3784 			/* non-negative return is success */
3785 			rc = 0;
3786 		} else {
3787 			/* failed to write wqe, remove from active wqe list */
3788 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3789 			io->xbusy = FALSE;
3790 			ocs_hw_remove_io_timed_wqe(hw, io);
3791 		}
3792 	}
3793 }
3794
3795 /**
3796 * @ingroup io
3797 * @brief Send a Single Request/Response Sequence (SRRS).
3798 *
3799 * @par Description
3800 * This routine supports communication sequences consisting of a single
3801 * request and single response between two endpoints. Examples include:
3802 * - Sending an ELS request.
3803 * - Sending an ELS response - To send an ELS response, the caller must provide
3804 * the OX_ID from the received request.
3805 * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
3806 * the caller must provide the R_CTL, TYPE, and DF_CTL
3807 * values to place in the FC frame header.
3808 * .
3809 * @n @b Note: The caller is expected to provide both send and receive
3810 * buffers for requests. In the case of sending a response, no receive buffer
3811 * is necessary and the caller may pass in a NULL pointer.
3812 *
3813 * @param hw Hardware context.
3814 * @param type Type of sequence (ELS request/response, FC-CT).
3815 * @param io Previously-allocated HW IO object.
3816 * @param send DMA memory holding data to send (for example, ELS request, BLS response).
3817 * @param len Length, in bytes, of data to send.
3818 * @param receive Optional DMA memory to hold a response.
3819 * @param rnode Destination of data (that is, a remote node).
3820 * @param iparam IO parameters (ELS response and FC-CT).
3821 * @param cb Function call upon completion of sending the data (may be NULL).
3822 * @param arg Argument to pass to IO completion function.
3823 *
3824 * @return Returns 0 on success, or a non-zero value on failure.
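 *
 * @par Example
 * A minimal sketch of issuing an ELS request. The DMA buffers, the remote
 * node, and the els_done() completion callback are hypothetical and must be
 * set up by the caller:
 * @code
 * ocs_hw_io_param_t iparam = { 0 };
 * ocs_hw_io_t *io = ocs_hw_io_alloc(hw);
 *
 * iparam.els.timeout = 30;	// timeout, in seconds
 * if ((io == NULL) ||
 *     ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, io, &els_cmd_dma, cmd_len,
 *                      &els_rsp_dma, rnode, &iparam, els_done, NULL)) {
 *         // WQE could not be built or queued; els_done() will not be called
 * }
 * @endcode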
3825 */ 3826 ocs_hw_rtn_e 3827 ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io, 3828 ocs_dma_t *send, uint32_t len, ocs_dma_t *receive, 3829 ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam, 3830 ocs_hw_srrs_cb_t cb, void *arg) 3831 { 3832 sli4_sge_t *sge = NULL; 3833 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 3834 uint16_t local_flags = 0; 3835 3836 if (!hw || !io || !rnode || !iparam) { 3837 ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n", 3838 hw, io, send, receive, rnode, iparam); 3839 return OCS_HW_RTN_ERROR; 3840 } 3841 3842 if (hw->state != OCS_HW_STATE_ACTIVE) { 3843 ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state); 3844 return OCS_HW_RTN_ERROR; 3845 } 3846 3847 if (ocs_hw_is_xri_port_owned(hw, io->indicator)) { 3848 /* We must set the XC bit for port owned XRIs */ 3849 local_flags |= SLI4_IO_CONTINUATION; 3850 } 3851 io->rnode = rnode; 3852 io->type = type; 3853 io->done = cb; 3854 io->arg = arg; 3855 3856 sge = io->sgl->virt; 3857 3858 /* clear both SGE */ 3859 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t)); 3860 3861 if (send) { 3862 sge[0].buffer_address_high = ocs_addr32_hi(send->phys); 3863 sge[0].buffer_address_low = ocs_addr32_lo(send->phys); 3864 sge[0].sge_type = SLI4_SGE_TYPE_DATA; 3865 sge[0].buffer_length = len; 3866 } 3867 3868 if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) { 3869 sge[1].buffer_address_high = ocs_addr32_hi(receive->phys); 3870 sge[1].buffer_address_low = ocs_addr32_lo(receive->phys); 3871 sge[1].sge_type = SLI4_SGE_TYPE_DATA; 3872 sge[1].buffer_length = receive->size; 3873 sge[1].last = TRUE; 3874 } else { 3875 sge[0].last = TRUE; 3876 } 3877 3878 switch (type) { 3879 case OCS_HW_ELS_REQ: 3880 if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, 3881 *((uint8_t *)(send->virt)), /* req_type */ 3882 len, receive->size, 3883 iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) { 3884 ocs_log_err(hw->os, "REQ WQE error\n"); 3885 rc = OCS_HW_RTN_ERROR; 3886 } 3887 break; 3888 case OCS_HW_ELS_RSP: 3889 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len, 3890 io->indicator, io->reqtag, SLI4_CQ_DEFAULT, 3891 iparam->els.ox_id, 3892 rnode, local_flags, UINT32_MAX)) { 3893 ocs_log_err(hw->os, "RSP WQE error\n"); 3894 rc = OCS_HW_RTN_ERROR; 3895 } 3896 break; 3897 case OCS_HW_ELS_RSP_SID: 3898 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len, 3899 io->indicator, io->reqtag, SLI4_CQ_DEFAULT, 3900 iparam->els_sid.ox_id, 3901 rnode, local_flags, iparam->els_sid.s_id)) { 3902 ocs_log_err(hw->os, "RSP (SID) WQE error\n"); 3903 rc = OCS_HW_RTN_ERROR; 3904 } 3905 break; 3906 case OCS_HW_FC_CT: 3907 if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len, 3908 receive->size, iparam->fc_ct.timeout, io->indicator, 3909 io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl, 3910 iparam->fc_ct.type, iparam->fc_ct.df_ctl)) { 3911 ocs_log_err(hw->os, "GEN WQE error\n"); 3912 rc = OCS_HW_RTN_ERROR; 3913 } 3914 break; 3915 case OCS_HW_FC_CT_RSP: 3916 if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len, 3917 iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator, 3918 io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl, 3919 iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) { 3920 ocs_log_err(hw->os, "XMIT SEQ WQE error\n"); 3921 
rc = OCS_HW_RTN_ERROR; 3922 } 3923 break; 3924 case OCS_HW_BLS_ACC: 3925 case OCS_HW_BLS_RJT: 3926 { 3927 sli_bls_payload_t bls; 3928 3929 if (OCS_HW_BLS_ACC == type) { 3930 bls.type = SLI_BLS_ACC; 3931 ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc)); 3932 } else { 3933 bls.type = SLI_BLS_RJT; 3934 ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt)); 3935 } 3936 3937 bls.ox_id = iparam->bls.ox_id; 3938 bls.rx_id = iparam->bls.rx_id; 3939 3940 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls, 3941 io->indicator, io->reqtag, 3942 SLI4_CQ_DEFAULT, 3943 rnode, UINT32_MAX)) { 3944 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n"); 3945 rc = OCS_HW_RTN_ERROR; 3946 } 3947 break; 3948 } 3949 case OCS_HW_BLS_ACC_SID: 3950 { 3951 sli_bls_payload_t bls; 3952 3953 bls.type = SLI_BLS_ACC; 3954 ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc)); 3955 3956 bls.ox_id = iparam->bls_sid.ox_id; 3957 bls.rx_id = iparam->bls_sid.rx_id; 3958 3959 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls, 3960 io->indicator, io->reqtag, 3961 SLI4_CQ_DEFAULT, 3962 rnode, iparam->bls_sid.s_id)) { 3963 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n"); 3964 rc = OCS_HW_RTN_ERROR; 3965 } 3966 break; 3967 } 3968 case OCS_HW_BCAST: 3969 if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len, 3970 iparam->bcast.timeout, io->indicator, io->reqtag, 3971 SLI4_CQ_DEFAULT, rnode, 3972 iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) { 3973 ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n"); 3974 rc = OCS_HW_RTN_ERROR; 3975 } 3976 break; 3977 default: 3978 ocs_log_err(hw->os, "bad SRRS type %#x\n", type); 3979 rc = OCS_HW_RTN_ERROR; 3980 } 3981 3982 if (OCS_HW_RTN_SUCCESS == rc) { 3983 if (io->wq == NULL) { 3984 io->wq = ocs_hw_queue_next_wq(hw, io); 3985 ocs_hw_assert(io->wq != NULL); 3986 } 3987 io->xbusy = TRUE; 3988 3989 /* 3990 * Add IO to active io wqe list before submitting, in case the 3991 * wcqe processing preempts this thread. 3992 */ 3993 OCS_STAT(io->wq->use_count++); 3994 ocs_hw_add_io_timed_wqe(hw, io); 3995 rc = hw_wq_write(io->wq, &io->wqe); 3996 if (rc >= 0) { 3997 /* non-negative return is success */ 3998 rc = 0; 3999 } else { 4000 /* failed to write wqe, remove from active wqe list */ 4001 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc); 4002 io->xbusy = FALSE; 4003 ocs_hw_remove_io_timed_wqe(hw, io); 4004 } 4005 } 4006 4007 return rc; 4008 } 4009 4010 /** 4011 * @ingroup io 4012 * @brief Send a read, write, or response IO. 4013 * 4014 * @par Description 4015 * This routine supports sending a higher-level IO (for example, FCP) between two endpoints 4016 * as a target or initiator. Examples include: 4017 * - Sending read data and good response (target). 4018 * - Sending a response (target with no data or after receiving write data). 4019 * . 4020 * This routine assumes all IOs use the SGL associated with the HW IO. Prior to 4021 * calling this routine, the data should be loaded using ocs_hw_io_add_sge(). 4022 * 4023 * @param hw Hardware context. 4024 * @param type Type of IO (target read, target response, and so on). 4025 * @param io Previously-allocated HW IO object. 4026 * @param len Length, in bytes, of data to send. 4027 * @param iparam IO parameters. 4028 * @param rnode Destination of data (that is, a remote node). 4029 * @param cb Function call upon completion of sending data (may be NULL). 
4030 * @param arg Argument to pass to IO completion function.
4031 *
4032 * @return Returns 0 on success, or a non-zero value on failure.
4033 *
4034 * @todo
4035 * - Support specifying relative offset.
4036 * - Use a WQ other than 0.
4037 */
4038 ocs_hw_rtn_e
4039 ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
4040 	       uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
4041 	       void *cb, void *arg)
4042 {
4043 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
4044 	uint32_t rpi;
4045 	uint8_t send_wqe = TRUE;
4046
4047 	CPUTRACE("");
4048
4049 	if (!hw || !io || !rnode || !iparam) {
4050 		ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
4051 			    hw, io, iparam, rnode);
4052 		return OCS_HW_RTN_ERROR;
4053 	}
4054
4055 	if (hw->state != OCS_HW_STATE_ACTIVE) {
4056 		ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
4057 		return OCS_HW_RTN_ERROR;
4058 	}
4059
4060 	rpi = rnode->indicator;
4061
4062 	if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
4063 		rpi = hw->workaround.unregistered_rid;
4064 		ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
4065 	}
4066
4067 	/*
4068 	 * Save state needed during later stages
4069 	 */
4070 	io->rnode = rnode;
4071 	io->type = type;
4072 	io->done = cb;
4073 	io->arg = arg;
4074
4075 	/*
4076 	 * Format the work queue entry used to send the IO
4077 	 */
4078 	switch (type) {
4079 	case OCS_HW_IO_INITIATOR_READ:
4080 		/*
4081 		 * If the use_dif_quarantine workaround is in effect and DIF separate
4082 		 * mode is in use, then mark the initiator read IO for quarantine.
4083 		 */
4084 		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4085 		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4086 			io->quarantine = TRUE;
4087 		}
4088
4089 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4090 				  iparam->fcp_ini.rsp);
4091
4092 		if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
4093 					io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
4094 					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4095 					iparam->fcp_ini.timeout)) {
4096 			ocs_log_err(hw->os, "IREAD WQE error\n");
4097 			rc = OCS_HW_RTN_ERROR;
4098 		}
4099 		break;
4100 	case OCS_HW_IO_INITIATOR_WRITE:
4101 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4102 				  iparam->fcp_ini.rsp);
4103
4104 		if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4105 					 len, iparam->fcp_ini.first_burst,
4106 					 io->indicator, io->reqtag,
4107 					 SLI4_CQ_DEFAULT, rpi, rnode,
4108 					 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4109 					 iparam->fcp_ini.timeout)) {
4110 			ocs_log_err(hw->os, "IWRITE WQE error\n");
4111 			rc = OCS_HW_RTN_ERROR;
4112 		}
4113 		break;
4114 	case OCS_HW_IO_INITIATOR_NODATA:
4115 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4116 				  iparam->fcp_ini.rsp);
4117
4118 		if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
4119 					io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
4120 					rpi, rnode, iparam->fcp_ini.timeout)) {
4121 			ocs_log_err(hw->os, "ICMND WQE error\n");
4122 			rc = OCS_HW_RTN_ERROR;
4123 		}
4124 		break;
4125 	case OCS_HW_IO_TARGET_WRITE: {
4126 		uint16_t flags = iparam->fcp_tgt.flags;
4127 		fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;
4128
4129 		/*
4130 		 * Fill in the XFER_RDY for IF_TYPE 0 devices
4131 		 */
4132 		*((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
4133 		*((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
4134 *((uint32_t *)xfer->rsvd) = 0; 4135 4136 if (io->xbusy) { 4137 flags |= SLI4_IO_CONTINUATION; 4138 } else { 4139 flags &= ~SLI4_IO_CONTINUATION; 4140 } 4141 4142 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout; 4143 4144 /* 4145 * If use_dif_quarantine workaround is in effect, and this is a DIF enabled IO 4146 * then mark the target write IO for quarantine 4147 */ 4148 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) && 4149 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) { 4150 io->quarantine = TRUE; 4151 } 4152 4153 /* 4154 * BZ 161832 Workaround: 4155 * Check for use_dif_sec_xri workaround. Note, even though the first dataphase 4156 * doesn't really need a secondary XRI, we allocate one anyway, as this avoids the 4157 * potential for deadlock where all XRI's are allocated as primaries to IOs that 4158 * are on hw->sec_hio_wait_list. If this secondary XRI is not for the first 4159 * data phase, it is marked for quarantine. 4160 */ 4161 if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) { 4162 /* 4163 * If we have allocated a chained SGL for skyhawk, then 4164 * we can re-use this for the sec_hio. 4165 */ 4166 if (io->ovfl_io != NULL) { 4167 io->sec_hio = io->ovfl_io; 4168 io->sec_hio->quarantine = TRUE; 4169 } else { 4170 io->sec_hio = ocs_hw_io_alloc(hw); 4171 } 4172 if (io->sec_hio == NULL) { 4173 /* Failed to allocate, so save full request context and put 4174 * this IO on the wait list 4175 */ 4176 io->sec_iparam = *iparam; 4177 io->sec_len = len; 4178 ocs_lock(&hw->io_lock); 4179 ocs_list_remove(&hw->io_inuse, io); 4180 ocs_list_add_tail(&hw->sec_hio_wait_list, io); 4181 io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO; 4182 hw->sec_hio_wait_count++; 4183 ocs_unlock(&hw->io_lock); 4184 send_wqe = FALSE; 4185 /* Done */ 4186 break; 4187 } 4188 /* We quarantine the secondary IO if this is the second or subsequent data phase */ 4189 if (io->xbusy) { 4190 io->sec_hio->quarantine = TRUE; 4191 } 4192 } 4193 4194 /* 4195 * If not the first data phase, and io->sec_hio has been allocated, then issue 4196 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE 4197 */ 4198 if (io->xbusy && (io->sec_hio != NULL)) { 4199 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, 4200 iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator, 4201 io->reqtag, SLI4_CQ_DEFAULT, 4202 iparam->fcp_tgt.ox_id, rpi, rnode, 4203 flags, 4204 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size, 4205 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) { 4206 ocs_log_err(hw->os, "TRECEIVE WQE error\n"); 4207 rc = OCS_HW_RTN_ERROR; 4208 } 4209 } else { 4210 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, 4211 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag, 4212 SLI4_CQ_DEFAULT, 4213 iparam->fcp_tgt.ox_id, rpi, rnode, 4214 flags, 4215 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size, 4216 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) { 4217 ocs_log_err(hw->os, "TRECEIVE WQE error\n"); 4218 rc = OCS_HW_RTN_ERROR; 4219 } 4220 } 4221 break; 4222 } 4223 case OCS_HW_IO_TARGET_READ: { 4224 uint16_t flags = iparam->fcp_tgt.flags; 4225 4226 if (io->xbusy) { 4227 flags |= SLI4_IO_CONTINUATION; 4228 } else { 4229 flags &= ~SLI4_IO_CONTINUATION; 4230 } 4231 4232 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout; 4233 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, 
hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, 4234 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag, 4235 SLI4_CQ_DEFAULT, 4236 iparam->fcp_tgt.ox_id, rpi, rnode, 4237 flags, 4238 iparam->fcp_tgt.dif_oper, 4239 iparam->fcp_tgt.blk_size, 4240 iparam->fcp_tgt.cs_ctl, 4241 iparam->fcp_tgt.app_id)) { 4242 ocs_log_err(hw->os, "TSEND WQE error\n"); 4243 rc = OCS_HW_RTN_ERROR; 4244 } else if (hw->workaround.retain_tsend_io_length) { 4245 io->length = len; 4246 } 4247 break; 4248 } 4249 case OCS_HW_IO_TARGET_RSP: { 4250 uint16_t flags = iparam->fcp_tgt.flags; 4251 4252 if (io->xbusy) { 4253 flags |= SLI4_IO_CONTINUATION; 4254 } else { 4255 flags &= ~SLI4_IO_CONTINUATION; 4256 } 4257 4258 /* post a new auto xfer ready buffer */ 4259 if (hw->auto_xfer_rdy_enabled && io->is_port_owned) { 4260 if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) { 4261 flags |= SLI4_IO_DNRX; 4262 } 4263 } 4264 4265 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout; 4266 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, 4267 &io->def_sgl, 4268 len, 4269 io->indicator, io->reqtag, 4270 SLI4_CQ_DEFAULT, 4271 iparam->fcp_tgt.ox_id, 4272 rpi, rnode, 4273 flags, iparam->fcp_tgt.cs_ctl, 4274 io->is_port_owned, 4275 iparam->fcp_tgt.app_id)) { 4276 ocs_log_err(hw->os, "TRSP WQE error\n"); 4277 rc = OCS_HW_RTN_ERROR; 4278 } 4279 4280 break; 4281 } 4282 default: 4283 ocs_log_err(hw->os, "unsupported IO type %#x\n", type); 4284 rc = OCS_HW_RTN_ERROR; 4285 } 4286 4287 if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) { 4288 if (io->wq == NULL) { 4289 io->wq = ocs_hw_queue_next_wq(hw, io); 4290 ocs_hw_assert(io->wq != NULL); 4291 } 4292 4293 io->xbusy = TRUE; 4294 4295 /* 4296 * Add IO to active io wqe list before submitting, in case the 4297 * wcqe processing preempts this thread. 4298 */ 4299 OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++); 4300 OCS_STAT(io->wq->use_count++); 4301 ocs_hw_add_io_timed_wqe(hw, io); 4302 rc = hw_wq_write(io->wq, &io->wqe); 4303 if (rc >= 0) { 4304 /* non-negative return is success */ 4305 rc = 0; 4306 } else { 4307 /* failed to write wqe, remove from active wqe list */ 4308 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc); 4309 io->xbusy = FALSE; 4310 ocs_hw_remove_io_timed_wqe(hw, io); 4311 } 4312 } 4313 4314 return rc; 4315 } 4316 4317 /** 4318 * @brief Send a raw frame 4319 * 4320 * @par Description 4321 * Using the SEND_FRAME_WQE, a frame consisting of header and payload is sent. 4322 * 4323 * @param hw Pointer to HW object. 4324 * @param hdr Pointer to a little endian formatted FC header. 4325 * @param sof Value to use as the frame SOF. 4326 * @param eof Value to use as the frame EOF. 4327 * @param payload Pointer to payload DMA buffer. 4328 * @param ctx Pointer to caller provided send frame context. 4329 * @param callback Callback function. 4330 * @param arg Callback function argument. 4331 * 4332 * @return Returns 0 on success, or a negative error code value on failure. 
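 *
 * @par Example
 * A minimal sketch, assuming the caller has built a little endian FC header
 * and a payload DMA buffer. The SOF/EOF code values, the send_frame_done()
 * callback, and the context storage shown here are placeholders supplied by
 * the caller; the context must remain valid until the completion callback
 * runs:
 * @code
 * static ocs_hw_send_frame_context_t ctx;
 *
 * if (ocs_hw_send_frame(hw, &hdr, sof_value, eof_value, &payload, &ctx,
 *                       send_frame_done, NULL) != OCS_HW_RTN_SUCCESS) {
 *         // frame was not queued; the callback will not be invoked
 * }
 * @endcode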
4333 */ 4334 ocs_hw_rtn_e 4335 ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload, 4336 ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg) 4337 { 4338 int32_t rc; 4339 ocs_hw_wqe_t *wqe; 4340 uint32_t xri; 4341 hw_wq_t *wq; 4342 4343 wqe = &ctx->wqe; 4344 4345 /* populate the callback object */ 4346 ctx->hw = hw; 4347 4348 /* Fetch and populate request tag */ 4349 ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg); 4350 if (ctx->wqcb == NULL) { 4351 ocs_log_err(hw->os, "can't allocate request tag\n"); 4352 return OCS_HW_RTN_NO_RESOURCES; 4353 } 4354 4355 /* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */ 4356 wq = ocs_varray_iter_next(hw->wq_class_array[1]); 4357 if (wq == NULL) { 4358 wq = hw->hw_wq[0]; 4359 } 4360 4361 /* Set XRI and RX_ID in the header based on which WQ, and which send_frame_io we are using */ 4362 xri = wq->send_frame_io->indicator; 4363 4364 /* Build the send frame WQE */ 4365 rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload, 4366 payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index); 4367 if (rc) { 4368 ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc); 4369 return OCS_HW_RTN_ERROR; 4370 } 4371 4372 /* Write to WQ */ 4373 rc = hw_wq_write(wq, wqe); 4374 if (rc) { 4375 ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc); 4376 return OCS_HW_RTN_ERROR; 4377 } 4378 4379 OCS_STAT(wq->use_count++); 4380 4381 return OCS_HW_RTN_SUCCESS; 4382 } 4383 4384 ocs_hw_rtn_e 4385 ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count) 4386 { 4387 if (sli_get_sgl_preregister(&hw->sli)) { 4388 ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n"); 4389 return OCS_HW_RTN_ERROR; 4390 } 4391 io->ovfl_sgl = sgl; 4392 io->ovfl_sgl_count = sgl_count; 4393 io->ovfl_io = NULL; 4394 4395 return OCS_HW_RTN_SUCCESS; 4396 } 4397 4398 static void 4399 ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io) 4400 { 4401 /* Restore the default */ 4402 io->sgl = &io->def_sgl; 4403 io->sgl_count = io->def_sgl_count; 4404 4405 /* 4406 * For skyhawk, we need to free the IO allocated for the chained 4407 * SGL. For all devices, clear the overflow fields on the IO. 4408 * 4409 * Note: For DIF IOs, we may be using the same XRI for the sec_hio and 4410 * the chained SGLs. If so, then we clear the ovfl_io field 4411 * when the sec_hio is freed. 4412 */ 4413 if (io->ovfl_io != NULL) { 4414 ocs_hw_io_free(hw, io->ovfl_io); 4415 io->ovfl_io = NULL; 4416 } 4417 4418 /* Clear the overflow SGL */ 4419 io->ovfl_sgl = NULL; 4420 io->ovfl_sgl_count = 0; 4421 io->ovfl_lsp = NULL; 4422 } 4423 4424 /** 4425 * @ingroup io 4426 * @brief Initialize the scatter gather list entries of an IO. 4427 * 4428 * @param hw Hardware context. 4429 * @param io Previously-allocated HW IO object. 4430 * @param type Type of IO (target read, target response, and so on). 4431 * 4432 * @return Returns 0 on success, or a non-zero value on failure. 4433 */ 4434 ocs_hw_rtn_e 4435 ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type) 4436 { 4437 sli4_sge_t *data = NULL; 4438 uint32_t i = 0; 4439 uint32_t skips = 0; 4440 4441 if (!hw || !io) { 4442 ocs_log_err(hw ? 
hw->os : NULL, "bad parameter hw=%p io=%p\n", 4443 hw, io); 4444 return OCS_HW_RTN_ERROR; 4445 } 4446 4447 /* Clear / reset the scatter-gather list */ 4448 io->sgl = &io->def_sgl; 4449 io->sgl_count = io->def_sgl_count; 4450 io->first_data_sge = 0; 4451 4452 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t)); 4453 io->n_sge = 0; 4454 io->sge_offset = 0; 4455 4456 io->type = type; 4457 4458 data = io->sgl->virt; 4459 4460 /* 4461 * Some IO types have underlying hardware requirements on the order 4462 * of SGEs. Process all special entries here. 4463 */ 4464 switch (type) { 4465 case OCS_HW_IO_INITIATOR_READ: 4466 case OCS_HW_IO_INITIATOR_WRITE: 4467 case OCS_HW_IO_INITIATOR_NODATA: 4468 /* 4469 * No skips, 2 special for initiator I/Os 4470 * The addresses and length are written later 4471 */ 4472 /* setup command pointer */ 4473 data->sge_type = SLI4_SGE_TYPE_DATA; 4474 data++; 4475 4476 /* setup response pointer */ 4477 data->sge_type = SLI4_SGE_TYPE_DATA; 4478 4479 if (OCS_HW_IO_INITIATOR_NODATA == type) { 4480 data->last = TRUE; 4481 } 4482 data++; 4483 4484 io->n_sge = 2; 4485 break; 4486 case OCS_HW_IO_TARGET_WRITE: 4487 #define OCS_TARGET_WRITE_SKIPS 2 4488 skips = OCS_TARGET_WRITE_SKIPS; 4489 4490 /* populate host resident XFER_RDY buffer */ 4491 data->sge_type = SLI4_SGE_TYPE_DATA; 4492 data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys); 4493 data->buffer_address_low = ocs_addr32_lo(io->xfer_rdy.phys); 4494 data->buffer_length = io->xfer_rdy.size; 4495 data++; 4496 4497 skips--; 4498 4499 io->n_sge = 1; 4500 break; 4501 case OCS_HW_IO_TARGET_READ: 4502 /* 4503 * For FCP_TSEND64, the first 2 entries are SKIP SGE's 4504 */ 4505 #define OCS_TARGET_READ_SKIPS 2 4506 skips = OCS_TARGET_READ_SKIPS; 4507 break; 4508 case OCS_HW_IO_TARGET_RSP: 4509 /* 4510 * No skips, etc. for FCP_TRSP64 4511 */ 4512 break; 4513 default: 4514 ocs_log_err(hw->os, "unsupported IO type %#x\n", type); 4515 return OCS_HW_RTN_ERROR; 4516 } 4517 4518 /* 4519 * Write skip entries 4520 */ 4521 for (i = 0; i < skips; i++) { 4522 data->sge_type = SLI4_SGE_TYPE_SKIP; 4523 data++; 4524 } 4525 4526 io->n_sge += skips; 4527 4528 /* 4529 * Set last 4530 */ 4531 data->last = TRUE; 4532 4533 return OCS_HW_RTN_SUCCESS; 4534 } 4535 4536 /** 4537 * @ingroup io 4538 * @brief Add a T10 PI seed scatter gather list entry. 4539 * 4540 * @param hw Hardware context. 4541 * @param io Previously-allocated HW IO object. 4542 * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF. 4543 * 4544 * @return Returns 0 on success, or a non-zero value on failure. 4545 */ 4546 ocs_hw_rtn_e 4547 ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info) 4548 { 4549 sli4_sge_t *data = NULL; 4550 sli4_diseed_sge_t *dif_seed; 4551 4552 /* If no dif_info, or dif_oper is disabled, then just return success */ 4553 if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) { 4554 return OCS_HW_RTN_SUCCESS; 4555 } 4556 4557 if (!hw || !io) { 4558 ocs_log_err(hw ? 
hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n", 4559 hw, io, dif_info); 4560 return OCS_HW_RTN_ERROR; 4561 } 4562 4563 data = io->sgl->virt; 4564 data += io->n_sge; 4565 4566 /* If we are doing T10 DIF add the DIF Seed SGE */ 4567 ocs_memset(data, 0, sizeof(sli4_diseed_sge_t)); 4568 dif_seed = (sli4_diseed_sge_t *)data; 4569 dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp; 4570 dif_seed->ref_tag_repl = dif_info->ref_tag_repl; 4571 dif_seed->app_tag_repl = dif_info->app_tag_repl; 4572 dif_seed->repl_app_tag = dif_info->repl_app_tag; 4573 if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) { 4574 dif_seed->atrt = dif_info->disable_app_ref_ffff; 4575 dif_seed->at = dif_info->disable_app_ffff; 4576 } 4577 dif_seed->sge_type = SLI4_SGE_TYPE_DISEED; 4578 /* Workaround for SKH (BZ157233) */ 4579 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) && 4580 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) { 4581 dif_seed->sge_type = SLI4_SGE_TYPE_SKIP; 4582 } 4583 4584 dif_seed->app_tag_cmp = dif_info->app_tag_cmp; 4585 dif_seed->dif_blk_size = dif_info->blk_size; 4586 dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag; 4587 dif_seed->check_app_tag = dif_info->check_app_tag; 4588 dif_seed->check_ref_tag = dif_info->check_ref_tag; 4589 dif_seed->check_crc = dif_info->check_guard; 4590 dif_seed->new_ref_tag = dif_info->repl_ref_tag; 4591 4592 switch(dif_info->dif_oper) { 4593 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC: 4594 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC; 4595 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC; 4596 break; 4597 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF: 4598 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF; 4599 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF; 4600 break; 4601 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM: 4602 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM; 4603 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM; 4604 break; 4605 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF: 4606 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF; 4607 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF; 4608 break; 4609 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC: 4610 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC; 4611 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC; 4612 break; 4613 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM: 4614 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM; 4615 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM; 4616 break; 4617 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM: 4618 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM; 4619 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM; 4620 break; 4621 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC: 4622 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC; 4623 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC; 4624 break; 4625 case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW: 4626 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW; 4627 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW; 4628 break; 4629 default: 4630 ocs_log_err(hw->os, "unsupported DIF operation %#x\n", 4631 dif_info->dif_oper); 4632 return OCS_HW_RTN_ERROR; 4633 } 4634 4635 /* 4636 * Set last, clear previous last 4637 */ 4638 data->last = TRUE; 4639 if (io->n_sge) { 4640 data[-1].last = FALSE; 4641 } 4642 4643 io->n_sge++; 4644 4645 return OCS_HW_RTN_SUCCESS; 4646 } 4647 4648 static ocs_hw_rtn_e 4649 ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io) 4650 { 4651 
sli4_lsp_sge_t *lsp;
4652
4653 	/* fail if we're already pointing to the overflow SGL */
4654 	if (io->sgl == io->ovfl_sgl) {
4655 		return OCS_HW_RTN_ERROR;
4656 	}
4657
4658 	/*
4659 	 * For skyhawk, we can use another SGL to extend the SGL list. The
4660 	 * chained entry must not be in the first 4 entries.
4661 	 *
4662 	 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
4663 	 */
4664 	if (sli_get_sgl_preregister(&hw->sli) &&
4665 	    io->def_sgl_count > 4 &&
4666 	    io->ovfl_io == NULL &&
4667 	    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4668 	     (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
4669 		io->ovfl_io = ocs_hw_io_alloc(hw);
4670 		if (io->ovfl_io != NULL) {
4671 			/*
4672 			 * Note: We can't call ocs_hw_io_register_sgl() here
4673 			 * because it checks that SGLs are not pre-registered
4674 			 * and for skyhawk, pre-registered SGLs are required.
4675 			 */
4676 			io->ovfl_sgl = &io->ovfl_io->def_sgl;
4677 			io->ovfl_sgl_count = io->ovfl_io->def_sgl_count;
4678 		}
4679 	}
4680
4681 	/* fail if we don't have an overflow SGL registered */
4682 	if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) {
4683 		return OCS_HW_RTN_ERROR;
4684 	}
4685
4686 	/*
4687 	 * Overflow: we need to put a link SGE in the last location of the current SGL, after
4688 	 * copying the last SGE to the overflow SGL
4689 	 */
4690
4691 	((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1];
4692
4693 	lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1];
4694 	ocs_memset(lsp, 0, sizeof(*lsp));
4695
4696 	if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4697 	    (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
4698 		sli_skh_chain_sge_build(&hw->sli,
4699 					(sli4_sge_t*)lsp,
4700 					io->ovfl_io->indicator,
4701 					0, /* frag_num */
4702 					0); /* offset */
4703 	} else {
4704 		lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys);
4705 		lsp->buffer_address_low = ocs_addr32_lo(io->ovfl_sgl->phys);
4706 		lsp->sge_type = SLI4_SGE_TYPE_LSP;
4707 		lsp->last = 0;
4708 		io->ovfl_lsp = lsp;
4709 		io->ovfl_lsp->segment_length = sizeof(sli4_sge_t);
4710 	}
4711
4712 	/* Update the current SGL pointer, and n_sgl */
4713 	io->sgl = io->ovfl_sgl;
4714 	io->sgl_count = io->ovfl_sgl_count;
4715 	io->n_sge = 1;
4716
4717 	return OCS_HW_RTN_SUCCESS;
4718 }
4719
4720 /**
4721 * @ingroup io
4722 * @brief Add a scatter gather list entry to an IO.
4723 *
4724 * @param hw Hardware context.
4725 * @param io Previously-allocated HW IO object.
4726 * @param addr Physical address.
4727 * @param length Length of memory pointed to by @c addr.
4728 *
4729 * @return Returns 0 on success, or a non-zero value on failure.
4730 */
4731 ocs_hw_rtn_e
4732 ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
4733 {
4734 	sli4_sge_t *data = NULL;
4735
4736 	if (!hw || !io || !addr || !length) {
4737 		ocs_log_err(hw ?
hw->os : NULL,
4738 			    "bad parameter hw=%p io=%p addr=%lx length=%u\n",
4739 			    hw, io, addr, length);
4740 		return OCS_HW_RTN_ERROR;
4741 	}
4742
4743 	if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
4744 		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4745 			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4746 			return OCS_HW_RTN_ERROR;
4747 		}
4748 	}
4749
4750 	if (length > sli_get_max_sge(&hw->sli)) {
4751 		ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
4752 			    length, sli_get_max_sge(&hw->sli));
4753 		return OCS_HW_RTN_ERROR;
4754 	}
4755
4756 	data = io->sgl->virt;
4757 	data += io->n_sge;
4758
4759 	data->sge_type = SLI4_SGE_TYPE_DATA;
4760 	data->buffer_address_high = ocs_addr32_hi(addr);
4761 	data->buffer_address_low = ocs_addr32_lo(addr);
4762 	data->buffer_length = length;
4763 	data->data_offset = io->sge_offset;
4764 	/*
4765 	 * Always assume this is the last entry and mark as such.
4766 	 * If this is not the first entry, unset the "last SGE"
4767 	 * indication for the previous entry.
4768 	 */
4769 	data->last = TRUE;
4770 	if (io->n_sge) {
4771 		data[-1].last = FALSE;
4772 	}
4773
4774 	/* Set first_data_sge if not previously set */
4775 	if (io->first_data_sge == 0) {
4776 		io->first_data_sge = io->n_sge;
4777 	}
4778
4779 	io->sge_offset += length;
4780 	io->n_sge++;
4781
4782 	/* Update the linked segment length (only executed after overflow has begun) */
4783 	if (io->ovfl_lsp != NULL) {
4784 		io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
4785 	}
4786
4787 	return OCS_HW_RTN_SUCCESS;
4788 }
4789
4790 /**
4791 * @ingroup io
4792 * @brief Add a T10 DIF scatter gather list entry to an IO.
4793 *
4794 * @param hw Hardware context.
4795 * @param io Previously-allocated HW IO object.
4796 * @param addr DIF physical address.
4797 *
4798 * @return Returns 0 on success, or a non-zero value on failure.
4799 */
4800 ocs_hw_rtn_e
4801 ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
4802 {
4803 	sli4_dif_sge_t *data = NULL;
4804
4805 	if (!hw || !io || !addr) {
4806 		ocs_log_err(hw ? hw->os : NULL,
4807 			    "bad parameter hw=%p io=%p addr=%lx\n",
4808 			    hw, io, addr);
4809 		return OCS_HW_RTN_ERROR;
4810 	}
4811
4812 	if ((io->n_sge + 1) > hw->config.n_sgl) {
4813 		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4814 			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4815 			return OCS_HW_RTN_ERROR;
4816 		}
4817 	}
4818
4819 	data = io->sgl->virt;
4820 	data += io->n_sge;
4821
4822 	data->sge_type = SLI4_SGE_TYPE_DIF;
4823 	/* Workaround for SKH (BZ157233) */
4824 	if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4825 	    (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) {
4826 		data->sge_type = SLI4_SGE_TYPE_SKIP;
4827 	}
4828
4829 	data->buffer_address_high = ocs_addr32_hi(addr);
4830 	data->buffer_address_low = ocs_addr32_lo(addr);
4831
4832 	/*
4833 	 * Always assume this is the last entry and mark as such.
4834 	 * If this is not the first entry, unset the "last SGE"
4835 	 * indication for the previous entry.
4836 	 */
4837 	data->last = TRUE;
4838 	if (io->n_sge) {
4839 		data[-1].last = FALSE;
4840 	}
4841
4842 	io->n_sge++;
4843
4844 	return OCS_HW_RTN_SUCCESS;
4845 }
4846
4847 /**
4848 * @ingroup io
4849 * @brief Abort a previously-started IO.
4850 *
4851 * @param hw Hardware context.
4852 * @param io_to_abort The IO to abort.
4853 * @param send_abts Boolean to have the hardware automatically
4854 * generate an ABTS.
4855 * @param cb Function call upon completion of the abort (may be NULL).
4856 * @param arg Argument to pass to abort completion function. 4857 * 4858 * @return Returns 0 on success, or a non-zero value on failure. 4859 */ 4860 ocs_hw_rtn_e 4861 ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg) 4862 { 4863 sli4_abort_type_e atype = SLI_ABORT_MAX; 4864 uint32_t id = 0, mask = 0; 4865 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 4866 hw_wq_callback_t *wqcb; 4867 4868 if (!hw || !io_to_abort) { 4869 ocs_log_err(hw ? hw->os : NULL, 4870 "bad parameter hw=%p io=%p\n", 4871 hw, io_to_abort); 4872 return OCS_HW_RTN_ERROR; 4873 } 4874 4875 if (hw->state != OCS_HW_STATE_ACTIVE) { 4876 ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n", 4877 hw->state); 4878 return OCS_HW_RTN_ERROR; 4879 } 4880 4881 /* take a reference on IO being aborted */ 4882 if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) { 4883 /* command no longer active */ 4884 ocs_log_test(hw ? hw->os : NULL, 4885 "io not active xri=0x%x tag=0x%x\n", 4886 io_to_abort->indicator, io_to_abort->reqtag); 4887 return OCS_HW_RTN_IO_NOT_ACTIVE; 4888 } 4889 4890 /* non-port owned XRI checks */ 4891 /* Must have a valid WQ reference */ 4892 if (io_to_abort->wq == NULL) { 4893 ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n", 4894 io_to_abort->indicator); 4895 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ 4896 return OCS_HW_RTN_IO_NOT_ACTIVE; 4897 } 4898 4899 /* Validation checks complete; now check to see if already being aborted */ 4900 ocs_lock(&hw->io_abort_lock); 4901 if (io_to_abort->abort_in_progress) { 4902 ocs_unlock(&hw->io_abort_lock); 4903 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ 4904 ocs_log_debug(hw ? hw->os : NULL, 4905 "io already being aborted xri=0x%x tag=0x%x\n", 4906 io_to_abort->indicator, io_to_abort->reqtag); 4907 return OCS_HW_RTN_IO_ABORT_IN_PROGRESS; 4908 } 4909 4910 /* 4911 * This IO is not already being aborted. Set flag so we won't try to 4912 * abort it again. After all, we only have one abort_done callback. 4913 */ 4914 io_to_abort->abort_in_progress = 1; 4915 ocs_unlock(&hw->io_abort_lock); 4916 4917 /* 4918 * If we got here, the possibilities are: 4919 * - host owned xri 4920 * - io_to_abort->wq_index != UINT32_MAX 4921 * - submit ABORT_WQE to same WQ 4922 * - port owned xri: 4923 * - rxri: io_to_abort->wq_index == UINT32_MAX 4924 * - submit ABORT_WQE to any WQ 4925 * - non-rxri 4926 * - io_to_abort->index != UINT32_MAX 4927 * - submit ABORT_WQE to same WQ 4928 * - io_to_abort->index == UINT32_MAX 4929 * - submit ABORT_WQE to any WQ 4930 */ 4931 io_to_abort->abort_done = cb; 4932 io_to_abort->abort_arg = arg; 4933 4934 atype = SLI_ABORT_XRI; 4935 id = io_to_abort->indicator; 4936 4937 /* Allocate a request tag for the abort portion of this IO */ 4938 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort); 4939 if (wqcb == NULL) { 4940 ocs_log_err(hw->os, "can't allocate request tag\n"); 4941 return OCS_HW_RTN_NO_RESOURCES; 4942 } 4943 io_to_abort->abort_reqtag = wqcb->instance_index; 4944 4945 /* 4946 * If the wqe is on the pending list, then set this wqe to be 4947 * aborted when the IO's wqe is removed from the list. 
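	 * In that case the WQE has not yet been handed to the hardware, so the
	 * ABORT_WQE is built and queued only after the original WQE is actually
	 * submitted (see hw_wq_write() and hw_wq_submit_pending()).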
4948 	 */
4949 	if (io_to_abort->wq != NULL) {
4950 		sli_queue_lock(io_to_abort->wq->queue);
4951 		if (ocs_list_on_list(&io_to_abort->wqe.link)) {
4952 			io_to_abort->wqe.abort_wqe_submit_needed = 1;
4953 			io_to_abort->wqe.send_abts = send_abts;
4954 			io_to_abort->wqe.id = id;
4955 			io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
4956 			sli_queue_unlock(io_to_abort->wq->queue);
4957 			return 0;
4958 		}
4959 		sli_queue_unlock(io_to_abort->wq->queue);
4960 	}
4961
4962 	if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask,
4963 			  io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
4964 		ocs_log_err(hw->os, "ABORT WQE error\n");
4965 		io_to_abort->abort_reqtag = UINT32_MAX;
4966 		ocs_hw_reqtag_free(hw, wqcb);
4967 		rc = OCS_HW_RTN_ERROR;
4968 	}
4969
4970 	if (OCS_HW_RTN_SUCCESS == rc) {
4971 		if (io_to_abort->wq == NULL) {
4972 			io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort);
4973 			ocs_hw_assert(io_to_abort->wq != NULL);
4974 		}
4975 		/* ABORT_WQE does not actually utilize an XRI on the Port,
4976 		 * therefore, keep xbusy as-is to track the exchange's state,
4977 		 * not the ABORT_WQE's state
4978 		 */
4979 		rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
4980 		if (rc >= 0) {
4981 			/* non-negative return is success */
4982 			rc = 0;
4983 			/* can't abort an abort so skip adding to timed wqe list */
4984 		}
4985 	}
4986
4987 	if (OCS_HW_RTN_SUCCESS != rc) {
4988 		ocs_lock(&hw->io_abort_lock);
4989 		io_to_abort->abort_in_progress = 0;
4990 		ocs_unlock(&hw->io_abort_lock);
4991 		ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4992 	}
4993 	return rc;
4994 }
4995
4996 /**
4997 * @ingroup io
4998 * @brief Return the OX_ID/RX_ID of the IO.
4999 *
5000 * @param hw Hardware context.
5001 * @param io HW IO object.
5002 *
5003 * @return Returns X_ID on success, or -1 on failure.
5004 */
5005 int32_t
5006 ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io)
5007 {
5008 	if (!hw || !io) {
5009 		ocs_log_err(hw ? hw->os : NULL,
5010 			    "bad parameter hw=%p io=%p\n", hw, io);
5011 		return -1;
5012 	}
5013
5014 	return io->indicator;
5015 }
5016
5017 typedef struct ocs_hw_fw_write_cb_arg {
5018 	ocs_hw_fw_cb_t cb;
5019 	void *arg;
5020 } ocs_hw_fw_write_cb_arg_t;
5021
5022 typedef struct ocs_hw_sfp_cb_arg {
5023 	ocs_hw_sfp_cb_t cb;
5024 	void *arg;
5025 	ocs_dma_t payload;
5026 } ocs_hw_sfp_cb_arg_t;
5027
5028 typedef struct ocs_hw_temp_cb_arg {
5029 	ocs_hw_temp_cb_t cb;
5030 	void *arg;
5031 } ocs_hw_temp_cb_arg_t;
5032
5033 typedef struct ocs_hw_link_stat_cb_arg {
5034 	ocs_hw_link_stat_cb_t cb;
5035 	void *arg;
5036 } ocs_hw_link_stat_cb_arg_t;
5037
5038 typedef struct ocs_hw_host_stat_cb_arg {
5039 	ocs_hw_host_stat_cb_t cb;
5040 	void *arg;
5041 } ocs_hw_host_stat_cb_arg_t;
5042
5043 typedef struct ocs_hw_dump_get_cb_arg {
5044 	ocs_hw_dump_get_cb_t cb;
5045 	void *arg;
5046 	void *mbox_cmd;
5047 } ocs_hw_dump_get_cb_arg_t;
5048
5049 typedef struct ocs_hw_dump_clear_cb_arg {
5050 	ocs_hw_dump_clear_cb_t cb;
5051 	void *arg;
5052 	void *mbox_cmd;
5053 } ocs_hw_dump_clear_cb_arg_t;
5054
5055 /**
5056 * @brief Write a portion of a firmware image to the device.
5057 *
5058 * @par Description
5059 * Calls the correct firmware write function based on the device type.
5060 *
5061 * @param hw Hardware context.
5062 * @param dma DMA structure containing the firmware image chunk.
5063 * @param size Size of the firmware image chunk.
5064 * @param offset Offset, in bytes, from the beginning of the firmware image.
5065 * @param last True if this is the last chunk of the image.
5066 * Causes the image to be committed to flash.
5067 * @param cb Pointer to a callback function that is called when the command completes.
5068 * The callback function prototype is
5069 * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5070 * @param arg Pointer to be passed to the callback function.
5071 *
5072 * @return Returns 0 on success, or a non-zero value on failure.
5073 */
5074 ocs_hw_rtn_e
5075 ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5076 {
5077 	if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
5078 		return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
5079 	} else {
5080 		/* Firmware write is not supported for BE3/Skyhawk */
5081 		return -1;
5082 	}
5083 }
5084
5085 /**
5086 * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
5087 *
5088 * @par Description
5089 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
5090 * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
5091 * the callback function ocs_hw_cb_fw_write() gets called to free the mailbox
5092 * and to signal the caller that the write has completed.
5093 *
5094 * @param hw Hardware context.
5095 * @param dma DMA structure containing the firmware image chunk.
5096 * @param size Size of the firmware image chunk.
5097 * @param offset Offset, in bytes, from the beginning of the firmware image.
5098 * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
5099 * @param cb Pointer to a callback function that is called when the command completes.
5100 * The callback function prototype is
5101 * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5102 * @param arg Pointer to be passed to the callback function.
5103 *
5104 * @return Returns 0 on success, or a non-zero value on failure.
5105 */
5106 ocs_hw_rtn_e
5107 ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5108 {
5109 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5110 	uint8_t *mbxdata;
5111 	ocs_hw_fw_write_cb_arg_t *cb_arg;
5112 	int noc = 0;	/* No Commit bit - set to 1 for testing */
5113
5114 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
5115 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
5116 		return OCS_HW_RTN_ERROR;
5117 	}
5118
5119 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5120 	if (mbxdata == NULL) {
5121 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5122 		return OCS_HW_RTN_NO_MEMORY;
5123 	}
5124
5125 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
5126 	if (cb_arg == NULL) {
5127 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5128 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5129 		return OCS_HW_RTN_NO_MEMORY;
5130 	}
5131
5132 	cb_arg->cb = cb;
5133 	cb_arg->arg = arg;
5134
5135 	if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
5136 					size, offset, "/prg/", dma)) {
5137 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
5138 	}
5139
5140 	if (rc != OCS_HW_RTN_SUCCESS) {
5141 		ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
5142 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5143 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5144 	}
5145
5146 	return rc;
5147
5148 }
5149
5150 /**
5151 * @brief Called when the WRITE OBJECT command completes.
5152 *
5153 * @par Description
5154 * Get the number of bytes actually written out of the response, free the mailbox
5155 * that was malloc'd by ocs_hw_firmware_write_lancer(),
5156 * then call the callback and pass the status and bytes written.
5157 *
5158 * @param hw Hardware context.
5159 * @param status Status field from the mbox completion.
5160 * @param mqe Mailbox response structure.
5161 * @param arg Pointer to a callback function that signals the caller that the command is done.
5162 * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5163 *
5164 * @return Returns 0.
5165 */
5166 static int32_t
5167 ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5168 {
5169
5170 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
5171 	sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed);
5172 	ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
5173 	uint32_t bytes_written;
5174 	uint16_t mbox_status;
5175 	uint32_t change_status;
5176
5177 	bytes_written = wr_obj_rsp->actual_write_length;
5178 	mbox_status = mbox_rsp->hdr.status;
5179 	change_status = wr_obj_rsp->change_status;
5180
5181 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5182
5183 	if (cb_arg) {
5184 		if (cb_arg->cb) {
5185 			if ((status == 0) && mbox_status) {
5186 				status = mbox_status;
5187 			}
5188 			cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
5189 		}
5190
5191 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5192 	}
5193
5194 	return 0;
5195
5196 }
5197
5198 /**
5199 * @brief Called when the READ_TRANSCEIVER_DATA command completes.
5200 *
5201 * @par Description
5202 * Get the number of bytes read out of the response, free the mailbox that was malloc'd
5203 * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes read.
5204 *
5205 * @param hw Hardware context.
5206 * @param status Status field from the mbox completion.
5207 * @param mqe Mailbox response structure.
5208 * @param arg Pointer to a callback function that signals the caller that the command is done.
5209 * The callback function prototype is
5210 * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t *data, void *arg)</tt>.
5211 *
5212 * @return Returns 0.
5213 */
5214 static int32_t
5215 ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5216 {
5217
5218 	ocs_hw_sfp_cb_arg_t *cb_arg = arg;
5219 	ocs_dma_t *payload = NULL;
5220 	sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL;
5221 	uint32_t bytes_written;
5222
5223 	if (cb_arg) {
5224 		payload = &(cb_arg->payload);
5225 		if (cb_arg->cb) {
5226 			mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt;
5227 			bytes_written = mbox_rsp->hdr.response_length;
5228 			if ((status == 0) && mbox_rsp->hdr.status) {
5229 				status = mbox_rsp->hdr.status;
5230 			}
5231 			cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg);
5232 		}
5233
5234 		ocs_dma_free(hw->os, &cb_arg->payload);
5235 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5236 	}
5237
5238 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5239 	return 0;
5240 }
5241
5242 /**
5243 * @ingroup io
5244 * @brief Function to retrieve the SFP information.
5245 *
5246 * @param hw Hardware context.
5247 * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
5248 * @param cb Function call upon completion of sending the data (may be NULL).
5249 * @param arg Argument to pass to IO completion function.
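 *
 * @par Example
 * A minimal sketch, assuming a caller-supplied ocs_hw_sfp_cb_t callback
 * named sfp_done():
 * @code
 * if (ocs_hw_get_sfp(hw, 0xa0, sfp_done, NULL) != OCS_HW_RTN_SUCCESS) {
 *         // command was not issued; sfp_done() will not be called
 * }
 * @endcode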
5250 * 5251 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY. 5252 */ 5253 ocs_hw_rtn_e 5254 ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg) 5255 { 5256 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 5257 ocs_hw_sfp_cb_arg_t *cb_arg; 5258 uint8_t *mbxdata; 5259 5260 /* mbxdata holds the header of the command */ 5261 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 5262 if (mbxdata == NULL) { 5263 ocs_log_err(hw->os, "failed to malloc mbox\n"); 5264 return OCS_HW_RTN_NO_MEMORY; 5265 } 5266 5267 /* cb_arg holds the data that will be passed to the callback on completion */ 5268 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT); 5269 if (cb_arg == NULL) { 5270 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 5271 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5272 return OCS_HW_RTN_NO_MEMORY; 5273 } 5274 5275 cb_arg->cb = cb; 5276 cb_arg->arg = arg; 5277 5278 /* payload holds the non-embedded portion */ 5279 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t), 5280 OCS_MIN_DMA_ALIGNMENT)) { 5281 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); 5282 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t)); 5283 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5284 return OCS_HW_RTN_NO_MEMORY; 5285 } 5286 5287 /* Send the HW command */ 5288 if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page, 5289 &cb_arg->payload)) { 5290 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg); 5291 } 5292 5293 if (rc != OCS_HW_RTN_SUCCESS) { 5294 ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n", 5295 rc); 5296 ocs_dma_free(hw->os, &cb_arg->payload); 5297 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t)); 5298 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5299 } 5300 5301 return rc; 5302 } 5303 5304 /** 5305 * @brief Function to retrieve the temperature information. 5306 * 5307 * @param hw Hardware context. 5308 * @param cb Function call upon completion of sending the data (may be NULL). 5309 * @param arg Argument to pass to IO completion function. 5310 * 5311 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY. 5312 */ 5313 ocs_hw_rtn_e 5314 ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg) 5315 { 5316 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 5317 ocs_hw_temp_cb_arg_t *cb_arg; 5318 uint8_t *mbxdata; 5319 5320 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 5321 if (mbxdata == NULL) { 5322 ocs_log_err(hw->os, "failed to malloc mbox"); 5323 return OCS_HW_RTN_NO_MEMORY; 5324 } 5325 5326 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT); 5327 if (cb_arg == NULL) { 5328 ocs_log_err(hw->os, "failed to malloc cb_arg"); 5329 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5330 return OCS_HW_RTN_NO_MEMORY; 5331 } 5332 5333 cb_arg->cb = cb; 5334 cb_arg->arg = arg; 5335 5336 if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 5337 SLI4_WKI_TAG_SAT_TEM)) { 5338 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg); 5339 } 5340 5341 if (rc != OCS_HW_RTN_SUCCESS) { 5342 ocs_log_test(hw->os, "DUMP_TYPE4 failed\n"); 5343 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5344 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t)); 5345 } 5346 5347 return rc; 5348 } 5349 5350 /** 5351 * @brief Called when the DUMP command completes. 
5352 * 5353 * @par Description 5354 * Get the temperature data out of the response, free the mailbox that was malloc'd 5355 * by ocs_hw_get_temperature(), then call the callback and pass the status and data. 5356 * 5357 * @param hw Hardware context. 5358 * @param status Status field from the mbox completion. 5359 * @param mqe Mailbox response structure. 5360 * @param arg Pointer to a callback function that signals the caller that the command is done. 5361 * The callback function prototype is defined by ocs_hw_temp_cb_t. 5362 * 5363 * @return Returns 0. 5364 */ 5365 static int32_t 5366 ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 5367 { 5368 5369 sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe; 5370 ocs_hw_temp_cb_arg_t *cb_arg = arg; 5371 uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */ 5372 uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6*/ 5373 uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */ 5374 uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */ 5375 uint32_t fan_off_thrshld = mbox_rsp->resp_data[4]; /* word 9 */ 5376 uint32_t fan_on_thrshld = mbox_rsp->resp_data[5]; /* word 10 */ 5377 5378 if (cb_arg) { 5379 if (cb_arg->cb) { 5380 if ((status == 0) && mbox_rsp->hdr.status) { 5381 status = mbox_rsp->hdr.status; 5382 } 5383 cb_arg->cb(status, 5384 curr_temp, 5385 crit_temp_thrshld, 5386 warn_temp_thrshld, 5387 norm_temp_thrshld, 5388 fan_off_thrshld, 5389 fan_on_thrshld, 5390 cb_arg->arg); 5391 } 5392 5393 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t)); 5394 } 5395 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 5396 5397 return 0; 5398 } 5399 5400 /** 5401 * @brief Function to retrieve the link statistics. 5402 * 5403 * @param hw Hardware context. 5404 * @param req_ext_counters If TRUE, then the extended counters will be requested. 5405 * @param clear_overflow_flags If TRUE, then overflow flags will be cleared. 5406 * @param clear_all_counters If TRUE, the counters will be cleared. 5407 * @param cb Function call upon completion of sending the data (may be NULL). 5408 * @param arg Argument to pass to IO completion function. 5409 * 5410 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY. 
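 *
 * @par Example
 * A minimal sketch that requests the extended counters without clearing
 * anything, assuming a caller-supplied ocs_hw_link_stat_cb_t callback named
 * link_stats_done():
 * @code
 * if (ocs_hw_get_link_stats(hw, TRUE, FALSE, FALSE,
 *                           link_stats_done, NULL) != OCS_HW_RTN_SUCCESS) {
 *         // mailbox command was not issued; the callback will not be called
 * }
 * @endcode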
5411 */ 5412 ocs_hw_rtn_e 5413 ocs_hw_get_link_stats(ocs_hw_t *hw, 5414 uint8_t req_ext_counters, 5415 uint8_t clear_overflow_flags, 5416 uint8_t clear_all_counters, 5417 ocs_hw_link_stat_cb_t cb, 5418 void *arg) 5419 { 5420 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 5421 ocs_hw_link_stat_cb_arg_t *cb_arg; 5422 uint8_t *mbxdata; 5423 5424 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 5425 if (mbxdata == NULL) { 5426 ocs_log_err(hw->os, "failed to malloc mbox"); 5427 return OCS_HW_RTN_NO_MEMORY; 5428 } 5429 5430 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT); 5431 if (cb_arg == NULL) { 5432 ocs_log_err(hw->os, "failed to malloc cb_arg"); 5433 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5434 return OCS_HW_RTN_NO_MEMORY; 5435 } 5436 5437 cb_arg->cb = cb; 5438 cb_arg->arg = arg; 5439 5440 if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 5441 req_ext_counters, 5442 clear_overflow_flags, 5443 clear_all_counters)) { 5444 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg); 5445 } 5446 5447 if (rc != OCS_HW_RTN_SUCCESS) { 5448 ocs_log_test(hw->os, "READ_LINK_STATS failed\n"); 5449 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5450 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t)); 5451 } 5452 5453 return rc; 5454 } 5455 5456 /** 5457 * @brief Called when the READ_LINK_STAT command completes. 5458 * 5459 * @par Description 5460 * Get the counters out of the response, free the mailbox that was malloc'd 5461 * by ocs_hw_get_link_stats(), then call the callback and pass the status and data. 5462 * 5463 * @param hw Hardware context. 5464 * @param status Status field from the mbox completion. 5465 * @param mqe Mailbox response structure. 5466 * @param arg Pointer to a callback function that signals the caller that the command is done. 5467 * The callback function prototype is defined by ocs_hw_link_stat_cb_t. 5468 * 5469 * @return Returns 0. 5470 */ 5471 static int32_t 5472 ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 5473 { 5474 5475 sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe; 5476 ocs_hw_link_stat_cb_arg_t *cb_arg = arg; 5477 ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX]; 5478 uint32_t num_counters = (mbox_rsp->gec ? 
20 : 13); 5479 5480 ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) * 5481 OCS_HW_LINK_STAT_MAX); 5482 5483 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of; 5484 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of; 5485 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of; 5486 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of; 5487 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of; 5488 counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of; 5489 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of; 5490 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of; 5491 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of; 5492 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of; 5493 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of; 5494 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of; 5495 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of; 5496 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of; 5497 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of; 5498 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of; 5499 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of; 5500 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of; 5501 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of; 5502 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of; 5503 5504 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count; 5505 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count; 5506 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count; 5507 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count; 5508 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count; 5509 counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count; 5510 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count; 5511 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count; 5512 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count; 5513 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit; 5514 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit; 5515 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit; 5516 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit; 5517 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count; 5518 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count; 5519 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count; 5520 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count; 5521 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count; 5522 
counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
        counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;

        if (cb_arg) {
                if (cb_arg->cb) {
                        if ((status == 0) && mbox_rsp->hdr.status) {
                                status = mbox_rsp->hdr.status;
                        }
                        cb_arg->cb(status,
                                   num_counters,
                                   counts,
                                   cb_arg->arg);
                }

                ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
        }
        ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);

        return 0;
}

/**
 * @brief Function to retrieve the link and host statistics.
 *
 * @param hw Hardware context.
 * @param cc Clear counters; if TRUE, all counters will be cleared.
 * @param cb Function to call upon completion of receiving the data.
 * @param arg Argument to pass to the completion callback.
 *
 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
 */
ocs_hw_rtn_e
ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
{
        ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
        ocs_hw_host_stat_cb_arg_t *cb_arg;
        uint8_t *mbxdata;

        mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO);
        if (mbxdata == NULL) {
                ocs_log_err(hw->os, "failed to malloc mbox");
                return OCS_HW_RTN_NO_MEMORY;
        }

        cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0);
        if (cb_arg == NULL) {
                ocs_log_err(hw->os, "failed to malloc cb_arg");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                return OCS_HW_RTN_NO_MEMORY;
        }

        cb_arg->cb = cb;
        cb_arg->arg = arg;

        /* Send the HW command to get the host stats */
        if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
                rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
        }

        if (rc != OCS_HW_RTN_SUCCESS) {
                ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
        }

        return rc;
}

/**
 * @brief Called when the READ_STATUS command completes.
 *
 * @par Description
 * Get the counters out of the response, free the mailbox that was malloc'd
 * by ocs_hw_get_host_stats(), then call the callback and pass
 * the status and data.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is defined by
 * ocs_hw_host_stat_cb_t.
 *
 * @return Returns 0.
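 *
 * @par Example
 * A sketch of a handler suitable for passing to ocs_hw_get_host_stats();
 * the handler name is hypothetical, and the prototype follows the
 * invocation in this callback:
 * @code
 * static void
 * my_host_stat_cb(int32_t status, uint32_t num_counters,
 *                 ocs_hw_host_stat_counts_t *counters, void *arg)
 * {
 *         uint32_t *tx_frames = arg;
 *
 *         if (status == 0 && num_counters > OCS_HW_HOST_STAT_TX_FRAME_COUNT) {
 *                 *tx_frames = counters[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter;
 *         }
 * }
 * @endcode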
5606 */ 5607 static int32_t 5608 ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 5609 { 5610 5611 sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe; 5612 ocs_hw_host_stat_cb_arg_t *cb_arg = arg; 5613 ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX]; 5614 uint32_t num_counters = OCS_HW_HOST_STAT_MAX; 5615 5616 ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) * 5617 OCS_HW_HOST_STAT_MAX); 5618 5619 counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count; 5620 counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count; 5621 counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count; 5622 counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count; 5623 counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count; 5624 counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count; 5625 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator; 5626 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder; 5627 counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count; 5628 counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count; 5629 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count; 5630 counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count; 5631 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count; 5632 counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count; 5633 5634 if (cb_arg) { 5635 if (cb_arg->cb) { 5636 if ((status == 0) && mbox_rsp->hdr.status) { 5637 status = mbox_rsp->hdr.status; 5638 } 5639 cb_arg->cb(status, 5640 num_counters, 5641 counts, 5642 cb_arg->arg); 5643 } 5644 5645 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t)); 5646 } 5647 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 5648 5649 return 0; 5650 } 5651 5652 /** 5653 * @brief HW link configuration enum to the CLP string value mapping. 5654 * 5655 * This structure provides a mapping from the ocs_hw_linkcfg_e 5656 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port 5657 * control) to the CLP string that is used 5658 * in the DMTF_CLP_CMD mailbox command. 5659 */ 5660 typedef struct ocs_hw_linkcfg_map_s { 5661 ocs_hw_linkcfg_e linkcfg; 5662 const char *clp_str; 5663 } ocs_hw_linkcfg_map_t; 5664 5665 /** 5666 * @brief Mapping from the HW linkcfg enum to the CLP command value 5667 * string. 5668 */ 5669 static ocs_hw_linkcfg_map_t linkcfg_map[] = { 5670 {OCS_HW_LINKCFG_4X10G, "ELX_4x10G"}, 5671 {OCS_HW_LINKCFG_1X40G, "ELX_1x40G"}, 5672 {OCS_HW_LINKCFG_2X16G, "ELX_2x16G"}, 5673 {OCS_HW_LINKCFG_4X8G, "ELX_4x8G"}, 5674 {OCS_HW_LINKCFG_4X1G, "ELX_4x1G"}, 5675 {OCS_HW_LINKCFG_2X10G, "ELX_2x10G"}, 5676 {OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}}; 5677 5678 /** 5679 * @brief HW link configuration enum to Skyhawk link config ID mapping. 
5680 * 5681 * This structure provides a mapping from the ocs_hw_linkcfg_e 5682 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port 5683 * control) to the link config ID numbers used by Skyhawk 5684 */ 5685 typedef struct ocs_hw_skyhawk_linkcfg_map_s { 5686 ocs_hw_linkcfg_e linkcfg; 5687 uint32_t config_id; 5688 } ocs_hw_skyhawk_linkcfg_map_t; 5689 5690 /** 5691 * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs 5692 */ 5693 static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = { 5694 {OCS_HW_LINKCFG_4X10G, 0x0a}, 5695 {OCS_HW_LINKCFG_1X40G, 0x09}, 5696 }; 5697 5698 /** 5699 * @brief Helper function for getting the HW linkcfg enum from the CLP 5700 * string value 5701 * 5702 * @param clp_str CLP string value from OEMELX_LinkConfig. 5703 * 5704 * @return Returns the HW linkcfg enum corresponding to clp_str. 5705 */ 5706 static ocs_hw_linkcfg_e 5707 ocs_hw_linkcfg_from_clp(const char *clp_str) 5708 { 5709 uint32_t i; 5710 for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) { 5711 if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) { 5712 return linkcfg_map[i].linkcfg; 5713 } 5714 } 5715 return OCS_HW_LINKCFG_NA; 5716 } 5717 5718 /** 5719 * @brief Helper function for getting the CLP string value from the HW 5720 * linkcfg enum. 5721 * 5722 * @param linkcfg HW linkcfg enum. 5723 * 5724 * @return Returns the OEMELX_LinkConfig CLP string value corresponding to 5725 * given linkcfg. 5726 */ 5727 static const char * 5728 ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg) 5729 { 5730 uint32_t i; 5731 for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) { 5732 if (linkcfg_map[i].linkcfg == linkcfg) { 5733 return linkcfg_map[i].clp_str; 5734 } 5735 } 5736 return NULL; 5737 } 5738 5739 /** 5740 * @brief Helper function for getting a Skyhawk link config ID from the HW 5741 * linkcfg enum. 5742 * 5743 * @param linkcfg HW linkcfg enum. 5744 * 5745 * @return Returns the Skyhawk link config ID corresponding to 5746 * given linkcfg. 5747 */ 5748 static uint32_t 5749 ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg) 5750 { 5751 uint32_t i; 5752 for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) { 5753 if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) { 5754 return skyhawk_linkcfg_map[i].config_id; 5755 } 5756 } 5757 return 0; 5758 } 5759 5760 /** 5761 * @brief Helper function for getting the HW linkcfg enum from a 5762 * Skyhawk config ID. 5763 * 5764 * @param config_id Skyhawk link config ID. 5765 * 5766 * @return Returns the HW linkcfg enum corresponding to config_id. 5767 */ 5768 static ocs_hw_linkcfg_e 5769 ocs_hw_linkcfg_from_config_id(const uint32_t config_id) 5770 { 5771 uint32_t i; 5772 for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) { 5773 if (skyhawk_linkcfg_map[i].config_id == config_id) { 5774 return skyhawk_linkcfg_map[i].linkcfg; 5775 } 5776 } 5777 return OCS_HW_LINKCFG_NA; 5778 } 5779 5780 /** 5781 * @brief Link configuration callback argument. 5782 */ 5783 typedef struct ocs_hw_linkcfg_cb_arg_s { 5784 ocs_hw_port_control_cb_t cb; 5785 void *arg; 5786 uint32_t opts; 5787 int32_t status; 5788 ocs_dma_t dma_cmd; 5789 ocs_dma_t dma_resp; 5790 uint32_t result_len; 5791 } ocs_hw_linkcfg_cb_arg_t; 5792 5793 /** 5794 * @brief Set link configuration. 5795 * 5796 * @param hw Hardware context. 5797 * @param value Link configuration enum to which the link configuration is 5798 * set. 5799 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). 5800 * @param cb Callback function to invoke following mbx command. 
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
        if (!sli_link_is_configurable(&hw->sli)) {
                ocs_log_debug(hw->os, "Function not supported\n");
                return OCS_HW_RTN_ERROR;
        }

        if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
                return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
        } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
                   (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
                return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
        } else {
                ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
                return OCS_HW_RTN_ERROR;
        }
}

/**
 * @brief Set link configuration for Lancer.
 *
 * @param hw Hardware context.
 * @param value Link configuration enum to which the link configuration is
 * set.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
        char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
        ocs_hw_linkcfg_cb_arg_t *cb_arg;
        const char *value_str = NULL;
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

        /* translate ocs_hw_linkcfg_e to CLP string */
        value_str = ocs_hw_clp_from_linkcfg(value);
        if (value_str == NULL) {
                /* no CLP mapping exists for this linkcfg value */
                ocs_log_test(hw->os, "Link config %d not supported\n", value);
                return OCS_HW_RTN_ERROR;
        }

        /* allocate memory for callback argument */
        cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
        if (cb_arg == NULL) {
                ocs_log_err(hw->os, "failed to malloc cb_arg");
                return OCS_HW_RTN_NO_MEMORY;
        }

        ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
        /* allocate DMA for command */
        if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
                ocs_log_err(hw->os, "malloc failed\n");
                ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
                return OCS_HW_RTN_NO_MEMORY;
        }
        ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
        ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));

        /* allocate DMA for response */
        if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
                ocs_log_err(hw->os, "malloc failed\n");
                ocs_dma_free(hw->os, &cb_arg->dma_cmd);
                ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
                return OCS_HW_RTN_NO_MEMORY;
        }
        cb_arg->cb = cb;
        cb_arg->arg = arg;
        cb_arg->opts = opts;

        rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
                                      opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);

        if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
                /* if failed, or polling, free memory here; if success and not
                 * polling, will free in callback function
                 */
                if (rc) {
                        ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
                                     (char *)cb_arg->dma_cmd.virt);
                }
                ocs_dma_free(hw->os, &cb_arg->dma_cmd);
                ocs_dma_free(hw->os, &cb_arg->dma_resp);
                ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
        }
        return rc;
}

/**
 * @brief Callback for ocs_hw_set_linkcfg_skyhawk.
 *
 * @param hw Hardware context.
 * @param status Status from the SET_RECONFIG_LINK_ID command.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback argument.
 *
 * @return None.
 */
static void
ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
        ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;

        if (status) {
                ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
        }

        /* invoke callback */
        if (cb_arg->cb) {
                cb_arg->cb(status, 0, cb_arg->arg);
        }

        /* if polling, will free memory in calling function */
        if (cb_arg->opts != OCS_CMD_POLL) {
                ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
        }
}

/**
 * @brief Set link configuration for a Skyhawk.
 *
 * @param hw Hardware context.
 * @param value Link configuration enum to which the link configuration is
 * set.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
        uint8_t *mbxdata;
        ocs_hw_linkcfg_cb_arg_t *cb_arg;
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
        uint32_t config_id;

        config_id = ocs_hw_config_id_from_linkcfg(value);

        if (config_id == 0) {
                ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
                return OCS_HW_RTN_ERROR;
        }

        /* mbxdata holds the header of the command */
        mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
        if (mbxdata == NULL) {
                ocs_log_err(hw->os, "failed to malloc mbox\n");
                return OCS_HW_RTN_NO_MEMORY;
        }

        /* cb_arg holds the data that will be passed to the callback on completion */
        cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
        if (cb_arg == NULL) {
                ocs_log_err(hw->os, "failed to malloc cb_arg\n");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                return OCS_HW_RTN_NO_MEMORY;
        }

        cb_arg->cb = cb;
        cb_arg->arg = arg;
        /* store opts so the completion callback knows whether the caller is polling */
        cb_arg->opts = opts;

        if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) {
                rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
        }

        if (rc != OCS_HW_RTN_SUCCESS) {
                ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
        } else if (opts == OCS_CMD_POLL) {
                /* if we're polling we have to call the callback here. */
                ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
        } else {
                /* We weren't polling, so the callback got called */
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
        }

        return rc;
}

/**
 * @brief Get link configuration.
 *
 * @param hw Hardware context.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
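 *
 * @par Example
 * A minimal polling sketch. The handler name is hypothetical, and its
 * second parameter is written here as uintptr_t, which is an assumption
 * about the ocs_hw_port_control_cb_t typedef; the callbacks in this file
 * are invoked as cb(status, value, arg). With OCS_CMD_POLL the callback
 * runs before the function returns:
 * @code
 * static void
 * my_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
 * {
 *         ocs_hw_linkcfg_e *result = arg;
 *
 *         *result = (status == 0) ? (ocs_hw_linkcfg_e)value : OCS_HW_LINKCFG_NA;
 * }
 *
 * ocs_hw_linkcfg_e active = OCS_HW_LINKCFG_NA;
 * ocs_hw_rtn_e rc;
 *
 * rc = ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, my_linkcfg_cb, &active);
 * @endcode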
5998 */ 5999 static ocs_hw_rtn_e 6000 ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) 6001 { 6002 if (!sli_link_is_configurable(&hw->sli)) { 6003 ocs_log_debug(hw->os, "Function not supported\n"); 6004 return OCS_HW_RTN_ERROR; 6005 } 6006 6007 if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) || 6008 (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))){ 6009 return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg); 6010 } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || 6011 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) { 6012 return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg); 6013 } else { 6014 ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n"); 6015 return OCS_HW_RTN_ERROR; 6016 } 6017 } 6018 6019 /** 6020 * @brief Get link configuration for a Lancer 6021 * 6022 * @param hw Hardware context. 6023 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). 6024 * @param cb Callback function to invoke following mbx command. 6025 * @param arg Callback argument. 6026 * 6027 * @return Returns OCS_HW_RTN_SUCCESS on success. 6028 */ 6029 static ocs_hw_rtn_e 6030 ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) 6031 { 6032 char cmd[OCS_HW_DMTF_CLP_CMD_MAX]; 6033 ocs_hw_linkcfg_cb_arg_t *cb_arg; 6034 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 6035 6036 /* allocate memory for callback argument */ 6037 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT); 6038 if (cb_arg == NULL) { 6039 ocs_log_err(hw->os, "failed to malloc cb_arg"); 6040 return OCS_HW_RTN_NO_MEMORY; 6041 } 6042 6043 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig"); 6044 6045 /* allocate DMA for command */ 6046 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) { 6047 ocs_log_err(hw->os, "malloc failed\n"); 6048 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 6049 return OCS_HW_RTN_NO_MEMORY; 6050 } 6051 6052 /* copy CLP command to DMA command */ 6053 ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1); 6054 ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd)); 6055 6056 /* allocate DMA for response */ 6057 if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) { 6058 ocs_log_err(hw->os, "malloc failed\n"); 6059 ocs_dma_free(hw->os, &cb_arg->dma_cmd); 6060 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 6061 return OCS_HW_RTN_NO_MEMORY; 6062 } 6063 cb_arg->cb = cb; 6064 cb_arg->arg = arg; 6065 cb_arg->opts = opts; 6066 6067 rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp, 6068 opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg); 6069 6070 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) { 6071 /* if failed or polling, free memory here; if not polling and success, 6072 * will free in callback function 6073 */ 6074 if (rc) { 6075 ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n", 6076 (char *)cb_arg->dma_cmd.virt); 6077 } 6078 ocs_dma_free(hw->os, &cb_arg->dma_cmd); 6079 ocs_dma_free(hw->os, &cb_arg->dma_resp); 6080 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 6081 } 6082 return rc; 6083 } 6084 6085 /** 6086 * @brief Get the link configuration callback. 6087 * 6088 * @param hw Hardware context. 6089 * @param status Status from the RECONFIG_GET_LINK_INFO command. 6090 * @param mqe Mailbox response structure. 6091 * @param arg Pointer to a callback argument. 
 *
 * @return None.
 */
static void
ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
        ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
        sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
        ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;

        if (status) {
                ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
        } else {
                /* Call was successful */
                value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
        }

        /* invoke callback */
        if (cb_arg->cb) {
                cb_arg->cb(status, value, cb_arg->arg);
        }

        /* if polling, will free memory in calling function */
        if (cb_arg->opts != OCS_CMD_POLL) {
                ocs_dma_free(hw->os, &cb_arg->dma_cmd);
                ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
        }
}

/**
 * @brief Get link configuration for a Skyhawk.
 *
 * @param hw Hardware context.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
        uint8_t *mbxdata;
        ocs_hw_linkcfg_cb_arg_t *cb_arg;
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

        /* mbxdata holds the header of the command */
        mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
        if (mbxdata == NULL) {
                ocs_log_err(hw->os, "failed to malloc mbox\n");
                return OCS_HW_RTN_NO_MEMORY;
        }

        /* cb_arg holds the data that will be passed to the callback on completion */
        cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
        if (cb_arg == NULL) {
                ocs_log_err(hw->os, "failed to malloc cb_arg\n");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                return OCS_HW_RTN_NO_MEMORY;
        }

        cb_arg->cb = cb;
        cb_arg->arg = arg;
        cb_arg->opts = opts;

        /* dma_mem holds the non-embedded portion */
        if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
                ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
                return OCS_HW_RTN_NO_MEMORY;
        }

        if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
                rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
        }

        if (rc != OCS_HW_RTN_SUCCESS) {
                ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                ocs_dma_free(hw->os, &cb_arg->dma_cmd);
                ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
        } else if (opts == OCS_CMD_POLL) {
                /* if we're polling we have to call the callback here. */
                ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                ocs_dma_free(hw->os, &cb_arg->dma_cmd);
                ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
        } else {
                /* We weren't polling, so the callback got called */
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
        }

        return rc;
}

/**
 * @brief Sets the DIF seed value.
 *
 * @param hw Hardware context.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_dif_seed(ocs_hw_t *hw)
{
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
        uint8_t buf[SLI4_BMBX_SIZE];
        sli4_req_common_set_features_dif_seed_t seed_param;

        ocs_memset(&seed_param, 0, sizeof(seed_param));
        seed_param.seed = hw->config.dif_seed;

        /* send set_features command */
        if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
                                        SLI4_SET_FEATURES_DIF_SEED,
                                        4,
                                        (uint32_t*)&seed_param)) {
                rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
                if (rc) {
                        ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
                } else {
                        ocs_log_debug(hw->os, "DIF seed set to 0x%x\n",
                                      hw->config.dif_seed);
                }
        } else {
                ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
                rc = OCS_HW_RTN_ERROR;
        }
        return rc;
}

/**
 * @brief Sets the DIF mode value.
 *
 * @param hw Hardware context.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_dif_mode(ocs_hw_t *hw)
{
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
        uint8_t buf[SLI4_BMBX_SIZE];
        sli4_req_common_set_features_t10_pi_mem_model_t mode_param;

        ocs_memset(&mode_param, 0, sizeof(mode_param));
        mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1);

        /* send set_features command */
        if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
                                        SLI4_SET_FEATURES_DIF_MEMORY_MODE,
                                        sizeof(mode_param),
                                        (uint32_t*)&mode_param)) {
                rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
                if (rc) {
                        ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
                } else {
                        ocs_log_test(hw->os, "DIF mode set to %s\n",
                                     (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate"));
                }
        } else {
                ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
                rc = OCS_HW_RTN_ERROR;
        }
        return rc;
}

static void
ocs_hw_watchdog_timer_cb(void *arg)
{
        ocs_hw_t *hw = (ocs_hw_t *)arg;

        ocs_hw_config_watchdog_timer(hw);
        return;
}

static void
ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
        uint16_t timeout = hw->watchdog_timeout;

        if (status != 0) {
                ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status);
        } else {
                if (timeout != 0) {
                        /* keeping callback 500ms before timeout to keep heartbeat alive */
                        ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500));
                } else {
                        ocs_del_timer(&hw->watchdog_timer);
                }
        }

        ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
        return;
}

/**
 * @brief Set configuration parameters for watchdog timer feature.
 *
 * @param hw Hardware context. The timeout, in seconds, is taken from
 * hw->watchdog_timeout.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
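 *
 * @par Example
 * A minimal sketch of arming the watchdog; the 30-second value is
 * illustrative, and the timeout is carried in the hardware context:
 * @code
 * ocs_hw_rtn_e rc;
 *
 * hw->watchdog_timeout = 30;
 * rc = ocs_hw_config_watchdog_timer(hw);
 * @endcode
 * Setting hw->watchdog_timeout to 0 and calling the function again
 * deletes the timer when the completion callback runs.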
 */
static ocs_hw_rtn_e
ocs_hw_config_watchdog_timer(ocs_hw_t *hw)
{
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
        uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);

        if (!buf) {
                ocs_log_err(hw->os, "no buffer for command\n");
                return OCS_HW_RTN_NO_MEMORY;
        }

        sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout);
        rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL);
        if (rc) {
                ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
                ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc);
        }
        return rc;
}

/**
 * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature.
 *
 * @param hw Hardware context.
 * @param buf Pointer to a mailbox buffer area.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf)
{
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
        sli4_req_common_set_features_xfer_rdy_t10pi_t param;

        ocs_memset(&param, 0, sizeof(param));
        param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1);
        param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0);
        param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1);
        param.app_tag = hw->config.auto_xfer_rdy_app_tag_value;
        param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip;

        switch (hw->config.auto_xfer_rdy_p_type) {
        case 1:
                param.p_type = 0;
                break;
        case 3:
                param.p_type = 2;
                break;
        default:
                ocs_log_err(hw->os, "unsupported p_type %d\n",
                            hw->config.auto_xfer_rdy_p_type);
                return OCS_HW_RTN_ERROR;
        }

        /* build the set_features command */
        sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
                                    SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI,
                                    sizeof(param),
                                    &param);

        rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
        if (rc) {
                ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
        } else {
                ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n",
                             param.rtc, param.atv, param.p_type,
                             param.app_tag, param.blk_size);
        }

        return rc;
}

/**
 * @brief Enable the SLI port health check.
 *
 * @param hw Hardware context.
 * @param query If 1, query the current state of the health check feature.
 * @param enable If 1, enable the health check; if 0, disable it.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
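 *
 * @par Example
 * A minimal sketch; flag values are as described above:
 * @code
 * ocs_hw_rtn_e rc;
 *
 * rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
 * @endcode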
 */
static ocs_hw_rtn_e
ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable)
{
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
        uint8_t buf[SLI4_BMBX_SIZE];
        sli4_req_common_set_features_health_check_t param;

        ocs_memset(&param, 0, sizeof(param));
        param.hck = enable;
        param.qry = query;

        /* build the set_features command */
        sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
                                    SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK,
                                    sizeof(param),
                                    &param);

        rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
        if (rc) {
                ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
        } else {
                ocs_log_test(hw->os, "SLI Port Health Check is enabled\n");
        }

        return rc;
}

/**
 * @brief Set the FDT transfer hint feature.
 *
 * @param hw Hardware context.
 * @param fdt_xfer_hint Size, in bytes, at which read requests are segmented.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint)
{
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
        uint8_t buf[SLI4_BMBX_SIZE];
        sli4_req_common_set_features_set_fdt_xfer_hint_t param;

        ocs_memset(&param, 0, sizeof(param));
        param.fdt_xfer_hint = fdt_xfer_hint;
        /* build the set_features command */
        sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
                                    SLI4_SET_FEATURES_SET_FTD_XFER_HINT,
                                    sizeof(param),
                                    &param);

        rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
        if (rc) {
                ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc);
        } else {
                ocs_log_debug(hw->os, "Set FDT transfer hint to %d\n", param.fdt_xfer_hint);
        }

        return rc;
}

/**
 * @brief Callback invoked when the DMTF CLP command used to get or set the
 * link configuration completes.
 *
 * @param hw Hardware context.
 * @param status Status from the DMTF CLP command.
 * @param result_len Length, in bytes, of the DMTF CLP result.
 * @param arg Pointer to a callback argument.
 *
 * @return None.
 */
static void
ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
{
        int32_t rval;
        char retdata_str[64];
        ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
        ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;

        if (status) {
                ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
        } else {
                /* parse CLP response to get return data */
                rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
                                                 sizeof(retdata_str),
                                                 cb_arg->dma_resp.virt,
                                                 result_len);

                if (rval <= 0) {
                        ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
                } else {
                        /* translate string into hw enum */
                        linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
                }
        }

        /* invoke callback */
        if (cb_arg->cb) {
                cb_arg->cb(status, linkcfg, cb_arg->arg);
        }

        /* if polling, will free memory in calling function */
        if (cb_arg->opts != OCS_CMD_POLL) {
                ocs_dma_free(hw->os, &cb_arg->dma_cmd);
                ocs_dma_free(hw->os, &cb_arg->dma_resp);
                ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
        }
}

/**
 * @brief Set the Lancer dump location.
 * @par Description
 * This function tells a Lancer chip to use a specific DMA
 * buffer as a dump location rather than the internal flash.
 *
 * @param hw Hardware context.
 * @param num_buffers The number of DMA buffers to hold the dump (1..n).
 * @param dump_buffers DMA buffers to hold the dump.
 * @param fdb If non-zero, set the dump location for this PCI function;
 * if zero, the command is valid only on PCI function 0.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
ocs_hw_rtn_e
ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
{
        uint8_t bus, dev, func;
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
        uint8_t buf[SLI4_BMBX_SIZE];

        /*
         * Make sure the FW is new enough to support this command. If the FW
         * is too old, the FW will UE.
         */
        if (hw->workaround.disable_dump_loc) {
                ocs_log_test(hw->os, "FW version is too old for this feature\n");
                return OCS_HW_RTN_ERROR;
        }

        /* This command is only valid for physical port 0 */
        ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
        if (fdb == 0 && func != 0) {
                ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
                             func);
                return OCS_HW_RTN_ERROR;
        }

        /*
         * If a single buffer is used, then it may be passed as is to the chip.
         * For multiple buffers, we must allocate an SGL list and then pass the
         * address of the list to the chip.
         */
        if (num_buffers > 1) {
                uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
                sli4_sge_t *sge;
                uint32_t i;

                if (hw->dump_sges.size < sge_size) {
                        ocs_dma_free(hw->os, &hw->dump_sges);
                        if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
                                ocs_log_err(hw->os, "SGE DMA allocation failed\n");
                                return OCS_HW_RTN_NO_MEMORY;
                        }
                }
                /* build the SGE list */
                ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
                hw->dump_sges.len = sge_size;
                sge = hw->dump_sges.virt;
                for (i = 0; i < num_buffers; i++) {
                        sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
                        sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
                        sge[i].last = (i == num_buffers - 1 ? 1 : 0);
                        sge[i].buffer_length = dump_buffers[i].size;
                }
                rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
                                                      SLI4_BMBX_SIZE, FALSE, TRUE,
                                                      &hw->dump_sges, fdb);
        } else {
                dump_buffers->len = dump_buffers->size;
                rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
                                                      SLI4_BMBX_SIZE, FALSE, FALSE,
                                                      dump_buffers, fdb);
        }

        if (rc) {
                /* sli_cmd_common_set_dump_location() returns non-zero when the
                 * command was built successfully; now issue it by polling.
                 */
                rc = ocs_hw_command(hw, buf, OCS_CMD_POLL,
                                    NULL, NULL);
                if (rc) {
                        ocs_log_err(hw->os, "ocs_hw_command returns %d\n",
                                    rc);
                }
        } else {
                ocs_log_err(hw->os,
                            "sli_cmd_common_set_dump_location failed\n");
                rc = OCS_HW_RTN_ERROR;
        }

        return rc;
}

/**
 * @brief Set the Ethernet license.
 *
 * @par Description
 * This function sends the appropriate mailbox command (DMTF
 * CLP) to set the Ethernet license to the given license value.
 * Since it is used during the time of ocs_hw_init(), the mailbox
 * command is sent via polling (the BMBX route).
 *
 * @param hw Hardware context.
 * @param license 32-bit license value.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
{
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
        char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
        ocs_dma_t dma_cmd;
        ocs_dma_t dma_resp;

        /* only for lancer right now */
        if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
                ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
                return OCS_HW_RTN_ERROR;
        }

        ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
        /* allocate DMA for command */
        if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
                ocs_log_err(hw->os, "malloc failed\n");
                return OCS_HW_RTN_NO_MEMORY;
        }
        ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
        ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));

        /* allocate DMA for response */
        if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
                ocs_log_err(hw->os, "malloc failed\n");
                ocs_dma_free(hw->os, &dma_cmd);
                return OCS_HW_RTN_NO_MEMORY;
        }

        /* send DMTF CLP command mbx and poll */
        if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
                ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
                rc = OCS_HW_RTN_ERROR;
        }

        ocs_dma_free(hw->os, &dma_cmd);
        ocs_dma_free(hw->os, &dma_resp);
        return rc;
}

/**
 * @brief Callback argument structure for the DMTF CLP commands.
 */
typedef struct ocs_hw_clp_cb_arg_s {
        ocs_hw_dmtf_clp_cb_t cb;
        ocs_dma_t *dma_resp;
        int32_t status;
        uint32_t opts;
        void *arg;
} ocs_hw_clp_cb_arg_t;

/**
 * @brief Execute the DMTF CLP command.
 *
 * @param hw Hardware context.
 * @param dma_cmd DMA buffer containing the CLP command.
 * @param dma_resp DMA buffer that will contain the response (if successful).
 * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
 * @param cb Callback function.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or a non-zero error code on failure.
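 *
 * @par Example
 * A minimal polling sketch; the buffer sizes and CLP string follow
 * ocs_hw_get_linkcfg_lancer() above, and error handling is elided:
 * @code
 * ocs_dma_t dma_cmd, dma_resp;
 * const char *clp = "show / OEMELX_LinkConfig";
 * ocs_hw_rtn_e rc;
 *
 * ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(clp) + 1, 4096);
 * ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096);
 * ocs_memset(dma_cmd.virt, 0, ocs_strlen(clp) + 1);
 * ocs_memcpy(dma_cmd.virt, clp, ocs_strlen(clp));
 *
 * rc = ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp,
 *                               OCS_CMD_POLL, NULL, NULL);
 *
 * ocs_dma_free(hw->os, &dma_cmd);
 * ocs_dma_free(hw->os, &dma_resp);
 * @endcode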
6654 */ 6655 static ocs_hw_rtn_e 6656 ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg) 6657 { 6658 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 6659 ocs_hw_clp_cb_arg_t *cb_arg; 6660 uint8_t *mbxdata; 6661 6662 /* allocate DMA for mailbox */ 6663 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 6664 if (mbxdata == NULL) { 6665 ocs_log_err(hw->os, "failed to malloc mbox\n"); 6666 return OCS_HW_RTN_NO_MEMORY; 6667 } 6668 6669 /* allocate memory for callback argument */ 6670 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT); 6671 if (cb_arg == NULL) { 6672 ocs_log_err(hw->os, "failed to malloc cb_arg"); 6673 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 6674 return OCS_HW_RTN_NO_MEMORY; 6675 } 6676 6677 cb_arg->cb = cb; 6678 cb_arg->arg = arg; 6679 cb_arg->dma_resp = dma_resp; 6680 cb_arg->opts = opts; 6681 6682 /* Send the HW command */ 6683 if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 6684 dma_cmd, dma_resp)) { 6685 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg); 6686 6687 if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) { 6688 /* if we're polling, copy response and invoke callback to 6689 * parse result */ 6690 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE); 6691 ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg); 6692 6693 /* set rc to resulting or "parsed" status */ 6694 rc = cb_arg->status; 6695 } 6696 6697 /* if failed, or polling, free memory here */ 6698 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) { 6699 if (rc != OCS_HW_RTN_SUCCESS) { 6700 ocs_log_test(hw->os, "ocs_hw_command failed\n"); 6701 } 6702 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 6703 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 6704 } 6705 } else { 6706 ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n"); 6707 rc = OCS_HW_RTN_ERROR; 6708 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 6709 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 6710 } 6711 6712 return rc; 6713 } 6714 6715 /** 6716 * @brief Called when the DMTF CLP command completes. 6717 * 6718 * @param hw Hardware context. 6719 * @param status Status field from the mbox completion. 6720 * @param mqe Mailbox response structure. 6721 * @param arg Pointer to a callback argument. 6722 * 6723 * @return None. 
 *
 */
static void
ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
        int32_t cb_status = 0;
        sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
        sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
        ocs_hw_clp_cb_arg_t *cb_arg = arg;
        uint32_t result_len = 0;
        int32_t stat_len;
        char stat_str[8];

        /* there are several status codes here, check them all and condense
         * into a single callback status
         */
        if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
                ocs_log_debug(hw->os, "status=x%x/x%x/x%x addl=x%x clp=x%x detail=x%x\n",
                              status,
                              mbox_rsp->hdr.status,
                              clp_rsp->hdr.status,
                              clp_rsp->hdr.additional_status,
                              clp_rsp->clp_status,
                              clp_rsp->clp_detailed_status);
                if (status) {
                        cb_status = status;
                } else if (mbox_rsp->hdr.status) {
                        cb_status = mbox_rsp->hdr.status;
                } else {
                        cb_status = clp_rsp->clp_status;
                }
        } else {
                result_len = clp_rsp->resp_length;
        }

        if (cb_status) {
                goto ocs_hw_cb_dmtf_clp_done;
        }

        if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
                ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
                             cb_arg->dma_resp->size, result_len);
                cb_status = -1;
                goto ocs_hw_cb_dmtf_clp_done;
        }

        /* parse CLP response to get status */
        stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
                                             sizeof(stat_str),
                                             cb_arg->dma_resp->virt,
                                             result_len);

        if (stat_len <= 0) {
                ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
                cb_status = -1;
                goto ocs_hw_cb_dmtf_clp_done;
        }

        if (ocs_strcmp(stat_str, "0") != 0) {
                ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
                cb_status = -1;
                goto ocs_hw_cb_dmtf_clp_done;
        }

ocs_hw_cb_dmtf_clp_done:

        /* save status in cb_arg for callers with NULL cb's + polling */
        cb_arg->status = cb_status;
        if (cb_arg->cb) {
                cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
        }
        /* if polling, caller will free memory */
        if (cb_arg->opts != OCS_CMD_POLL) {
                ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
                ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
        }
}

/**
 * @brief Parse the CLP result and get the value corresponding to the given
 * keyword.
 *
 * @param hw Hardware context.
 * @param keyword CLP keyword for which the value is returned.
 * @param value Location to which the resulting value is copied.
 * @param value_len Length of the value parameter.
 * @param resp Pointer to the response buffer that is searched
 * for the keyword and value.
 * @param resp_len Length of response buffer passed in.
 *
 * @return Returns the number of bytes written to the value
 * buffer on success, or a negative value on failure.
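 *
 * @par Example
 * A minimal sketch of parsing a CLP response; the response text is
 * illustrative:
 * @code
 * const char *resp = "status=0\r\nretdata=ELX_2x16G\r\n";
 * char value[16];
 * int32_t len;
 *
 * len = ocs_hw_clp_resp_get_value(hw, "retdata", value, sizeof(value),
 *                                 resp, ocs_strlen(resp));
 * @endcode
 * On success, len is positive and value holds "ELX_2x16G".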
 */
static int32_t
ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
{
        char *start = NULL;
        char *end = NULL;

        /* look for specified keyword in string */
        start = ocs_strstr(resp, keyword);
        if (start == NULL) {
                ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
                             keyword);
                return -1;
        }

        /* now look for '=' and go one past */
        start = ocs_strchr(start, '=');
        if (start == NULL) {
                ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
                             keyword);
                return -1;
        }
        start++;

        /* \r\n terminates value */
        end = ocs_strstr(start, "\r\n");
        if (end == NULL) {
                ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
                             keyword);
                return -1;
        }

        /* make sure given result array is big enough */
        if ((end - start + 1) > value_len) {
                ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
                             value_len, (end-start));
                return -1;
        }

        ocs_strncpy(value, start, (end - start));
        value[end-start] = '\0';
        return (end-start+1);
}

/**
 * @brief Cause chip to enter an unrecoverable error state.
 *
 * @par Description
 * Cause chip to enter an unrecoverable error state. This is
 * used when detecting unexpected FW behavior so that the FW can be
 * halted from the driver as soon as the error is detected.
 *
 * @param hw Hardware context.
 * @param dump Generate dump as part of reset.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 *
 */
ocs_hw_rtn_e
ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
{
        ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

        if (sli_raise_ue(&hw->sli, dump) != 0) {
                rc = OCS_HW_RTN_ERROR;
        } else {
                if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
                        hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
                }
        }

        return rc;
}

/**
 * @brief Called when the OBJECT_GET command completes.
 *
 * @par Description
 * Get the number of bytes actually written out of the response, free the mailbox
 * that was malloc'd by ocs_hw_dump_get(), then call the callback
 * and pass the status and bytes read.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is
 * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>.
 *
 * @return Returns 0.
6905 */ 6906 static int32_t 6907 ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 6908 { 6909 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; 6910 sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed; 6911 ocs_hw_dump_get_cb_arg_t *cb_arg = arg; 6912 uint32_t bytes_read; 6913 uint8_t eof; 6914 6915 bytes_read = rd_obj_rsp->actual_read_length; 6916 eof = rd_obj_rsp->eof; 6917 6918 if (cb_arg) { 6919 if (cb_arg->cb) { 6920 if ((status == 0) && mbox_rsp->hdr.status) { 6921 status = mbox_rsp->hdr.status; 6922 } 6923 cb_arg->cb(status, bytes_read, eof, cb_arg->arg); 6924 } 6925 6926 ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE); 6927 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t)); 6928 } 6929 6930 return 0; 6931 } 6932 6933 /** 6934 * @brief Read a dump image to the host. 6935 * 6936 * @par Description 6937 * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a 6938 * dump image chunk, then sends the command with the ocs_hw_command(). On completion, 6939 * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox 6940 * and signal the caller that the read has completed. 6941 * 6942 * @param hw Hardware context. 6943 * @param dma DMA structure to transfer the dump chunk into. 6944 * @param size Size of the dump chunk. 6945 * @param offset Offset, in bytes, from the beginning of the dump. 6946 * @param cb Pointer to a callback function that is called when the command completes. 6947 * The callback function prototype is 6948 * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>. 6949 * @param arg Pointer to be passed to the callback function. 6950 * 6951 * @return Returns 0 on success, or a non-zero value on failure. 6952 */ 6953 ocs_hw_rtn_e 6954 ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg) 6955 { 6956 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 6957 uint8_t *mbxdata; 6958 ocs_hw_dump_get_cb_arg_t *cb_arg; 6959 uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? 
OCS_CMD_NOWAIT : OCS_CMD_POLL);

        if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
                ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
                return OCS_HW_RTN_ERROR;
        }

        if (1 != sli_dump_is_present(&hw->sli)) {
                ocs_log_test(hw->os, "No dump is present\n");
                return OCS_HW_RTN_ERROR;
        }

        if (1 == sli_reset_required(&hw->sli)) {
                ocs_log_test(hw->os, "device reset required\n");
                return OCS_HW_RTN_ERROR;
        }

        mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
        if (mbxdata == NULL) {
                ocs_log_err(hw->os, "failed to malloc mbox\n");
                return OCS_HW_RTN_NO_MEMORY;
        }

        cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT);
        if (cb_arg == NULL) {
                ocs_log_err(hw->os, "failed to malloc cb_arg\n");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                return OCS_HW_RTN_NO_MEMORY;
        }

        cb_arg->cb = cb;
        cb_arg->arg = arg;
        cb_arg->mbox_cmd = mbxdata;

        if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
                                       size, offset, "/dbg/dump.bin", dma)) {
                rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg);
                if (rc == 0 && opts == OCS_CMD_POLL) {
                        ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
                        rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg);
                }
        }

        if (rc != OCS_HW_RTN_SUCCESS) {
                ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
        }

        return rc;
}

/**
 * @brief Called when the OBJECT_DELETE command completes.
 *
 * @par Description
 * Free the mailbox that was malloc'd
 * by ocs_hw_dump_clear(), then call the callback and pass the status.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is <tt>void cb(int32_t status, void *arg)</tt>.
 *
 * @return Returns 0.
 */
static int32_t
ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
        ocs_hw_dump_clear_cb_arg_t *cb_arg = arg;
        sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;

        if (cb_arg) {
                if (cb_arg->cb) {
                        if ((status == 0) && mbox_rsp->hdr.status) {
                                status = mbox_rsp->hdr.status;
                        }
                        cb_arg->cb(status, cb_arg->arg);
                }

                ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
                ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
        }

        return 0;
}

/**
 * @brief Clear a dump image from the device.
 *
 * @par Description
 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear
 * the dump, then sends the command with ocs_hw_command(). On completion,
 * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox
 * and to signal the caller that the clear has completed.
 *
 * @param hw Hardware context.
 * @param cb Pointer to a callback function that is called when the command completes.
 * The callback function prototype is
 * <tt>void cb(int32_t status, void *arg)</tt>.
 * @param arg Pointer to be passed to the callback function.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg)
{
        ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
        uint8_t *mbxdata;
        ocs_hw_dump_clear_cb_arg_t *cb_arg;
        uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);

        if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
                ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
                return OCS_HW_RTN_ERROR;
        }

        mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
        if (mbxdata == NULL) {
                ocs_log_err(hw->os, "failed to malloc mbox\n");
                return OCS_HW_RTN_NO_MEMORY;
        }

        cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT);
        if (cb_arg == NULL) {
                ocs_log_err(hw->os, "failed to malloc cb_arg\n");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                return OCS_HW_RTN_NO_MEMORY;
        }

        cb_arg->cb = cb;
        cb_arg->arg = arg;
        cb_arg->mbox_cmd = mbxdata;

        if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
                                         "/dbg/dump.bin")) {
                rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg);
                if (rc == 0 && opts == OCS_CMD_POLL) {
                        ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
                        rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg);
                }
        }

        if (rc != OCS_HW_RTN_SUCCESS) {
                ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n");
                ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
                ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
        }

        return rc;
}

typedef struct ocs_hw_get_port_protocol_cb_arg_s {
        ocs_get_port_protocol_cb_t cb;
        void *arg;
        uint32_t pci_func;
        ocs_dma_t payload;
} ocs_hw_get_port_protocol_cb_arg_t;

/**
 * @brief Called for the completion of ocs_hw_get_port_protocol() for a
 * user request.
 *
 * @param hw Hardware context.
 * @param status The status from the MQE.
 * @param mqe Pointer to mailbox command buffer.
 * @param arg Pointer to a callback argument.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
7129 */ 7130 static int32_t 7131 ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status, 7132 uint8_t *mqe, void *arg) 7133 { 7134 ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg; 7135 ocs_dma_t *payload = &(cb_arg->payload); 7136 sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt; 7137 ocs_hw_port_protocol_e port_protocol; 7138 int num_descriptors; 7139 sli4_resource_descriptor_v1_t *desc_p; 7140 sli4_pcie_resource_descriptor_v1_t *pcie_desc_p; 7141 int i; 7142 7143 port_protocol = OCS_HW_PORT_PROTOCOL_OTHER; 7144 7145 num_descriptors = response->desc_count; 7146 desc_p = (sli4_resource_descriptor_v1_t *)response->desc; 7147 for (i=0; i<num_descriptors; i++) { 7148 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) { 7149 pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p; 7150 if (pcie_desc_p->pf_number == cb_arg->pci_func) { 7151 switch(pcie_desc_p->pf_type) { 7152 case 0x02: 7153 port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI; 7154 break; 7155 case 0x04: 7156 port_protocol = OCS_HW_PORT_PROTOCOL_FCOE; 7157 break; 7158 case 0x10: 7159 port_protocol = OCS_HW_PORT_PROTOCOL_FC; 7160 break; 7161 default: 7162 port_protocol = OCS_HW_PORT_PROTOCOL_OTHER; 7163 break; 7164 } 7165 } 7166 } 7167 7168 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length); 7169 } 7170 7171 if (cb_arg->cb) { 7172 cb_arg->cb(status, port_protocol, cb_arg->arg); 7173 } 7174 7175 ocs_dma_free(hw->os, &cb_arg->payload); 7176 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t)); 7177 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 7178 7179 return 0; 7180 } 7181 7182 /** 7183 * @ingroup io 7184 * @brief Get the current port protocol. 7185 * @par Description 7186 * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox. When the 7187 * command completes the provided mgmt callback function is 7188 * called. 7189 * 7190 * @param hw Hardware context. 7191 * @param pci_func PCI function to query for current protocol. 7192 * @param cb Callback function to be called when the command completes. 7193 * @param ul_arg An argument that is passed to the callback function. 7194 * 7195 * @return 7196 * - OCS_HW_RTN_SUCCESS on success. 7197 * - OCS_HW_RTN_NO_MEMORY if a malloc fails. 7198 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command 7199 * context. 7200 * - OCS_HW_RTN_ERROR on any other error. 
7201 */ 7202 ocs_hw_rtn_e 7203 ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func, 7204 ocs_get_port_protocol_cb_t cb, void* ul_arg) 7205 { 7206 uint8_t *mbxdata; 7207 ocs_hw_get_port_protocol_cb_arg_t *cb_arg; 7208 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 7209 7210 /* Only supported on Skyhawk */ 7211 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { 7212 return OCS_HW_RTN_ERROR; 7213 } 7214 7215 /* mbxdata holds the header of the command */ 7216 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 7217 if (mbxdata == NULL) { 7218 ocs_log_err(hw->os, "failed to malloc mbox\n"); 7219 return OCS_HW_RTN_NO_MEMORY; 7220 } 7221 7222 /* cb_arg holds the data that will be passed to the callback on completion */ 7223 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT); 7224 if (cb_arg == NULL) { 7225 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 7226 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7227 return OCS_HW_RTN_NO_MEMORY; 7228 } 7229 7230 cb_arg->cb = cb; 7231 cb_arg->arg = ul_arg; 7232 cb_arg->pci_func = pci_func; 7233 7234 /* dma_mem holds the non-embedded portion */ 7235 if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) { 7236 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); 7237 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7238 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t)); 7239 return OCS_HW_RTN_NO_MEMORY; 7240 } 7241 7242 if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) { 7243 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg); 7244 } 7245 7246 if (rc != OCS_HW_RTN_SUCCESS) { 7247 ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n"); 7248 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7249 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t)); 7250 ocs_dma_free(hw->os, &cb_arg->payload); 7251 } 7252 7253 return rc; 7254 7255 } 7256 7257 typedef struct ocs_hw_set_port_protocol_cb_arg_s { 7258 ocs_set_port_protocol_cb_t cb; 7259 void *arg; 7260 ocs_dma_t payload; 7261 uint32_t new_protocol; 7262 uint32_t pci_func; 7263 } ocs_hw_set_port_protocol_cb_arg_t; 7264 7265 /** 7266 * @brief Called for the completion of set_port_profile for a 7267 * user request. 7268 * 7269 * @par Description 7270 * This is the second of two callbacks for the set_port_protocol 7271 * function. The set operation is a read-modify-write. This 7272 * callback is called when the write (SET_PROFILE_CONFIG) 7273 * completes. 7274 * 7275 * @param hw Hardware context. 7276 * @param status The status from the MQE. 7277 * @param mqe Pointer to mailbox command buffer. 7278 * @param arg Pointer to a callback argument. 7279 * 7280 * @return 0 on success, non-zero otherwise 7281 */ 7282 static int32_t 7283 ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 7284 { 7285 ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg; 7286 7287 if (cb_arg->cb) { 7288 cb_arg->cb( status, cb_arg->arg); 7289 } 7290 7291 ocs_dma_free(hw->os, &(cb_arg->payload)); 7292 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 7293 ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t)); 7294 7295 return 0; 7296 } 7297 7298 /** 7299 * @brief Called for the completion of set_port_profile for a 7300 * user request. 7301 * 7302 * @par Description 7303 * This is the first of two callbacks for the set_port_protocol 7304 * function. The set operation is a read-modify-write. This 7305 * callback is called when the read completes 7306 * (GET_PROFILE_CONFG). 
It will update the resource
 * descriptors, then queue the write (SET_PROFILE_CONFIG).
 *
 * On entry there are three memory areas that were allocated by
 * ocs_hw_set_port_protocol(). If a failure is detected in this
 * function those need to be freed. If this function succeeds
 * it allocates three more areas.
 *
 * @param hw Hardware context.
 * @param status The status from the MQE.
 * @param mqe Pointer to mailbox command buffer.
 * @param arg Pointer to a callback argument.
 *
 * @return Returns 0 on success, or a non-zero value otherwise.
 */
static int32_t
ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
	ocs_dma_t *payload = &(cb_arg->payload);
	sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
	int num_descriptors;
	sli4_resource_descriptor_v1_t *desc_p;
	sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
	int i;
	ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg;
	ocs_hw_port_protocol_e new_protocol;
	uint8_t *dst;
	sli4_isap_resouce_descriptor_v1_t *isap_desc_p;
	uint8_t *mbxdata;
	int pci_descriptor_count;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	int num_fcoe_ports = 0;
	int num_iscsi_ports = 0;

	new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol;

	num_descriptors = response->desc_count;

	/* Count PCI descriptors */
	pci_descriptor_count = 0;
	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
	for (i = 0; i < num_descriptors; i++) {
		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
			++pci_descriptor_count;
		}
		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
	}

	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		/* free the three areas allocated by ocs_hw_set_port_protocol() */
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* cb_arg holds the data that will be passed to the callback on completion */
	new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
	if (new_cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		/* free the three areas allocated by ocs_hw_set_port_protocol() */
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
		return OCS_HW_RTN_NO_MEMORY;
	}

	new_cb_arg->cb = cb_arg->cb;
	new_cb_arg->arg = cb_arg->arg;

	/* Allocate memory for the descriptors we're going to send. This is
	 * one for each PCI descriptor plus one ISAP descriptor.
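	 * For example, if the GET_PROFILE_CONFIG response carried four PCIe
	 * descriptors, the allocation below would cover the request header,
	 * four sli4_pcie_resource_descriptor_v1_t entries, and one
	 * sli4_isap_resouce_descriptor_v1_t (the count of four is illustrative).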
	 */
	if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) +
			(pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) +
			sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) {
		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
		/* free the three areas allocated by ocs_hw_set_port_protocol() */
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
		return OCS_HW_RTN_NO_MEMORY;
	}

	sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
			&new_cb_arg->payload,
			0, pci_descriptor_count + 1, 1);

	/* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */
	dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc);

	/* Loop over all descriptors. If the descriptor is a PCIe descriptor, copy it
	 * to the SET_PROFILE_CONFIG command to be written back. If it's the descriptor
	 * that we're trying to change, also set its pf_type.
	 */
	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
	for (i = 0; i < num_descriptors; i++) {
		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
			pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
			if (pcie_desc_p->pf_number == cb_arg->pci_func) {
				/* This is the PCIe descriptor for this OCS instance.
				 * Update it with the new pf_type */
				switch(new_protocol) {
				case OCS_HW_PORT_PROTOCOL_FC:
					pcie_desc_p->pf_type = SLI4_PROTOCOL_FC;
					break;
				case OCS_HW_PORT_PROTOCOL_FCOE:
					pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE;
					break;
				case OCS_HW_PORT_PROTOCOL_ISCSI:
					pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI;
					break;
				default:
					pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT;
					break;
				}
			}

			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) {
				++num_fcoe_ports;
			}
			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) {
				++num_iscsi_ports;
			}
			ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t));
			dst += sizeof(sli4_pcie_resource_descriptor_v1_t);
		}

		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
	}

	/* Create an ISAP resource descriptor */
	isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst;
	isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP;
	isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t);
	if (num_iscsi_ports > 0) {
		isap_desc_p->iscsi_tgt = 1;
		isap_desc_p->iscsi_ini = 1;
		isap_desc_p->iscsi_dif = 1;
	}
	if (num_fcoe_ports > 0) {
		isap_desc_p->fcoe_tgt = 1;
		isap_desc_p->fcoe_ini = 1;
		isap_desc_p->fcoe_dif = 1;
	}

	/* At this point we're done with the memory allocated by ocs_hw_set_port_protocol() */
	ocs_dma_free(hw->os, &cb_arg->payload);
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));

	/* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */
	rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg);
	if (rc) {
		ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n");
		/* Call the upper level callback to report a failure */
		if (new_cb_arg->cb) {
			new_cb_arg->cb(rc, new_cb_arg->arg);
		}

		/* Free the memory allocated by this function */
		ocs_dma_free(hw->os, &new_cb_arg->payload);
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
	}

	return rc;
}

/**
 * @ingroup io
 * @brief Set the port protocol.
 * @par Description
 * Setting the port protocol is a read-modify-write operation.
 * This function submits a GET_PROFILE_CONFIG command to read
 * the current settings. The callback function will modify the
 * settings and issue the write.
 *
 * On successful completion this function will have allocated
 * two regular memory areas and one dma area which will need to
 * get freed later in the callbacks.
 *
 * @param hw Hardware context.
 * @param new_protocol New protocol to use.
 * @param pci_func PCI function to configure.
 * @param cb Callback function to be called when the command completes.
 * @param ul_arg An argument that is passed to the callback function.
 *
 * @return
 * - OCS_HW_RTN_SUCCESS on success.
 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
 * context.
 * - OCS_HW_RTN_ERROR on any other error.
 */
ocs_hw_rtn_e
ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol,
		uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg)
{
	uint8_t *mbxdata;
	ocs_hw_set_port_protocol_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;

	/* Only supported on Skyhawk */
	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
		return OCS_HW_RTN_ERROR;
	}

	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* cb_arg holds the data that will be passed to the callback on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = ul_arg;
	cb_arg->new_protocol = new_protocol;
	cb_arg->pci_func = pci_func;

	/* dma_mem holds the non-embedded portion */
	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
		return OCS_HW_RTN_NO_MEMORY;
	}

	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		/* release the DMA payload before freeing the cb_arg that owns it */
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
	}

	return rc;
}

typedef struct ocs_hw_get_profile_list_cb_arg_s {
	ocs_get_profile_list_cb_t cb;
	void *arg;
	ocs_dma_t payload;
} ocs_hw_get_profile_list_cb_arg_t;

/**
 * @brief Called for the completion of get_profile_list for a
 * user request.
 * @par Description
 * This function is called when the COMMON_GET_PROFILE_LIST
 * mailbox completes. The response will be in
 * cb_arg->payload.virt. This function parses the
 * response and creates an ocs_hw_profile_list, then calls the
 * mgmt_cb callback function and passes that list to it.
 *
 * @param hw Hardware context.
 * @param status The status from the MQE.
 * @param mqe Pointer to mailbox command buffer.
 * @param arg Pointer to a callback argument.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static int32_t
ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_hw_profile_list_t *list;
	ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg;
	ocs_dma_t *payload = &(cb_arg->payload);
	sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt;
	int i;
	int num_descriptors;

	list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO);
	if (list == NULL) {
		/* out of memory; the user callback is skipped in this case */
		ocs_log_err(hw->os, "failed to malloc list\n");
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
		return -1;
	}
	list->num_descriptors = response->profile_descriptor_count;

	num_descriptors = list->num_descriptors;
	if (num_descriptors > OCS_HW_MAX_PROFILES) {
		num_descriptors = OCS_HW_MAX_PROFILES;
	}

	for (i = 0; i < num_descriptors; i++) {
		list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id;
		list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index;
		ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description);
	}

	if (cb_arg->cb) {
		cb_arg->cb(status, list, cb_arg->arg);
	} else {
		ocs_free(hw->os, list, sizeof(*list));
	}

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	ocs_dma_free(hw->os, &cb_arg->payload);
	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));

	return 0;
}

/**
 * @ingroup io
 * @brief Get a list of available profiles.
 * @par Description
 * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox. When the
 * command completes the provided mgmt callback function is
 * called.
 *
 * @param hw Hardware context.
 * @param cb Callback function to be called when the
 * command completes.
 * @param ul_arg An argument that is passed to the callback
 * function.
 *
 * @return
 * - OCS_HW_RTN_SUCCESS on success.
 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
 * context.
 * - OCS_HW_RTN_ERROR on any other error.
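 *
 * @par Example
 * A sketch of a completion handler; on a successful callback the handler
 * owns the list and must free it (the handler name is hypothetical, and
 * hw is assumed to have been passed as ul_arg):
 * @code
 * static void
 * profile_list_done(int32_t status, ocs_hw_profile_list_t *list, void *arg)
 * {
 *	ocs_hw_t *hw = arg;
 *
 *	// ... consume list->descriptors[0 .. list->num_descriptors - 1] ...
 *	ocs_free(hw->os, list, sizeof(*list));
 * }
 * @endcode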
7630 */ 7631 ocs_hw_rtn_e 7632 ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg) 7633 { 7634 uint8_t *mbxdata; 7635 ocs_hw_get_profile_list_cb_arg_t *cb_arg; 7636 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 7637 7638 /* Only supported on Skyhawk */ 7639 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { 7640 return OCS_HW_RTN_ERROR; 7641 } 7642 7643 /* mbxdata holds the header of the command */ 7644 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 7645 if (mbxdata == NULL) { 7646 ocs_log_err(hw->os, "failed to malloc mbox\n"); 7647 return OCS_HW_RTN_NO_MEMORY; 7648 } 7649 7650 /* cb_arg holds the data that will be passed to the callback on completion */ 7651 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT); 7652 if (cb_arg == NULL) { 7653 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 7654 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7655 return OCS_HW_RTN_NO_MEMORY; 7656 } 7657 7658 cb_arg->cb = cb; 7659 cb_arg->arg = ul_arg; 7660 7661 /* dma_mem holds the non-embedded portion */ 7662 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) { 7663 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); 7664 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7665 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); 7666 return OCS_HW_RTN_NO_MEMORY; 7667 } 7668 7669 if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) { 7670 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg); 7671 } 7672 7673 if (rc != OCS_HW_RTN_SUCCESS) { 7674 ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n"); 7675 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7676 ocs_dma_free(hw->os, &cb_arg->payload); 7677 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); 7678 } 7679 7680 return rc; 7681 } 7682 7683 typedef struct ocs_hw_get_active_profile_cb_arg_s { 7684 ocs_get_active_profile_cb_t cb; 7685 void *arg; 7686 } ocs_hw_get_active_profile_cb_arg_t; 7687 7688 /** 7689 * @brief Called for the completion of get_active_profile for a 7690 * user request. 7691 * 7692 * @param hw Hardware context. 7693 * @param status The status from the MQE 7694 * @param mqe Pointer to mailbox command buffer. 7695 * @param arg Pointer to a callback argument. 7696 * 7697 * @return Returns 0 on success, or a non-zero value on failure. 7698 */ 7699 static int32_t 7700 ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 7701 { 7702 ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg; 7703 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; 7704 sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed; 7705 uint32_t active_profile; 7706 7707 active_profile = response->active_profile_id; 7708 7709 if (cb_arg->cb) { 7710 cb_arg->cb(status, active_profile, cb_arg->arg); 7711 } 7712 7713 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 7714 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t)); 7715 7716 return 0; 7717 } 7718 7719 /** 7720 * @ingroup io 7721 * @brief Get the currently active profile. 7722 * @par Description 7723 * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the 7724 * command completes the provided mgmt callback function is 7725 * called. 7726 * 7727 * @param hw Hardware context. 7728 * @param cb Callback function to be called when the 7729 * command completes. 
7730 * @param ul_arg An argument that is passed to the callback 7731 * function. 7732 * 7733 * @return 7734 * - OCS_HW_RTN_SUCCESS on success. 7735 * - OCS_HW_RTN_NO_MEMORY if a malloc fails. 7736 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command 7737 * context. 7738 * - OCS_HW_RTN_ERROR on any other error. 7739 */ 7740 int32_t 7741 ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg) 7742 { 7743 uint8_t *mbxdata; 7744 ocs_hw_get_active_profile_cb_arg_t *cb_arg; 7745 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 7746 7747 /* Only supported on Skyhawk */ 7748 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { 7749 return OCS_HW_RTN_ERROR; 7750 } 7751 7752 /* mbxdata holds the header of the command */ 7753 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 7754 if (mbxdata == NULL) { 7755 ocs_log_err(hw->os, "failed to malloc mbox\n"); 7756 return OCS_HW_RTN_NO_MEMORY; 7757 } 7758 7759 /* cb_arg holds the data that will be passed to the callback on completion */ 7760 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT); 7761 if (cb_arg == NULL) { 7762 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 7763 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7764 return OCS_HW_RTN_NO_MEMORY; 7765 } 7766 7767 cb_arg->cb = cb; 7768 cb_arg->arg = ul_arg; 7769 7770 if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) { 7771 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg); 7772 } 7773 7774 if (rc != OCS_HW_RTN_SUCCESS) { 7775 ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n"); 7776 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7777 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t)); 7778 } 7779 7780 return rc; 7781 } 7782 7783 typedef struct ocs_hw_get_nvparms_cb_arg_s { 7784 ocs_get_nvparms_cb_t cb; 7785 void *arg; 7786 } ocs_hw_get_nvparms_cb_arg_t; 7787 7788 /** 7789 * @brief Called for the completion of get_nvparms for a 7790 * user request. 7791 * 7792 * @param hw Hardware context. 7793 * @param status The status from the MQE. 7794 * @param mqe Pointer to mailbox command buffer. 7795 * @param arg Pointer to a callback argument. 7796 * 7797 * @return 0 on success, non-zero otherwise 7798 */ 7799 static int32_t 7800 ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 7801 { 7802 ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg; 7803 sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe; 7804 7805 if (cb_arg->cb) { 7806 cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa, 7807 mbox_rsp->preferred_d_id, cb_arg->arg); 7808 } 7809 7810 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 7811 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t)); 7812 7813 return 0; 7814 } 7815 7816 /** 7817 * @ingroup io 7818 * @brief Read non-volatile parms. 7819 * @par Description 7820 * Issues a SLI-4 READ_NVPARMS mailbox. When the 7821 * command completes the provided mgmt callback function is 7822 * called. 7823 * 7824 * @param hw Hardware context. 7825 * @param cb Callback function to be called when the 7826 * command completes. 7827 * @param ul_arg An argument that is passed to the callback 7828 * function. 7829 * 7830 * @return 7831 * - OCS_HW_RTN_SUCCESS on success. 7832 * - OCS_HW_RTN_NO_MEMORY if a malloc fails. 7833 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command 7834 * context. 7835 * - OCS_HW_RTN_ERROR on any other error. 
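 *
 * @par Example
 * A sketch of a completion handler (the handler name is hypothetical):
 * @code
 * static void
 * nvparms_done(int32_t status, uint8_t *wwpn, uint8_t *wwnn,
 *	uint8_t hard_alpa, uint32_t preferred_d_id, void *arg)
 * {
 *	// wwpn and wwnn point at the 8-byte, big-endian names returned
 *	// by READ_NVPARMS
 * }
 *
 * ocs_hw_get_nvparms(hw, nvparms_done, ctx);
 * @endcode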
7836 */ 7837 int32_t 7838 ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg) 7839 { 7840 uint8_t *mbxdata; 7841 ocs_hw_get_nvparms_cb_arg_t *cb_arg; 7842 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 7843 7844 /* mbxdata holds the header of the command */ 7845 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 7846 if (mbxdata == NULL) { 7847 ocs_log_err(hw->os, "failed to malloc mbox\n"); 7848 return OCS_HW_RTN_NO_MEMORY; 7849 } 7850 7851 /* cb_arg holds the data that will be passed to the callback on completion */ 7852 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT); 7853 if (cb_arg == NULL) { 7854 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 7855 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7856 return OCS_HW_RTN_NO_MEMORY; 7857 } 7858 7859 cb_arg->cb = cb; 7860 cb_arg->arg = ul_arg; 7861 7862 if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) { 7863 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg); 7864 } 7865 7866 if (rc != OCS_HW_RTN_SUCCESS) { 7867 ocs_log_test(hw->os, "READ_NVPARMS failed\n"); 7868 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7869 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t)); 7870 } 7871 7872 return rc; 7873 } 7874 7875 typedef struct ocs_hw_set_nvparms_cb_arg_s { 7876 ocs_set_nvparms_cb_t cb; 7877 void *arg; 7878 } ocs_hw_set_nvparms_cb_arg_t; 7879 7880 /** 7881 * @brief Called for the completion of set_nvparms for a 7882 * user request. 7883 * 7884 * @param hw Hardware context. 7885 * @param status The status from the MQE. 7886 * @param mqe Pointer to mailbox command buffer. 7887 * @param arg Pointer to a callback argument. 7888 * 7889 * @return Returns 0 on success, or a non-zero value on failure. 7890 */ 7891 static int32_t 7892 ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 7893 { 7894 ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg; 7895 7896 if (cb_arg->cb) { 7897 cb_arg->cb(status, cb_arg->arg); 7898 } 7899 7900 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 7901 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t)); 7902 7903 return 0; 7904 } 7905 7906 /** 7907 * @ingroup io 7908 * @brief Write non-volatile parms. 7909 * @par Description 7910 * Issues a SLI-4 WRITE_NVPARMS mailbox. When the 7911 * command completes the provided mgmt callback function is 7912 * called. 7913 * 7914 * @param hw Hardware context. 7915 * @param cb Callback function to be called when the 7916 * command completes. 7917 * @param wwpn Port's WWPN in big-endian order, or NULL to use default. 7918 * @param wwnn Port's WWNN in big-endian order, or NULL to use default. 7919 * @param hard_alpa A hard AL_PA address setting used during loop 7920 * initialization. If no hard AL_PA is required, set to 0. 7921 * @param preferred_d_id A preferred D_ID address setting 7922 * that may be overridden with the CONFIG_LINK mailbox command. 7923 * If there is no preference, set to 0. 7924 * @param ul_arg An argument that is passed to the callback 7925 * function. 7926 * 7927 * @return 7928 * - OCS_HW_RTN_SUCCESS on success. 7929 * - OCS_HW_RTN_NO_MEMORY if a malloc fails. 7930 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command 7931 * context. 7932 * - OCS_HW_RTN_ERROR on any other error. 
7933 */ 7934 int32_t 7935 ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn, 7936 uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg) 7937 { 7938 uint8_t *mbxdata; 7939 ocs_hw_set_nvparms_cb_arg_t *cb_arg; 7940 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 7941 7942 /* mbxdata holds the header of the command */ 7943 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 7944 if (mbxdata == NULL) { 7945 ocs_log_err(hw->os, "failed to malloc mbox\n"); 7946 return OCS_HW_RTN_NO_MEMORY; 7947 } 7948 7949 /* cb_arg holds the data that will be passed to the callback on completion */ 7950 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT); 7951 if (cb_arg == NULL) { 7952 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 7953 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7954 return OCS_HW_RTN_NO_MEMORY; 7955 } 7956 7957 cb_arg->cb = cb; 7958 cb_arg->arg = ul_arg; 7959 7960 if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) { 7961 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg); 7962 } 7963 7964 if (rc != OCS_HW_RTN_SUCCESS) { 7965 ocs_log_test(hw->os, "SET_NVPARMS failed\n"); 7966 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7967 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t)); 7968 } 7969 7970 return rc; 7971 } 7972 7973 /** 7974 * @brief Called to obtain the count for the specified type. 7975 * 7976 * @param hw Hardware context. 7977 * @param io_count_type IO count type (inuse, free, wait_free). 7978 * 7979 * @return Returns the number of IOs on the specified list type. 7980 */ 7981 uint32_t 7982 ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type) 7983 { 7984 ocs_hw_io_t *io = NULL; 7985 uint32_t count = 0; 7986 7987 ocs_lock(&hw->io_lock); 7988 7989 switch (io_count_type) { 7990 case OCS_HW_IO_INUSE_COUNT : 7991 ocs_list_foreach(&hw->io_inuse, io) { 7992 count++; 7993 } 7994 break; 7995 case OCS_HW_IO_FREE_COUNT : 7996 ocs_list_foreach(&hw->io_free, io) { 7997 count++; 7998 } 7999 break; 8000 case OCS_HW_IO_WAIT_FREE_COUNT : 8001 ocs_list_foreach(&hw->io_wait_free, io) { 8002 count++; 8003 } 8004 break; 8005 case OCS_HW_IO_PORT_OWNED_COUNT: 8006 ocs_list_foreach(&hw->io_port_owned, io) { 8007 count++; 8008 } 8009 break; 8010 case OCS_HW_IO_N_TOTAL_IO_COUNT : 8011 count = hw->config.n_io; 8012 break; 8013 } 8014 8015 ocs_unlock(&hw->io_lock); 8016 8017 return count; 8018 } 8019 8020 /** 8021 * @brief Called to obtain the count of produced RQs. 8022 * 8023 * @param hw Hardware context. 8024 * 8025 * @return Returns the number of RQs produced. 8026 */ 8027 uint32_t 8028 ocs_hw_get_rqes_produced_count(ocs_hw_t *hw) 8029 { 8030 uint32_t count = 0; 8031 uint32_t i; 8032 uint32_t j; 8033 8034 for (i = 0; i < hw->hw_rq_count; i++) { 8035 hw_rq_t *rq = hw->hw_rq[i]; 8036 if (rq->rq_tracker != NULL) { 8037 for (j = 0; j < rq->entry_count; j++) { 8038 if (rq->rq_tracker[j] != NULL) { 8039 count++; 8040 } 8041 } 8042 } 8043 } 8044 8045 return count; 8046 } 8047 8048 typedef struct ocs_hw_set_active_profile_cb_arg_s { 8049 ocs_set_active_profile_cb_t cb; 8050 void *arg; 8051 } ocs_hw_set_active_profile_cb_arg_t; 8052 8053 /** 8054 * @brief Called for the completion of set_active_profile for a 8055 * user request. 8056 * 8057 * @param hw Hardware context. 8058 * @param status The status from the MQE 8059 * @param mqe Pointer to mailbox command buffer. 8060 * @param arg Pointer to a callback argument. 
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static int32_t
ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg;

	if (cb_arg->cb) {
		cb_arg->cb(status, cb_arg->arg);
	}

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));

	return 0;
}

/**
 * @ingroup io
 * @brief Set the currently active profile.
 * @par Description
 * Issues a SLI-4 COMMON_SET_ACTIVE_PROFILE mailbox. When the
 * command completes the provided mgmt callback function is
 * called.
 *
 * @param hw Hardware context.
 * @param profile_id Profile ID to activate.
 * @param cb Callback function to be called when the command completes.
 * @param ul_arg An argument that is passed to the callback function.
 *
 * @return
 * - OCS_HW_RTN_SUCCESS on success.
 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
 * context.
 * - OCS_HW_RTN_ERROR on any other error.
 */
int32_t
ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg)
{
	uint8_t *mbxdata;
	ocs_hw_set_active_profile_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	/* Only supported on Skyhawk */
	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
		return OCS_HW_RTN_ERROR;
	}

	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* cb_arg holds the data that will be passed to the callback on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = ul_arg;

	if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) {
		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
	}

	return rc;
}

/*
 * Private functions
 */

/**
 * @brief Update the queue hash with the ID and index.
 *
 * @param hash Pointer to hash table.
 * @param id ID that was created.
 * @param index The index into the hash object.
 */
static void
ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
{
	uint32_t hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);

	/*
	 * Since the hash is always bigger than the number of queues, we
	 * never have to worry about an infinite loop.
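	 * As an illustration, if OCS_HW_Q_HASH_SIZE were 16, queue IDs 5 and
	 * 21 would both map to slot 5; the second add would probe linearly
	 * and claim slot 6 (the size and IDs here are examples only).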
8161 */ 8162 while(hash[hash_index].in_use) { 8163 hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1); 8164 } 8165 8166 /* not used, claim the entry */ 8167 hash[hash_index].id = id; 8168 hash[hash_index].in_use = 1; 8169 hash[hash_index].index = index; 8170 } 8171 8172 /** 8173 * @brief Find index given queue ID. 8174 * 8175 * @param hash Pointer to hash table. 8176 * @param id ID to find. 8177 * 8178 * @return Returns the index into the HW cq array or -1 if not found. 8179 */ 8180 int32_t 8181 ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id) 8182 { 8183 int32_t rc = -1; 8184 int32_t index = id & (OCS_HW_Q_HASH_SIZE - 1); 8185 8186 /* 8187 * Since the hash is always bigger than the maximum number of Qs, then we 8188 * never have to worry about an infinite loop. We will always find an 8189 * unused entry. 8190 */ 8191 do { 8192 if (hash[index].in_use && 8193 hash[index].id == id) { 8194 rc = hash[index].index; 8195 } else { 8196 index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1); 8197 } 8198 } while(rc == -1 && hash[index].in_use); 8199 8200 return rc; 8201 } 8202 8203 static int32_t 8204 ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain) 8205 { 8206 int32_t rc = OCS_HW_RTN_ERROR; 8207 uint16_t fcfi = UINT16_MAX; 8208 8209 if ((hw == NULL) || (domain == NULL)) { 8210 ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n", 8211 hw, domain); 8212 return OCS_HW_RTN_ERROR; 8213 } 8214 8215 fcfi = domain->fcf_indicator; 8216 8217 if (fcfi < SLI4_MAX_FCFI) { 8218 uint16_t fcf_index = UINT16_MAX; 8219 8220 ocs_log_debug(hw->os, "adding domain %p @ %#x\n", 8221 domain, fcfi); 8222 hw->domains[fcfi] = domain; 8223 8224 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ 8225 if (hw->workaround.override_fcfi) { 8226 if (hw->first_domain_idx < 0) { 8227 hw->first_domain_idx = fcfi; 8228 } 8229 } 8230 8231 fcf_index = domain->fcf; 8232 8233 if (fcf_index < SLI4_MAX_FCF_INDEX) { 8234 ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n", 8235 fcf_index, fcfi); 8236 hw->fcf_index_fcfi[fcf_index] = fcfi; 8237 rc = OCS_HW_RTN_SUCCESS; 8238 } else { 8239 ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n", 8240 fcf_index, SLI4_MAX_FCF_INDEX); 8241 hw->domains[fcfi] = NULL; 8242 } 8243 } else { 8244 ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n", 8245 fcfi, SLI4_MAX_FCFI); 8246 } 8247 8248 return rc; 8249 } 8250 8251 static int32_t 8252 ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain) 8253 { 8254 int32_t rc = OCS_HW_RTN_ERROR; 8255 uint16_t fcfi = UINT16_MAX; 8256 8257 if ((hw == NULL) || (domain == NULL)) { 8258 ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n", 8259 hw, domain); 8260 return OCS_HW_RTN_ERROR; 8261 } 8262 8263 fcfi = domain->fcf_indicator; 8264 8265 if (fcfi < SLI4_MAX_FCFI) { 8266 uint16_t fcf_index = UINT16_MAX; 8267 8268 ocs_log_debug(hw->os, "deleting domain %p @ %#x\n", 8269 domain, fcfi); 8270 8271 if (domain != hw->domains[fcfi]) { 8272 ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n", 8273 domain, hw->domains[fcfi]); 8274 return OCS_HW_RTN_ERROR; 8275 } 8276 8277 hw->domains[fcfi] = NULL; 8278 8279 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ 8280 if (hw->workaround.override_fcfi) { 8281 if (hw->first_domain_idx == fcfi) { 8282 hw->first_domain_idx = -1; 8283 } 8284 } 8285 8286 fcf_index = domain->fcf; 8287 8288 if (fcf_index < SLI4_MAX_FCF_INDEX) { 8289 if (hw->fcf_index_fcfi[fcf_index] == fcfi) { 8290 hw->fcf_index_fcfi[fcf_index] = 0; 8291 rc = OCS_HW_RTN_SUCCESS; 8292 } else { 8293 ocs_log_test(hw->os, "indexed FCFI %#x 
doesn't match provided %#x @ %d\n",
					hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index);
			}
		} else {
			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
				fcf_index, SLI4_MAX_FCF_INDEX);
		}
	} else {
		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
			fcfi, SLI4_MAX_FCFI);
	}

	return rc;
}

ocs_domain_t *
ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi)
{

	if (hw == NULL) {
		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
		return NULL;
	}

	if (fcfi < SLI4_MAX_FCFI) {
		return hw->domains[fcfi];
	} else {
		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
			fcfi, SLI4_MAX_FCFI);
		return NULL;
	}
}

static ocs_domain_t *
ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index)
{

	if (hw == NULL) {
		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
		return NULL;
	}

	if (fcf_index < SLI4_MAX_FCF_INDEX) {
		return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]);
	} else {
		ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
			fcf_index, SLI4_MAX_FCF_INDEX);
		return NULL;
	}
}

/**
 * @brief Quarantine an IO by taking a reference count and adding it to the
 * quarantine list. When the IO is popped from the list, the
 * count is released and the IO MAY be freed, depending on whether
 * it is still referenced elsewhere.
 *
 * @n @b Note: BZ 160124 - If this is a target write or an initiator read using
 * DIF, then we must add the XRI to a quarantine list until we receive
 * 4 more completions of this same type.
 *
 * @param hw Hardware context.
 * @param wq Pointer to the WQ associated with the IO object to quarantine.
 * @param io Pointer to the io object to quarantine.
 */
static void
ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io)
{
	ocs_quarantine_info_t *q_info = &wq->quarantine_info;
	uint32_t index;
	ocs_hw_io_t *free_io = NULL;

	/* return if the QX bit was clear */
	if (!io->quarantine) {
		return;
	}

	/* increment the IO refcount to prevent it from being freed before the quarantine is over */
	if (ocs_ref_get_unless_zero(&io->ref) == 0) {
		/* command no longer active */
		ocs_log_debug(hw ? hw->os : NULL,
			"io not active xri=0x%x tag=0x%x\n",
			io->indicator, io->reqtag);
		return;
	}

	sli_queue_lock(wq->queue);
	index = q_info->quarantine_index;
	free_io = q_info->quarantine_ios[index];
	q_info->quarantine_ios[index] = io;
	q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH;
	sli_queue_unlock(wq->queue);

	if (free_io != NULL) {
		ocs_ref_put(&free_io->ref); /* ocs_ref_get(): same function */
	}
}

/**
 * @brief Process entries on the given completion queue.
 *
 * @param hw Hardware context.
 * @param cq Pointer to the HW completion queue object.
 *
 * @return None.
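 *
 * @par Example
 * A sketch of how an EQ handler might drive this function; the queue
 * lookup shown is illustrative, not taken from this file:
 * @code
 * hw_cq_t *cq = hw->hw_cq[index];	// CQ identified from an EQ entry
 *
 * ocs_hw_cq_process(hw, cq);		// consume CQEs and dispatch by type
 * @endcode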
8398 */ 8399 void 8400 ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq) 8401 { 8402 uint8_t cqe[sizeof(sli4_mcqe_t)]; 8403 uint16_t rid = UINT16_MAX; 8404 sli4_qentry_e ctype; /* completion type */ 8405 int32_t status; 8406 uint32_t n_processed = 0; 8407 time_t tstart; 8408 time_t telapsed; 8409 8410 tstart = ocs_msectime(); 8411 8412 while (!sli_queue_read(&hw->sli, cq->queue, cqe)) { 8413 status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid); 8414 /* 8415 * The sign of status is significant. If status is: 8416 * == 0 : call completed correctly and the CQE indicated success 8417 * > 0 : call completed correctly and the CQE indicated an error 8418 * < 0 : call failed and no information is available about the CQE 8419 */ 8420 if (status < 0) { 8421 if (status == -2) { 8422 /* Notification that an entry was consumed, but not completed */ 8423 continue; 8424 } 8425 8426 break; 8427 } 8428 8429 switch (ctype) { 8430 case SLI_QENTRY_ASYNC: 8431 CPUTRACE("async"); 8432 sli_cqe_async(&hw->sli, cqe); 8433 break; 8434 case SLI_QENTRY_MQ: 8435 /* 8436 * Process MQ entry. Note there is no way to determine 8437 * the MQ_ID from the completion entry. 8438 */ 8439 CPUTRACE("mq"); 8440 ocs_hw_mq_process(hw, status, hw->mq); 8441 break; 8442 case SLI_QENTRY_OPT_WRITE_CMD: 8443 ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe); 8444 break; 8445 case SLI_QENTRY_OPT_WRITE_DATA: 8446 ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe); 8447 break; 8448 case SLI_QENTRY_WQ: 8449 CPUTRACE("wq"); 8450 ocs_hw_wq_process(hw, cq, cqe, status, rid); 8451 break; 8452 case SLI_QENTRY_WQ_RELEASE: { 8453 uint32_t wq_id = rid; 8454 int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id); 8455 8456 if (unlikely(index < 0)) { 8457 ocs_log_err(hw->os, "unknown idx=%#x rid=%#x\n", 8458 index, rid); 8459 break; 8460 } 8461 8462 hw_wq_t *wq = hw->hw_wq[index]; 8463 8464 /* Submit any HW IOs that are on the WQ pending list */ 8465 hw_wq_submit_pending(wq, wq->wqec_set_count); 8466 8467 break; 8468 } 8469 8470 case SLI_QENTRY_RQ: 8471 CPUTRACE("rq"); 8472 ocs_hw_rqpair_process_rq(hw, cq, cqe); 8473 break; 8474 case SLI_QENTRY_XABT: { 8475 CPUTRACE("xabt"); 8476 ocs_hw_xabt_process(hw, cq, cqe, rid); 8477 break; 8478 } 8479 default: 8480 ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid); 8481 break; 8482 } 8483 8484 n_processed++; 8485 if (n_processed == cq->queue->proc_limit) { 8486 break; 8487 } 8488 8489 if (cq->queue->n_posted >= (cq->queue->posted_limit)) { 8490 sli_queue_arm(&hw->sli, cq->queue, FALSE); 8491 } 8492 } 8493 8494 sli_queue_arm(&hw->sli, cq->queue, TRUE); 8495 8496 if (n_processed > cq->queue->max_num_processed) { 8497 cq->queue->max_num_processed = n_processed; 8498 } 8499 telapsed = ocs_msectime() - tstart; 8500 if (telapsed > cq->queue->max_process_time) { 8501 cq->queue->max_process_time = telapsed; 8502 } 8503 } 8504 8505 /** 8506 * @brief Process WQ completion queue entries. 8507 * 8508 * @param hw Hardware context. 8509 * @param cq Pointer to the HW completion queue object. 8510 * @param cqe Pointer to WQ completion queue. 8511 * @param status Completion status. 8512 * @param rid Resource ID (IO tag). 
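 *
 * @par Example
 * Dispatch relies on the request tag carried in the WQE; a sketch of the
 * registration side (the callback name and argument are hypothetical):
 * @code
 * hw_wq_callback_t *wqcb = ocs_hw_reqtag_alloc(hw, my_wqe_done, my_arg);
 *
 * // the WQE is submitted carrying wqcb's tag, and this function later
 * // invokes my_wqe_done(my_arg, cqe, status)
 * @endcode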
8513 * 8514 * @return none 8515 */ 8516 void 8517 ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid) 8518 { 8519 hw_wq_callback_t *wqcb; 8520 8521 ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id, 8522 ((cq->queue->index - 1) & (cq->queue->length - 1))); 8523 8524 if(rid == OCS_HW_REQUE_XRI_REGTAG) { 8525 if(status) { 8526 ocs_log_err(hw->os, "reque xri failed, status = %d \n", status); 8527 } 8528 return; 8529 } 8530 8531 wqcb = ocs_hw_reqtag_get_instance(hw, rid); 8532 if (wqcb == NULL) { 8533 ocs_log_err(hw->os, "invalid request tag: x%x\n", rid); 8534 return; 8535 } 8536 8537 if (wqcb->callback == NULL) { 8538 ocs_log_err(hw->os, "wqcb callback is NULL\n"); 8539 return; 8540 } 8541 8542 (*wqcb->callback)(wqcb->arg, cqe, status); 8543 } 8544 8545 /** 8546 * @brief Process WQ completions for IO requests 8547 * 8548 * @param arg Generic callback argument 8549 * @param cqe Pointer to completion queue entry 8550 * @param status Completion status 8551 * 8552 * @par Description 8553 * @n @b Note: Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized 8554 * in ocs_hw_setup_io(), and don't need to be returned to the hw->wq_reqtag_pool. 8555 * 8556 * @return None. 8557 */ 8558 static void 8559 ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status) 8560 { 8561 ocs_hw_io_t *io = arg; 8562 ocs_hw_t *hw = io->hw; 8563 sli4_fc_wcqe_t *wcqe = (void *)cqe; 8564 uint32_t len = 0; 8565 uint32_t ext = 0; 8566 uint8_t out_of_order_axr_cmd = 0; 8567 uint8_t out_of_order_axr_data = 0; 8568 uint8_t lock_taken = 0; 8569 #if defined(OCS_DISC_SPIN_DELAY) 8570 uint32_t delay = 0; 8571 char prop_buf[32]; 8572 #endif 8573 8574 /* 8575 * For the primary IO, this will also be used for the 8576 * response. So it is important to only set/clear this 8577 * flag on the first data phase of the IO because 8578 * subsequent phases will be done on the secondary XRI. 8579 */ 8580 if (io->quarantine && io->quarantine_first_phase) { 8581 io->quarantine = (wcqe->qx == 1); 8582 ocs_hw_io_quarantine(hw, io->wq, io); 8583 } 8584 io->quarantine_first_phase = FALSE; 8585 8586 /* BZ 161832 - free secondary HW IO */ 8587 if (io->sec_hio != NULL && 8588 io->sec_hio->quarantine) { 8589 /* 8590 * If the quarantine flag is set on the 8591 * IO, then set it on the secondary IO 8592 * based on the quarantine XRI (QX) bit 8593 * sent by the FW. 8594 */ 8595 io->sec_hio->quarantine = (wcqe->qx == 1); 8596 /* use the primary io->wq because it is not set on the secondary IO. 
		 */
		ocs_hw_io_quarantine(hw, io->wq, io->sec_hio);
	}

	ocs_hw_remove_io_timed_wqe(hw, io);

	/* clear xbusy flag if WCQE[XB] is clear */
	if (io->xbusy && wcqe->xb == 0) {
		io->xbusy = FALSE;
	}

	/* get extended CQE status */
	switch (io->type) {
	case OCS_HW_BLS_ACC:
	case OCS_HW_BLS_ACC_SID:
		break;
	case OCS_HW_ELS_REQ:
		sli_fc_els_did(&hw->sli, cqe, &ext);
		len = sli_fc_response_length(&hw->sli, cqe);
		break;
	case OCS_HW_ELS_RSP:
	case OCS_HW_ELS_RSP_SID:
	case OCS_HW_FC_CT_RSP:
		break;
	case OCS_HW_FC_CT:
		len = sli_fc_response_length(&hw->sli, cqe);
		break;
	case OCS_HW_IO_TARGET_WRITE:
		len = sli_fc_io_length(&hw->sli, cqe);
#if defined(OCS_DISC_SPIN_DELAY)
		if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
			delay = ocs_strtoul(prop_buf, 0, 0);
			ocs_udelay(delay);
		}
#endif
		break;
	case OCS_HW_IO_TARGET_READ:
		len = sli_fc_io_length(&hw->sli, cqe);
		/*
		 * if_type == 2 seems to return 0 "total length placed" on
		 * FCP_TSEND64_WQE completions. If this appears to happen,
		 * use the CTIO data transfer length instead.
		 */
		if (hw->workaround.retain_tsend_io_length && !len && !status) {
			len = io->length;
		}

		break;
	case OCS_HW_IO_TARGET_RSP:
		if(io->is_port_owned) {
			ocs_lock(&io->axr_lock);
			lock_taken = 1;
			if(io->axr_buf->call_axr_cmd) {
				out_of_order_axr_cmd = 1;
			}
			if(io->axr_buf->call_axr_data) {
				out_of_order_axr_data = 1;
			}
		}
		break;
	case OCS_HW_IO_INITIATOR_READ:
		len = sli_fc_io_length(&hw->sli, cqe);
		break;
	case OCS_HW_IO_INITIATOR_WRITE:
		len = sli_fc_io_length(&hw->sli, cqe);
		break;
	case OCS_HW_IO_INITIATOR_NODATA:
		break;
	case OCS_HW_IO_DNRX_REQUEUE:
		/* release the count for re-posting the buffer */
		//ocs_hw_io_free(hw, io);
		break;
	default:
		ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n",
			io->type, io->indicator);
		break;
	}
	if (status) {
		ext = sli_fc_ext_status(&hw->sli, cqe);
		/* Emulate IAAB=0 for initiator WQEs only; i.e. automatically
		 * abort exchange if an error occurred and exchange is still busy.
		 */
		if (hw->config.i_only_aab &&
		    (ocs_hw_iotype_is_originator(io->type)) &&
		    (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) {
			ocs_hw_rtn_e rc;

			ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
				io->indicator, io->reqtag);
			/*
			 * Because the initiator will not issue another IO phase, it is OK to issue the
			 * callback on the abort completion, but for consistency with the target, wait for the
			 * XRI_ABORTED CQE to issue the IO callback.
			 */
			rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL);

			if (rc == OCS_HW_RTN_SUCCESS) {
				/* latch status to return after abort is complete */
				io->status_saved = 1;
				io->saved_status = status;
				io->saved_ext = ext;
				io->saved_len = len;
				goto exit_ocs_hw_wq_process_io;
			} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
				/*
				 * Already being aborted by someone else (ABTS
				 * perhaps). Just fall through and return original
				 * error.
8704 */ 8705 ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n", 8706 io->indicator, io->reqtag); 8707 8708 } else { 8709 /* Failed to abort for some other reason, log error */ 8710 ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n", 8711 io->indicator, io->reqtag, rc); 8712 } 8713 } 8714 8715 /* 8716 * If we're not an originator IO, and XB is set, then issue abort for the IO from within the HW 8717 */ 8718 if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) { 8719 ocs_hw_rtn_e rc; 8720 8721 ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag); 8722 8723 /* 8724 * Because targets may send a response when the IO completes using the same XRI, we must 8725 * wait for the XRI_ABORTED CQE to issue the IO callback 8726 */ 8727 rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL); 8728 if (rc == OCS_HW_RTN_SUCCESS) { 8729 /* latch status to return after abort is complete */ 8730 io->status_saved = 1; 8731 io->saved_status = status; 8732 io->saved_ext = ext; 8733 io->saved_len = len; 8734 goto exit_ocs_hw_wq_process_io; 8735 } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) { 8736 /* 8737 * Already being aborted by someone else (ABTS 8738 * perhaps). Just fall through and return original 8739 * error. 8740 */ 8741 ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n", 8742 io->indicator, io->reqtag); 8743 8744 } else { 8745 /* Failed to abort for some other reason, log error */ 8746 ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n", 8747 io->indicator, io->reqtag, rc); 8748 } 8749 } 8750 } 8751 /* BZ 161832 - free secondary HW IO */ 8752 if (io->sec_hio != NULL) { 8753 ocs_hw_io_free(hw, io->sec_hio); 8754 io->sec_hio = NULL; 8755 } 8756 8757 if (io->done != NULL) { 8758 ocs_hw_done_t done = io->done; 8759 void *arg = io->arg; 8760 8761 io->done = NULL; 8762 8763 if (io->status_saved) { 8764 /* use latched status if exists */ 8765 status = io->saved_status; 8766 len = io->saved_len; 8767 ext = io->saved_ext; 8768 io->status_saved = 0; 8769 } 8770 8771 /* Restore default SGL */ 8772 ocs_hw_io_restore_sgl(hw, io); 8773 done(io, io->rnode, len, status, ext, arg); 8774 } 8775 8776 if(out_of_order_axr_cmd) { 8777 /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */ 8778 if (hw->config.bounce) { 8779 fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt; 8780 uint32_t s_id = fc_be24toh(hdr->s_id); 8781 uint32_t d_id = fc_be24toh(hdr->d_id); 8782 uint32_t ox_id = ocs_be16toh(hdr->ox_id); 8783 if (hw->callback.bounce != NULL) { 8784 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id); 8785 } 8786 }else { 8787 hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq); 8788 } 8789 8790 if(out_of_order_axr_data) { 8791 /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */ 8792 if (hw->config.bounce) { 8793 fc_header_t *hdr = io->axr_buf->seq.header->dma.virt; 8794 uint32_t s_id = fc_be24toh(hdr->s_id); 8795 uint32_t d_id = fc_be24toh(hdr->d_id); 8796 uint32_t ox_id = ocs_be16toh(hdr->ox_id); 8797 if (hw->callback.bounce != NULL) { 8798 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id); 8799 } 8800 }else { 8801 hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq); 8802 } 8803 } 8804 } 8805 8806 exit_ocs_hw_wq_process_io: 8807 if(lock_taken) { 8808 ocs_unlock(&io->axr_lock); 8809 } 8810 } 8811 8812 /** 8813 * @brief Process WQ completions for abort requests. 
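 *
 * @par Description
 * Issues the latched original-IO callback when no XRI_ABORTED CQE is
 * expected (Local Reject/No XRI), invokes any registered abort_done
 * callback, clears the abort-in-progress flag, and releases the abort
 * WQE's request tag.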
 *
 * @param arg Generic callback argument.
 * @param cqe Pointer to completion queue entry.
 * @param status Completion status.
 *
 * @return None.
 */
static void
ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
{
	ocs_hw_io_t *io = arg;
	ocs_hw_t *hw = io->hw;
	uint32_t ext = 0;
	uint32_t len = 0;
	hw_wq_callback_t *wqcb;

	/*
	 * For IOs that were aborted internally, we may need to issue the callback here depending
	 * on whether a XRI_ABORTED CQE is expected or not. If the status is Local Reject/No XRI, then
	 * issue the callback now.
	 */
	ext = sli_fc_ext_status(&hw->sli, cqe);
	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
	    ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
	    io->done != NULL) {
		ocs_hw_done_t done = io->done;
		void *arg = io->arg;

		io->done = NULL;

		/*
		 * Use latched status as this is always saved for an internal abort
		 *
		 * Note: We won't have both a done and abort_done function, so don't worry about
		 * clobbering the len, status and ext fields.
		 */
		status = io->saved_status;
		len = io->saved_len;
		ext = io->saved_ext;
		io->status_saved = 0;
		done(io, io->rnode, len, status, ext, arg);
	}

	if (io->abort_done != NULL) {
		ocs_hw_done_t done = io->abort_done;
		void *arg = io->abort_arg;

		io->abort_done = NULL;

		done(io, io->rnode, len, status, ext, arg);
	}
	ocs_lock(&hw->io_abort_lock);
	/* clear abort bit to indicate abort is complete */
	io->abort_in_progress = 0;
	ocs_unlock(&hw->io_abort_lock);

	/* Free the WQ callback */
	ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
	wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
	ocs_hw_reqtag_free(hw, wqcb);

	/*
	 * Call ocs_hw_io_free() because this releases the WQ reservation as
	 * well as doing the refcount put. Don't duplicate the code here.
	 */
	(void)ocs_hw_io_free(hw, io);
}

/**
 * @brief Process XABT completions
 *
 * @param hw Hardware context.
 * @param cq Pointer to the HW completion queue object.
 * @param cqe Pointer to completion queue entry.
 * @param rid Resource ID (IO tag).
 *
 * @return None.
 */
void
ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid)
{
	/* search IOs wait free list */
	ocs_hw_io_t *io = NULL;

	io = ocs_hw_io_lookup(hw, rid);

	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
			((cq->queue->index - 1) & (cq->queue->length - 1)));
	if (io == NULL) {
		/* IO lookup failure should never happen */
		ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid);
		return;
	}

	if (!io->xbusy) {
		ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
	} else {
		/* mark IO as no longer busy */
		io->xbusy = FALSE;
	}

	if (io->is_port_owned) {
		ocs_lock(&hw->io_lock);
		/* Take a reference so that the callback below does not free the IO before the requeue */
		ocs_ref_get(&io->ref);
		ocs_unlock(&hw->io_lock);
	}

	/* For IOs that were aborted internally, we need to issue any pending callback here.
	 */
	if (io->done != NULL) {
		ocs_hw_done_t done = io->done;
		void *arg = io->arg;

		/* Use the latched status, as it is always saved for an internal abort */
		int32_t status = io->saved_status;
		uint32_t len = io->saved_len;
		uint32_t ext = io->saved_ext;

		io->done = NULL;
		io->status_saved = 0;

		done(io, io->rnode, len, status, ext, arg);
	}

	/* Check to see if this is a port owned XRI */
	if (io->is_port_owned) {
		ocs_lock(&hw->io_lock);
		ocs_hw_reque_xri(hw, io);
		ocs_unlock(&hw->io_lock);
		/* The requeue-XRI completion is not handled, so free the IO now */
		ocs_hw_io_free(hw, io);
		return;
	}

	ocs_lock(&hw->io_lock);
	if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) {
		/* if on the wait_free list, the caller has already freed the IO;
		 * remove it from the wait_free list and add it to the free list.
		 * if on the in-use list, it is already marked as no longer busy;
		 * just leave it there and wait for the caller to free it.
		 */
		if (io->state == OCS_HW_IO_STATE_WAIT_FREE) {
			io->state = OCS_HW_IO_STATE_FREE;
			ocs_list_remove(&hw->io_wait_free, io);
			ocs_hw_io_free_move_correct_list(hw, io);
		}
	}
	ocs_unlock(&hw->io_lock);
}

/**
 * @brief Adjust the number of WQs and CQs within the HW.
 *
 * @par Description
 * Calculates the number of WQs and associated CQs needed in the HW based on
 * the number of IOs. Calculates the starting CQ index for each WQ, RQ and
 * MQ.
 *
 * @param hw Hardware context allocated by the caller.
 */
static void
ocs_hw_adjust_wqs(ocs_hw_t *hw)
{
	uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ);
	uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ];
	uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ];

	/*
	 * Possibly adjust the size of the WQs so that the CQ is twice as
	 * big as the WQ to allow for 2 completions per IO. This allows us to
	 * handle multi-phase as well as aborts.
	 */
	if (max_cq_entries < max_wq_entries * 2) {
		max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2;
	}

	/*
	 * Calculate the number of WQs to use based on the number of IOs.
	 *
	 * Note: We need to reserve room for aborts, which must be sent down
	 *       the same WQ as the IO. So we allocate enough WQ space to
	 *       handle 2 times the number of IOs. Half of the space will be
	 *       used for normal IOs and the other half is reserved for aborts.
	 */
	hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries;

	/*
	 * For performance reasons, it is best to use a minimum of 4 WQs
	 * for BE3 and Skyhawk.
	 */
	if (hw->config.n_wq < 4 &&
	    SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
		hw->config.n_wq = 4;
	}

	/*
	 * For dual-chute support, we need to have at least one WQ per chute.
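	 *
	 * Worked example (illustrative values only): with 4096-entry CQs, the
	 * WQ size above is first trimmed to 2048 entries; 8192 IOs then need
	 * ((8192 * 2) + 2047) / 2048 = 8 WQs, and the per-chute division at
	 * the end of this function leaves 4 WQs per chute on a dual-chute
	 * setup.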
	 */
	if (hw->config.n_wq < 2 &&
	    ocs_hw_get_num_chutes(hw) > 1) {
		hw->config.n_wq = 2;
	}

	/* make sure we haven't exceeded the max supported in the HW */
	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
		hw->config.n_wq = OCS_HW_MAX_NUM_WQ;
	}

	/* make sure we haven't exceeded the chip maximum */
	if (hw->config.n_wq > max_wq_num) {
		hw->config.n_wq = max_wq_num;
	}

	/*
	 * When the queue topology string defines multiple chutes, divide the
	 * WQs evenly among the chutes.
	 */
	hw->config.n_wq /= ocs_hw_get_num_chutes(hw);
}

static int32_t
ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size)
{
	ocs_command_ctx_t *ctx = NULL;

	ocs_lock(&hw->cmd_lock);
	if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) {
		ocs_log_err(hw->os, "XXX no command context?!?\n");
		ocs_unlock(&hw->cmd_lock);
		return -1;
	}

	hw->cmd_head_count--;

	/* Post any pending requests */
	ocs_hw_cmd_submit_pending(hw);

	ocs_unlock(&hw->cmd_lock);

	if (ctx->cb) {
		if (ctx->buf) {
			ocs_memcpy(ctx->buf, mqe, size);
		}
		ctx->cb(hw, status, ctx->buf, ctx->arg);
	}

	ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t));
	ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t));

	return 0;
}

/**
 * @brief Process entries on the given mailbox queue.
 *
 * @param hw Hardware context.
 * @param status CQE status.
 * @param mq Pointer to the mailbox queue object.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static int32_t
ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq)
{
	uint8_t mqe[SLI4_BMBX_SIZE];

	if (!sli_queue_read(&hw->sli, mq, mqe)) {
		ocs_hw_command_process(hw, status, mqe, mq->size);
	}

	return 0;
}

/**
 * @brief Read a FCF table entry.
 *
 * @param hw Hardware context.
 * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first
 * read and the next_index field from the FCOE_READ_FCF_TABLE command
 * for subsequent reads.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
 */
static ocs_hw_rtn_e
ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index)
{
	uint8_t *buf = NULL;
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;

	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
	if (!buf) {
		ocs_log_err(hw->os, "no buffer for command\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem,
					index)) {
		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n");
		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
	}

	return rc;
}

/**
 * @brief Callback function for the FCOE_READ_FCF_TABLE command.
 *
 * @par Description
 * Only the mailbox command/results buffer is freed here. The DMA memory that
 * holds the table contents (hw->fcf_dmem) is persistent across reads and is
 * not freed by this callback.
 *
 * @param hw Hardware context.
 * @param status Hardware status.
 * @param mqe Pointer to the mailbox command/results buffer.
 * @param arg Pointer to the DMA memory structure.
9137 * 9138 * @return Returns 0 on success, or a non-zero value on failure. 9139 */ 9140 static int32_t 9141 ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 9142 { 9143 ocs_dma_t *dma = arg; 9144 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 9145 9146 if (status || hdr->status) { 9147 ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n", 9148 status, hdr->status); 9149 } else if (dma->virt) { 9150 sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt; 9151 9152 /* if FC or FCOE and FCF entry valid, process it */ 9153 if (read_fcf->fcf_entry.fc || 9154 (read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) { 9155 if (hw->callback.domain != NULL) { 9156 ocs_domain_record_t drec = {0}; 9157 9158 if (read_fcf->fcf_entry.fc) { 9159 /* 9160 * This is a pseudo FCF entry. Create a domain 9161 * record based on the read topology information 9162 */ 9163 drec.speed = hw->link.speed; 9164 drec.fc_id = hw->link.fc_id; 9165 drec.is_fc = TRUE; 9166 if (SLI_LINK_TOPO_LOOP == hw->link.topology) { 9167 drec.is_loop = TRUE; 9168 ocs_memcpy(drec.map.loop, hw->link.loop_map, 9169 sizeof(drec.map.loop)); 9170 } else if (SLI_LINK_TOPO_NPORT == hw->link.topology) { 9171 drec.is_nport = TRUE; 9172 } 9173 } else { 9174 drec.index = read_fcf->fcf_entry.fcf_index; 9175 drec.priority = read_fcf->fcf_entry.fip_priority; 9176 9177 /* copy address, wwn and vlan_bitmap */ 9178 ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address, 9179 sizeof(drec.address)); 9180 ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id, 9181 sizeof(drec.wwn)); 9182 ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap, 9183 sizeof(drec.map.vlan)); 9184 9185 drec.is_ethernet = TRUE; 9186 drec.is_nport = TRUE; 9187 } 9188 9189 hw->callback.domain(hw->args.domain, 9190 OCS_HW_DOMAIN_FOUND, 9191 &drec); 9192 } 9193 } else { 9194 /* if FCOE and FCF is not valid, ignore it */ 9195 ocs_log_test(hw->os, "ignore invalid FCF entry\n"); 9196 } 9197 9198 if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) { 9199 ocs_hw_read_fcf(hw, read_fcf->next_index); 9200 } 9201 } 9202 9203 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 9204 //ocs_dma_free(hw->os, dma); 9205 //ocs_free(hw->os, dma, sizeof(ocs_dma_t)); 9206 9207 return 0; 9208 } 9209 9210 /** 9211 * @brief Callback function for the SLI link events. 9212 * 9213 * @par Description 9214 * This function allocates memory which must be freed in its callback. 9215 * 9216 * @param ctx Hardware context pointer (that is, ocs_hw_t *). 9217 * @param e Event structure pointer (that is, sli4_link_event_t *). 9218 * 9219 * @return Returns 0 on success, or a non-zero value on failure. 
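 *
 * @note On loop topologies, the READ_TOPOLOGY mailbox buffer allocated here
 * is freed by __ocs_read_topology_cb().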
9220 */ 9221 static int32_t 9222 ocs_hw_cb_link(void *ctx, void *e) 9223 { 9224 ocs_hw_t *hw = ctx; 9225 sli4_link_event_t *event = e; 9226 ocs_domain_t *d = NULL; 9227 uint32_t i = 0; 9228 int32_t rc = OCS_HW_RTN_ERROR; 9229 ocs_t *ocs = hw->os; 9230 9231 ocs_hw_link_event_init(hw); 9232 9233 switch (event->status) { 9234 case SLI_LINK_STATUS_UP: 9235 9236 hw->link = *event; 9237 9238 if (SLI_LINK_TOPO_NPORT == event->topology) { 9239 device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed); 9240 ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST); 9241 } else if (SLI_LINK_TOPO_LOOP == event->topology) { 9242 uint8_t *buf = NULL; 9243 device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed); 9244 9245 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 9246 if (!buf) { 9247 ocs_log_err(hw->os, "no buffer for command\n"); 9248 break; 9249 } 9250 9251 if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) { 9252 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL); 9253 } 9254 9255 if (rc != OCS_HW_RTN_SUCCESS) { 9256 ocs_log_test(hw->os, "READ_TOPOLOGY failed\n"); 9257 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 9258 } 9259 } else { 9260 device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n", 9261 event->topology, event->speed); 9262 } 9263 break; 9264 case SLI_LINK_STATUS_DOWN: 9265 device_printf(ocs->dev, "Link Down\n"); 9266 9267 hw->link.status = event->status; 9268 9269 for (i = 0; i < SLI4_MAX_FCFI; i++) { 9270 d = hw->domains[i]; 9271 if (d != NULL && 9272 hw->callback.domain != NULL) { 9273 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d); 9274 } 9275 } 9276 break; 9277 default: 9278 ocs_log_test(hw->os, "unhandled link status %#x\n", event->status); 9279 break; 9280 } 9281 9282 return 0; 9283 } 9284 9285 static int32_t 9286 ocs_hw_cb_fip(void *ctx, void *e) 9287 { 9288 ocs_hw_t *hw = ctx; 9289 ocs_domain_t *domain = NULL; 9290 sli4_fip_event_t *event = e; 9291 9292 ocs_hw_assert(event); 9293 ocs_hw_assert(hw); 9294 9295 /* Find the associated domain object */ 9296 if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) { 9297 ocs_domain_t *d = NULL; 9298 uint32_t i = 0; 9299 9300 /* Clear VLINK is different from the other FIP events as it passes back 9301 * a VPI instead of a FCF index. 
Check all attached SLI ports for a 9302 * matching VPI */ 9303 for (i = 0; i < SLI4_MAX_FCFI; i++) { 9304 d = hw->domains[i]; 9305 if (d != NULL) { 9306 ocs_sport_t *sport = NULL; 9307 9308 ocs_list_foreach(&d->sport_list, sport) { 9309 if (sport->indicator == event->index) { 9310 domain = d; 9311 break; 9312 } 9313 } 9314 9315 if (domain != NULL) { 9316 break; 9317 } 9318 } 9319 } 9320 } else { 9321 domain = ocs_hw_domain_get_indexed(hw, event->index); 9322 } 9323 9324 switch (event->type) { 9325 case SLI4_FCOE_FIP_FCF_DISCOVERED: 9326 ocs_hw_read_fcf(hw, event->index); 9327 break; 9328 case SLI4_FCOE_FIP_FCF_DEAD: 9329 if (domain != NULL && 9330 hw->callback.domain != NULL) { 9331 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); 9332 } 9333 break; 9334 case SLI4_FCOE_FIP_FCF_CLEAR_VLINK: 9335 if (domain != NULL && 9336 hw->callback.domain != NULL) { 9337 /* 9338 * We will want to issue rediscover FCF when this domain is free'd in order 9339 * to invalidate the FCF table 9340 */ 9341 domain->req_rediscover_fcf = TRUE; 9342 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); 9343 } 9344 break; 9345 case SLI4_FCOE_FIP_FCF_MODIFIED: 9346 if (domain != NULL && 9347 hw->callback.domain != NULL) { 9348 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); 9349 } 9350 9351 ocs_hw_read_fcf(hw, event->index); 9352 break; 9353 default: 9354 ocs_log_test(hw->os, "unsupported event %#x\n", event->type); 9355 } 9356 9357 return 0; 9358 } 9359 9360 static int32_t 9361 ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 9362 { 9363 ocs_remote_node_t *rnode = arg; 9364 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 9365 ocs_hw_remote_node_event_e evt = 0; 9366 9367 if (status || hdr->status) { 9368 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, 9369 hdr->status); 9370 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1); 9371 rnode->attached = FALSE; 9372 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0); 9373 evt = OCS_HW_NODE_ATTACH_FAIL; 9374 } else { 9375 rnode->attached = TRUE; 9376 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1); 9377 evt = OCS_HW_NODE_ATTACH_OK; 9378 } 9379 9380 if (hw->callback.rnode != NULL) { 9381 hw->callback.rnode(hw->args.rnode, evt, rnode); 9382 } 9383 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 9384 9385 return 0; 9386 } 9387 9388 static int32_t 9389 ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 9390 { 9391 ocs_remote_node_t *rnode = arg; 9392 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 9393 ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL; 9394 int32_t rc = 0; 9395 9396 if (status || hdr->status) { 9397 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, 9398 hdr->status); 9399 9400 /* 9401 * In certain cases, a non-zero MQE status is OK (all must be true): 9402 * - node is attached 9403 * - if High Login Mode is enabled, node is part of a node group 9404 * - status is 0x1400 9405 */ 9406 if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) || 9407 (hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) { 9408 rc = -1; 9409 } 9410 } 9411 9412 if (rc == 0) { 9413 rnode->node_group = FALSE; 9414 rnode->attached = FALSE; 9415 9416 if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) { 9417 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0); 9418 } 9419 9420 evt = OCS_HW_NODE_FREE_OK; 9421 } 9422 9423 if (hw->callback.rnode != NULL) { 9424 
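		/* Notify the registered remote-node callback of the free result */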
		hw->callback.rnode(hw->args.rnode, evt, rnode);
	}

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);

	return rc;
}

static int32_t
ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
	ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL;
	int32_t rc = 0;
	uint32_t i;

	if (status || hdr->status) {
		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
			      hdr->status);
	} else {
		evt = OCS_HW_NODE_FREE_ALL_OK;
	}

	if (evt == OCS_HW_NODE_FREE_ALL_OK) {
		for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) {
			ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0);
		}

		if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) {
			ocs_log_test(hw->os, "FCOE_RPI free all failure\n");
			rc = -1;
		}
	}

	if (hw->callback.rnode != NULL) {
		hw->callback.rnode(hw->args.rnode, evt, NULL);
	}

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);

	return rc;
}

/**
 * @brief Initialize the pool of HW IO objects.
 *
 * @param hw Hardware context.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
 */
static ocs_hw_rtn_e
ocs_hw_setup_io(ocs_hw_t *hw)
{
	uint32_t i = 0;
	ocs_hw_io_t *io = NULL;
	uintptr_t xfer_virt = 0;
	uintptr_t xfer_phys = 0;
	uint32_t index;
	uint8_t new_alloc = TRUE;

	if (NULL == hw->io) {
		hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT);

		if (NULL == hw->io) {
			ocs_log_err(hw->os, "IO pointer memory allocation failed, %d IOs at size %zu\n",
				    hw->config.n_io,
				    sizeof(ocs_hw_io_t *));
			return OCS_HW_RTN_NO_MEMORY;
		}
		for (i = 0; i < hw->config.n_io; i++) {
			hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t),
					       OCS_M_ZERO | OCS_M_NOWAIT);
			if (hw->io[i] == NULL) {
				ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i);
				goto error;
			}
		}

		/* Create WQE buffs for IO */
		hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size,
					   OCS_M_ZERO | OCS_M_NOWAIT);
		if (NULL == hw->wqe_buffs) {
			/* free the IO objects allocated above, then the pointer array itself */
			while (i > 0) {
				i--;
				ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
			}
			ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
			hw->io = NULL;
			ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d IOs at size %zu\n",
				    __func__, hw->config.n_io, hw->sli.config.wqe_size);
			return OCS_HW_RTN_NO_MEMORY;
		}

	} else {
		/* re-use existing IOs, including SGLs */
		new_alloc = FALSE;
	}

	if (new_alloc) {
		if (ocs_dma_alloc(hw->os, &hw->xfer_rdy,
				  sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io,
				  4/*XXX what does this need to be?
*/)) { 9521 ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n"); 9522 return OCS_HW_RTN_NO_MEMORY; 9523 } 9524 } 9525 xfer_virt = (uintptr_t)hw->xfer_rdy.virt; 9526 xfer_phys = hw->xfer_rdy.phys; 9527 9528 for (i = 0; i < hw->config.n_io; i++) { 9529 hw_wq_callback_t *wqcb; 9530 9531 io = hw->io[i]; 9532 9533 /* initialize IO fields */ 9534 io->hw = hw; 9535 9536 /* Assign a WQE buff */ 9537 io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size]; 9538 9539 /* Allocate the request tag for this IO */ 9540 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io); 9541 if (wqcb == NULL) { 9542 ocs_log_err(hw->os, "can't allocate request tag\n"); 9543 return OCS_HW_RTN_NO_RESOURCES; 9544 } 9545 io->reqtag = wqcb->instance_index; 9546 9547 /* Now for the fields that are initialized on each free */ 9548 ocs_hw_init_free_io(io); 9549 9550 /* The XB flag isn't cleared on IO free, so initialize it to zero here */ 9551 io->xbusy = 0; 9552 9553 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) { 9554 ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i); 9555 return OCS_HW_RTN_NO_MEMORY; 9556 } 9557 9558 if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) { 9559 ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i); 9560 ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t)); 9561 return OCS_HW_RTN_NO_MEMORY; 9562 } 9563 io->def_sgl_count = hw->config.n_sgl; 9564 io->sgl = &io->def_sgl; 9565 io->sgl_count = io->def_sgl_count; 9566 9567 if (hw->xfer_rdy.size) { 9568 io->xfer_rdy.virt = (void *)xfer_virt; 9569 io->xfer_rdy.phys = xfer_phys; 9570 io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t); 9571 9572 xfer_virt += sizeof(fcp_xfer_rdy_iu_t); 9573 xfer_phys += sizeof(fcp_xfer_rdy_iu_t); 9574 } 9575 } 9576 9577 return OCS_HW_RTN_SUCCESS; 9578 error: 9579 for (i = 0; i < hw->config.n_io && hw->io[i]; i++) { 9580 ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t)); 9581 hw->io[i] = NULL; 9582 } 9583 9584 return OCS_HW_RTN_NO_MEMORY; 9585 } 9586 9587 static ocs_hw_rtn_e 9588 ocs_hw_init_io(ocs_hw_t *hw) 9589 { 9590 uint32_t i = 0, io_index = 0; 9591 uint32_t prereg = 0; 9592 ocs_hw_io_t *io = NULL; 9593 uint8_t cmd[SLI4_BMBX_SIZE]; 9594 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 9595 uint32_t nremaining; 9596 uint32_t n = 0; 9597 uint32_t sgls_per_request = 256; 9598 ocs_dma_t **sgls = NULL; 9599 ocs_dma_t reqbuf = { 0 }; 9600 9601 prereg = sli_get_sgl_preregister(&hw->sli); 9602 9603 if (prereg) { 9604 sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT); 9605 if (sgls == NULL) { 9606 ocs_log_err(hw->os, "ocs_malloc sgls failed\n"); 9607 return OCS_HW_RTN_NO_MEMORY; 9608 } 9609 9610 rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT); 9611 if (rc) { 9612 ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n"); 9613 ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request); 9614 return OCS_HW_RTN_NO_MEMORY; 9615 } 9616 } 9617 9618 io = hw->io[io_index]; 9619 for (nremaining = hw->config.n_io; nremaining; nremaining -= n) { 9620 if (prereg) { 9621 /* Copy address of SGL's into local sgls[] array, break out if the xri 9622 * is not contiguous. 
			 */
			for (n = 0; n < MIN(sgls_per_request, nremaining); n++) {
				/* Check that we have contiguous xri values */
				if (n > 0) {
					if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) {
						break;
					}
				}
				sgls[n] = hw->io[io_index + n]->sgl;
			}

			if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd),
							io->indicator, n, sgls, NULL, &reqbuf)) {
				if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) {
					rc = OCS_HW_RTN_ERROR;
					ocs_log_err(hw->os, "SGL post failed\n");
					break;
				}
			}
		} else {
			n = nremaining;
		}

		/* Add to tail if successful */
		for (i = 0; i < n; i++) {
			io->is_port_owned = 0;
			io->state = OCS_HW_IO_STATE_FREE;
			ocs_list_add_tail(&hw->io_free, io);
			io_index++;
			/* don't read past the end of the IO pointer array on the last entry */
			if (io_index < hw->config.n_io) {
				io = hw->io[io_index];
			}
		}
	}

	if (prereg) {
		ocs_dma_free(hw->os, &reqbuf);
		ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
	}

	return rc;
}

static int32_t
ocs_hw_flush(ocs_hw_t *hw)
{
	uint32_t i = 0;

	/* Process any remaining completions */
	for (i = 0; i < hw->eq_count; i++) {
		ocs_hw_process(hw, i, ~0);
	}

	return 0;
}

static int32_t
ocs_hw_command_cancel(ocs_hw_t *hw)
{

	ocs_lock(&hw->cmd_lock);

	/*
	 * Manually clean up remaining commands. Note: since this calls
	 * ocs_hw_command_process(), we'll also process the cmd_pending
	 * list, so no need to manually clean that out.
	 */
	while (!ocs_list_empty(&hw->cmd_head)) {
		uint8_t mqe[SLI4_BMBX_SIZE] = { 0 };
		ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head);

		ocs_log_test(hw->os, "hung command %08x\n",
			     NULL == ctx ? UINT32_MAX :
			     (NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf)));
		ocs_unlock(&hw->cmd_lock);
		ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE);
		ocs_lock(&hw->cmd_lock);
	}

	ocs_unlock(&hw->cmd_lock);

	return 0;
}

/**
 * @brief Find the IO given its indicator (XRI).
 *
 * @param hw Hardware context.
 * @param xri Indicator (XRI) to look for.
 *
 * @return Returns the IO if found; otherwise, NULL.
 */
ocs_hw_io_t *
ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri)
{
	uint32_t ioindex;
	ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0];
	return hw->io[ioindex];
}

/**
 * @brief Issue any pending callbacks for an IO and remove it from the timer and pending lists.
 *
 * @param hw Hardware context.
 * @param io Pointer to the IO to clean up.
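 *
 * @par Description
 * Called with the HW io_lock held; the lock is dropped and re-acquired
 * around each callback invocation.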
9726 */ 9727 static void 9728 ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io) 9729 { 9730 ocs_hw_done_t done = io->done; 9731 ocs_hw_done_t abort_done = io->abort_done; 9732 9733 /* first check active_wqe list and remove if there */ 9734 if (ocs_list_on_list(&io->wqe_link)) { 9735 ocs_list_remove(&hw->io_timed_wqe, io); 9736 } 9737 9738 /* Remove from WQ pending list */ 9739 if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) { 9740 ocs_list_remove(&io->wq->pending_list, io); 9741 } 9742 9743 if (io->done) { 9744 void *arg = io->arg; 9745 9746 io->done = NULL; 9747 ocs_unlock(&hw->io_lock); 9748 done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg); 9749 ocs_lock(&hw->io_lock); 9750 } 9751 9752 if (io->abort_done != NULL) { 9753 void *abort_arg = io->abort_arg; 9754 9755 io->abort_done = NULL; 9756 ocs_unlock(&hw->io_lock); 9757 abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg); 9758 ocs_lock(&hw->io_lock); 9759 } 9760 } 9761 9762 static int32_t 9763 ocs_hw_io_cancel(ocs_hw_t *hw) 9764 { 9765 ocs_hw_io_t *io = NULL; 9766 ocs_hw_io_t *tmp_io = NULL; 9767 uint32_t iters = 100; /* One second limit */ 9768 9769 /* 9770 * Manually clean up outstanding IO. 9771 * Only walk through list once: the backend will cleanup any IOs when done/abort_done is called. 9772 */ 9773 ocs_lock(&hw->io_lock); 9774 ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) { 9775 ocs_hw_done_t done = io->done; 9776 ocs_hw_done_t abort_done = io->abort_done; 9777 9778 ocs_hw_io_cancel_cleanup(hw, io); 9779 9780 /* 9781 * Since this is called in a reset/shutdown 9782 * case, If there is no callback, then just 9783 * free the IO. 9784 * 9785 * Note: A port owned XRI cannot be on 9786 * the in use list. We cannot call 9787 * ocs_hw_io_free() because we already 9788 * hold the io_lock. 9789 */ 9790 if (done == NULL && 9791 abort_done == NULL) { 9792 /* 9793 * Since this is called in a reset/shutdown 9794 * case, If there is no callback, then just 9795 * free the IO. 9796 */ 9797 ocs_hw_io_free_common(hw, io); 9798 ocs_list_remove(&hw->io_inuse, io); 9799 ocs_hw_io_free_move_correct_list(hw, io); 9800 } 9801 } 9802 9803 /* 9804 * For port owned XRIs, they are not on the in use list, so 9805 * walk though XRIs and issue any callbacks. 9806 */ 9807 ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) { 9808 /* check list and remove if there */ 9809 if (ocs_list_on_list(&io->dnrx_link)) { 9810 ocs_list_remove(&hw->io_port_dnrx, io); 9811 ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */ 9812 } 9813 ocs_hw_io_cancel_cleanup(hw, io); 9814 ocs_list_remove(&hw->io_port_owned, io); 9815 ocs_hw_io_free_common(hw, io); 9816 } 9817 ocs_unlock(&hw->io_lock); 9818 9819 /* Give time for the callbacks to complete */ 9820 do { 9821 ocs_udelay(10000); 9822 iters--; 9823 } while (!ocs_list_empty(&hw->io_inuse) && iters); 9824 9825 /* Leave a breadcrumb that cleanup is not yet complete. 
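	 * (the 100 iterations of 10 ms above give the callbacks roughly one
	 * second to drain the io_inuse list)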
*/ 9826 if (!ocs_list_empty(&hw->io_inuse)) { 9827 ocs_log_test(hw->os, "io_inuse list is not empty\n"); 9828 } 9829 9830 return 0; 9831 } 9832 9833 static int32_t 9834 ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size, 9835 ocs_dma_t *rsp) 9836 { 9837 sli4_sge_t *data = NULL; 9838 9839 if (!hw || !io) { 9840 ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io); 9841 return OCS_HW_RTN_ERROR; 9842 } 9843 9844 data = io->def_sgl.virt; 9845 9846 /* setup command pointer */ 9847 data->buffer_address_high = ocs_addr32_hi(cmnd->phys); 9848 data->buffer_address_low = ocs_addr32_lo(cmnd->phys); 9849 data->buffer_length = cmnd_size; 9850 data++; 9851 9852 /* setup response pointer */ 9853 data->buffer_address_high = ocs_addr32_hi(rsp->phys); 9854 data->buffer_address_low = ocs_addr32_lo(rsp->phys); 9855 data->buffer_length = rsp->size; 9856 9857 return 0; 9858 } 9859 9860 static int32_t 9861 __ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 9862 { 9863 sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe; 9864 9865 if (status || read_topo->hdr.status) { 9866 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", 9867 status, read_topo->hdr.status); 9868 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 9869 return -1; 9870 } 9871 9872 switch (read_topo->attention_type) { 9873 case SLI4_READ_TOPOLOGY_LINK_UP: 9874 hw->link.status = SLI_LINK_STATUS_UP; 9875 break; 9876 case SLI4_READ_TOPOLOGY_LINK_DOWN: 9877 hw->link.status = SLI_LINK_STATUS_DOWN; 9878 break; 9879 case SLI4_READ_TOPOLOGY_LINK_NO_ALPA: 9880 hw->link.status = SLI_LINK_STATUS_NO_ALPA; 9881 break; 9882 default: 9883 hw->link.status = SLI_LINK_STATUS_MAX; 9884 break; 9885 } 9886 9887 switch (read_topo->topology) { 9888 case SLI4_READ_TOPOLOGY_NPORT: 9889 hw->link.topology = SLI_LINK_TOPO_NPORT; 9890 break; 9891 case SLI4_READ_TOPOLOGY_FC_AL: 9892 hw->link.topology = SLI_LINK_TOPO_LOOP; 9893 if (SLI_LINK_STATUS_UP == hw->link.status) { 9894 hw->link.loop_map = hw->loop_map.virt; 9895 } 9896 hw->link.fc_id = read_topo->acquired_al_pa; 9897 break; 9898 default: 9899 hw->link.topology = SLI_LINK_TOPO_MAX; 9900 break; 9901 } 9902 9903 hw->link.medium = SLI_LINK_MEDIUM_FC; 9904 9905 switch (read_topo->link_current.link_speed) { 9906 case SLI4_READ_TOPOLOGY_SPEED_1G: 9907 hw->link.speed = 1 * 1000; 9908 break; 9909 case SLI4_READ_TOPOLOGY_SPEED_2G: 9910 hw->link.speed = 2 * 1000; 9911 break; 9912 case SLI4_READ_TOPOLOGY_SPEED_4G: 9913 hw->link.speed = 4 * 1000; 9914 break; 9915 case SLI4_READ_TOPOLOGY_SPEED_8G: 9916 hw->link.speed = 8 * 1000; 9917 break; 9918 case SLI4_READ_TOPOLOGY_SPEED_16G: 9919 hw->link.speed = 16 * 1000; 9920 hw->link.loop_map = NULL; 9921 break; 9922 case SLI4_READ_TOPOLOGY_SPEED_32G: 9923 hw->link.speed = 32 * 1000; 9924 hw->link.loop_map = NULL; 9925 break; 9926 } 9927 9928 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 9929 9930 ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST); 9931 9932 return 0; 9933 } 9934 9935 static int32_t 9936 __ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 9937 { 9938 ocs_sli_port_t *sport = ctx->app; 9939 ocs_hw_t *hw = sport->hw; 9940 9941 smtrace("port"); 9942 9943 switch (evt) { 9944 case OCS_EVT_EXIT: 9945 /* ignore */ 9946 break; 9947 9948 case OCS_EVT_HW_PORT_REQ_FREE: 9949 case OCS_EVT_HW_PORT_REQ_ATTACH: 9950 if (data != NULL) { 9951 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 9952 } 9953 /* fall through */ 9954 default: 9955 ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, 
ocs_sm_event_name(evt)); 9956 break; 9957 } 9958 9959 return 0; 9960 } 9961 9962 static void * 9963 __ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 9964 { 9965 ocs_sli_port_t *sport = ctx->app; 9966 ocs_hw_t *hw = sport->hw; 9967 9968 smtrace("port"); 9969 9970 switch (evt) { 9971 case OCS_EVT_ENTER: 9972 if (data != NULL) { 9973 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 9974 } 9975 if (hw->callback.port != NULL) { 9976 hw->callback.port(hw->args.port, 9977 OCS_HW_PORT_FREE_FAIL, sport); 9978 } 9979 break; 9980 default: 9981 break; 9982 } 9983 9984 return NULL; 9985 } 9986 9987 static void * 9988 __ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 9989 { 9990 ocs_sli_port_t *sport = ctx->app; 9991 ocs_hw_t *hw = sport->hw; 9992 9993 smtrace("port"); 9994 9995 switch (evt) { 9996 case OCS_EVT_ENTER: 9997 /* free SLI resource */ 9998 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) { 9999 ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id); 10000 } 10001 10002 /* free mailbox buffer */ 10003 if (data != NULL) { 10004 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10005 } 10006 if (hw->callback.port != NULL) { 10007 hw->callback.port(hw->args.port, 10008 OCS_HW_PORT_FREE_OK, sport); 10009 } 10010 break; 10011 default: 10012 break; 10013 } 10014 10015 return NULL; 10016 } 10017 10018 static void * 10019 __ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10020 { 10021 ocs_sli_port_t *sport = ctx->app; 10022 ocs_hw_t *hw = sport->hw; 10023 10024 smtrace("port"); 10025 10026 switch (evt) { 10027 case OCS_EVT_ENTER: 10028 /* free SLI resource */ 10029 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); 10030 10031 /* free mailbox buffer */ 10032 if (data != NULL) { 10033 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10034 } 10035 10036 if (hw->callback.port != NULL) { 10037 hw->callback.port(hw->args.port, 10038 OCS_HW_PORT_ATTACH_FAIL, sport); 10039 } 10040 if (sport->sm_free_req_pending) { 10041 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10042 } 10043 break; 10044 default: 10045 __ocs_hw_port_common(__func__, ctx, evt, data); 10046 break; 10047 } 10048 10049 return NULL; 10050 } 10051 10052 static void * 10053 __ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10054 { 10055 ocs_sli_port_t *sport = ctx->app; 10056 ocs_hw_t *hw = sport->hw; 10057 uint8_t *cmd = NULL; 10058 10059 smtrace("port"); 10060 10061 switch (evt) { 10062 case OCS_EVT_ENTER: 10063 /* allocate memory and send unreg_vpi */ 10064 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 10065 if (!cmd) { 10066 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10067 break; 10068 } 10069 10070 if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator, 10071 SLI4_UNREG_TYPE_PORT)) { 10072 ocs_log_err(hw->os, "UNREG_VPI format failure\n"); 10073 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); 10074 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10075 break; 10076 } 10077 10078 if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { 10079 ocs_log_err(hw->os, "UNREG_VPI command failure\n"); 10080 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); 10081 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10082 break; 10083 } 10084 break; 10085 case OCS_EVT_RESPONSE: 10086 ocs_sm_transition(ctx, __ocs_hw_port_freed, data); 10087 break; 10088 case OCS_EVT_ERROR: 10089 ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data); 10090 break; 10091 default: 
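		/* delegate unhandled events to the common port handler, which logs them */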
10092 __ocs_hw_port_common(__func__, ctx, evt, data); 10093 break; 10094 } 10095 10096 return NULL; 10097 } 10098 10099 static void * 10100 __ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10101 { 10102 ocs_sli_port_t *sport = ctx->app; 10103 ocs_hw_t *hw = sport->hw; 10104 10105 smtrace("port"); 10106 10107 switch (evt) { 10108 case OCS_EVT_ENTER: 10109 /* Forward to execute in mailbox completion processing context */ 10110 if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) { 10111 ocs_log_err(hw->os, "ocs_hw_async_call failed\n"); 10112 } 10113 break; 10114 case OCS_EVT_RESPONSE: 10115 ocs_sm_transition(ctx, __ocs_hw_port_freed, data); 10116 break; 10117 case OCS_EVT_ERROR: 10118 ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data); 10119 break; 10120 default: 10121 break; 10122 } 10123 10124 return NULL; 10125 } 10126 10127 static void * 10128 __ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10129 { 10130 ocs_sli_port_t *sport = ctx->app; 10131 ocs_hw_t *hw = sport->hw; 10132 10133 smtrace("port"); 10134 10135 switch (evt) { 10136 case OCS_EVT_ENTER: 10137 if (data != NULL) { 10138 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10139 } 10140 if (hw->callback.port != NULL) { 10141 hw->callback.port(hw->args.port, 10142 OCS_HW_PORT_ATTACH_OK, sport); 10143 } 10144 if (sport->sm_free_req_pending) { 10145 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10146 } 10147 break; 10148 case OCS_EVT_HW_PORT_REQ_FREE: 10149 /* virtual/physical port request free */ 10150 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10151 break; 10152 default: 10153 __ocs_hw_port_common(__func__, ctx, evt, data); 10154 break; 10155 } 10156 10157 return NULL; 10158 } 10159 10160 static void * 10161 __ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10162 { 10163 ocs_sli_port_t *sport = ctx->app; 10164 ocs_hw_t *hw = sport->hw; 10165 10166 smtrace("port"); 10167 10168 switch (evt) { 10169 case OCS_EVT_ENTER: 10170 if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) { 10171 ocs_log_err(hw->os, "REG_VPI format failure\n"); 10172 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10173 break; 10174 } 10175 10176 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { 10177 ocs_log_err(hw->os, "REG_VPI command failure\n"); 10178 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10179 break; 10180 } 10181 break; 10182 case OCS_EVT_RESPONSE: 10183 ocs_sm_transition(ctx, __ocs_hw_port_attached, data); 10184 break; 10185 case OCS_EVT_ERROR: 10186 ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data); 10187 break; 10188 case OCS_EVT_HW_PORT_REQ_FREE: 10189 /* Wait for attach response and then free */ 10190 sport->sm_free_req_pending = 1; 10191 break; 10192 default: 10193 __ocs_hw_port_common(__func__, ctx, evt, data); 10194 break; 10195 } 10196 10197 return NULL; 10198 } 10199 10200 static void * 10201 __ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10202 { 10203 ocs_sli_port_t *sport = ctx->app; 10204 ocs_hw_t *hw = sport->hw; 10205 10206 smtrace("port"); 10207 10208 switch (evt) { 10209 case OCS_EVT_ENTER: 10210 /* free SLI resource */ 10211 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); 10212 10213 /* free mailbox buffer */ 10214 if (data != NULL) { 10215 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10216 } 10217 break; 10218 default: 10219 __ocs_hw_port_common(__func__, ctx, evt, data); 10220 break; 10221 } 10222 10223 return NULL; 10224 } 
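/*
 * Informal sketch of the port state machine, as implemented by the handlers
 * in this section (OCS_EVT_ prefixes omitted):
 *
 *   alloc_read_sparm64 --RESPONSE--> alloc_init_vpi --RESPONSE--> allocated
 *   allocated --HW_PORT_REQ_ATTACH--> attach_reg_vpi --RESPONSE--> attached
 *   attached/allocated --HW_PORT_REQ_FREE--> free_unreg_vpi
 *                                            (or free_nop on BE3/Skyhawk)
 *   free_unreg_vpi --RESPONSE--> freed, --ERROR--> free_report_fail
 */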
10225 10226 static void * 10227 __ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10228 { 10229 ocs_sli_port_t *sport = ctx->app; 10230 ocs_hw_t *hw = sport->hw; 10231 10232 smtrace("port"); 10233 10234 switch (evt) { 10235 case OCS_EVT_ENTER: 10236 if (data != NULL) { 10237 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10238 } 10239 if (hw->callback.port != NULL) { 10240 hw->callback.port(hw->args.port, 10241 OCS_HW_PORT_ALLOC_OK, sport); 10242 } 10243 /* If there is a pending free request, then handle it now */ 10244 if (sport->sm_free_req_pending) { 10245 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10246 } 10247 break; 10248 case OCS_EVT_HW_PORT_REQ_ATTACH: 10249 /* virtual port requests attach */ 10250 ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data); 10251 break; 10252 case OCS_EVT_HW_PORT_ATTACH_OK: 10253 /* physical port attached (as part of attaching domain) */ 10254 ocs_sm_transition(ctx, __ocs_hw_port_attached, data); 10255 break; 10256 case OCS_EVT_HW_PORT_REQ_FREE: 10257 /* virtual port request free */ 10258 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) { 10259 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10260 } else { 10261 /* 10262 * Note: BE3/Skyhawk will respond with a status of 0x20 10263 * unless the reg_vpi has been issued, so we can 10264 * skip the unreg_vpi for these adapters. 10265 * 10266 * Send a nop to make sure that free doesn't occur in 10267 * same context 10268 */ 10269 ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL); 10270 } 10271 break; 10272 default: 10273 __ocs_hw_port_common(__func__, ctx, evt, data); 10274 break; 10275 } 10276 10277 return NULL; 10278 } 10279 10280 static void * 10281 __ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10282 { 10283 ocs_sli_port_t *sport = ctx->app; 10284 ocs_hw_t *hw = sport->hw; 10285 10286 smtrace("port"); 10287 10288 switch (evt) { 10289 case OCS_EVT_ENTER: 10290 /* free SLI resource */ 10291 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); 10292 10293 /* free mailbox buffer */ 10294 if (data != NULL) { 10295 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10296 } 10297 10298 if (hw->callback.port != NULL) { 10299 hw->callback.port(hw->args.port, 10300 OCS_HW_PORT_ALLOC_FAIL, sport); 10301 } 10302 10303 /* If there is a pending free request, then handle it now */ 10304 if (sport->sm_free_req_pending) { 10305 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10306 } 10307 break; 10308 default: 10309 __ocs_hw_port_common(__func__, ctx, evt, data); 10310 break; 10311 } 10312 10313 return NULL; 10314 } 10315 10316 static void * 10317 __ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10318 { 10319 ocs_sli_port_t *sport = ctx->app; 10320 ocs_hw_t *hw = sport->hw; 10321 uint8_t *payload = NULL; 10322 10323 smtrace("port"); 10324 10325 switch (evt) { 10326 case OCS_EVT_ENTER: 10327 /* allocate memory for the service parameters */ 10328 if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) { 10329 ocs_log_err(hw->os, "Failed to allocate DMA memory\n"); 10330 ocs_sm_transition(ctx, __ocs_hw_port_done, data); 10331 break; 10332 } 10333 10334 if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE, 10335 &sport->dma, sport->indicator)) { 10336 ocs_log_err(hw->os, "READ_SPARM64 allocation failure\n"); 10337 ocs_dma_free(hw->os, &sport->dma); 10338 ocs_sm_transition(ctx, __ocs_hw_port_done, data); 10339 break; 10340 } 10341 10342 if (ocs_hw_command(hw, data, 
OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { 10343 ocs_log_err(hw->os, "READ_SPARM64 command failure\n"); 10344 ocs_dma_free(hw->os, &sport->dma); 10345 ocs_sm_transition(ctx, __ocs_hw_port_done, data); 10346 break; 10347 } 10348 break; 10349 case OCS_EVT_RESPONSE: 10350 payload = sport->dma.virt; 10351 10352 ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload); 10353 10354 ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, 10355 sizeof(sport->sli_wwpn)); 10356 ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, 10357 sizeof(sport->sli_wwnn)); 10358 10359 ocs_dma_free(hw->os, &sport->dma); 10360 ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data); 10361 break; 10362 case OCS_EVT_ERROR: 10363 ocs_dma_free(hw->os, &sport->dma); 10364 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data); 10365 break; 10366 case OCS_EVT_HW_PORT_REQ_FREE: 10367 /* Wait for attach response and then free */ 10368 sport->sm_free_req_pending = 1; 10369 break; 10370 case OCS_EVT_EXIT: 10371 break; 10372 default: 10373 __ocs_hw_port_common(__func__, ctx, evt, data); 10374 break; 10375 } 10376 10377 return NULL; 10378 } 10379 10380 static void * 10381 __ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10382 { 10383 ocs_sli_port_t *sport = ctx->app; 10384 10385 smtrace("port"); 10386 10387 switch (evt) { 10388 case OCS_EVT_ENTER: 10389 /* no-op */ 10390 break; 10391 case OCS_EVT_HW_PORT_ALLOC_OK: 10392 ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL); 10393 break; 10394 case OCS_EVT_HW_PORT_ALLOC_FAIL: 10395 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL); 10396 break; 10397 case OCS_EVT_HW_PORT_REQ_FREE: 10398 /* Wait for attach response and then free */ 10399 sport->sm_free_req_pending = 1; 10400 break; 10401 default: 10402 __ocs_hw_port_common(__func__, ctx, evt, data); 10403 break; 10404 } 10405 10406 return NULL; 10407 } 10408 10409 static void * 10410 __ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10411 { 10412 ocs_sli_port_t *sport = ctx->app; 10413 ocs_hw_t *hw = sport->hw; 10414 10415 smtrace("port"); 10416 10417 switch (evt) { 10418 case OCS_EVT_ENTER: 10419 /* If there is a pending free request, then handle it now */ 10420 if (sport->sm_free_req_pending) { 10421 ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL); 10422 return NULL; 10423 } 10424 10425 /* TODO XXX transitioning to done only works if this is called 10426 * directly from ocs_hw_port_alloc BUT not if called from 10427 * read_sparm64. 
In the latter case, we actually want to go
		 * through report_ok/fail
		 */
		if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE,
					  sport->indicator, sport->domain->indicator)) {
			ocs_log_err(hw->os, "INIT_VPI allocation failure\n");
			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
			break;
		}

		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
			ocs_log_err(hw->os, "INIT_VPI command failure\n");
			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
			break;
		}
		break;
	case OCS_EVT_RESPONSE:
		ocs_sm_transition(ctx, __ocs_hw_port_allocated, data);
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
		break;
	case OCS_EVT_HW_PORT_REQ_FREE:
		/* Wait for attach response and then free */
		sport->sm_free_req_pending = 1;
		break;
	case OCS_EVT_EXIT:
		break;
	default:
		__ocs_hw_port_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

static int32_t
__ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_sli_port_t *sport = arg;
	sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
	ocs_sm_event_t evt;

	if (status || hdr->status) {
		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
			      sport->indicator, status, hdr->status);
		evt = OCS_EVT_ERROR;
	} else {
		evt = OCS_EVT_RESPONSE;
	}

	ocs_sm_post_event(&sport->ctx, evt, mqe);

	return 0;
}

static int32_t
__ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_sli_port_t *sport = arg;
	sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
	ocs_sm_event_t evt;
	uint8_t *mqecpy;

	if (status || hdr->status) {
		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
			      sport->indicator, status, hdr->status);
		evt = OCS_EVT_ERROR;
	} else {
		evt = OCS_EVT_RESPONSE;
	}

	/*
	 * Here we must allocate a copy of the mailbox command buffer, because
	 * it is reused by the state machine post-event call and eventually
	 * freed there.
	 */
	mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mqecpy == NULL) {
		ocs_log_err(hw->os, "malloc mqecpy failed\n");
		return -1;
	}
	ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE);

	ocs_sm_post_event(&sport->ctx, evt, mqecpy);

	return 0;
}

/***************************************************************************
 * Domain state machine
 */

static int32_t
__ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t *domain = ctx->app;
	ocs_hw_t *hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_EXIT:
		/* ignore */
		break;

	default:
		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
		break;
	}

	return 0;
}

static void *
__ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t *domain = ctx->app;
	ocs_hw_t *hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		/* free
command buffer */ 10551 if (data != NULL) { 10552 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10553 } 10554 /* free SLI resources */ 10555 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator); 10556 /* TODO how to free FCFI (or do we at all)? */ 10557 10558 if (hw->callback.domain != NULL) { 10559 hw->callback.domain(hw->args.domain, 10560 OCS_HW_DOMAIN_ALLOC_FAIL, 10561 domain); 10562 } 10563 break; 10564 default: 10565 __ocs_hw_domain_common(__func__, ctx, evt, data); 10566 break; 10567 } 10568 10569 return NULL; 10570 } 10571 10572 static void * 10573 __ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10574 { 10575 ocs_domain_t *domain = ctx->app; 10576 ocs_hw_t *hw = domain->hw; 10577 10578 smtrace("domain"); 10579 10580 switch (evt) { 10581 case OCS_EVT_ENTER: 10582 /* free mailbox buffer and send alloc ok to physical sport */ 10583 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10584 ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL); 10585 10586 /* now inform registered callbacks */ 10587 if (hw->callback.domain != NULL) { 10588 hw->callback.domain(hw->args.domain, 10589 OCS_HW_DOMAIN_ATTACH_OK, 10590 domain); 10591 } 10592 break; 10593 case OCS_EVT_HW_DOMAIN_REQ_FREE: 10594 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL); 10595 break; 10596 default: 10597 __ocs_hw_domain_common(__func__, ctx, evt, data); 10598 break; 10599 } 10600 10601 return NULL; 10602 } 10603 10604 static void * 10605 __ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10606 { 10607 ocs_domain_t *domain = ctx->app; 10608 ocs_hw_t *hw = domain->hw; 10609 10610 smtrace("domain"); 10611 10612 switch (evt) { 10613 case OCS_EVT_ENTER: 10614 if (data != NULL) { 10615 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10616 } 10617 /* free SLI resources */ 10618 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator); 10619 /* TODO how to free FCFI (or do we at all)? 
*/ 10620 10621 if (hw->callback.domain != NULL) { 10622 hw->callback.domain(hw->args.domain, 10623 OCS_HW_DOMAIN_ATTACH_FAIL, 10624 domain); 10625 } 10626 break; 10627 case OCS_EVT_EXIT: 10628 break; 10629 default: 10630 __ocs_hw_domain_common(__func__, ctx, evt, data); 10631 break; 10632 } 10633 10634 return NULL; 10635 } 10636 10637 static void * 10638 __ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10639 { 10640 ocs_domain_t *domain = ctx->app; 10641 ocs_hw_t *hw = domain->hw; 10642 10643 smtrace("domain"); 10644 10645 switch (evt) { 10646 case OCS_EVT_ENTER: 10647 10648 ocs_display_sparams("", "reg vpi", 0, NULL, domain->dma.virt); 10649 10650 if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) { 10651 ocs_log_err(hw->os, "REG_VFI format failure\n"); 10652 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10653 break; 10654 } 10655 10656 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 10657 ocs_log_err(hw->os, "REG_VFI command failure\n"); 10658 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10659 break; 10660 } 10661 break; 10662 case OCS_EVT_RESPONSE: 10663 ocs_sm_transition(ctx, __ocs_hw_domain_attached, data); 10664 break; 10665 case OCS_EVT_ERROR: 10666 ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data); 10667 break; 10668 default: 10669 __ocs_hw_domain_common(__func__, ctx, evt, data); 10670 break; 10671 } 10672 10673 return NULL; 10674 } 10675 10676 static void * 10677 __ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10678 { 10679 ocs_domain_t *domain = ctx->app; 10680 ocs_hw_t *hw = domain->hw; 10681 10682 smtrace("domain"); 10683 10684 switch (evt) { 10685 case OCS_EVT_ENTER: 10686 /* free mailbox buffer and send alloc ok to physical sport */ 10687 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10688 ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL); 10689 10690 ocs_hw_domain_add(hw, domain); 10691 10692 /* now inform registered callbacks */ 10693 if (hw->callback.domain != NULL) { 10694 hw->callback.domain(hw->args.domain, 10695 OCS_HW_DOMAIN_ALLOC_OK, 10696 domain); 10697 } 10698 break; 10699 case OCS_EVT_HW_DOMAIN_REQ_ATTACH: 10700 ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data); 10701 break; 10702 case OCS_EVT_HW_DOMAIN_REQ_FREE: 10703 /* unreg_fcfi/vfi */ 10704 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) { 10705 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL); 10706 } else { 10707 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL); 10708 } 10709 break; 10710 default: 10711 __ocs_hw_domain_common(__func__, ctx, evt, data); 10712 break; 10713 } 10714 10715 return NULL; 10716 } 10717 10718 static void * 10719 __ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10720 { 10721 ocs_domain_t *domain = ctx->app; 10722 ocs_hw_t *hw = domain->hw; 10723 10724 smtrace("domain"); 10725 10726 switch (evt) { 10727 case OCS_EVT_ENTER: 10728 if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE, 10729 &domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) { 10730 ocs_log_err(hw->os, "READ_SPARM64 format failure\n"); 10731 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10732 break; 10733 } 10734 10735 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 10736 ocs_log_err(hw->os, "READ_SPARM64 command failure\n"); 10737 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10738 break; 10739 } 10740 break; 10741 case OCS_EVT_EXIT: 10742 break; 10743 case OCS_EVT_RESPONSE: 
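		/* service parameters are now in domain->dma; display them and
		 * continue the allocation sequence */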
10744 ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt); 10745 10746 ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data); 10747 break; 10748 case OCS_EVT_ERROR: 10749 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data); 10750 break; 10751 default: 10752 __ocs_hw_domain_common(__func__, ctx, evt, data); 10753 break; 10754 } 10755 10756 return NULL; 10757 } 10758 10759 static void * 10760 __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10761 { 10762 ocs_domain_t *domain = ctx->app; 10763 ocs_sli_port_t *sport = domain->sport; 10764 ocs_hw_t *hw = domain->hw; 10765 10766 smtrace("domain"); 10767 10768 switch (evt) { 10769 case OCS_EVT_ENTER: 10770 if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator, 10771 domain->fcf_indicator, sport->indicator)) { 10772 ocs_log_err(hw->os, "INIT_VFI format failure\n"); 10773 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10774 break; 10775 } 10776 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 10777 ocs_log_err(hw->os, "INIT_VFI command failure\n"); 10778 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10779 break; 10780 } 10781 break; 10782 case OCS_EVT_EXIT: 10783 break; 10784 case OCS_EVT_RESPONSE: 10785 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data); 10786 break; 10787 case OCS_EVT_ERROR: 10788 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data); 10789 break; 10790 default: 10791 __ocs_hw_domain_common(__func__, ctx, evt, data); 10792 break; 10793 } 10794 10795 return NULL; 10796 } 10797 10798 static void * 10799 __ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10800 { 10801 ocs_domain_t *domain = ctx->app; 10802 ocs_hw_t *hw = domain->hw; 10803 10804 smtrace("domain"); 10805 10806 switch (evt) { 10807 case OCS_EVT_ENTER: { 10808 sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]; 10809 uint32_t i; 10810 10811 /* Set the filter match/mask values from hw's filter_def values */ 10812 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { 10813 rq_cfg[i].rq_id = 0xffff; 10814 rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i]; 10815 rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8); 10816 rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16); 10817 rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24); 10818 } 10819 10820 /* Set the rq_id for each, in order of RQ definition */ 10821 for (i = 0; i < hw->hw_rq_count; i++) { 10822 if (i >= ARRAY_SIZE(rq_cfg)) { 10823 ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n"); 10824 break; 10825 } 10826 rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id; 10827 } 10828 10829 if (!data) { 10830 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10831 break; 10832 } 10833 10834 if (hw->hw_mrq_count) { 10835 if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 10836 domain->vlan_id, domain->fcf)) { 10837 ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n"); 10838 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10839 break; 10840 } 10841 10842 } else { 10843 if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf, 10844 rq_cfg, domain->vlan_id)) { 10845 ocs_log_err(hw->os, "REG_FCFI format failure\n"); 10846 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10847 break; 10848 } 10849 } 10850 10851 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 10852 ocs_log_err(hw->os, "REG_FCFI command failure\n"); 10853 
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}
		break;
	}
	case OCS_EVT_EXIT:
		break;
	case OCS_EVT_RESPONSE:
		if (!data) {
			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
			break;
		}

		domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi;

		/*
		 * IF_TYPE 0 devices do not support explicit VFI and VPI initialization
		 * and instead rely on implicit initialization during VFI registration.
		 * Short circuit normal processing here for those devices.
		 */
		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
		} else {
			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data);
		}
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

static void *
__ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t *domain = ctx->app;
	ocs_hw_t *hw = domain->hw;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
			/*
			 * For FC, the HW has already registered an FCFI.
			 * Copy the FCF information into the domain and jump to INIT_VFI.
			 */
			domain->fcf_indicator = hw->fcf_indicator;
			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data);
		} else {
			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data);
		}
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}

static void *
__ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t *domain = ctx->app;

	smtrace("domain");

	switch (evt) {
	case OCS_EVT_ENTER:
		if (domain != NULL) {
			ocs_hw_t *hw = domain->hw;

			ocs_hw_domain_del(hw, domain);

			if (hw->callback.domain != NULL) {
				hw->callback.domain(hw->args.domain,
						    OCS_HW_DOMAIN_FREE_FAIL,
						    domain);
			}
		}

		/* free command buffer */
		if (data != NULL) {
			ocs_free(domain != NULL ?
domain->hw->os : NULL, data, SLI4_BMBX_SIZE); 10943 } 10944 break; 10945 case OCS_EVT_EXIT: 10946 break; 10947 default: 10948 __ocs_hw_domain_common(__func__, ctx, evt, data); 10949 break; 10950 } 10951 10952 return NULL; 10953 } 10954 10955 static void * 10956 __ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10957 { 10958 ocs_domain_t *domain = ctx->app; 10959 10960 smtrace("domain"); 10961 10962 switch (evt) { 10963 case OCS_EVT_ENTER: 10964 /* Free DMA and mailbox buffer */ 10965 if (domain != NULL) { 10966 ocs_hw_t *hw = domain->hw; 10967 10968 /* free VFI resource */ 10969 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, 10970 domain->indicator); 10971 10972 ocs_hw_domain_del(hw, domain); 10973 10974 /* inform registered callbacks */ 10975 if (hw->callback.domain != NULL) { 10976 hw->callback.domain(hw->args.domain, 10977 OCS_HW_DOMAIN_FREE_OK, 10978 domain); 10979 } 10980 } 10981 if (data != NULL) { 10982 ocs_free(NULL, data, SLI4_BMBX_SIZE); 10983 } 10984 break; 10985 case OCS_EVT_EXIT: 10986 break; 10987 default: 10988 __ocs_hw_domain_common(__func__, ctx, evt, data); 10989 break; 10990 } 10991 10992 return NULL; 10993 } 10994 10995 static void * 10996 __ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10997 { 10998 ocs_domain_t *domain = ctx->app; 10999 ocs_hw_t *hw = domain->hw; 11000 11001 smtrace("domain"); 11002 11003 switch (evt) { 11004 case OCS_EVT_ENTER: 11005 /* if we're in the middle of a teardown, skip sending rediscover */ 11006 if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) { 11007 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); 11008 break; 11009 } 11010 if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) { 11011 ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n"); 11012 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11013 break; 11014 } 11015 11016 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 11017 ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n"); 11018 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11019 } 11020 break; 11021 case OCS_EVT_RESPONSE: 11022 case OCS_EVT_ERROR: 11023 /* REDISCOVER_FCF can fail if none exist */ 11024 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); 11025 break; 11026 case OCS_EVT_EXIT: 11027 break; 11028 default: 11029 __ocs_hw_domain_common(__func__, ctx, evt, data); 11030 break; 11031 } 11032 11033 return NULL; 11034 } 11035 11036 static void * 11037 __ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 11038 { 11039 ocs_domain_t *domain = ctx->app; 11040 ocs_hw_t *hw = domain->hw; 11041 11042 smtrace("domain"); 11043 11044 switch (evt) { 11045 case OCS_EVT_ENTER: 11046 if (data == NULL) { 11047 data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 11048 if (!data) { 11049 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11050 break; 11051 } 11052 } 11053 11054 if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) { 11055 ocs_log_err(hw->os, "UNREG_FCFI format failure\n"); 11056 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 11057 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11058 break; 11059 } 11060 11061 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 11062 ocs_log_err(hw->os, "UNREG_FCFI command failure\n"); 11063 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 11064 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11065 break; 11066 } 11067 break; 11068 case OCS_EVT_RESPONSE: 11069 if (domain->req_rediscover_fcf) { 
11070 domain->req_rediscover_fcf = FALSE; 11071 ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data); 11072 } else { 11073 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); 11074 } 11075 break; 11076 case OCS_EVT_ERROR: 11077 ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data); 11078 break; 11079 case OCS_EVT_EXIT: 11080 break; 11081 default: 11082 __ocs_hw_domain_common(__func__, ctx, evt, data); 11083 break; 11084 } 11085 11086 return NULL; 11087 } 11088 11089 static void * 11090 __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 11091 { 11092 ocs_domain_t *domain = ctx->app; 11093 ocs_hw_t *hw = domain->hw; 11094 uint8_t is_fc = FALSE; 11095 11096 smtrace("domain"); 11097 11098 is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC); 11099 11100 switch (evt) { 11101 case OCS_EVT_ENTER: 11102 if (data == NULL) { 11103 data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 11104 if (!data) { 11105 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11106 break; 11107 } 11108 } 11109 11110 if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain, 11111 SLI4_UNREG_TYPE_DOMAIN)) { 11112 ocs_log_err(hw->os, "UNREG_VFI format failure\n"); 11113 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 11114 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11115 break; 11116 } 11117 11118 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 11119 ocs_log_err(hw->os, "UNREG_VFI command failure\n"); 11120 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 11121 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11122 break; 11123 } 11124 break; 11125 case OCS_EVT_ERROR: 11126 if (is_fc) { 11127 ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data); 11128 } else { 11129 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data); 11130 } 11131 break; 11132 case OCS_EVT_RESPONSE: 11133 if (is_fc) { 11134 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); 11135 } else { 11136 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data); 11137 } 11138 break; 11139 default: 11140 __ocs_hw_domain_common(__func__, ctx, evt, data); 11141 break; 11142 } 11143 11144 return NULL; 11145 } 11146 11147 /* callback for domain alloc/attach/free */ 11148 static int32_t 11149 __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 11150 { 11151 ocs_domain_t *domain = arg; 11152 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 11153 ocs_sm_event_t evt; 11154 11155 if (status || hdr->status) { 11156 ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n", 11157 domain->indicator, status, hdr->status); 11158 evt = OCS_EVT_ERROR; 11159 } else { 11160 evt = OCS_EVT_RESPONSE; 11161 } 11162 11163 ocs_sm_post_event(&domain->sm, evt, mqe); 11164 11165 return 0; 11166 } 11167 11168 static int32_t 11169 target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 11170 { 11171 ocs_hw_io_t *io = NULL; 11172 ocs_hw_io_t *io_next = NULL; 11173 uint64_t ticks_current = ocs_get_os_ticks(); 11174 uint32_t sec_elapsed; 11175 ocs_hw_rtn_e rc; 11176 11177 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 11178 11179 if (status || hdr->status) { 11180 ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n", 11181 status, hdr->status); 11182 /* go ahead and proceed with wqe timer checks... 
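 * (Worked example of the check below, assuming a hypothetical
 * ocs_get_os_tick_freq() of 1000 ticks/s: an IO submitted 5000 ticks ago
 * gives sec_elapsed = 5000 / 1000 = 5, so any io->tgt_wqe_timeout below
 * 5 seconds sends it down the abort path.)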
*/ 11183 } 11184 11185 /* loop through active WQE list and check for timeouts */ 11186 ocs_lock(&hw->io_lock); 11187 ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) { 11188 sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq()); 11189 11190 /* 11191 * If elapsed time > timeout, abort it. No need to check type since 11192 * it wouldn't be on this list unless it was a target WQE 11193 */ 11194 if (sec_elapsed > io->tgt_wqe_timeout) { 11195 ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n", 11196 io->indicator, io->reqtag, io->type); 11197 11198 /* remove from active_wqe list so won't try to abort again */ 11199 ocs_list_remove(&hw->io_timed_wqe, io); 11200 11201 /* save status of "timed out" for when abort completes */ 11202 io->status_saved = 1; 11203 io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT; 11204 io->saved_ext = 0; 11205 io->saved_len = 0; 11206 11207 /* now abort outstanding IO */ 11208 rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL); 11209 if (rc) { 11210 ocs_log_test(hw->os, 11211 "abort failed xri=%#x tag=%#x rc=%d\n", 11212 io->indicator, io->reqtag, rc); 11213 } 11214 } 11215 /* 11216 * need to go through entire list since each IO could have a 11217 * different timeout value 11218 */ 11219 } 11220 ocs_unlock(&hw->io_lock); 11221 11222 /* if we're not in the middle of shutting down, schedule next timer */ 11223 if (!hw->active_wqe_timer_shutdown) { 11224 ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS); 11225 } 11226 hw->in_active_wqe_timer = FALSE; 11227 return 0; 11228 } 11229 11230 static void 11231 target_wqe_timer_cb(void *arg) 11232 { 11233 ocs_hw_t *hw = (ocs_hw_t *)arg; 11234 11235 /* delete existing timer; will kick off new timer after checking wqe timeouts */ 11236 hw->in_active_wqe_timer = TRUE; 11237 ocs_del_timer(&hw->wqe_timer); 11238 11239 /* Forward timer callback to execute in the mailbox completion processing context */ 11240 if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) { 11241 ocs_log_test(hw->os, "ocs_hw_async_call failed\n"); 11242 } 11243 } 11244 11245 static void 11246 shutdown_target_wqe_timer(ocs_hw_t *hw) 11247 { 11248 uint32_t iters = 100; 11249 11250 if (hw->config.emulate_tgt_wqe_timeout) { 11251 /* request active wqe timer shutdown, then wait for it to complete */ 11252 hw->active_wqe_timer_shutdown = TRUE; 11253 11254 /* delete WQE timer and wait for timer handler to complete (if necessary) */ 11255 ocs_del_timer(&hw->wqe_timer); 11256 11257 /* now wait for timer handler to complete (if necessary) */ 11258 while (hw->in_active_wqe_timer && iters) { 11259 /* 11260 * if we happen to have just sent NOP mailbox command, make sure 11261 * completions are being processed 11262 */ 11263 ocs_hw_flush(hw); 11264 iters--; 11265 } 11266 11267 if (iters == 0) { 11268 ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n"); 11269 } 11270 } 11271 } 11272 11273 /** 11274 * @brief Determine if HW IO is owned by the port. 11275 * 11276 * @par Description 11277 * Determines if the given HW IO has been posted to the chip. 11278 * 11279 * @param hw Hardware context allocated by the caller. 11280 * @param io HW IO. 11281 * 11282 * @return Returns TRUE if given HW IO is port-owned. 11283 */ 11284 uint8_t 11285 ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io) 11286 { 11287 /* Check to see if this is a port owned XRI */ 11288 return io->is_port_owned; 11289 } 11290 11291 /** 11292 * @brief Return TRUE if exchange is port-owned. 
11293 *
11294 * @par Description
11295 * Test to see if the xri is a port-owned xri.
11296 *
11297 * @param hw Hardware context.
11298 * @param xri Exchange indicator.
11299 *
11300 * @return Returns TRUE if XRI is a port owned XRI.
11301 */
11302
11303 uint8_t
11304 ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri)
11305 {
11306 ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri);
11307 return (io == NULL ? FALSE : io->is_port_owned);
11308 }
11309
11310 /**
11311 * @brief Returns an XRI from the port owned list to the host.
11312 *
11313 * @par Description
11314 * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes.
11315 *
11316 * @param hw Hardware context.
11317 * @param xri_base The starting XRI number.
11318 * @param xri_count The number of XRIs to free from the base.
11319 */
11320 static void
11321 ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count)
11322 {
11323 ocs_hw_io_t *io;
11324 uint32_t i;
11325
11326 for (i = 0; i < xri_count; i++) {
11327 io = ocs_hw_io_lookup(hw, xri_base + i);
11328
11329 /*
11330 * if this is an auto xfer rdy XRI, then we need to release any
11331 * buffer attached to the XRI before moving the XRI back to the free pool.
11332 */
11333 if (hw->auto_xfer_rdy_enabled) {
11334 ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
11335 }
11336
11337 ocs_lock(&hw->io_lock);
11338 ocs_list_remove(&hw->io_port_owned, io);
11339 io->is_port_owned = 0;
11340 ocs_list_add_tail(&hw->io_free, io);
11341 ocs_unlock(&hw->io_lock);
11342 }
11343 }
11344
11345 /**
11346 * @brief Called when the POST_XRI command completes.
11347 *
11348 * @par Description
11349 * Free the mailbox command buffer and reclaim the XRIs on failure.
11350 *
11351 * @param hw Hardware context.
11352 * @param status Status field from the mbox completion.
11353 * @param mqe Mailbox response structure.
11354 * @param arg Pointer to a callback function that signals the caller that the command is done.
11355 *
11356 * @return Returns 0.
11357 */
11358 static int32_t
11359 ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11360 {
11361 sli4_cmd_post_xri_t *post_xri = (sli4_cmd_post_xri_t*)mqe;
11362
11363 /* Reclaim the XRIs as host owned if the command fails */
11364 if (status != 0) {
11365 ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt=0x%x\n",
11366 status, post_xri->xri_base, post_xri->xri_count);
11367 ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count);
11368 }
11369
11370 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11371 return 0;
11372 }
11373
11374 /**
11375 * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port.
11376 *
11377 * @param hw Hardware context.
11378 * @param xri_start The starting XRI to post.
11379 * @param num_to_post The number of XRIs to post.
11380 *
11381 * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS.
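 *
 * @par Example
 * A minimal usage sketch (the count is hypothetical); in this driver the
 * public entry point is ocs_hw_xri_move_to_port_owned(), which calls this
 * helper for each XRI it removes from the free list:
 * @code
 * uint32_t posted = ocs_hw_xri_move_to_port_owned(hw, 16);
 * if (posted < 16) {
 *         ocs_log_test(hw->os, "only %d of 16 XRIs posted\n", posted);
 * }
 * @endcode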
11382 */ 11383 11384 static ocs_hw_rtn_e 11385 ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post) 11386 { 11387 uint8_t *post_xri; 11388 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 11389 11390 /* Since we need to allocate for mailbox queue, just always allocate */ 11391 post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 11392 if (post_xri == NULL) { 11393 ocs_log_err(hw->os, "no buffer for command\n"); 11394 return OCS_HW_RTN_NO_MEMORY; 11395 } 11396 11397 /* Register the XRIs */ 11398 if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE, 11399 xri_start, num_to_post)) { 11400 rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL); 11401 if (rc != OCS_HW_RTN_SUCCESS) { 11402 ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE); 11403 ocs_log_err(hw->os, "post_xri failed\n"); 11404 } 11405 } 11406 return rc; 11407 } 11408 11409 /** 11410 * @brief Move XRIs from the host-controlled pool to the port. 11411 * 11412 * @par Description 11413 * Removes IOs from the free list and moves them to the port. 11414 * 11415 * @param hw Hardware context. 11416 * @param num_xri The number of XRIs being requested to move to the chip. 11417 * 11418 * @return Returns the number of XRIs that were moved. 11419 */ 11420 11421 uint32_t 11422 ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri) 11423 { 11424 ocs_hw_io_t *io; 11425 uint32_t i; 11426 uint32_t num_posted = 0; 11427 11428 /* 11429 * Note: We cannot use ocs_hw_io_alloc() because that would place the 11430 * IO on the io_inuse list. We need to move from the io_free to 11431 * the io_port_owned list. 11432 */ 11433 ocs_lock(&hw->io_lock); 11434 11435 for (i = 0; i < num_xri; i++) { 11436 if (NULL != (io = ocs_list_remove_head(&hw->io_free))) { 11437 ocs_hw_rtn_e rc; 11438 11439 /* 11440 * if this is an auto xfer rdy XRI, then we need to attach a 11441 * buffer to the XRI before submitting it to the chip. If a 11442 * buffer is unavailable, then we cannot post it, so return it 11443 * to the free pool. 11444 */ 11445 if (hw->auto_xfer_rdy_enabled) { 11446 /* Note: uses the IO lock to get the auto xfer rdy buffer */ 11447 ocs_unlock(&hw->io_lock); 11448 rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io); 11449 ocs_lock(&hw->io_lock); 11450 if (rc != OCS_HW_RTN_SUCCESS) { 11451 ocs_list_add_head(&hw->io_free, io); 11452 break; 11453 } 11454 } 11455 ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator); 11456 io->is_port_owned = 1; 11457 ocs_list_add_tail(&hw->io_port_owned, io); 11458 11459 /* Post XRI */ 11460 if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS ) { 11461 ocs_hw_reclaim_xri(hw, io->indicator, i); 11462 break; 11463 } 11464 num_posted++; 11465 } else { 11466 /* no more free XRIs */ 11467 break; 11468 } 11469 } 11470 ocs_unlock(&hw->io_lock); 11471 11472 return num_posted; 11473 } 11474 11475 /** 11476 * @brief Called when the RELEASE_XRI command completes. 11477 * 11478 * @par Description 11479 * Move the IOs back to the free pool on success. 11480 * 11481 * @param hw Hardware context. 11482 * @param status Status field from the mbox completion. 11483 * @param mqe Mailbox response structure. 11484 * @param arg Pointer to a callback function that signals the caller that the command is done. 11485 * 11486 * @return Returns 0. 
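 *
 * @par Note
 * Each xri_tbl[] entry in the response carries two released XRI tags
 * (xri_tag0 and xri_tag1), so a released_xri_count of 3, for example,
 * consumes xri_tbl[0].xri_tag0, xri_tbl[0].xri_tag1, and
 * xri_tbl[1].xri_tag0; this is why the loop below picks tag0 for even
 * indexes and tag1 for odd ones.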
11487 */
11488 static int32_t
11489 ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11490 {
11491 sli4_cmd_release_xri_t *release_xri = (sli4_cmd_release_xri_t*)mqe;
11492 uint8_t i;
11493
11494 /* Reclaim the XRIs as host owned if the command fails */
11495 if (status != 0) {
11496 ocs_log_err(hw->os, "Status 0x%x\n", status);
11497 } else {
11498 for (i = 0; i < release_xri->released_xri_count; i++) {
11499 uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 :
11500 release_xri->xri_tbl[i/2].xri_tag1);
11501 ocs_hw_reclaim_xri(hw, xri, 1);
11502 }
11503 }
11504
11505 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11506 return 0;
11507 }
11508
11509 /**
11510 * @brief Move XRIs from the port-controlled pool to the host.
11511 *
11512 * Requests XRIs from the FW to return to the host-owned pool.
11513 *
11514 * @param hw Hardware context.
11515 * @param num_xri The number of XRIs being requested to be moved from the chip.
11516 *
11517 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
11518 */
11519
11520 ocs_hw_rtn_e
11521 ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri)
11522 {
11523 uint8_t *release_xri;
11524 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11525
11526 /* non-local buffer required for mailbox queue */
11527 release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11528 if (release_xri == NULL) {
11529 ocs_log_err(hw->os, "no buffer for command\n");
11530 return OCS_HW_RTN_NO_MEMORY;
11531 }
11532
11533 /* release the XRIs */
11534 if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) {
11535 rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL);
11536 if (rc != OCS_HW_RTN_SUCCESS) {
11537 ocs_log_err(hw->os, "release_xri failed\n");
11538 }
11539 }
11540 /* If we are polling or an error occurred, then free the mailbox buffer */
11541 if (release_xri != NULL && rc != OCS_HW_RTN_SUCCESS) {
11542 ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE);
11543 }
11544 return rc;
11545 }
11546
11547 /**
11548 * @brief Allocate an ocs_hw_rx_buffer_t array.
11549 *
11550 * @par Description
11551 * An ocs_hw_rx_buffer_t array is allocated, along with the required DMA memory.
11552 *
11553 * @param hw Pointer to HW object.
11554 * @param rqindex RQ index for this buffer.
11555 * @param count Count of buffers in array.
11556 * @param size Size of buffer.
11557 *
11558 * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array.
11559 */
11560 static ocs_hw_rq_buffer_t *
11561 ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size)
11562 {
11563 ocs_t *ocs = hw->os;
11564 ocs_hw_rq_buffer_t *rq_buf = NULL;
11565 ocs_hw_rq_buffer_t *prq;
11566 uint32_t i;
11567
11568 if (count != 0) {
11569 rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO);
11570 if (rq_buf == NULL) {
11571 ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n");
11572 return NULL;
11573 }
11574
11575 for (i = 0, prq = rq_buf; i < count; i ++, prq++) {
11576 prq->rqindex = rqindex;
11577 if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) {
11578 ocs_log_err(hw->os, "DMA allocation failed\n");
11579 ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11580 rq_buf = NULL;
11581 break;
11582 }
11583 }
11584 }
11585 return rq_buf;
11586 }
11587
11588 /**
11589 * @brief Free an ocs_hw_rx_buffer_t array.
11590 *
11591 * @par Description
11592 * The ocs_hw_rx_buffer_t array is freed, along with allocated DMA memory.
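 *
 * @par Example
 * A sketch of the alloc/free pairing, mirroring ocs_hw_rx_allocate() and
 * ocs_hw_rx_free() below (the count and size values are hypothetical):
 * @code
 * ocs_hw_rq_buffer_t *buf = ocs_hw_rx_buffer_alloc(hw, rqindex, 1024, 2048);
 * if (buf != NULL) {
 *         ocs_hw_rx_buffer_free(hw, buf, 1024);
 * }
 * @endcode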
11593 * 11594 * @param hw Pointer to HW object. 11595 * @param rq_buf Pointer to ocs_hw_rx_buffer_t array. 11596 * @param count Count of buffers in array. 11597 * 11598 * @return None. 11599 */ 11600 static void 11601 ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count) 11602 { 11603 ocs_t *ocs = hw->os; 11604 uint32_t i; 11605 ocs_hw_rq_buffer_t *prq; 11606 11607 if (rq_buf != NULL) { 11608 for (i = 0, prq = rq_buf; i < count; i++, prq++) { 11609 ocs_dma_free(ocs, &prq->dma); 11610 } 11611 ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count); 11612 } 11613 } 11614 11615 /** 11616 * @brief Allocate the RQ data buffers. 11617 * 11618 * @param hw Pointer to HW object. 11619 * 11620 * @return Returns 0 on success, or a non-zero value on failure. 11621 */ 11622 ocs_hw_rtn_e 11623 ocs_hw_rx_allocate(ocs_hw_t *hw) 11624 { 11625 ocs_t *ocs = hw->os; 11626 uint32_t i; 11627 int32_t rc = OCS_HW_RTN_SUCCESS; 11628 uint32_t rqindex = 0; 11629 hw_rq_t *rq; 11630 uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR; 11631 uint32_t payload_size = hw->config.rq_default_buffer_size; 11632 11633 rqindex = 0; 11634 11635 for (i = 0; i < hw->hw_rq_count; i++) { 11636 rq = hw->hw_rq[i]; 11637 11638 /* Allocate header buffers */ 11639 rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size); 11640 if (rq->hdr_buf == NULL) { 11641 ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n"); 11642 rc = OCS_HW_RTN_ERROR; 11643 break; 11644 } 11645 11646 ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header %4d by %4d bytes\n", i, rq->hdr->id, 11647 rq->entry_count, hdr_size); 11648 11649 rqindex++; 11650 11651 /* Allocate payload buffers */ 11652 rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size); 11653 if (rq->payload_buf == NULL) { 11654 ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc fb_buf failed\n"); 11655 rc = OCS_HW_RTN_ERROR; 11656 break; 11657 } 11658 ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id, 11659 rq->entry_count, payload_size); 11660 rqindex++; 11661 } 11662 11663 return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS; 11664 } 11665 11666 /** 11667 * @brief Post the RQ data buffers to the chip. 11668 * 11669 * @param hw Pointer to HW object. 11670 * 11671 * @return Returns 0 on success, or a non-zero value on failure. 11672 */ 11673 ocs_hw_rtn_e 11674 ocs_hw_rx_post(ocs_hw_t *hw) 11675 { 11676 uint32_t i; 11677 uint32_t idx; 11678 uint32_t rq_idx; 11679 int32_t rc = 0; 11680 11681 /* 11682 * In RQ pair mode, we MUST post the header and payload buffer at the 11683 * same time. 11684 */ 11685 for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) { 11686 hw_rq_t *rq = hw->hw_rq[rq_idx]; 11687 11688 for (i = 0; i < rq->entry_count-1; i++) { 11689 ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++); 11690 ocs_hw_assert(seq != NULL); 11691 11692 seq->header = &rq->hdr_buf[i]; 11693 11694 seq->payload = &rq->payload_buf[i]; 11695 11696 rc = ocs_hw_sequence_free(hw, seq); 11697 if (rc) { 11698 break; 11699 } 11700 } 11701 if (rc) { 11702 break; 11703 } 11704 } 11705 11706 return rc; 11707 } 11708 11709 /** 11710 * @brief Free the RQ data buffers. 11711 * 11712 * @param hw Pointer to HW object. 
11713 *
11714 */
11715 void
11716 ocs_hw_rx_free(ocs_hw_t *hw)
11717 {
11718 hw_rq_t *rq;
11719 uint32_t i;
11720
11721 /* Free hw_rq buffers */
11722 for (i = 0; i < hw->hw_rq_count; i++) {
11723 rq = hw->hw_rq[i];
11724 if (rq != NULL) {
11725 ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count);
11726 rq->hdr_buf = NULL;
11727 ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count);
11728 rq->payload_buf = NULL;
11729 }
11730 }
11731 }
11732
11733 /**
11734 * @brief HW async call context structure.
11735 */
11736 typedef struct {
11737 ocs_hw_async_cb_t callback;
11738 void *arg;
11739 uint8_t cmd[SLI4_BMBX_SIZE];
11740 } ocs_hw_async_call_ctx_t;
11741
11742 /**
11743 * @brief HW async callback handler
11744 *
11745 * @par Description
11746 * This function is called when the NOP mailbox command completes. The callback stored
11747 * in the requesting context is invoked.
11748 *
11749 * @param hw Pointer to HW object.
11750 * @param status Completion status.
11751 * @param mqe Pointer to mailbox completion queue entry.
11752 * @param arg Caller-provided argument.
11753 *
11754 * @return None.
11755 */
11756 static void
11757 ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11758 {
11759 ocs_hw_async_call_ctx_t *ctx = arg;
11760
11761 if (ctx != NULL) {
11762 if (ctx->callback != NULL) {
11763 (*ctx->callback)(hw, status, mqe, ctx->arg);
11764 }
11765 ocs_free(hw->os, ctx, sizeof(*ctx));
11766 }
11767 }
11768
11769 /**
11770 * @brief Make an async callback using NOP mailbox command
11771 *
11772 * @par Description
11773 * Post a NOP mailbox command; the callback with argument is invoked upon completion
11774 * while in the event processing context.
11775 *
11776 * @param hw Pointer to HW object.
11777 * @param callback Pointer to callback function.
11778 * @param arg Caller-provided callback argument.
11779 *
11780 * @return Returns 0 on success, or a negative error code value on failure.
11781 */
11782 int32_t
11783 ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg)
11784 {
11785 ocs_hw_async_call_ctx_t *ctx;
11786
11787 /*
11788 * Allocate a callback context (which includes the mailbox command buffer); we need
11789 * this to be persistent, as the mailbox command submission may be queued and
11790 * executed later.
11791 */
11792 ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT);
11793 if (ctx == NULL) {
11794 ocs_log_err(hw->os, "failed to malloc async call context\n");
11795 return OCS_HW_RTN_NO_MEMORY;
11796 }
11797 ctx->callback = callback;
11798 ctx->arg = arg;
11799
11800 /* Build and send a NOP mailbox command */
11801 if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) {
11802 ocs_log_err(hw->os, "COMMON_NOP format failure\n");
11803 ocs_free(hw->os, ctx, sizeof(*ctx));
11804 return OCS_HW_RTN_ERROR;
11805 }
11806
11807 if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) {
11808 ocs_log_err(hw->os, "COMMON_NOP command failure\n");
11809 ocs_free(hw->os, ctx, sizeof(*ctx));
11810 return OCS_HW_RTN_ERROR;
11811 }
11812 return OCS_HW_RTN_SUCCESS;
11813 }
11814
11815 /**
11816 * @brief Initialize the reqtag pool.
11817 *
11818 * @par Description
11819 * The WQ request tag pool is initialized.
11820 *
11821 * @param hw Pointer to HW object.
11822 *
11823 * @return Returns OCS_HW_RTN_SUCCESS on success, or OCS_HW_RTN_NO_MEMORY on allocation failure.
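 *
 * @par Example
 * A minimal sketch of the alloc/use/free cycle (my_wqe_done and the WQE
 * wiring are hypothetical):
 * @code
 * static void my_wqe_done(void *arg, uint8_t *cqe, int32_t status);
 *
 * hw_wq_callback_t *wqcb = ocs_hw_reqtag_alloc(hw, my_wqe_done, io);
 * if (wqcb != NULL) {
 *         // place wqcb->instance_index in the WQE request tag field;
 *         // the completion path looks the tag up, invokes my_wqe_done(),
 *         // and then returns the tag with ocs_hw_reqtag_free()
 * }
 * @endcode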
11824 */ 11825 ocs_hw_rtn_e 11826 ocs_hw_reqtag_init(ocs_hw_t *hw) 11827 { 11828 if (hw->wq_reqtag_pool == NULL) { 11829 hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE); 11830 if (hw->wq_reqtag_pool == NULL) { 11831 ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n"); 11832 return OCS_HW_RTN_NO_MEMORY; 11833 } 11834 } 11835 ocs_hw_reqtag_reset(hw); 11836 return OCS_HW_RTN_SUCCESS; 11837 } 11838 11839 /** 11840 * @brief Allocate a WQ request tag. 11841 * 11842 * Allocate and populate a WQ request tag from the WQ request tag pool. 11843 * 11844 * @param hw Pointer to HW object. 11845 * @param callback Callback function. 11846 * @param arg Pointer to callback argument. 11847 * 11848 * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated. 11849 */ 11850 hw_wq_callback_t * 11851 ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg) 11852 { 11853 hw_wq_callback_t *wqcb; 11854 11855 ocs_hw_assert(callback != NULL); 11856 11857 wqcb = ocs_pool_get(hw->wq_reqtag_pool); 11858 if (wqcb != NULL) { 11859 ocs_hw_assert(wqcb->callback == NULL); 11860 wqcb->callback = callback; 11861 wqcb->arg = arg; 11862 } 11863 return wqcb; 11864 } 11865 11866 /** 11867 * @brief Free a WQ request tag. 11868 * 11869 * Free the passed in WQ request tag. 11870 * 11871 * @param hw Pointer to HW object. 11872 * @param wqcb Pointer to WQ request tag object to free. 11873 * 11874 * @return None. 11875 */ 11876 void 11877 ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb) 11878 { 11879 ocs_hw_assert(wqcb->callback != NULL); 11880 wqcb->callback = NULL; 11881 wqcb->arg = NULL; 11882 ocs_pool_put(hw->wq_reqtag_pool, wqcb); 11883 } 11884 11885 /** 11886 * @brief Return WQ request tag by index. 11887 * 11888 * @par Description 11889 * Return pointer to WQ request tag object given an index. 11890 * 11891 * @param hw Pointer to HW object. 11892 * @param instance_index Index of WQ request tag to return. 11893 * 11894 * @return Pointer to WQ request tag, or NULL. 11895 */ 11896 hw_wq_callback_t * 11897 ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index) 11898 { 11899 hw_wq_callback_t *wqcb; 11900 11901 wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index); 11902 if (wqcb == NULL) { 11903 ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index); 11904 } 11905 return wqcb; 11906 } 11907 11908 /** 11909 * @brief Reset the WQ request tag pool. 11910 * 11911 * @par Description 11912 * Reset the WQ request tag pool, returning all to the free list. 11913 * 11914 * @param hw pointer to HW object. 11915 * 11916 * @return None. 11917 */ 11918 void 11919 ocs_hw_reqtag_reset(ocs_hw_t *hw) 11920 { 11921 hw_wq_callback_t *wqcb; 11922 uint32_t i; 11923 11924 /* Remove all from freelist */ 11925 while(ocs_pool_get(hw->wq_reqtag_pool) != NULL) { 11926 ; 11927 } 11928 11929 /* Put them all back */ 11930 for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) { 11931 wqcb->instance_index = i; 11932 wqcb->callback = NULL; 11933 wqcb->arg = NULL; 11934 ocs_pool_put(hw->wq_reqtag_pool, wqcb); 11935 } 11936 } 11937 11938 /** 11939 * @brief Handle HW assertion 11940 * 11941 * HW assert, display diagnostic message, and abort. 
11942 *
11943 * @param cond string describing failing assertion condition
11944 * @param filename file name
11945 * @param linenum line number
11946 *
11947 * @return none
11948 */
11949 void
11950 _ocs_hw_assert(const char *cond, const char *filename, int linenum)
11951 {
11952 ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond);
11953 ocs_abort();
11954 /* no return */
11955 }
11956
11957 /**
11958 * @brief Handle HW verify
11959 *
11960 * HW verify, display diagnostic message, dump stack and return.
11961 *
11962 * @param cond string describing failing verify condition
11963 * @param filename file name
11964 * @param linenum line number
11965 *
11966 * @return none
11967 */
11968 void
11969 _ocs_hw_verify(const char *cond, const char *filename, int linenum)
11970 {
11971 ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond);
11972 ocs_print_stack();
11973 }
11974
11975 /**
11976 * @brief Requeue an XRI
11977 *
11978 * @par Description
11979 * Requeue an XRI: repost its auto xfer rdy buffer and submit a REQUEUE_XRI WQE.
11980 *
11981 * @param hw Pointer to HW object.
11982 * @param io Pointer to HW IO
11983 *
11984 * @return Returns 0 if successful, or -1 otherwise
11985 */
11986 int32_t
11987 ocs_hw_reque_xri(ocs_hw_t *hw, ocs_hw_io_t *io)
11988 {
11989 int32_t rc = 0;
11990
11991 rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1);
11992 if (rc) {
11993 ocs_list_add_tail(&hw->io_port_dnrx, io);
11994 rc = -1;
11995 goto exit_ocs_hw_reque_xri;
11996 }
11997
11998 io->auto_xfer_rdy_dnrx = 0;
11999 io->type = OCS_HW_IO_DNRX_REQUEUE;
12000 if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) {
12001 /* Clear buffer from XRI */
12002 ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
12003 io->axr_buf = NULL;
12004
12005 ocs_log_err(hw->os, "requeue_xri WQE error\n");
12006 ocs_list_add_tail(&hw->io_port_dnrx, io);
12007
12008 rc = -1;
12009 goto exit_ocs_hw_reque_xri;
12010 }
12011
12012 if (io->wq == NULL) {
12013 io->wq = ocs_hw_queue_next_wq(hw, io);
12014 ocs_hw_assert(io->wq != NULL);
12015 }
12016
12017 /*
12018 * Add IO to active io wqe list before submitting, in case the
12019 * wcqe processing preempts this thread.
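 * (That is, if the WQE completes on another CPU before hw_wq_write()
 * returns, the completion path must already see the statistics and list
 * state updated; doing the accounting afterwards would race with it.)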
12020 */
12021 OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
12022 OCS_STAT(io->wq->use_count++);
12023
12024 rc = hw_wq_write(io->wq, &io->wqe);
12025 if (rc < 0) {
12026 ocs_log_err(hw->os, "sli_queue_write reque xri failed: %d\n", rc);
12027 rc = -1;
12028 }
12029
12030 exit_ocs_hw_reque_xri:
12031 return rc;
12032 }
12033
12034 uint32_t
12035 ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn)
12036 {
12037 sli4_t *sli4 = &ocs->hw.sli;
12038 ocs_dma_t dma;
12039 uint8_t *payload = NULL;
12040
12041 int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan;
12042
12043 /* allocate memory for the service parameters */
12044 if (ocs_dma_alloc(ocs, &dma, 112, 4)) {
12045 ocs_log_err(ocs, "Failed to allocate DMA memory\n");
12046 return 1;
12047 }
12048
12049 if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
12050 &dma, indicator)) {
12051 ocs_log_err(ocs, "READ_SPARM64 allocation failure\n");
12052 ocs_dma_free(ocs, &dma);
12053 return 1;
12054 }
12055
12056 if (sli_bmbx_command(sli4)) {
12057 ocs_log_err(ocs, "READ_SPARM64 command failure\n");
12058 ocs_dma_free(ocs, &dma);
12059 return 1;
12060 }
12061
12062 payload = dma.virt;
12063 ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn));
12064 ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn));
12065 ocs_dma_free(ocs, &dma);
12066 return 0;
12067 }
12068
12069 /**
12070 * @page fc_hw_api_overview HW APIs
12071 * - @ref devInitShutdown
12072 * - @ref domain
12073 * - @ref port
12074 * - @ref node
12075 * - @ref io
12076 * - @ref interrupt
12077 *
12078 * <div class="overview">
12079 * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
12080 * message details, but the higher level code must still manage domains, ports,
12081 * IT nexuses, and IOs. The HW API is designed to help the higher level manage
12082 * these objects.<br><br>
12083 *
12084 * The HW uses function callbacks to notify the higher-level code of events
12085 * that are received from the chip. There are currently three types of
12086 * functions that may be registered:
12087 *
12088 * <ul><li>domain – This function is called whenever a domain event is generated
12089 * within the HW. Examples include a new FCF is discovered, a connection
12090 * to a domain is disrupted, and allocation callbacks.</li>
12091 * <li>unsolicited – This function is called whenever new data is received in
12092 * the SLI-4 receive queue.</li>
12093 * <li>rnode – This function is called for remote node events, such as attach status
12094 * and allocation callbacks.</li></ul>
12095 *
12096 * Upper layer functions may be registered by using the ocs_hw_callback() function.
12097 *
12098 * <img src="elx_fc_hw.jpg" alt="FC/FCoE HW" title="FC/FCoE HW" align="right"/>
12099 * <h2>FC/FCoE HW API</h2>
12100 * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
12101 * interface for creating the necessary common objects and sending I/Os. It may be used
12102 * “as is” in customer implementations or it can serve as an example of typical interactions
12103 * between a driver and the SLI-4 hardware.
The broad categories of functionality include:
12104 *
12105 * <ul><li>Setting up and tearing down the HW.</li>
12106 * <li>Allocating and using the common objects (SLI Port, domain, remote node).</li>
12107 * <li>Sending and receiving I/Os.</li></ul>
12108 *
12109 * <h3>HW Setup</h3>
12110 * To set up the HW:
12111 *
12112 * <ol>
12113 * <li>Set up the HW object using ocs_hw_setup().<br>
12114 * This step performs a basic configuration of the SLI-4 component and the HW to
12115 * enable querying the hardware for its capabilities. At this stage, the HW is not
12116 * capable of general operations (such as receiving events or sending I/Os).</li><br><br>
12117 * <li>Configure the HW according to the driver requirements.<br>
12118 * The HW provides functions to discover hardware capabilities (ocs_hw_get()), as
12119 * well as to configure the amount of resources required (ocs_hw_set()). The driver
12120 * must also register callback functions (ocs_hw_callback()) to receive notification of
12121 * various asynchronous events.<br><br>
12122 * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
12123 * step creates the underlying queues, commits resources to the hardware, and
12124 * prepares the hardware for operation. While the hardware is operational, the
12125 * port is not yet online, and cannot send or receive data.</li><br><br>
12126 * <br><br>
12127 * <li>Finally, the driver can bring the port online (ocs_hw_port_control()).<br>
12128 * When the link comes up, the HW determines if a domain is present and notifies the
12129 * driver using the domain callback function. This is the starting point of the driver's
12130 * interaction with the common objects.<br><br>
12131 * @b Note: For FCoE, there may be more than one domain available and, therefore,
12132 * more than one callback.</li>
12133 * </ol>
12134 *
12135 * <h3>Allocating and Using Common Objects</h3>
12136 * Common objects provide a mechanism through which the various OneCore Storage
12137 * driver components share and track information. These data structures are primarily
12138 * used to track SLI component information but can be extended by other components, if
12139 * needed. The main objects are:
12140 *
12141 * <ul><li>DMA – the ocs_dma_t object describes a memory region suitable for direct
12142 * memory access (DMA) transactions.</li>
12143 * <li>SCSI domain – the ocs_domain_t object represents the SCSI domain, including
12144 * any infrastructure devices such as FC switches and FC forwarders. The domain
12145 * object contains both an FCFI and a VFI.</li>
12146 * <li>SLI Port (sport) – the ocs_sli_port_t object represents the connection between
12147 * the driver and the SCSI domain. The SLI Port object contains a VPI.</li>
12148 * <li>Remote node – the ocs_remote_node_t represents a connection between the SLI
12149 * Port and another device in the SCSI domain. The node object contains an RPI.</li></ul>
12150 *
12151 * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote
12152 * node common objects and establish the connections between them. The goal is to
12153 * connect the driver to the SCSI domain to exchange I/Os with other devices.
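 *
 * Tying the preceding setup steps together, a compressed sketch (the
 * callback name and argument are hypothetical, and error handling is
 * omitted):
 * @code
 * ocs_hw_setup(&hw, os, SLI4_PORT_TYPE_FC);
 * ocs_hw_callback(&hw, OCS_HW_CB_DOMAIN, my_domain_cb, my_arg);
 * ocs_hw_init(&hw);
 * ocs_hw_port_control(&hw, OCS_HW_PORT_INIT, 0, NULL, NULL);
 * @endcode
 *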
These
12154 * common object connections are shown in the following figure, FC Driver Common Objects:
12155 * <img src="elx_fc_common_objects.jpg"
12156 * alt="FC Driver Common Objects" title="FC Driver Common Objects" align="center"/>
12157 *
12158 * The first step is to create a connection to the domain by allocating an SLI Port object.
12159 * The SLI Port object represents a particular FC ID and must be initialized with one. With
12160 * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying
12161 * a domain, the driver allocates a domain object and attaches to it using the previous SLI
12162 * port object.<br><br>
12163 *
12164 * @b Note: In some cases, the driver may need to negotiate service parameters (that is,
12165 * FLOGI) with the domain before attaching.<br><br>
12166 *
12167 * Once attached to the domain, the driver can discover and attach to other devices
12168 * (remote nodes). The exact discovery method depends on the driver, but it typically
12169 * includes using a position map, querying the fabric name server, or an out-of-band
12170 * method. In most cases, it is necessary to log in with devices before performing I/Os.
12171 * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must
12172 * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is
12173 * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port
12174 * before exchanging FCP I/O.<br><br>
12175 *
12176 * @b Note: The HW manages both the well known fabric address and the name server as
12177 * nodes in the domain. Therefore, the driver must allocate node objects prior to
12178 * communicating with either of these entities.
12179 *
12180 * <h3>Sending and Receiving I/Os</h3>
12181 * The HW provides separate interfaces for sending BLS/ELS/FC-CT and FCP, but the
12182 * commands are conceptually similar. Since the commands complete asynchronously,
12183 * the caller must provide a HW I/O object that maintains the I/O state, as well as
12184 * provide a callback function. The driver may use the same callback function for all I/O
12185 * operations, but each operation must use a unique HW I/O object. In the SLI-4
12186 * architecture, there is a direct association between the HW I/O object and the SGL used
12187 * to describe the data. Therefore, a driver typically performs the following operations:
12188 *
12189 * <ul><li>Allocates a HW I/O object (ocs_hw_io_alloc()).</li>
12190 * <li>Formats the SGL, specifying both the HW I/O object and the SGL
12191 * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).</li>
12192 * <li>Sends the HW I/O (ocs_hw_io_send()).</li></ul>
12193 *
12194 * <h3>HW Tear Down</h3>
12195 * To tear down the HW:
12196 *
12197 * <ol><li>Take the port offline (ocs_hw_port_control()) to prevent receiving further
12198 * data and events.</li>
12199 * <li>Destroy the HW object (ocs_hw_teardown()).</li>
12200 * <li>Free any memory used by the HW, such as buffers for unsolicited data.</li></ol>
12201 * <br>
12202 * </div><!-- overview -->
12203 *
12204 */
12205
12206 /**
12207 * This contains all hw runtime workaround code. Based on the asic type,
12208 * asic revision, and range of fw revisions, a particular workaround may be enabled.
12209 *
12210 * A workaround may consist of overriding a particular HW/SLI4 value that was initialized
12211 * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue
12212 * sizes).
Or if required, elements of the ocs_hw_workaround_t structure may be set to 12213 * control specific runtime behavior. 12214 * 12215 * It is intended that the controls in ocs_hw_workaround_t be defined functionally. So we 12216 * would have the driver look like: "if (hw->workaround.enable_xxx) then ...", rather than 12217 * what we might previously see as "if this is a BE3, then do xxx" 12218 * 12219 */ 12220 12221 #define HW_FWREV_ZERO (0ull) 12222 #define HW_FWREV_MAX (~0ull) 12223 12224 #define SLI4_ASIC_TYPE_ANY 0 12225 #define SLI4_ASIC_REV_ANY 0 12226 12227 /** 12228 * @brief Internal definition of workarounds 12229 */ 12230 12231 typedef enum { 12232 HW_WORKAROUND_TEST = 1, 12233 HW_WORKAROUND_MAX_QUEUE, /**< Limits all queues */ 12234 HW_WORKAROUND_MAX_RQ, /**< Limits only the RQ */ 12235 HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 12236 HW_WORKAROUND_WQE_COUNT_METHOD, 12237 HW_WORKAROUND_RQE_COUNT_METHOD, 12238 HW_WORKAROUND_USE_UNREGISTERD_RPI, 12239 HW_WORKAROUND_DISABLE_AR_TGT_DIF, /**< Disable of auto-response target DIF */ 12240 HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 12241 HW_WORKAROUND_USE_DIF_QUARANTINE, 12242 HW_WORKAROUND_USE_DIF_SEC_XRI, /**< Use secondary xri for multiple data phases */ 12243 HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, /**< FCFI reported in SRB not correct, use "first" registered domain */ 12244 HW_WORKAROUND_FW_VERSION_TOO_LOW, /**< The FW version is not the min version supported by this driver */ 12245 HW_WORKAROUND_SGLC_MISREPORTED, /**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */ 12246 HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, /**< Don't use SEND_FRAME capable if FW version is too old */ 12247 } hw_workaround_e; 12248 12249 /** 12250 * @brief Internal workaround structure instance 12251 */ 12252 12253 typedef struct { 12254 sli4_asic_type_e asic_type; 12255 sli4_asic_rev_e asic_rev; 12256 uint64_t fwrev_low; 12257 uint64_t fwrev_high; 12258 12259 hw_workaround_e workaround; 12260 uint32_t value; 12261 } hw_workaround_t; 12262 12263 static hw_workaround_t hw_workarounds[] = { 12264 {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX, 12265 HW_WORKAROUND_TEST, 999}, 12266 12267 /* Bug: 127585: if_type == 2 returns 0 for total length placed on 12268 * FCP_TSEND64_WQE completions. 
Note, original driver code enables this
12269 * workaround for all asic types
12270 */
12271 {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12272 HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0},
12273
12274 /* Bug: unknown, Lancer A0 has mis-reported max queue depth */
12275 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX,
12276 HW_WORKAROUND_MAX_QUEUE, 2048},
12277
12278 /* Bug: 143399, BE3 has mis-reported max RQ queue depth */
12279 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0),
12280 HW_WORKAROUND_MAX_RQ, 2048},
12281
12282 /* Bug: 143399, skyhawk has mis-reported max RQ queue depth */
12283 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0),
12284 HW_WORKAROUND_MAX_RQ, 2048},
12285
12286 /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */
12287 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12288 HW_WORKAROUND_WQE_COUNT_METHOD, 1},
12289
12290 /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */
12291 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12292 HW_WORKAROUND_RQE_COUNT_METHOD, 1},
12293
12294 /* Bug: 142968, BE3 UE with RPI == 0xffff */
12295 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12296 HW_WORKAROUND_USE_UNREGISTERD_RPI, 0},
12297
12298 /* Bug: unknown, Skyhawk won't support auto-response on target T10-PI */
12299 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12300 HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0},
12301
12302 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0),
12303 HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0},
12304
12305 /* Bug: 160124, Skyhawk quarantine DIF XRIs */
12306 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12307 HW_WORKAROUND_USE_DIF_QUARANTINE, 0},
12308
12309 /* Bug: 161832, Skyhawk use secondary XRI for multiple data phase TRECV */
12310 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12311 HW_WORKAROUND_USE_DIF_SEC_XRI, 0},
12312
12313 /* Bug: xxxxxx, FCFI reported in SRB not correct */
12314 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12315 HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0},
12316 #if 0
12317 /* Bug: 165642, FW version check for driver */
12318 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER),
12319 HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12320 #endif
12321 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK),
12322 HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12323
12324 /* Bug 177061, Lancer FW does not set the SGLC bit */
12325 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12326 HW_WORKAROUND_SGLC_MISREPORTED, 0},
12327
12328 /* BZ 181208/183914, enable this workaround for ALL revisions */
12329 {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12330 HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0},
12331 };
12332
12333 /**
12334 * @brief Function prototypes
12335 */
12336
12337 static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w);
12338
12339 /**
12340 * @brief Parse the firmware version (name)
12341 *
12342 * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined
12343 * by the HW_FWREV() macro
12344 *
12345 * @param fwrev_string pointer to the firmware string
12346 *
12347 * @return packed firmware revision value
12348 */
12349
12350 
static uint64_t
12351 parse_fw_version(const char *fwrev_string)
12352 {
12353 int v[4] = {0};
12354 const char *p;
12355 int i;
12356
12357 for (p = fwrev_string, i = 0; *p && (i < 4); i ++) {
12358 v[i] = ocs_strtoul(p, 0, 0);
12359 while(*p && *p != '.') {
12360 p ++;
12361 }
12362 if (*p) {
12363 p ++;
12364 }
12365 }
12366
12367 /* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */
12368 if (v[2] == 9999) {
12369 return HW_FWREV_MAX;
12370 } else {
12371 return HW_FWREV(v[0], v[1], v[2], v[3]);
12372 }
12373 }
12374
12375 /**
12376 * @brief Test for a workaround match
12377 *
12378 * Looks at the asic type, asic revision, and fw revision, and returns TRUE if they match.
12379 *
12380 * @param hw Pointer to the HW structure
12381 * @param w Pointer to a workaround structure entry
12382 *
12383 * @return Return TRUE for a match
12384 */
12385
12386 static int32_t
12387 ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w)
12388 {
12389 return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) &&
12390 ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) &&
12391 (w->fwrev_low <= hw->workaround.fwrev) &&
12392 ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high)));
12393 }
12394
12395 /**
12396 * @brief Setup HW runtime workarounds
12397 *
12398 * The function is called at the end of ocs_hw_setup() to set up any runtime workarounds
12399 * based on the HW/SLI setup.
12400 *
12401 * @param hw Pointer to HW structure
12402 *
12403 * @return none
12404 */
12405
12406 void
12407 ocs_hw_workaround_setup(struct ocs_hw_s *hw)
12408 {
12409 hw_workaround_t *w;
12410 sli4_t *sli4 = &hw->sli;
12411 uint32_t i;
12412
12413 /* Initialize the workaround settings */
12414 ocs_memset(&hw->workaround, 0, sizeof(hw->workaround));
12415
12416 /* If hw_war_version is non-null, then it's a value that was set by a module parameter
12417 * (sorry for the break in abstraction, but workarounds are ...
well, workarounds) 12418 */ 12419 12420 if (hw->hw_war_version) { 12421 hw->workaround.fwrev = parse_fw_version(hw->hw_war_version); 12422 } else { 12423 hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]); 12424 } 12425 12426 /* Walk the workaround list, if a match is found, then handle it */ 12427 for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) { 12428 if (ocs_hw_workaround_match(hw, w)) { 12429 switch(w->workaround) { 12430 case HW_WORKAROUND_TEST: { 12431 ocs_log_debug(hw->os, "Override: test: %d\n", w->value); 12432 break; 12433 } 12434 12435 case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: { 12436 ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n"); 12437 hw->workaround.retain_tsend_io_length = 1; 12438 break; 12439 } 12440 case HW_WORKAROUND_MAX_QUEUE: { 12441 sli4_qtype_e q; 12442 12443 ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value); 12444 for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) { 12445 if (hw->num_qentries[q] > w->value) { 12446 hw->num_qentries[q] = w->value; 12447 } 12448 } 12449 break; 12450 } 12451 case HW_WORKAROUND_MAX_RQ: { 12452 ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value); 12453 if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) { 12454 hw->num_qentries[SLI_QTYPE_RQ] = w->value; 12455 } 12456 break; 12457 } 12458 case HW_WORKAROUND_WQE_COUNT_METHOD: { 12459 ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value); 12460 sli4->config.count_method[SLI_QTYPE_WQ] = w->value; 12461 sli_calc_max_qentries(sli4); 12462 break; 12463 } 12464 case HW_WORKAROUND_RQE_COUNT_METHOD: { 12465 ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value); 12466 sli4->config.count_method[SLI_QTYPE_RQ] = w->value; 12467 sli_calc_max_qentries(sli4); 12468 break; 12469 } 12470 case HW_WORKAROUND_USE_UNREGISTERD_RPI: 12471 ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n"); 12472 hw->workaround.use_unregistered_rpi = TRUE; 12473 /* 12474 * Allocate an RPI that is never registered, to be used in the case where 12475 * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF 12476 */ 12477 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid, 12478 &hw->workaround.unregistered_index)) { 12479 ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n"); 12480 hw->workaround.use_unregistered_rpi = FALSE; 12481 } 12482 break; 12483 case HW_WORKAROUND_DISABLE_AR_TGT_DIF: 12484 ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n"); 12485 hw->workaround.disable_ar_tgt_dif = TRUE; 12486 break; 12487 case HW_WORKAROUND_DISABLE_SET_DUMP_LOC: 12488 ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n"); 12489 hw->workaround.disable_dump_loc = TRUE; 12490 break; 12491 case HW_WORKAROUND_USE_DIF_QUARANTINE: 12492 ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n"); 12493 hw->workaround.use_dif_quarantine = TRUE; 12494 break; 12495 case HW_WORKAROUND_USE_DIF_SEC_XRI: 12496 ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n"); 12497 hw->workaround.use_dif_sec_xri = TRUE; 12498 break; 12499 case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB: 12500 ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n"); 12501 hw->workaround.override_fcfi = TRUE; 12502 break; 12503 12504 case HW_WORKAROUND_FW_VERSION_TOO_LOW: 12505 ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n"); 12506 
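/*
 * Worked example (assuming the HW_FWREV() packing of 16 bits per
 * version component): a reported f/w of "4.2.314.0" parses to
 * HW_FWREV(4,2,314,0), which compares below a hypothetical minimum of
 * HW_FWREV(4,6,293,0) because 2 < 6 in the second component.
 */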
hw->workaround.fw_version_too_low = TRUE; 12507 break; 12508 case HW_WORKAROUND_SGLC_MISREPORTED: 12509 ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n"); 12510 hw->workaround.sglc_misreported = TRUE; 12511 break; 12512 case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE: 12513 ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n"); 12514 hw->workaround.ignore_send_frame = TRUE; 12515 break; 12516 } /* switch(w->workaround) */ 12517 } 12518 } 12519 } 12520
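/*
 * Worked examples of the firmware-version matching above (a sketch;
 * the values are taken from the workaround table for illustration):
 *
 *	parse_fw_version("4.6.293.0")  == HW_FWREV(4,6,293,0)
 *	parse_fw_version("0.0.9999.0") == HW_FWREV_MAX   (bootleg special case)
 *
 * An entry applies when fwrev_low <= fwrev and either fwrev_high is
 * HW_FWREV_MAX (treated as unbounded) or fwrev < fwrev_high; see
 * ocs_hw_workaround_match().
 */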