/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Defines and implements the Hardware Abstraction Layer (HW).
 * All interaction with the hardware is performed through the HW, which abstracts
 * the details of the underlying SLI-4 implementation.
 */

/**
 * @defgroup devInitShutdown Device Initialization and Shutdown
 * @defgroup domain Domain Functions
 * @defgroup port Port Functions
 * @defgroup node Remote Node Functions
 * @defgroup io IO Functions
 * @defgroup interrupt Interrupt handling
 * @defgroup os OS Required Functions
 */

#include "ocs.h"
#include "ocs_os.h"
#include "ocs_hw.h"
#include "ocs_hw_queues.h"

#define OCS_HW_MQ_DEPTH				128
#define OCS_HW_READ_FCF_SIZE			4096
#define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS	256
#define OCS_HW_WQ_TIMER_PERIOD_MS		500

/* values used for setting the auto xfer rdy parameters */
#define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT		0	/* 512 bytes */
#define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT	TRUE
#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT	FALSE
#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT	0
#define OCS_HW_REQUE_XRI_REGTAG				65534
/* max command and response buffer lengths -- arbitrary at the moment */
#define OCS_HW_DMTF_CLP_CMD_MAX		256
#define OCS_HW_DMTF_CLP_RSP_MAX		256

/* HW global data */
ocs_hw_global_t hw_global;

static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
static int32_t ocs_hw_cb_link(void *, void *);
static int32_t ocs_hw_cb_fip(void *, void *);
static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
static int32_t ocs_hw_flush(ocs_hw_t *);
static int32_t ocs_hw_command_cancel(ocs_hw_t *);
static int32_t ocs_hw_io_cancel(ocs_hw_t *);
static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
static void ocs_hw_io_free_internal(void *arg);
static void ocs_hw_io_free_port_owned(void *arg);
static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);

/* HW domain database operations */
static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);

/* Port state machine */
static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);

/* Domain state machine */
static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);

/* BZ 161832 */
static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);

/* WQE timeouts */
static void target_wqe_timer_cb(void *arg);
static void shutdown_target_wqe_timer(ocs_hw_t *hw);
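
/*
 * Editor's note (sketch, not verbatim driver code): the two inline helpers
 * below bracket WQE submission and completion when target WQE timeout
 * emulation is enabled. A submit path would use them roughly as follows
 * (hw_wq_write() is an assumed submission helper, named here for
 * illustration):
 *
 *	ocs_hw_add_io_timed_wqe(hw, io);
 *	if (hw_wq_write(wq, &io->wqe) < 0)
 *		ocs_hw_remove_io_timed_wqe(hw, io);
 *
 * target_wqe_timer_cb() then periodically scans hw->io_timed_wqe, comparing
 * each IO's submit_ticks against its tgt_wqe_timeout.
 */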

static inline void
ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
		/*
		 * Active WQE list currently only used for
		 * target WQE timeouts.
		 */
		ocs_lock(&hw->io_lock);
		ocs_list_add_tail(&hw->io_timed_wqe, io);
		io->submit_ticks = ocs_get_os_ticks();
		ocs_unlock(&hw->io_lock);
	}
}

static inline void
ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (hw->config.emulate_tgt_wqe_timeout) {
		/*
		 * If target WQE timeouts are enabled,
		 * remove from the active WQE list.
		 */
		ocs_lock(&hw->io_lock);
		if (ocs_list_on_list(&io->wqe_link)) {
			ocs_list_remove(&hw->io_timed_wqe, io);
		}
		ocs_unlock(&hw->io_lock);
	}
}

static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type)
{
	switch (io_type) {
	case OCS_HW_IO_INITIATOR_READ:
	case OCS_HW_IO_INITIATOR_WRITE:
	case OCS_HW_IO_INITIATOR_NODATA:
	case OCS_HW_FC_CT:
	case OCS_HW_ELS_REQ:
		return 1;
	default:
		return 0;
	}
}

static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
{
	/* if the exchange is not active, there is nothing to abort */
	if (!xb) {
		return FALSE;
	}
	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
		switch (ext) {
		/* exceptions where abort is not needed */
		case SLI4_FC_LOCAL_REJECT_INVALID_RPI:		/* lancer returns this after unreg_rpi */
		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:	/* abort already in progress */
			return FALSE;
		default:
			break;
		}
	}
	return TRUE;
}

/**
 * @brief Determine the number of chutes on the device.
 *
 * @par Description
 * Some devices require queue resources allocated per protocol processor
 * (chute). This function returns the number of chutes on this device.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns the number of chutes on the device for protocol processing.
 */
static uint32_t
ocs_hw_get_num_chutes(ocs_hw_t *hw)
{
	uint32_t num_chutes = 1;

	if (sli_get_is_dual_ulp_capable(&hw->sli) &&
	    sli_get_is_ulp_enabled(&hw->sli, 0) &&
	    sli_get_is_ulp_enabled(&hw->sli, 1)) {
		num_chutes = 2;
	}
	return num_chutes;
}

static ocs_hw_rtn_e
ocs_hw_link_event_init(ocs_hw_t *hw)
{
	ocs_hw_assert(hw);

	hw->link.status = SLI_LINK_STATUS_MAX;
	hw->link.topology = SLI_LINK_TOPO_NONE;
	hw->link.medium = SLI_LINK_MEDIUM_MAX;
	hw->link.speed = 0;
	hw->link.loop_map = NULL;
	hw->link.fc_id = UINT32_MAX;

	return OCS_HW_RTN_SUCCESS;
}
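
/*
 * Editor's note: ocs_hw_wcqe_abort_needed() above reduces the abort decision
 * to: abort only if the exchange is still active (XB set) and the completion
 * is not a LOCAL_REJECT with INVALID_RPI or ABORT_REQUESTED extended status,
 * since either of those already implies the exchange is being torn down.
 */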

/**
 * @ingroup devInitShutdown
 * @brief If this is physical port 0, then read the max dump size.
 *
 * @par Description
 * Queries the FW for the maximum dump size.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static ocs_hw_rtn_e
ocs_hw_read_max_dump_size(ocs_hw_t *hw)
{
	uint8_t buf[SLI4_BMBX_SIZE];
	uint8_t bus, dev, func;
	int rc;

	/* lancer only */
	if ((SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) &&
	    (SLI4_IF_TYPE_LANCER_G7 != sli_get_if_type(&hw->sli))) {
		ocs_log_debug(hw->os, "Function only supported for I/F type 2\n");
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Make sure the FW is new enough to support this command. If the FW
	 * is too old, the FW will UE.
	 */
	if (hw->workaround.disable_dump_loc) {
		ocs_log_test(hw->os, "FW version is too old for this feature\n");
		return OCS_HW_RTN_ERROR;
	}

	/* attempt to determine the dump size for function 0 only. */
	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
	if (func == 0) {
		if (sli_cmd_common_set_dump_location(&hw->sli, buf,
						     SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
			sli4_res_common_set_dump_location_t *rsp =
				(sli4_res_common_set_dump_location_t *)
				(buf + offsetof(sli4_cmd_sli_config_t,
						payload.embed));

			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_test(hw->os, "set dump location command failed\n");
				return rc;
			} else {
				hw->dump_size = rsp->buffer_length;
				ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);
			}
		}
	}
	return OCS_HW_RTN_SUCCESS;
}
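
/*
 * Editor's note: for an embedded SLI_CONFIG command such as
 * COMMON_SET_DUMP_LOCATION above, the response overlays the request buffer,
 * so after ocs_hw_command(hw, buf, OCS_CMD_POLL, ...) completes, the payload
 * is recovered at:
 *
 *	buf + offsetof(sli4_cmd_sli_config_t, payload.embed)
 *
 * The same pattern is used for other polled embedded mailbox commands in
 * this file.
 */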

/**
 * @ingroup devInitShutdown
 * @brief Set up the Hardware Abstraction Layer module.
 *
 * @par Description
 * Performs the one-time set-up required to configure the hardware.
 *
 * @param hw Hardware context allocated by the caller.
 * @param os Device abstraction.
 * @param port_type Protocol type of port, such as FC and NIC.
 *
 * @todo Why is port_type a parameter?
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
{
	uint32_t i;
	char prop_buf[32];

	if (hw == NULL) {
		ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->hw_setup_called) {
		/* Setup run-time workarounds.
		 * Call for each setup, to allow for hw_war_version
		 */
		ocs_hw_workaround_setup(hw);
		return OCS_HW_RTN_SUCCESS;
	}

	/*
	 * ocs_hw_init() relies on NULL pointers indicating that a structure
	 * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
	 * free/realloc that memory
	 */
	ocs_memset(hw, 0, sizeof(ocs_hw_t));

	hw->hw_setup_called = TRUE;

	hw->os = os;

	ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
	ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
	ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
	hw->cmd_head_count = 0;

	ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
	ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));

	ocs_atomic_init(&hw->io_alloc_failed_count, 0);

	hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
	hw->config.dif_seed = 0;
	hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
	hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
	hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
	hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;

	if (sli_setup(&hw->sli, hw->os, port_type)) {
		ocs_log_err(hw->os, "SLI setup failed\n");
		return OCS_HW_RTN_ERROR;
	}

	ocs_memset(hw->domains, 0, sizeof(hw->domains));

	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

	ocs_hw_link_event_init(hw);

	sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
	sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);

	/*
	 * Set all the queue sizes to the maximum allowed. These values may
	 * be changed later by the adjust and workaround functions.
	 */
	for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
		hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);
	}

	/*
	 * The RQ assignment for RQ pair mode.
	 */
	hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
	hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
	if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
		hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);
	}

	/* by default, enable initiator-only auto-ABTS emulation */
	hw->config.i_only_aab = TRUE;

	/* Setup run-time workarounds */
	ocs_hw_workaround_setup(hw);

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (hw->workaround.override_fcfi) {
		hw->first_domain_idx = -1;
	}

	/* Must be done after the workaround setup */
	if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) ||
	    (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))) {
		(void)ocs_hw_read_max_dump_size(hw);
	}

	/* calculate the number of WQs required. */
	ocs_hw_adjust_wqs(hw);

	/* Set the default dif mode */
	if (! sli_is_dif_inline_capable(&hw->sli)) {
		ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
		hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;
	}
	/* Workaround: BZ 161832 */
	if (hw->workaround.use_dif_sec_xri) {
		ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
	}

	/*
	 * Figure out the starting and max ULP to spread the WQs across the
	 * ULPs.
	 */
	if (sli_get_is_dual_ulp_capable(&hw->sli)) {
		if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
		    sli_get_is_ulp_enabled(&hw->sli, 1)) {
			hw->ulp_start = 0;
			hw->ulp_max = 1;
		} else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
			hw->ulp_start = 0;
			hw->ulp_max = 0;
		} else {
			hw->ulp_start = 1;
			hw->ulp_max = 1;
		}
	} else {
		if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
			hw->ulp_start = 0;
			hw->ulp_max = 0;
		} else {
			hw->ulp_start = 1;
			hw->ulp_max = 1;
		}
	}
	ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
		      hw->ulp_start, hw->ulp_max);
	hw->config.queue_topology = hw_global.queue_topology_string;

	hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);

	hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
	hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
	hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
	hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
	hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];

	/* Verify qtop configuration against driver supported configuration */
	if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
		ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
			     OCE_HW_MAX_NUM_MRQ_PAIRS);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
		ocs_log_crit(hw->os, "Max supported EQs = %d\n",
			     OCS_HW_MAX_NUM_EQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
		ocs_log_crit(hw->os, "Max supported CQs = %d\n",
			     OCS_HW_MAX_NUM_CQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
		ocs_log_crit(hw->os, "Max supported WQs = %d\n",
			     OCS_HW_MAX_NUM_WQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
		ocs_log_crit(hw->os, "Max supported MQs = %d\n",
			     OCS_HW_MAX_NUM_MQ);
		return OCS_HW_RTN_ERROR;
	}

	return OCS_HW_RTN_SUCCESS;
}
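
/*
 * Editor's sketch (not driver code): a typical bring-up sequence, assuming
 * the caller has already obtained an OS handle and storage for the ocs_hw_t;
 * the SLI4_PORT_TYPE_FC enumerator is assumed from the SLI layer:
 *
 *	ocs_hw_t hw;
 *
 *	if (ocs_hw_setup(&hw, os, SLI4_PORT_TYPE_FC) != OCS_HW_RTN_SUCCESS)
 *		return -1;
 *	ocs_hw_set(&hw, OCS_HW_N_IO, 512);	(optional tuning before init)
 *	if (ocs_hw_init(&hw) != OCS_HW_RTN_SUCCESS)
 *		return -1;
 *
 * ocs_hw_setup() must run once before ocs_hw_init(); repeat calls to it only
 * re-apply the run-time workarounds.
 */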

/**
 * @ingroup devInitShutdown
 * @brief Allocate memory structures to prepare for the device operation.
 *
 * @par Description
 * Allocates memory structures needed by the device and prepares the device
 * for operation.
 * @n @n @b Note: This function may be called more than once (for example, at
 * initialization and then after a reset), but the size of the internal resources
 * may not be changed without tearing down the HW (ocs_hw_teardown()).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_init(ocs_hw_t *hw)
{
	ocs_hw_rtn_e rc;
	uint32_t i = 0;
	uint8_t buf[SLI4_BMBX_SIZE];
	uint32_t max_rpi;
	int rem_count;
	int written_size = 0;
	uint32_t count;
	char prop_buf[32];
	uint32_t ramdisc_blocksize = 512;
	uint32_t q_count = 0;

	/*
	 * Make sure the command lists are empty. If this is start-of-day,
	 * they'll be empty since they were just initialized in ocs_hw_setup.
	 * If we've just gone through a reset, the command and command pending
	 * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
	 */
	ocs_lock(&hw->cmd_lock);
	if (!ocs_list_empty(&hw->cmd_head)) {
		ocs_log_test(hw->os, "command found on cmd list\n");
		ocs_unlock(&hw->cmd_lock);
		return OCS_HW_RTN_ERROR;
	}
	if (!ocs_list_empty(&hw->cmd_pending)) {
		ocs_log_test(hw->os, "command found on pending list\n");
		ocs_unlock(&hw->cmd_lock);
		return OCS_HW_RTN_ERROR;
	}
	ocs_unlock(&hw->cmd_lock);

	/* Free RQ buffers if previously allocated */
	ocs_hw_rx_free(hw);

	/*
	 * The IO queues must be initialized here for the reset case. The
	 * ocs_hw_init_io() function will re-add the IOs to the free list.
	 * The cmd_head list should be OK since we free all entries in
	 * ocs_hw_command_cancel() that is called in the ocs_hw_reset().
	 */

	/* If we are in this function due to a reset, there may be stale items
	 * on lists that need to be removed. Clean them up.
	 */
	rem_count = 0;
	if (ocs_list_valid(&hw->io_wait_free)) {
		while ((!ocs_list_empty(&hw->io_wait_free))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_wait_free);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);
		}
	}
	rem_count = 0;
	if (ocs_list_valid(&hw->io_inuse)) {
		while ((!ocs_list_empty(&hw->io_inuse))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_inuse);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);
		}
	}
	rem_count = 0;
	if (ocs_list_valid(&hw->io_free)) {
		while ((!ocs_list_empty(&hw->io_free))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_free);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);
		}
	}
	if (ocs_list_valid(&hw->io_port_owned)) {
		while ((!ocs_list_empty(&hw->io_port_owned))) {
			ocs_list_remove_head(&hw->io_port_owned);
		}
	}
	ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
	ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);

	/* If MRQ is not required, make sure we don't request the feature. */
	if (hw->config.n_rq == 1) {
		hw->sli.config.features.flag.mrqp = FALSE;
	}

	if (sli_init(&hw->sli)) {
		ocs_log_err(hw->os, "SLI failed to initialize\n");
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Enable the auto xfer rdy feature if requested.
	 */
	hw->auto_xfer_rdy_enabled = FALSE;
	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
	    hw->config.auto_xfer_rdy_size > 0) {
		if (hw->config.esoc) {
			if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
				ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
			}
			written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
		} else {
			written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);
		}
		if (written_size) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "config auto xfer rdy failed\n");
				return rc;
			}
		}
		hw->auto_xfer_rdy_enabled = TRUE;

		if (hw->config.auto_xfer_rdy_t10_enable) {
			rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");
				return rc;
			}
		}
	}

	if (hw->sliport_healthcheck) {
		rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "Enabling Sliport Health check failed\n");
			return rc;
		}
	}
	/*
	 * Set the FDT transfer hint; this only works on Lancer.
	 */
	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
		/*
		 * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on
		 * devices with legacy firmware that do not support the OCS_HW_FDT_XFER_HINT feature.
		 */
		ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
	}

	/*
	 * Verify that we have not exceeded any queue sizes
	 */
	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
		      OCS_HW_MAX_NUM_EQ);
	if (hw->config.n_eq > q_count) {
		ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
			    hw->config.n_eq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
		      OCS_HW_MAX_NUM_CQ);
	if (hw->config.n_cq > q_count) {
		ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
			    hw->config.n_cq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
		      OCS_HW_MAX_NUM_MQ);
	if (hw->config.n_mq > q_count) {
		ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
			    hw->config.n_mq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
		      OCS_HW_MAX_NUM_RQ);
	if (hw->config.n_rq > q_count) {
		ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
			    hw->config.n_rq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
		      OCS_HW_MAX_NUM_WQ);
	if (hw->config.n_wq > q_count) {
		ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
			    hw->config.n_wq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	/* zero the hashes */
	ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
	ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
		      OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);

	ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
	ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
		      OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);

	ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
	ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
		      OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);

	rc = ocs_hw_init_queues(hw, hw->qtop);
	if (rc != OCS_HW_RTN_SUCCESS) {
		return rc;
	}

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
	if (i) {
		ocs_dma_t payload_memory;

		rc = OCS_HW_RTN_ERROR;

		if (hw->rnode_mem.size) {
			ocs_dma_free(hw->os, &hw->rnode_mem);
		}

		if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
			ocs_log_err(hw->os, "remote node memory allocation fail\n");
			return OCS_HW_RTN_NO_MEMORY;
		}

		payload_memory.size = 0;
		if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
						    &hw->rnode_mem, UINT16_MAX, &payload_memory)) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

			if (payload_memory.size != 0) {
				/* The command was non-embedded - need to free the dma buffer */
				ocs_dma_free(hw->os, &payload_memory);
			}
		}

		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "header template registration failed\n");
			return rc;
		}
	}

	/* Allocate and post RQ buffers */
	rc = ocs_hw_rx_allocate(hw);
	if (rc) {
		ocs_log_err(hw->os, "rx_allocate failed\n");
		return rc;
	}
	/* Populate hw->seq_free_list */
	if (hw->seq_pool == NULL) {
		uint32_t count = 0;
		uint32_t i;

		/* Sum up the total number of RQ entries, to use to allocate the sequence object pool */
		for (i = 0; i < hw->hw_rq_count; i++) {
			count += hw->hw_rq[i]->entry_count;
		}

		hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
		if (hw->seq_pool == NULL) {
			ocs_log_err(hw->os, "malloc seq_pool failed\n");
			return OCS_HW_RTN_NO_MEMORY;
		}
	}

	if (ocs_hw_rx_post(hw)) {
		ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");
	}

	/* Allocate rpi_ref if not previously allocated */
	if (hw->rpi_ref == NULL) {
		hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
					 OCS_M_ZERO | OCS_M_NOWAIT);
		if (hw->rpi_ref == NULL) {
			ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
			return OCS_HW_RTN_NO_MEMORY;
		}
	}

	for (i = 0; i < max_rpi; i++) {
		ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
		ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
	}

	ocs_memset(hw->domains, 0, sizeof(hw->domains));

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (hw->workaround.override_fcfi) {
		hw->first_domain_idx = -1;
	}

	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
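	/*
	 * Editor's note: each 32-bit filter_def word used below packs four
	 * 8-bit REG_FCFI filter fields, least-significant byte first:
	 *
	 *	bits  7:0	r_ctl_mask
	 *	bits 15:8	r_ctl_match
	 *	bits 23:16	type_mask
	 *	bits 31:24	type_match
	 *
	 * For example, filter_def = 0x01ff0000 ignores R_CTL (mask and match
	 * both 0) and accepts only frames whose TYPE field equals 0x01
	 * (mask 0xff, match 0x01), assuming the usual
	 * (field & mask) == match comparison.
	 */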
	/* Register a FCFI to allow unsolicited frames to be routed to the driver */
	if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
		if (hw->hw_mrq_count) {
			ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");

			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");
				return rc;
			}

			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");
				return rc;
			}
		} else {
			sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];

			ocs_log_debug(hw->os, "using REG_FCFI standard\n");

			/* Set the filter match/mask values from hw's filter_def values */
			for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
				rq_cfg[i].rq_id = 0xffff;
				rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
				rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
				rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
				rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
			}

			/*
			 * Update the rq_id's of the FCF configuration (don't update more than the number
			 * of rq_cfg elements)
			 */
			for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
				hw_rq_t *rq = hw->hw_rq[i];
				uint32_t j;
				for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
					uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
					if (mask & (1U << j)) {
						rq_cfg[j].rq_id = rq->hdr->id;
						ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
							      j, hw->config.filter_def[j], i, rq->hdr->id);
					}
				}
			}

			rc = OCS_HW_RTN_ERROR;

			if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
				rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			}

			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "FCFI registration failed\n");
				return rc;
			}
			hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;
		}
	}

	/*
	 * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
	 * thus the pool allocation size of 64k)
	 */
	rc = ocs_hw_reqtag_init(hw);
	if (rc) {
		ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);
		return rc;
	}

	rc = ocs_hw_setup_io(hw);
	if (rc) {
		ocs_log_err(hw->os, "IO allocation failure\n");
		return rc;
	}

	rc = ocs_hw_init_io(hw);
	if (rc) {
		ocs_log_err(hw->os, "IO initialization failure\n");
		return rc;
	}

	ocs_queue_history_init(hw->os, &hw->q_hist);

	/* get hw link config; polling, so callback will be called immediately */
	hw->linkcfg = OCS_HW_LINKCFG_NA;
	ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);

	/* if lancer ethernet, ethernet ports need to be enabled */
	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
	    (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
		if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
			/* log warning but continue */
			ocs_log_err(hw->os, "Failed to set ethernet license\n");
		}
	}

	/* Set the DIF seed - only for lancer right now */
	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) &&
	    ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(hw->os, "Failed to set DIF seed value\n");
		return rc;
	}

	/* Set the DIF mode - skyhawk only */
	if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
	    sli_get_dif_capable(&hw->sli)) {
		rc = ocs_hw_set_dif_mode(hw);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "Failed to set DIF mode value\n");
			return rc;
		}
	}

	/*
	 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries
	 */
	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);
	}

	/*
	 * Initialize RQ hash
	 */
	for (i = 0; i < hw->rq_count; i++) {
		ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
	}

	/*
	 * Initialize WQ hash
	 */
	for (i = 0; i < hw->wq_count; i++) {
		ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
	}
	/*
	 * Arming the CQ allows (e.g.) MQ completions to write CQ entries
	 */
	for (i = 0; i < hw->cq_count; i++) {
		ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
		sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
	}

	/* record the fact that the queues are functional */
	hw->state = OCS_HW_STATE_ACTIVE;

	/* Note: Must be after the IOs are setup and the state is active */
	if (ocs_hw_rqpair_init(hw)) {
		ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");
	}

	/* finally kick off the periodic timer to check for timed-out target WQEs */
	if (hw->config.emulate_tgt_wqe_timeout) {
		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
				OCS_HW_WQ_TIMER_PERIOD_MS);
	}

	/*
	 * Allocate HW IOs for send frame. Allocate one for each Class 1 WQ, or if there
	 * are none of those, allocate one for WQ[0].
	 */
	if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
		for (i = 0; i < count; i++) {
			hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
			wq->send_frame_io = ocs_hw_io_alloc(hw);
			if (wq->send_frame_io == NULL) {
				ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
			}
		}
	} else {
		hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
		if (hw->hw_wq[0]->send_frame_io == NULL) {
			ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
		}
	}

	/* Initialize the send frame sequence id */
	ocs_atomic_init(&hw->send_frame_seq_id, 0);

	/* Initialize the watchdog timer if enabled by the user */
	hw->expiration_logged = 0;
	if (hw->watchdog_timeout) {
		if ((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
			ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n");
		} else if (!ocs_hw_config_watchdog_timer(hw)) {
			ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds\n", hw->watchdog_timeout);
		}
	}

	if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
		ocs_log_err(hw->os, "domain node memory allocation fail\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
		ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map,
						      SLI4_MIN_LOOP_MAP_BYTES, 4)) {
		ocs_log_err(hw->os, "Loop dma alloc failed size:%d\n", hw->loop_map.size);
	}

	return OCS_HW_RTN_SUCCESS;
}
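
/*
 * Editor's note: when MRQ is in use, ocs_hw_init() above calls
 * ocs_hw_config_mrq() twice -- first with SLI4_CMD_REG_FCFI_SET_FCFI_MODE to
 * obtain the FCFI (the filter-building loop is skipped), then with
 * SLI4_CMD_REG_FCFI_SET_MRQ_MODE to program the RQ filters and MRQ bitmask
 * assembled below.
 */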

/**
 * @brief Configure Multi-RQ
 *
 * @param hw Hardware context allocated by the caller.
 * @param mode 1 to set MRQ filters and 0 to set FCFI index
 * @param vlanid valid in mode 0
 * @param fcf_index valid in mode 0
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static int32_t
ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
{
	uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
	hw_rq_t *rq;
	sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
	uint32_t i, j;
	sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
	int32_t rc;

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		goto issue_cmd;
	}

	/* Set the filter match/mask values from hw's filter_def values */
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		rq_filter[i].rq_id = 0xffff;
		rq_filter[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
		rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
		rq_filter[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
		rq_filter[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
	}

	/* Accumulate counts for each filter type used, build rq_ids[] list */
	for (i = 0; i < hw->hw_rq_count; i++) {
		rq = hw->hw_rq[i];
		for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
			if (rq->filter_mask & (1U << j)) {
				if (rq_filter[j].rq_id != 0xffff) {
					/* Already used. Bail out if it's not the RQ set case. */
					if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
						ocs_log_err(hw->os, "Wrong queue topology.\n");
						return OCS_HW_RTN_ERROR;
					}
					continue;
				}

				if (rq->is_mrq) {
					rq_filter[j].rq_id = rq->base_mrq_id;
					mrq_bitmask |= (1U << j);
				} else {
					rq_filter[j].rq_id = rq->hdr->id;
				}
			}
		}
	}

issue_cmd:
	/* Invoke REG_FCFI_MRQ */
	rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
				  buf,					/* buf */
				  SLI4_BMBX_SIZE,			/* size */
				  mode,					/* mode 1 */
				  fcf_index,				/* fcf_index */
				  vlanid,				/* vlan_id */
				  hw->config.rq_selection_policy,	/* RQ selection policy */
				  mrq_bitmask,				/* MRQ bitmask */
				  hw->hw_mrq_count,			/* num_mrqs */
				  rq_filter);				/* RQ filter */
	if (rc == 0) {
		ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
		return OCS_HW_RTN_ERROR;
	}

	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

	rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;

	if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
		ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
			    rsp->hdr.command, rsp->hdr.status);
		return OCS_HW_RTN_ERROR;
	}

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		hw->fcf_indicator = rsp->fcfi;
	}
	return 0;
}

/**
 * @brief Callback function for getting linkcfg during HW initialization.
 *
 * @param status Status of the linkcfg get operation.
 * @param value Link configuration enum to which the link configuration is set.
 * @param arg Callback argument (ocs_hw_t *).
 *
 * @return None.
 */
static void
ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
{
	ocs_hw_t *hw = (ocs_hw_t *)arg;
	if (status == 0) {
		hw->linkcfg = (ocs_hw_linkcfg_e)value;
	} else {
		hw->linkcfg = OCS_HW_LINKCFG_NA;
	}
	ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
}

/**
 * @ingroup devInitShutdown
 * @brief Tear down the Hardware Abstraction Layer module.
 *
 * @par Description
 * Frees memory structures needed by the device, and shuts down the device. Does
 * not free the HW context memory (which is done by the caller).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_teardown(ocs_hw_t *hw)
{
	uint32_t i = 0;
	uint32_t iters = 10;	/* XXX */
	uint32_t max_rpi;
	uint32_t destroy_queues;
	uint32_t free_memory;

	if (!hw) {
		ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;
	}

	destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
	free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);

	/* shutdown target wqe timer */
	shutdown_target_wqe_timer(hw);

	/* Cancel watchdog timer if enabled */
	if (hw->watchdog_timeout) {
		hw->watchdog_timeout = 0;
		ocs_hw_config_watchdog_timer(hw);
	}

	/* Cancel Sliport Healthcheck */
	if (hw->sliport_healthcheck) {
		hw->sliport_healthcheck = 0;
		ocs_hw_config_sli_port_health_check(hw, 0, 0);
	}

	if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;

		ocs_hw_flush(hw);

		/* If there are outstanding commands, wait for them to complete */
		while (!ocs_list_empty(&hw->cmd_head) && iters) {
			ocs_udelay(10000);
			ocs_hw_flush(hw);
			iters--;
		}

		if (ocs_list_empty(&hw->cmd_head)) {
			ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
		} else {
			ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
		}

		/* Cancel any remaining commands */
		ocs_hw_command_cancel(hw);
	} else {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
	}

	ocs_lock_free(&hw->cmd_lock);

	/* Free unregistered RPI if workaround is in force */
	if (hw->workaround.use_unregistered_rpi) {
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
	}

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	if (hw->rpi_ref) {
		for (i = 0; i < max_rpi; i++) {
			if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
				ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
					      i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
			}
		}
		ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
		hw->rpi_ref = NULL;
	}

	ocs_dma_free(hw->os, &hw->rnode_mem);

	if (hw->io) {
		for (i = 0; i < hw->config.n_io; i++) {
			if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
			    (hw->io[i]->sgl->virt != NULL)) {
				if (hw->io[i]->is_port_owned) {
					ocs_lock_free(&hw->io[i]->axr_lock);
				}
				ocs_dma_free(hw->os, hw->io[i]->sgl);
			}
			ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
			hw->io[i] = NULL;
		}
		ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
		hw->wqe_buffs = NULL;
		ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
		hw->io = NULL;
	}

	ocs_dma_free(hw->os, &hw->xfer_rdy);
	ocs_dma_free(hw->os, &hw->dump_sges);
	ocs_dma_free(hw->os, &hw->loop_map);

	ocs_lock_free(&hw->io_lock);
	ocs_lock_free(&hw->io_abort_lock);

	for (i = 0; i < hw->wq_count; i++) {
		sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->rq_count; i++) {
		sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
	}
	for (i = 0; i < hw->mq_count; i++) {
		sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->cq_count; i++) {
		sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
	}

	ocs_hw_qtop_free(hw->qtop);

	/* Free rq buffers */
	ocs_hw_rx_free(hw);

	hw_queue_teardown(hw);

	ocs_hw_rqpair_teardown(hw);

	if (sli_teardown(&hw->sli)) {
		ocs_log_err(hw->os, "SLI teardown failed\n");
	}

	ocs_queue_history_free(&hw->q_hist);

	/* record the fact that the queues are non-functional */
	hw->state = OCS_HW_STATE_UNINITIALIZED;

	/* free sequence free pool */
	ocs_array_free(hw->seq_pool);
	hw->seq_pool = NULL;

	/* free hw_wq_callback pool */
	ocs_pool_free(hw->wq_reqtag_pool);

	ocs_dma_free(hw->os, &hw->domain_dmem);
	ocs_dma_free(hw->os, &hw->fcf_dmem);
	/* Mark HW setup as not having been called */
	hw->hw_setup_called = FALSE;

	return OCS_HW_RTN_SUCCESS;
}
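
/*
 * Editor's note: ocs_hw_reset() below leaves the HW in
 * OCS_HW_STATE_QUEUES_ALLOCATED rather than fully torn down; a caller is
 * expected to run ocs_hw_init() again afterwards to repost queues, IOs, and
 * RQ buffers (see the reset-handling paths at the top of ocs_hw_init()).
 */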

ocs_hw_rtn_e
ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
{
	uint32_t i;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint32_t iters;
	ocs_hw_state_e prev_state = hw->state;

	if (hw->state != OCS_HW_STATE_ACTIVE) {
		ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
	}

	hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;

	/* shutdown target wqe timer */
	shutdown_target_wqe_timer(hw);

	ocs_hw_flush(hw);

	/*
	 * If a mailbox command requiring a DMA is outstanding (i.e. SFP/DDM),
	 * then the FW will UE when the reset is issued. So attempt to complete
	 * all mailbox commands.
	 */
	iters = 10;
	while (!ocs_list_empty(&hw->cmd_head) && iters) {
		ocs_udelay(10000);
		ocs_hw_flush(hw);
		iters--;
	}

	if (ocs_list_empty(&hw->cmd_head)) {
		ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
	} else {
		ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
	}

	/* Reset the chip */
	switch (reset) {
	case OCS_HW_RESET_FUNCTION:
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_RESET_FIRMWARE:
		ocs_log_debug(hw->os, "issuing firmware reset\n");
		if (sli_fw_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_fw_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		/*
		 * Because the FW reset leaves the FW in a non-running state,
		 * follow that with a regular reset.
		 */
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	default:
		ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
		hw->state = prev_state;
		return OCS_HW_RTN_ERROR;
	}

	/* Not safe to walk command/io lists unless they've been initialized */
	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		ocs_hw_command_cancel(hw);

		/* Clean up the inuse list, the free list and the wait free list */
		ocs_hw_io_cancel(hw);

		ocs_memset(hw->domains, 0, sizeof(hw->domains));
		ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

		ocs_hw_link_event_init(hw);

		ocs_lock(&hw->io_lock);
		/* The io lists should be empty, but remove any that didn't get cleaned up. */
		while (!ocs_list_empty(&hw->io_timed_wqe)) {
			ocs_list_remove_head(&hw->io_timed_wqe);
		}
		/* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */

		while (!ocs_list_empty(&hw->io_free)) {
			ocs_list_remove_head(&hw->io_free);
		}
		while (!ocs_list_empty(&hw->io_wait_free)) {
			ocs_list_remove_head(&hw->io_wait_free);
		}

		/* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
		ocs_hw_reqtag_reset(hw);

		ocs_unlock(&hw->io_lock);
	}

	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		for (i = 0; i < hw->wq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->wq[i]);
		}

		for (i = 0; i < hw->rq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->rq[i]);
		}

		for (i = 0; i < hw->hw_rq_count; i++) {
			hw_rq_t *rq = hw->hw_rq[i];
			if (rq->rq_tracker != NULL) {
				uint32_t j;

				for (j = 0; j < rq->entry_count; j++) {
					rq->rq_tracker[j] = NULL;
				}
			}
		}

		for (i = 0; i < hw->mq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->mq[i]);
		}

		for (i = 0; i < hw->cq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->cq[i]);
		}

		for (i = 0; i < hw->eq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->eq[i]);
		}

		/* Free rq buffers */
		ocs_hw_rx_free(hw);

		/* Teardown the HW queue topology */
		hw_queue_teardown(hw);
	} else {
		/* Free rq buffers */
		ocs_hw_rx_free(hw);
	}

	/*
	 * Re-apply the run-time workarounds after clearing the SLI config
	 * fields in sli_reset.
	 */
	ocs_hw_workaround_setup(hw);
	hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;

	return rc;
}

int32_t
ocs_hw_get_num_eq(ocs_hw_t *hw)
{
	return hw->eq_count;
}

static int32_t
ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
{
	/*
	 * The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf.
	 * No further explanation is given in the document.
	 */
	return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
		sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
}

ocs_hw_rtn_e
ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	int32_t tmp;

	if (!value) {
		return OCS_HW_RTN_ERROR;
	}

	*value = 0;

	switch (prop) {
	case OCS_HW_N_IO:
		*value = hw->config.n_io;
		break;
	case OCS_HW_N_SGL:
		*value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED);
		break;
	case OCS_HW_MAX_IO:
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
		break;
	case OCS_HW_MAX_NODES:
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
		break;
	case OCS_HW_MAX_RQ_ENTRIES:
		*value = hw->num_qentries[SLI_QTYPE_RQ];
		break;
	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
		*value = hw->config.rq_default_buffer_size;
		break;
	case OCS_HW_AUTO_XFER_RDY_CAPABLE:
		*value = sli_get_auto_xfer_rdy_capable(&hw->sli);
		break;
	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
		*value = hw->config.auto_xfer_rdy_xri_cnt;
		break;
	case OCS_HW_AUTO_XFER_RDY_SIZE:
		*value = hw->config.auto_xfer_rdy_size;
		break;
	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
		switch (hw->config.auto_xfer_rdy_blk_size_chip) {
		case 0:
			*value = 512;
			break;
		case 1:
			*value = 1024;
			break;
		case 2:
			*value = 2048;
			break;
		case 3:
			*value = 4096;
			break;
		case 4:
			*value = 520;
			break;
		default:
			*value = 0;
			rc = OCS_HW_RTN_ERROR;
			break;
		}
		break;
	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
		*value = hw->config.auto_xfer_rdy_t10_enable;
		break;
	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
		*value = hw->config.auto_xfer_rdy_p_type;
		break;
	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
		*value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
		break;
	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
		*value = hw->config.auto_xfer_rdy_app_tag_valid;
		break;
	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
		*value = hw->config.auto_xfer_rdy_app_tag_value;
		break;
	case OCS_HW_MAX_SGE:
		*value = sli_get_max_sge(&hw->sli);
		break;
	case OCS_HW_MAX_SGL:
		*value = sli_get_max_sgl(&hw->sli);
		break;
	case OCS_HW_TOPOLOGY:
		/*
		 * Infer link.status based on link.speed.
		 * Report OCS_HW_TOPOLOGY_NONE if the link is down.
		 */
		if (hw->link.speed == 0) {
			*value = OCS_HW_TOPOLOGY_NONE;
			break;
		}
		switch (hw->link.topology) {
		case SLI_LINK_TOPO_NPORT:
			*value = OCS_HW_TOPOLOGY_NPORT;
			break;
		case SLI_LINK_TOPO_LOOP:
			*value = OCS_HW_TOPOLOGY_LOOP;
			break;
		case SLI_LINK_TOPO_NONE:
			*value = OCS_HW_TOPOLOGY_NONE;
			break;
		default:
			ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
			rc = OCS_HW_RTN_ERROR;
			break;
		}
		break;
	case OCS_HW_CONFIG_TOPOLOGY:
		*value = hw->config.topology;
		break;
	case OCS_HW_LINK_SPEED:
		*value = hw->link.speed;
		break;
	case OCS_HW_LINK_CONFIG_SPEED:
		switch (hw->config.speed) {
		case FC_LINK_SPEED_10G:
			*value = 10000;
			break;
		case FC_LINK_SPEED_AUTO_16_8_4:
			*value = 0;
			break;
		case FC_LINK_SPEED_2G:
			*value = 2000;
			break;
		case FC_LINK_SPEED_4G:
			*value = 4000;
			break;
		case FC_LINK_SPEED_8G:
			*value = 8000;
			break;
		case FC_LINK_SPEED_16G:
			*value = 16000;
			break;
		case FC_LINK_SPEED_32G:
			*value = 32000;
			break;
		default:
			ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
			rc = OCS_HW_RTN_ERROR;
			break;
		}
		break;
	case OCS_HW_IF_TYPE:
		*value = sli_get_if_type(&hw->sli);
		break;
	case OCS_HW_SLI_REV:
		*value = sli_get_sli_rev(&hw->sli);
		break;
	case OCS_HW_SLI_FAMILY:
		*value = sli_get_sli_family(&hw->sli);
		break;
	case OCS_HW_DIF_CAPABLE:
		*value = sli_get_dif_capable(&hw->sli);
		break;
	case OCS_HW_DIF_SEED:
		*value = hw->config.dif_seed;
		break;
	case OCS_HW_DIF_MODE:
		*value = hw->config.dif_mode;
		break;
	case OCS_HW_DIF_MULTI_SEPARATE:
		/* Lancer supports multiple DIF separates */
		if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
			*value = TRUE;
		} else {
			*value = FALSE;
		}
		break;
	case OCS_HW_DUMP_MAX_SIZE:
		*value = hw->dump_size;
		break;
	case OCS_HW_DUMP_READY:
		*value = sli_dump_is_ready(&hw->sli);
		break;
	case OCS_HW_DUMP_PRESENT:
		*value = sli_dump_is_present(&hw->sli);
		break;
	case OCS_HW_RESET_REQUIRED:
		tmp = sli_reset_required(&hw->sli);
		if (tmp < 0) {
			rc = OCS_HW_RTN_ERROR;
		} else {
			*value = tmp;
		}
		break;
	case OCS_HW_FW_ERROR:
		*value = sli_fw_error_status(&hw->sli);
		break;
	case OCS_HW_FW_READY:
		*value = sli_fw_ready(&hw->sli);
		break;
	case OCS_HW_FW_TIMED_OUT:
		*value = ocs_hw_get_fw_timed_out(hw);
		break;
	case OCS_HW_HIGH_LOGIN_MODE:
		*value = sli_get_hlm_capable(&hw->sli);
		break;
	case OCS_HW_PREREGISTER_SGL:
		*value = sli_get_sgl_preregister_required(&hw->sli);
		break;
	case OCS_HW_HW_REV1:
		*value = sli_get_hw_revision(&hw->sli, 0);
		break;
	case OCS_HW_HW_REV2:
		*value = sli_get_hw_revision(&hw->sli, 1);
		break;
	case OCS_HW_HW_REV3:
		*value = sli_get_hw_revision(&hw->sli, 2);
		break;
	case OCS_HW_LINKCFG:
		*value = hw->linkcfg;
		break;
	case OCS_HW_ETH_LICENSE:
		*value = hw->eth_license;
		break;
	case OCS_HW_LINK_MODULE_TYPE:
		*value = sli_get_link_module_type(&hw->sli);
		break;
	case OCS_HW_NUM_CHUTES:
		*value = ocs_hw_get_num_chutes(hw);
		break;
	case OCS_HW_DISABLE_AR_TGT_DIF:
		*value = hw->workaround.disable_ar_tgt_dif;
		break;
	case OCS_HW_EMULATE_I_ONLY_AAB:
		*value = hw->config.i_only_aab;
		break;
	case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
		*value = hw->config.emulate_tgt_wqe_timeout;
		break;
	case OCS_HW_VPD_LEN:
		*value = sli_get_vpd_len(&hw->sli);
		break;
	case OCS_HW_SGL_CHAINING_CAPABLE:
		*value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
		break;
	case OCS_HW_SGL_CHAINING_ALLOWED:
		/*
		 * SGL Chaining is allowed in the following cases:
		 *   1. Lancer with host SGL Lists
		 *   2. Skyhawk with pre-registered SGL Lists
		 */
		*value = FALSE;
		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
		    !sli_get_sgl_preregister(&hw->sli) &&
		    SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
			*value = TRUE;
		}

		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
		    sli_get_sgl_preregister(&hw->sli) &&
		    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
		     (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
			*value = TRUE;
		}
		break;
	case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
		/* Only lancer supports host allocated SGL Chaining buffers. */
		*value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
			  (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)));
		break;
	case OCS_HW_SEND_FRAME_CAPABLE:
		if (hw->workaround.ignore_send_frame) {
			*value = 0;
		} else {
			/* Only lancer is capable */
			*value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
		}
		break;
	case OCS_HW_RQ_SELECTION_POLICY:
		*value = hw->config.rq_selection_policy;
		break;
	case OCS_HW_RR_QUANTA:
		*value = hw->config.rr_quanta;
		break;
	case OCS_HW_MAX_VPORTS:
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
		break;
	default:
		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
		rc = OCS_HW_RTN_ERROR;
	}

	return rc;
}

void *
ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
{
	void *rc = NULL;

	switch (prop) {
	case OCS_HW_WWN_NODE:
		rc = sli_get_wwn_node(&hw->sli);
		break;
	case OCS_HW_WWN_PORT:
		rc = sli_get_wwn_port(&hw->sli);
		break;
	case OCS_HW_VPD:
		/* make sure VPD length is non-zero */
		if (sli_get_vpd_len(&hw->sli)) {
			rc = sli_get_vpd(&hw->sli);
		}
		break;
	case OCS_HW_FW_REV:
		rc = sli_get_fw_name(&hw->sli, 0);
		break;
	case OCS_HW_FW_REV2:
		rc = sli_get_fw_name(&hw->sli, 1);
		break;
	case OCS_HW_IPL:
		rc = sli_get_ipl_name(&hw->sli);
		break;
	case OCS_HW_PORTNUM:
		rc = sli_get_portnum(&hw->sli);
		break;
	case OCS_HW_BIOS_VERSION_STRING:
		rc = sli_get_bios_version_string(&hw->sli);
		break;
	default:
		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
	}

	return rc;
}
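
/*
 * Editor's sketch (not driver code): properties are read and written through
 * the accessors in this file, e.g.
 *
 *	uint32_t max_io;
 *
 *	if (ocs_hw_get(&hw, OCS_HW_MAX_IO, &max_io) == OCS_HW_RTN_SUCCESS)
 *		(void)ocs_hw_set(&hw, OCS_HW_N_IO, max_io);
 *
 * ocs_hw_set() below validates ranges where applicable (see OCS_HW_N_IO and
 * OCS_HW_N_SGL) and returns OCS_HW_RTN_ERROR for out-of-range values.
 */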
OCS_HW_N_SGL: 1814 value += SLI4_SGE_MAX_RESERVED; 1815 if (value > sli_get_max_sgl(&hw->sli)) { 1816 ocs_log_test(hw->os, "SGL value out of range %d vs %d\n", 1817 value, sli_get_max_sgl(&hw->sli)); 1818 rc = OCS_HW_RTN_ERROR; 1819 } else { 1820 hw->config.n_sgl = value; 1821 } 1822 break; 1823 case OCS_HW_TOPOLOGY: 1824 if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) && 1825 (value != OCS_HW_TOPOLOGY_AUTO)) { 1826 ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n", 1827 value, sli_get_medium(&hw->sli)); 1828 rc = OCS_HW_RTN_ERROR; 1829 break; 1830 } 1831 1832 switch (value) { 1833 case OCS_HW_TOPOLOGY_AUTO: 1834 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) { 1835 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC); 1836 } else { 1837 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE); 1838 } 1839 break; 1840 case OCS_HW_TOPOLOGY_NPORT: 1841 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA); 1842 break; 1843 case OCS_HW_TOPOLOGY_LOOP: 1844 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL); 1845 break; 1846 default: 1847 ocs_log_test(hw->os, "unsupported topology %#x\n", value); 1848 rc = OCS_HW_RTN_ERROR; 1849 } 1850 hw->config.topology = value; 1851 break; 1852 case OCS_HW_LINK_SPEED: 1853 if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) { 1854 switch (value) { 1855 case 0: /* Auto-speed negotiation */ 1856 case 10000: /* FCoE speed */ 1857 hw->config.speed = FC_LINK_SPEED_10G; 1858 break; 1859 default: 1860 ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n", 1861 value, sli_get_medium(&hw->sli)); 1862 rc = OCS_HW_RTN_ERROR; 1863 } 1864 break; 1865 } 1866 1867 switch (value) { 1868 case 0: /* Auto-speed negotiation */ 1869 hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4; 1870 break; 1871 case 2000: /* FC speeds */ 1872 hw->config.speed = FC_LINK_SPEED_2G; 1873 break; 1874 case 4000: 1875 hw->config.speed = FC_LINK_SPEED_4G; 1876 break; 1877 case 8000: 1878 hw->config.speed = FC_LINK_SPEED_8G; 1879 break; 1880 case 16000: 1881 hw->config.speed = FC_LINK_SPEED_16G; 1882 break; 1883 case 32000: 1884 hw->config.speed = FC_LINK_SPEED_32G; 1885 break; 1886 default: 1887 ocs_log_test(hw->os, "unsupported speed %d\n", value); 1888 rc = OCS_HW_RTN_ERROR; 1889 } 1890 break; 1891 case OCS_HW_DIF_SEED: 1892 /* Set the DIF seed - only for lancer right now */ 1893 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { 1894 ocs_log_test(hw->os, "DIF seed not supported for this device\n"); 1895 rc = OCS_HW_RTN_ERROR; 1896 } else { 1897 hw->config.dif_seed = value; 1898 } 1899 break; 1900 case OCS_HW_DIF_MODE: 1901 switch (value) { 1902 case OCS_HW_DIF_MODE_INLINE: 1903 /* 1904 * Make sure we support inline DIF. 1905 * 1906 * Note: Having both bits clear means that we have old 1907 * FW that doesn't set the bits. 1908 */ 1909 if (sli_is_dif_inline_capable(&hw->sli)) { 1910 hw->config.dif_mode = value; 1911 } else { 1912 ocs_log_test(hw->os, "chip does not support DIF inline\n"); 1913 rc = OCS_HW_RTN_ERROR; 1914 } 1915 break; 1916 case OCS_HW_DIF_MODE_SEPARATE: 1917 /* Make sure we support DIF separates. 
*/ 1918 if (sli_is_dif_separate_capable(&hw->sli)) { 1919 hw->config.dif_mode = value; 1920 } else { 1921 ocs_log_test(hw->os, "chip does not support DIF separate\n"); 1922 rc = OCS_HW_RTN_ERROR; 1923 } 1924 } 1925 break; 1926 case OCS_HW_RQ_PROCESS_LIMIT: { 1927 hw_rq_t *rq; 1928 uint32_t i; 1929 1930 /* For each hw_rq object, set its parent CQ limit value */ 1931 for (i = 0; i < hw->hw_rq_count; i++) { 1932 rq = hw->hw_rq[i]; 1933 hw->cq[rq->cq->instance].proc_limit = value; 1934 } 1935 break; 1936 } 1937 case OCS_HW_RQ_DEFAULT_BUFFER_SIZE: 1938 hw->config.rq_default_buffer_size = value; 1939 break; 1940 case OCS_HW_AUTO_XFER_RDY_XRI_CNT: 1941 hw->config.auto_xfer_rdy_xri_cnt = value; 1942 break; 1943 case OCS_HW_AUTO_XFER_RDY_SIZE: 1944 hw->config.auto_xfer_rdy_size = value; 1945 break; 1946 case OCS_HW_AUTO_XFER_RDY_BLK_SIZE: 1947 switch (value) { 1948 case 512: 1949 hw->config.auto_xfer_rdy_blk_size_chip = 0; 1950 break; 1951 case 1024: 1952 hw->config.auto_xfer_rdy_blk_size_chip = 1; 1953 break; 1954 case 2048: 1955 hw->config.auto_xfer_rdy_blk_size_chip = 2; 1956 break; 1957 case 4096: 1958 hw->config.auto_xfer_rdy_blk_size_chip = 3; 1959 break; 1960 case 520: 1961 hw->config.auto_xfer_rdy_blk_size_chip = 4; 1962 break; 1963 default: 1964 ocs_log_err(hw->os, "Invalid block size %d\n", 1965 value); 1966 rc = OCS_HW_RTN_ERROR; 1967 } 1968 break; 1969 case OCS_HW_AUTO_XFER_RDY_T10_ENABLE: 1970 hw->config.auto_xfer_rdy_t10_enable = value; 1971 break; 1972 case OCS_HW_AUTO_XFER_RDY_P_TYPE: 1973 hw->config.auto_xfer_rdy_p_type = value; 1974 break; 1975 case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA: 1976 hw->config.auto_xfer_rdy_ref_tag_is_lba = value; 1977 break; 1978 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID: 1979 hw->config.auto_xfer_rdy_app_tag_valid = value; 1980 break; 1981 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE: 1982 hw->config.auto_xfer_rdy_app_tag_value = value; 1983 break; 1984 case OCS_ESOC: 1985 hw->config.esoc = value; 1986 break; 1987 case OCS_HW_HIGH_LOGIN_MODE: 1988 rc = sli_set_hlm(&hw->sli, value); 1989 break; 1990 case OCS_HW_PREREGISTER_SGL: 1991 rc = sli_set_sgl_preregister(&hw->sli, value); 1992 break; 1993 case OCS_HW_ETH_LICENSE: 1994 hw->eth_license = value; 1995 break; 1996 case OCS_HW_EMULATE_I_ONLY_AAB: 1997 hw->config.i_only_aab = value; 1998 break; 1999 case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT: 2000 hw->config.emulate_tgt_wqe_timeout = value; 2001 break; 2002 case OCS_HW_BOUNCE: 2003 hw->config.bounce = value; 2004 break; 2005 case OCS_HW_RQ_SELECTION_POLICY: 2006 hw->config.rq_selection_policy = value; 2007 break; 2008 case OCS_HW_RR_QUANTA: 2009 hw->config.rr_quanta = value; 2010 break; 2011 default: 2012 ocs_log_test(hw->os, "unsupported property %#x\n", prop); 2013 rc = OCS_HW_RTN_ERROR; 2014 } 2015 2016 return rc; 2017 } 2018 2019 ocs_hw_rtn_e 2020 ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value) 2021 { 2022 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2023 2024 switch (prop) { 2025 case OCS_HW_WAR_VERSION: 2026 hw->hw_war_version = value; 2027 break; 2028 case OCS_HW_FILTER_DEF: { 2029 char *p = value; 2030 uint32_t idx = 0; 2031 2032 for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) { 2033 hw->config.filter_def[idx] = 0; 2034 } 2035 2036 for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) { 2037 hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0); 2038 p = ocs_strchr(p, ','); 2039 if (p != NULL) { 2040 p++; 2041 } 2042 } 2043 2044 break; 2045 } 2046 default: 2047 ocs_log_test(hw->os, "unsupported property 
%#x\n", prop); 2048 rc = OCS_HW_RTN_ERROR; 2049 break; 2050 } 2051 return rc; 2052 } 2053 /** 2054 * @ingroup interrupt 2055 * @brief Check for the events associated with the interrupt vector. 2056 * 2057 * @param hw Hardware context. 2058 * @param vector Zero-based interrupt vector number. 2059 * 2060 * @return Returns 0 on success, or a non-zero value on failure. 2061 */ 2062 int32_t 2063 ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector) 2064 { 2065 int32_t rc = 0; 2066 2067 if (!hw) { 2068 ocs_log_err(NULL, "HW context NULL?!?\n"); 2069 return -1; 2070 } 2071 2072 if (vector > hw->eq_count) { 2073 ocs_log_err(hw->os, "vector %d. max %d\n", 2074 vector, hw->eq_count); 2075 return -1; 2076 } 2077 2078 /* 2079 * The caller should disable interrupts if they wish to prevent us 2080 * from processing during a shutdown. The following states are defined: 2081 * OCS_HW_STATE_UNINITIALIZED - No queues allocated 2082 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset, 2083 * queues are cleared. 2084 * OCS_HW_STATE_ACTIVE - Chip and queues are operational 2085 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions 2086 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox 2087 * completions. 2088 */ 2089 if (hw->state != OCS_HW_STATE_UNINITIALIZED) { 2090 rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]); 2091 2092 /* Re-arm queue if there are no entries */ 2093 if (rc != 0) { 2094 sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE); 2095 } 2096 } 2097 return rc; 2098 } 2099 2100 void 2101 ocs_hw_unsol_process_bounce(void *arg) 2102 { 2103 ocs_hw_sequence_t *seq = arg; 2104 ocs_hw_t *hw = seq->hw; 2105 2106 ocs_hw_assert(hw != NULL); 2107 ocs_hw_assert(hw->callback.unsolicited != NULL); 2108 2109 hw->callback.unsolicited(hw->args.unsolicited, seq); 2110 } 2111 2112 int32_t 2113 ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec) 2114 { 2115 hw_eq_t *eq; 2116 int32_t rc = 0; 2117 2118 CPUTRACE(""); 2119 2120 /* 2121 * The caller should disable interrupts if they wish to prevent us 2122 * from processing during a shutdown. The following states are defined: 2123 * OCS_HW_STATE_UNINITIALIZED - No queues allocated 2124 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset, 2125 * queues are cleared. 2126 * OCS_HW_STATE_ACTIVE - Chip and queues are operational 2127 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions 2128 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox 2129 * completions. 2130 */ 2131 if (hw->state == OCS_HW_STATE_UNINITIALIZED) { 2132 return 0; 2133 } 2134 2135 /* Get pointer to hw_eq_t */ 2136 eq = hw->hw_eq[vector]; 2137 2138 OCS_STAT(eq->use_count++); 2139 2140 rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec); 2141 2142 return rc; 2143 } 2144 2145 /** 2146 * @ingroup interrupt 2147 * @brief Process events associated with an EQ. 2148 * 2149 * @par Description 2150 * Loop termination: 2151 * @n @n Without a mechanism to terminate the completion processing loop, it 2152 * is possible under some workload conditions for the loop to never terminate 2153 * (or at least take longer than the OS is happy to have an interrupt handler 2154 * or kernel thread context hold a CPU without yielding). 2155 * @n @n The approach taken here is to periodically check how much time 2156 * we have been in this 2157 * processing loop, and if we exceed a predetermined time (multiple seconds), the 2158 * loop is terminated, and ocs_hw_process() returns. 2159 * 2160 * @param hw Hardware context. 
2161 * @param eq Pointer to HW EQ object. 2162 * @param max_isr_time_msec Maximum time in msec to stay in this function. 2163 * 2164 * @return Returns 0 on success, or a non-zero value on failure. 2165 */ 2166 int32_t 2167 ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec) 2168 { 2169 uint8_t eqe[sizeof(sli4_eqe_t)] = { 0 }; 2170 uint32_t done = FALSE; 2171 uint32_t tcheck_count; 2172 time_t tstart; 2173 time_t telapsed; 2174 2175 tcheck_count = OCS_HW_TIMECHECK_ITERATIONS; 2176 tstart = ocs_msectime(); 2177 2178 CPUTRACE(""); 2179 2180 while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) { 2181 uint16_t cq_id = 0; 2182 int32_t rc; 2183 2184 rc = sli_eq_parse(&hw->sli, eqe, &cq_id); 2185 if (unlikely(rc)) { 2186 if (rc > 0) { 2187 uint32_t i; 2188 2189 /* 2190 * Received a sentinel EQE indicating the EQ is full. 2191 * Process all CQs 2192 */ 2193 for (i = 0; i < hw->cq_count; i++) { 2194 ocs_hw_cq_process(hw, hw->hw_cq[i]); 2195 } 2196 continue; 2197 } else { 2198 return rc; 2199 } 2200 } else { 2201 int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id); 2202 if (likely(index >= 0)) { 2203 ocs_hw_cq_process(hw, hw->hw_cq[index]); 2204 } else { 2205 ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id); 2206 } 2207 } 2208 2209 if (eq->queue->n_posted > (eq->queue->posted_limit)) { 2210 sli_queue_arm(&hw->sli, eq->queue, FALSE); 2211 } 2212 2213 if (tcheck_count && (--tcheck_count == 0)) { 2214 tcheck_count = OCS_HW_TIMECHECK_ITERATIONS; 2215 telapsed = ocs_msectime() - tstart; 2216 if (telapsed >= max_isr_time_msec) { 2217 done = TRUE; 2218 } 2219 } 2220 } 2221 sli_queue_eq_arm(&hw->sli, eq->queue, TRUE); 2222 2223 return 0; 2224 } 2225 2226 /** 2227 * @brief Submit queued (pending) mailbox commands. 2228 * 2229 * @par Description 2230 * Submit queued mailbox commands. 2231 * --- Assumes that hw->cmd_lock is held --- 2232 * 2233 * @param hw Hardware context. 2234 * 2235 * @return Returns 0 on success, or a negative error code value on failure. 2236 */ 2237 static int32_t 2238 ocs_hw_cmd_submit_pending(ocs_hw_t *hw) 2239 { 2240 ocs_command_ctx_t *ctx; 2241 int32_t rc = 0; 2242 2243 /* Assumes lock held */ 2244 2245 /* Only submit MQE if there's room */ 2246 while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) { 2247 ctx = ocs_list_remove_head(&hw->cmd_pending); 2248 if (ctx == NULL) { 2249 break; 2250 } 2251 ocs_list_add_tail(&hw->cmd_head, ctx); 2252 hw->cmd_head_count++; 2253 if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) { 2254 ocs_log_test(hw->os, "sli_queue_write failed\n"); 2255 rc = -1; 2256 break; 2257 } 2258 } 2259 return rc; 2260 } 2261 2262 /** 2263 * @ingroup io 2264 * @brief Issue a SLI mailbox command. 2265 * 2266 * @par Description 2267 * Send a mailbox command to the hardware, and either wait for a completion 2268 * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT). 2269 * 2270 * @param hw Hardware context. 2271 * @param cmd Buffer containing a formatted command and results. 2272 * @param opts Command options: 2273 * - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion. 2274 * - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback. 2275 * @param cb Function callback used for asynchronous mode. May be NULL. 2276 * @n Prototype is <tt>int32_t (*cb)(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)</tt>. 2277 * @n @n @b Note: If the 2278 * callback function pointer is NULL, the results of the command are silently 2279 * discarded, allowing this pointer to exist solely on the stack. 
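 * @n @n A minimal sketch of an OCS_CMD_NOWAIT completion callback (the name
 * my_mbox_done is hypothetical; compare the ocs_hw_cb_* callbacks in this file):
 * @code
 * static int32_t
 * my_mbox_done(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
 * {
 *         if (status) {
 *                 ocs_log_err(hw->os, "command failed, status=%d\n", status);
 *         }
 *         // by convention in this file, the callback frees the buffer
 *         ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
 *         return 0;
 * }
 * @endcode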
2280 * @param arg Argument passed to an asynchronous callback. 2281 * 2282 * @return Returns 0 on success, or a non-zero value on failure. 2283 */ 2284 ocs_hw_rtn_e 2285 ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg) 2286 { 2287 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 2288 2289 /* 2290 * If the chip is in an error state (UE'd) then reject this mailbox 2291 * command. 2292 */ 2293 if (sli_fw_error_status(&hw->sli) > 0) { 2294 uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1); 2295 uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2); 2296 if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) { 2297 hw->expiration_logged = 1; 2298 ocs_log_crit(hw->os,"Emulex: Heartbeat expired after %d seconds\n", 2299 hw->watchdog_timeout); 2300 } 2301 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2302 ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n", 2303 sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS), 2304 err1, err2); 2305 2306 return OCS_HW_RTN_ERROR; 2307 } 2308 2309 if (OCS_CMD_POLL == opts) { 2310 ocs_lock(&hw->cmd_lock); 2311 if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) { 2312 /* 2313 * Can't issue Boot-strap mailbox command with other 2314 * mail-queue commands pending as this interaction is 2315 * undefined 2316 */ 2317 rc = OCS_HW_RTN_ERROR; 2318 } else { 2319 void *bmbx = hw->sli.bmbx.virt; 2320 2321 ocs_memset(bmbx, 0, SLI4_BMBX_SIZE); 2322 ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE); 2323 2324 if (sli_bmbx_command(&hw->sli) == 0) { 2325 rc = OCS_HW_RTN_SUCCESS; 2326 ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE); 2327 } 2328 } 2329 ocs_unlock(&hw->cmd_lock); 2330 } else if (OCS_CMD_NOWAIT == opts) { 2331 ocs_command_ctx_t *ctx = NULL; 2332 2333 ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT); 2334 if (!ctx) { 2335 ocs_log_err(hw->os, "can't allocate command context\n"); 2336 return OCS_HW_RTN_NO_RESOURCES; 2337 } 2338 2339 if (hw->state != OCS_HW_STATE_ACTIVE) { 2340 ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state); 2341 ocs_free(hw->os, ctx, sizeof(*ctx)); 2342 return OCS_HW_RTN_ERROR; 2343 } 2344 2345 if (cb) { 2346 ctx->cb = cb; 2347 ctx->arg = arg; 2348 } 2349 ctx->buf = cmd; 2350 ctx->ctx = hw; 2351 2352 ocs_lock(&hw->cmd_lock); 2353 2354 /* Add to pending list */ 2355 ocs_list_add_tail(&hw->cmd_pending, ctx); 2356 2357 /* Submit as much of the pending list as we can */ 2358 if (ocs_hw_cmd_submit_pending(hw) == 0) { 2359 rc = OCS_HW_RTN_SUCCESS; 2360 } 2361 2362 ocs_unlock(&hw->cmd_lock); 2363 } 2364 2365 return rc; 2366 } 2367 2368 /** 2369 * @ingroup devInitShutdown 2370 * @brief Register a callback for the given event. 2371 * 2372 * @param hw Hardware context. 2373 * @param which Event of interest. 2374 * @param func Function to call when the event occurs. 2375 * @param arg Argument passed to the callback function. 2376 * 2377 * @return Returns 0 on success, or a non-zero value on failure. 
2378 */ 2379 ocs_hw_rtn_e 2380 ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg) 2381 { 2382 2383 if (!hw || !func || (which >= OCS_HW_CB_MAX)) { 2384 ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n", 2385 hw, which, func); 2386 return OCS_HW_RTN_ERROR; 2387 } 2388 2389 switch (which) { 2390 case OCS_HW_CB_DOMAIN: 2391 hw->callback.domain = func; 2392 hw->args.domain = arg; 2393 break; 2394 case OCS_HW_CB_PORT: 2395 hw->callback.port = func; 2396 hw->args.port = arg; 2397 break; 2398 case OCS_HW_CB_UNSOLICITED: 2399 hw->callback.unsolicited = func; 2400 hw->args.unsolicited = arg; 2401 break; 2402 case OCS_HW_CB_REMOTE_NODE: 2403 hw->callback.rnode = func; 2404 hw->args.rnode = arg; 2405 break; 2406 case OCS_HW_CB_BOUNCE: 2407 hw->callback.bounce = func; 2408 hw->args.bounce = arg; 2409 break; 2410 default: 2411 ocs_log_test(hw->os, "unknown callback %#x\n", which); 2412 return OCS_HW_RTN_ERROR; 2413 } 2414 2415 return OCS_HW_RTN_SUCCESS; 2416 } 2417 2418 /** 2419 * @ingroup port 2420 * @brief Allocate a port object. 2421 * 2422 * @par Description 2423 * This function allocates a VPI object for the port and stores it in the 2424 * indicator field of the port object. 2425 * 2426 * @param hw Hardware context. 2427 * @param sport SLI port object used to connect to the domain. 2428 * @param domain Domain object associated with this port (may be NULL). 2429 * @param wwpn Port's WWPN in big-endian order, or NULL to use default. 2430 * 2431 * @return Returns 0 on success, or a non-zero value on failure. 2432 */ 2433 ocs_hw_rtn_e 2434 ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain, 2435 uint8_t *wwpn) 2436 { 2437 uint8_t *cmd = NULL; 2438 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2439 uint32_t index; 2440 2441 sport->indicator = UINT32_MAX; 2442 sport->hw = hw; 2443 sport->ctx.app = sport; 2444 sport->sm_free_req_pending = 0; 2445 2446 /* 2447 * Check if the chip is in an error state (UE'd) before proceeding. 
2448 */ 2449 if (sli_fw_error_status(&hw->sli) > 0) { 2450 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2451 return OCS_HW_RTN_ERROR; 2452 } 2453 2454 if (wwpn) { 2455 ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn)); 2456 } 2457 2458 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) { 2459 ocs_log_err(hw->os, "FCOE_VPI allocation failure\n"); 2460 return OCS_HW_RTN_ERROR; 2461 } 2462 2463 if (domain != NULL) { 2464 ocs_sm_function_t next = NULL; 2465 2466 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 2467 if (!cmd) { 2468 ocs_log_err(hw->os, "command memory allocation failed\n"); 2469 rc = OCS_HW_RTN_NO_MEMORY; 2470 goto ocs_hw_port_alloc_out; 2471 } 2472 2473 /* If the WWPN is NULL, fetch the default WWPN and WWNN before 2474 * initializing the VPI 2475 */ 2476 if (!wwpn) { 2477 next = __ocs_hw_port_alloc_read_sparm64; 2478 } else { 2479 next = __ocs_hw_port_alloc_init_vpi; 2480 } 2481 2482 ocs_sm_transition(&sport->ctx, next, cmd); 2483 } else if (!wwpn) { 2484 /* This is the convention for the HW, not SLI */ 2485 ocs_log_test(hw->os, "need WWN for physical port\n"); 2486 rc = OCS_HW_RTN_ERROR; 2487 } else { 2488 /* domain NULL and wwpn non-NULL */ 2489 ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL); 2490 } 2491 2492 ocs_hw_port_alloc_out: 2493 if (rc != OCS_HW_RTN_SUCCESS) { 2494 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); 2495 2496 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); 2497 } 2498 2499 return rc; 2500 } 2501 2502 /** 2503 * @ingroup port 2504 * @brief Attach a physical/virtual SLI port to a domain. 2505 * 2506 * @par Description 2507 * This function registers a previously-allocated VPI with the 2508 * device. 2509 * 2510 * @param hw Hardware context. 2511 * @param sport Pointer to the SLI port object. 2512 * @param fc_id Fibre Channel ID to associate with this port. 2513 * 2514 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure. 2515 */ 2516 ocs_hw_rtn_e 2517 ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id) 2518 { 2519 uint8_t *buf = NULL; 2520 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2521 2522 if (!hw || !sport) { 2523 ocs_log_err(hw ? hw->os : NULL, 2524 "bad parameter(s) hw=%p sport=%p\n", hw, 2525 sport); 2526 return OCS_HW_RTN_ERROR; 2527 } 2528 2529 /* 2530 * Check if the chip is in an error state (UE'd) before proceeding. 2531 */ 2532 if (sli_fw_error_status(&hw->sli) > 0) { 2533 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2534 return OCS_HW_RTN_ERROR; 2535 } 2536 2537 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 2538 if (!buf) { 2539 ocs_log_err(hw->os, "no buffer for command\n"); 2540 return OCS_HW_RTN_NO_MEMORY; 2541 } 2542 2543 sport->fc_id = fc_id; 2544 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf); 2545 return rc; 2546 } 2547 2548 /** 2549 * @brief Called when the port control command completes. 2550 * 2551 * @par Description 2552 * We only need to free the mailbox command buffer. 2553 * 2554 * @param hw Hardware context. 2555 * @param status Status field from the mbox completion. 2556 * @param mqe Mailbox response structure. 2557 * @param arg Pointer to a callback function that signals the caller that the command is done. 2558 * 2559 * @return Returns 0. 
2560 */ 2561 static int32_t 2562 ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 2563 { 2564 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 2565 return 0; 2566 } 2567 2568 /** 2569 * @ingroup port 2570 * @brief Control a port (initialize, shutdown, or set link configuration). 2571 * 2572 * @par Description 2573 * This function controls a port depending on the @c ctrl parameter: 2574 * - @b OCS_HW_PORT_INIT - 2575 * Issues the CONFIG_LINK and INIT_LINK commands for the specified port. 2576 * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up. 2577 * . 2578 * - @b OCS_HW_PORT_SHUTDOWN - 2579 * Issues the DOWN_LINK command for the specified port. 2580 * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down. 2581 * . 2582 * - @b OCS_HW_PORT_SET_LINK_CONFIG - 2583 * Sets the link configuration. 2584 * 2585 * @param hw Hardware context. 2586 * @param ctrl Specifies the operation: 2587 * - OCS_HW_PORT_INIT 2588 * - OCS_HW_PORT_SHUTDOWN 2589 * - OCS_HW_PORT_SET_LINK_CONFIG 2590 * 2591 * @param value Operation-specific value. 2592 * - OCS_HW_PORT_INIT - Selective reset AL_PA 2593 * - OCS_HW_PORT_SHUTDOWN - N/A 2594 * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value. 2595 * 2596 * @param cb Callback function to invoke the following operation. 2597 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events 2598 * are handled by the OCS_HW_CB_DOMAIN callbacks). 2599 * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command 2600 * completes. 2601 * 2602 * @param arg Callback argument invoked after the command completes. 2603 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events 2604 * are handled by the OCS_HW_CB_DOMAIN callbacks). 2605 * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command 2606 * completes. 2607 * 2608 * @return Returns 0 on success, or a non-zero value on failure. 2609 */ 2610 ocs_hw_rtn_e 2611 ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg) 2612 { 2613 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 2614 2615 switch (ctrl) { 2616 case OCS_HW_PORT_INIT: 2617 { 2618 uint8_t *init_link; 2619 uint32_t speed = 0; 2620 uint8_t reset_alpa = 0; 2621 2622 if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) { 2623 uint8_t *cfg_link; 2624 2625 cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 2626 if (cfg_link == NULL) { 2627 ocs_log_err(hw->os, "no buffer for command\n"); 2628 return OCS_HW_RTN_NO_MEMORY; 2629 } 2630 2631 if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) { 2632 rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT, 2633 ocs_hw_cb_port_control, NULL); 2634 } 2635 2636 if (rc != OCS_HW_RTN_SUCCESS) { 2637 ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE); 2638 ocs_log_err(hw->os, "CONFIG_LINK failed\n"); 2639 break; 2640 } 2641 speed = hw->config.speed; 2642 reset_alpa = (uint8_t)(value & 0xff); 2643 } else { 2644 speed = FC_LINK_SPEED_10G; 2645 } 2646 2647 /* 2648 * Bring link up, unless FW version is not supported 2649 */ 2650 if (hw->workaround.fw_version_too_low) { 2651 if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) { 2652 ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n", 2653 OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0)); 2654 } else { 2655 ocs_log_err(hw->os, "Cannot bring up link. 
Please update firmware to %s or later (current version is %s)\n", 2656 OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0)); 2657 } 2658 2659 return OCS_HW_RTN_ERROR; 2660 } 2661 2662 rc = OCS_HW_RTN_ERROR; 2663 2664 /* Allocate a new buffer for the init_link command */ 2665 init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 2666 if (init_link == NULL) { 2667 ocs_log_err(hw->os, "no buffer for command\n"); 2668 return OCS_HW_RTN_NO_MEMORY; 2669 } 2670 2671 if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) { 2672 rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT, 2673 ocs_hw_cb_port_control, NULL); 2674 } 2675 /* Free buffer on error, since no callback is coming */ 2676 if (rc != OCS_HW_RTN_SUCCESS) { 2677 ocs_free(hw->os, init_link, SLI4_BMBX_SIZE); 2678 ocs_log_err(hw->os, "INIT_LINK failed\n"); 2679 } 2680 break; 2681 } 2682 case OCS_HW_PORT_SHUTDOWN: 2683 { 2684 uint8_t *down_link; 2685 2686 down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 2687 if (down_link == NULL) { 2688 ocs_log_err(hw->os, "no buffer for command\n"); 2689 return OCS_HW_RTN_NO_MEMORY; 2690 } 2691 if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) { 2692 rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT, 2693 ocs_hw_cb_port_control, NULL); 2694 } 2695 /* Free buffer on error, since no callback is coming */ 2696 if (rc != OCS_HW_RTN_SUCCESS) { 2697 ocs_free(hw->os, down_link, SLI4_BMBX_SIZE); 2698 ocs_log_err(hw->os, "DOWN_LINK failed\n"); 2699 } 2700 break; 2701 } 2702 case OCS_HW_PORT_SET_LINK_CONFIG: 2703 rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg); 2704 break; 2705 default: 2706 ocs_log_test(hw->os, "unhandled control %#x\n", ctrl); 2707 break; 2708 } 2709 2710 return rc; 2711 } 2712 2713 /** 2714 * @ingroup port 2715 * @brief Free port resources. 2716 * 2717 * @par Description 2718 * Issue the UNREG_VPI command to free the assigned VPI context. 2719 * 2720 * @param hw Hardware context. 2721 * @param sport SLI port object used to connect to the domain. 2722 * 2723 * @return Returns 0 on success, or a non-zero value on failure. 2724 */ 2725 ocs_hw_rtn_e 2726 ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport) 2727 { 2728 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2729 2730 if (!hw || !sport) { 2731 ocs_log_err(hw ? hw->os : NULL, 2732 "bad parameter(s) hw=%p sport=%p\n", hw, 2733 sport); 2734 return OCS_HW_RTN_ERROR; 2735 } 2736 2737 /* 2738 * Check if the chip is in an error state (UE'd) before proceeding. 2739 */ 2740 if (sli_fw_error_status(&hw->sli) > 0) { 2741 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2742 return OCS_HW_RTN_ERROR; 2743 } 2744 2745 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL); 2746 return rc; 2747 } 2748 2749 /** 2750 * @ingroup domain 2751 * @brief Allocate a fabric domain object. 2752 * 2753 * @par Description 2754 * This function starts a series of commands needed to connect to the domain, including 2755 * - REG_FCFI 2756 * - INIT_VFI 2757 * - READ_SPARMS 2758 * . 2759 * @b Note: Not all SLI interface types use all of the above commands. 2760 * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK 2761 * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event. 2762 * 2763 * @param hw Hardware context. 2764 * @param domain Pointer to the domain object. 2765 * @param fcf FCF index. 2766 * @param vlan VLAN ID. 2767 * 2768 * @return Returns 0 on success, or a non-zero value on failure. 
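 * @n @n A minimal usage sketch (hypothetical caller; fcf_index and vlan_id
 * would typically come from READ_FCF/FIP processing):
 * @code
 * if (ocs_hw_domain_alloc(hw, domain, fcf_index, vlan_id) != OCS_HW_RTN_SUCCESS) {
 *         // handle the error; on success, wait for OCS_HW_DOMAIN_ALLOC_OK or
 *         // OCS_HW_DOMAIN_ALLOC_FAIL via the registered OCS_HW_CB_DOMAIN callback
 * }
 * @endcode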
2769 */ 2770 ocs_hw_rtn_e 2771 ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan) 2772 { 2773 uint8_t *cmd = NULL; 2774 uint32_t index; 2775 2776 if (!hw || !domain || !domain->sport) { 2777 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n", 2778 hw, domain, domain ? domain->sport : NULL); 2779 return OCS_HW_RTN_ERROR; 2780 } 2781 2782 /* 2783 * Check if the chip is in an error state (UE'd) before proceeding. 2784 */ 2785 if (sli_fw_error_status(&hw->sli) > 0) { 2786 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2787 return OCS_HW_RTN_ERROR; 2788 } 2789 2790 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 2791 if (!cmd) { 2792 ocs_log_err(hw->os, "command memory allocation failed\n"); 2793 return OCS_HW_RTN_NO_MEMORY; 2794 } 2795 2796 domain->dma = hw->domain_dmem; 2797 2798 domain->hw = hw; 2799 domain->sm.app = domain; 2800 domain->fcf = fcf; 2801 domain->fcf_indicator = UINT32_MAX; 2802 domain->vlan_id = vlan; 2803 domain->indicator = UINT32_MAX; 2804 2805 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) { 2806 ocs_log_err(hw->os, "FCOE_VFI allocation failure\n"); 2807 2808 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); 2809 2810 return OCS_HW_RTN_ERROR; 2811 } 2812 2813 ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd); 2814 return OCS_HW_RTN_SUCCESS; 2815 } 2816 2817 /** 2818 * @ingroup domain 2819 * @brief Attach a SLI port to a domain. 2820 * 2821 * @param hw Hardware context. 2822 * @param domain Pointer to the domain object. 2823 * @param fc_id Fibre Channel ID to associate with this port. 2824 * 2825 * @return Returns 0 on success, or a non-zero value on failure. 2826 */ 2827 ocs_hw_rtn_e 2828 ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id) 2829 { 2830 uint8_t *buf = NULL; 2831 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2832 2833 if (!hw || !domain) { 2834 ocs_log_err(hw ? hw->os : NULL, 2835 "bad parameter(s) hw=%p domain=%p\n", 2836 hw, domain); 2837 return OCS_HW_RTN_ERROR; 2838 } 2839 2840 /* 2841 * Check if the chip is in an error state (UE'd) before proceeding. 2842 */ 2843 if (sli_fw_error_status(&hw->sli) > 0) { 2844 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2845 return OCS_HW_RTN_ERROR; 2846 } 2847 2848 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 2849 if (!buf) { 2850 ocs_log_err(hw->os, "no buffer for command\n"); 2851 return OCS_HW_RTN_NO_MEMORY; 2852 } 2853 2854 domain->sport->fc_id = fc_id; 2855 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf); 2856 return rc; 2857 } 2858 2859 /** 2860 * @ingroup domain 2861 * @brief Free a fabric domain object. 2862 * 2863 * @par Description 2864 * Free both the driver and SLI port resources associated with the domain. 2865 * 2866 * @param hw Hardware context. 2867 * @param domain Pointer to the domain object. 2868 * 2869 * @return Returns 0 on success, or a non-zero value on failure. 2870 */ 2871 ocs_hw_rtn_e 2872 ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain) 2873 { 2874 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 2875 2876 if (!hw || !domain) { 2877 ocs_log_err(hw ? hw->os : NULL, 2878 "bad parameter(s) hw=%p domain=%p\n", 2879 hw, domain); 2880 return OCS_HW_RTN_ERROR; 2881 } 2882 2883 /* 2884 * Check if the chip is in an error state (UE'd) before proceeding. 
2885 */ 2886 if (sli_fw_error_status(&hw->sli) > 0) { 2887 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2888 return OCS_HW_RTN_ERROR; 2889 } 2890 2891 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL); 2892 return rc; 2893 } 2894 2895 /** 2896 * @ingroup domain 2897 * @brief Free a fabric domain object. 2898 * 2899 * @par Description 2900 * Free the driver resources associated with the domain. The difference between 2901 * this call and ocs_hw_domain_free() is that this call assumes resources no longer 2902 * exist on the SLI port, due to a reset or after some error conditions. 2903 * 2904 * @param hw Hardware context. 2905 * @param domain Pointer to the domain object. 2906 * 2907 * @return Returns 0 on success, or a non-zero value on failure. 2908 */ 2909 ocs_hw_rtn_e 2910 ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain) 2911 { 2912 if (!hw || !domain) { 2913 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain); 2914 return OCS_HW_RTN_ERROR; 2915 } 2916 2917 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator); 2918 2919 return OCS_HW_RTN_SUCCESS; 2920 } 2921 2922 /** 2923 * @ingroup node 2924 * @brief Allocate a remote node object. 2925 * 2926 * @param hw Hardware context. 2927 * @param rnode Allocated remote node object to initialize. 2928 * @param fc_addr FC address of the remote node. 2929 * @param sport SLI port used to connect to remote node. 2930 * 2931 * @return Returns 0 on success, or a non-zero value on failure. 2932 */ 2933 ocs_hw_rtn_e 2934 ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr, 2935 ocs_sli_port_t *sport) 2936 { 2937 /* Check for invalid indicator */ 2938 if (UINT32_MAX != rnode->indicator) { 2939 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n", 2940 fc_addr, rnode->indicator); 2941 return OCS_HW_RTN_ERROR; 2942 } 2943 2944 /* 2945 * Check if the chip is in an error state (UE'd) before proceeding. 2946 */ 2947 if (sli_fw_error_status(&hw->sli) > 0) { 2948 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2949 return OCS_HW_RTN_ERROR; 2950 } 2951 2952 /* NULL SLI port indicates an unallocated remote node */ 2953 rnode->sport = NULL; 2954 2955 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) { 2956 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n", 2957 fc_addr); 2958 return OCS_HW_RTN_ERROR; 2959 } 2960 2961 rnode->fc_id = fc_addr; 2962 rnode->sport = sport; 2963 2964 return OCS_HW_RTN_SUCCESS; 2965 } 2966 2967 /** 2968 * @ingroup node 2969 * @brief Update a remote node object with the remote port's service parameters. 2970 * 2971 * @param hw Hardware context. 2972 * @param rnode Allocated remote node object to initialize. 2973 * @param sparms DMA buffer containing the remote port's service parameters. 2974 * 2975 * @return Returns 0 on success, or a non-zero value on failure. 2976 */ 2977 ocs_hw_rtn_e 2978 ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms) 2979 { 2980 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 2981 uint8_t *buf = NULL; 2982 uint32_t count = 0; 2983 2984 if (!hw || !rnode || !sparms) { 2985 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n", 2986 hw, rnode, sparms); 2987 return OCS_HW_RTN_ERROR; 2988 } 2989 2990 /* 2991 * Check if the chip is in an error state (UE'd) before proceeding. 
2992 */ 2993 if (sli_fw_error_status(&hw->sli) > 0) { 2994 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 2995 return OCS_HW_RTN_ERROR; 2996 } 2997 2998 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 2999 if (!buf) { 3000 ocs_log_err(hw->os, "no buffer for command\n"); 3001 return OCS_HW_RTN_NO_MEMORY; 3002 } 3003 3004 /* 3005 * If the attach count is non-zero, this RPI has already been registered. 3006 * Otherwise, register the RPI 3007 */ 3008 if (rnode->index == UINT32_MAX) { 3009 ocs_log_err(NULL, "bad parameter rnode->index invalid\n"); 3010 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 3011 return OCS_HW_RTN_ERROR; 3012 } 3013 count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1); 3014 if (count) { 3015 /* 3016 * Can't attach multiple FC_ID's to a node unless High Login 3017 * Mode is enabled 3018 */ 3019 if (sli_get_hlm(&hw->sli) == FALSE) { 3020 ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n", 3021 sli_get_hlm(&hw->sli), count); 3022 rc = OCS_HW_RTN_SUCCESS; 3023 } else { 3024 rnode->node_group = TRUE; 3025 rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached); 3026 rc = rnode->attached ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS; 3027 } 3028 } else { 3029 rnode->node_group = FALSE; 3030 3031 ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt); 3032 if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id, 3033 rnode->indicator, rnode->sport->indicator, 3034 sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) { 3035 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, 3036 ocs_hw_cb_node_attach, rnode); 3037 } 3038 } 3039 3040 if (count || rc) { 3041 if (rc < OCS_HW_RTN_SUCCESS) { 3042 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1); 3043 ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI"); 3044 } 3045 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 3046 } 3047 3048 return rc; 3049 } 3050 3051 /** 3052 * @ingroup node 3053 * @brief Free a remote node resource. 3054 * 3055 * @param hw Hardware context. 3056 * @param rnode Remote node object to free. 3057 * 3058 * @return Returns 0 on success, or a non-zero value on failure. 3059 */ 3060 ocs_hw_rtn_e 3061 ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode) 3062 { 3063 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 3064 3065 if (!hw || !rnode) { 3066 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n", 3067 hw, rnode); 3068 return OCS_HW_RTN_ERROR; 3069 } 3070 3071 if (rnode->sport) { 3072 if (!rnode->attached) { 3073 if (rnode->indicator != UINT32_MAX) { 3074 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) { 3075 ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n", 3076 rnode->indicator, rnode->fc_id); 3077 rc = OCS_HW_RTN_ERROR; 3078 } else { 3079 rnode->node_group = FALSE; 3080 rnode->indicator = UINT32_MAX; 3081 rnode->index = UINT32_MAX; 3082 rnode->free_group = FALSE; 3083 } 3084 } 3085 } else { 3086 ocs_log_err(hw->os, "Error: rnode is still attached\n"); 3087 rc = OCS_HW_RTN_ERROR; 3088 } 3089 } 3090 3091 return rc; 3092 } 3093 3094 /** 3095 * @ingroup node 3096 * @brief Free a remote node object. 3097 * 3098 * @param hw Hardware context. 3099 * @param rnode Remote node object to free. 3100 * 3101 * @return Returns 0 on success, or a non-zero value on failure. 
3102 */ 3103 ocs_hw_rtn_e 3104 ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode) 3105 { 3106 uint8_t *buf = NULL; 3107 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS_SYNC; 3108 uint32_t index = UINT32_MAX; 3109 3110 if (!hw || !rnode) { 3111 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n", 3112 hw, rnode); 3113 return OCS_HW_RTN_ERROR; 3114 } 3115 3116 /* 3117 * Check if the chip is in an error state (UE'd) before proceeding. 3118 */ 3119 if (sli_fw_error_status(&hw->sli) > 0) { 3120 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 3121 return OCS_HW_RTN_ERROR; 3122 } 3123 3124 index = rnode->index; 3125 3126 if (rnode->sport) { 3127 uint32_t count = 0; 3128 uint32_t fc_id; 3129 3130 if (!rnode->attached) { 3131 return OCS_HW_RTN_SUCCESS_SYNC; 3132 } 3133 3134 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 3135 if (!buf) { 3136 ocs_log_err(hw->os, "no buffer for command\n"); 3137 return OCS_HW_RTN_NO_MEMORY; 3138 } 3139 3140 count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1); 3141 3142 if (count <= 1) { 3143 /* There are no other references to this RPI 3144 * so unregister it and free the resource. */ 3145 fc_id = UINT32_MAX; 3146 rnode->node_group = FALSE; 3147 rnode->free_group = TRUE; 3148 } else { 3149 if (sli_get_hlm(&hw->sli) == FALSE) { 3150 ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n", 3151 count); 3152 } 3153 fc_id = rnode->fc_id & 0x00ffffff; 3154 } 3155 3156 rc = OCS_HW_RTN_ERROR; 3157 3158 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator, 3159 SLI_RSRC_FCOE_RPI, fc_id)) { 3160 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode); 3161 } 3162 3163 if (rc != OCS_HW_RTN_SUCCESS) { 3164 ocs_log_err(hw->os, "UNREG_RPI failed\n"); 3165 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 3166 rc = OCS_HW_RTN_ERROR; 3167 } 3168 } 3169 3170 return rc; 3171 } 3172 3173 /** 3174 * @ingroup node 3175 * @brief Free all remote node objects. 3176 * 3177 * @param hw Hardware context. 3178 * 3179 * @return Returns 0 on success, or a non-zero value on failure. 3180 */ 3181 ocs_hw_rtn_e 3182 ocs_hw_node_free_all(ocs_hw_t *hw) 3183 { 3184 uint8_t *buf = NULL; 3185 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 3186 3187 if (!hw) { 3188 ocs_log_err(NULL, "bad parameter hw=%p\n", hw); 3189 return OCS_HW_RTN_ERROR; 3190 } 3191 3192 /* 3193 * Check if the chip is in an error state (UE'd) before proceeding. 
3194 */ 3195 if (sli_fw_error_status(&hw->sli) > 0) { 3196 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n"); 3197 return OCS_HW_RTN_ERROR; 3198 } 3199 3200 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 3201 if (!buf) { 3202 ocs_log_err(hw->os, "no buffer for command\n"); 3203 return OCS_HW_RTN_NO_MEMORY; 3204 } 3205 3206 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff, 3207 SLI_RSRC_FCOE_FCFI, UINT32_MAX)) { 3208 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all, 3209 NULL); 3210 } 3211 3212 if (rc != OCS_HW_RTN_SUCCESS) { 3213 ocs_log_err(hw->os, "UNREG_RPI failed\n"); 3214 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 3215 rc = OCS_HW_RTN_ERROR; 3216 } 3217 3218 return rc; 3219 } 3220 3221 ocs_hw_rtn_e 3222 ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup) 3223 { 3224 3225 if (!hw || !ngroup) { 3226 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n", 3227 hw, ngroup); 3228 return OCS_HW_RTN_ERROR; 3229 } 3230 3231 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator, 3232 &ngroup->index)) { 3233 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n", 3234 ngroup->indicator); 3235 return OCS_HW_RTN_ERROR; 3236 } 3237 3238 return OCS_HW_RTN_SUCCESS; 3239 } 3240 3241 ocs_hw_rtn_e 3242 ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode) 3243 { 3244 3245 if (!hw || !ngroup || !rnode) { 3246 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n", 3247 hw, ngroup, rnode); 3248 return OCS_HW_RTN_ERROR; 3249 } 3250 3251 if (rnode->attached) { 3252 ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n", 3253 rnode->indicator, rnode->fc_id); 3254 return OCS_HW_RTN_ERROR; 3255 } 3256 3257 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) { 3258 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n", 3259 rnode->indicator); 3260 return OCS_HW_RTN_ERROR; 3261 } 3262 3263 rnode->indicator = ngroup->indicator; 3264 rnode->index = ngroup->index; 3265 3266 return OCS_HW_RTN_SUCCESS; 3267 } 3268 3269 ocs_hw_rtn_e 3270 ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup) 3271 { 3272 int ref; 3273 3274 if (!hw || !ngroup) { 3275 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n", 3276 hw, ngroup); 3277 return OCS_HW_RTN_ERROR; 3278 } 3279 3280 ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count); 3281 if (ref) { 3282 /* Hmmm, the reference count is non-zero */ 3283 ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n", 3284 ref, ngroup->indicator); 3285 3286 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) { 3287 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n", 3288 ngroup->indicator); 3289 return OCS_HW_RTN_ERROR; 3290 } 3291 3292 ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0); 3293 } 3294 3295 ngroup->indicator = UINT32_MAX; 3296 ngroup->index = UINT32_MAX; 3297 3298 return OCS_HW_RTN_SUCCESS; 3299 } 3300 3301 /** 3302 * @brief Initialize IO fields on each free call. 3303 * 3304 * @n @b Note: This is done on each free call (as opposed to each 3305 * alloc call) because port-owned XRIs are not 3306 * allocated with ocs_hw_io_alloc() but are freed with this 3307 * function. 3308 * 3309 * @param io Pointer to HW IO. 
3310 */ 3311 static inline void 3312 ocs_hw_init_free_io(ocs_hw_io_t *io) 3313 { 3314 /* 3315 * Set io->done to NULL, to avoid any callbacks, should 3316 * a completion be received for one of these IOs 3317 */ 3318 io->done = NULL; 3319 io->abort_done = NULL; 3320 io->status_saved = 0; 3321 io->abort_in_progress = FALSE; 3322 io->port_owned_abort_count = 0; 3323 io->rnode = NULL; 3324 io->type = 0xFFFF; 3325 io->wq = NULL; 3326 io->ul_io = NULL; 3327 io->tgt_wqe_timeout = 0; 3328 } 3329 3330 /** 3331 * @ingroup io 3332 * @brief Lockless allocate a HW IO object. 3333 * 3334 * @par Description 3335 * Assume that hw->ocs_lock is held. This function is only used if 3336 * use_dif_sec_xri workaround is being used. 3337 * 3338 * @param hw Hardware context. 3339 * 3340 * @return Returns a pointer to an object on success, or NULL on failure. 3341 */ 3342 static inline ocs_hw_io_t * 3343 _ocs_hw_io_alloc(ocs_hw_t *hw) 3344 { 3345 ocs_hw_io_t *io = NULL; 3346 3347 if (NULL != (io = ocs_list_remove_head(&hw->io_free))) { 3348 ocs_list_add_tail(&hw->io_inuse, io); 3349 io->state = OCS_HW_IO_STATE_INUSE; 3350 io->quarantine = FALSE; 3351 io->quarantine_first_phase = TRUE; 3352 io->abort_reqtag = UINT32_MAX; 3353 ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io); 3354 } else { 3355 ocs_atomic_add_return(&hw->io_alloc_failed_count, 1); 3356 } 3357 3358 return io; 3359 } 3360 /** 3361 * @ingroup io 3362 * @brief Allocate a HW IO object. 3363 * 3364 * @par Description 3365 * @n @b Note: This function applies to non-port owned XRIs 3366 * only. 3367 * 3368 * @param hw Hardware context. 3369 * 3370 * @return Returns a pointer to an object on success, or NULL on failure. 3371 */ 3372 ocs_hw_io_t * 3373 ocs_hw_io_alloc(ocs_hw_t *hw) 3374 { 3375 ocs_hw_io_t *io = NULL; 3376 3377 ocs_lock(&hw->io_lock); 3378 io = _ocs_hw_io_alloc(hw); 3379 ocs_unlock(&hw->io_lock); 3380 3381 return io; 3382 } 3383 3384 /** 3385 * @ingroup io 3386 * @brief Allocate/Activate a port owned HW IO object. 3387 * 3388 * @par Description 3389 * This function is called by the transport layer when an XRI is 3390 * allocated by the SLI-Port. This will "activate" the HW IO 3391 * associated with the XRI received from the SLI-Port to mirror 3392 * the state of the XRI. 3393 * @n @n @b Note: This function applies to port owned XRIs only. 3394 * 3395 * @param hw Hardware context. 3396 * @param io Pointer HW IO to activate/allocate. 3397 * 3398 * @return Returns a pointer to an object on success, or NULL on failure. 3399 */ 3400 ocs_hw_io_t * 3401 ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io) 3402 { 3403 if (ocs_ref_read_count(&io->ref) > 0) { 3404 ocs_log_err(hw->os, "Bad parameter: refcount > 0\n"); 3405 return NULL; 3406 } 3407 3408 if (io->wq != NULL) { 3409 ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator); 3410 return NULL; 3411 } 3412 3413 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io); 3414 io->xbusy = TRUE; 3415 3416 return io; 3417 } 3418 3419 /** 3420 * @ingroup io 3421 * @brief When an IO is freed, depending on the exchange busy flag, and other 3422 * workarounds, move it to the correct list. 3423 * 3424 * @par Description 3425 * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed 3426 * from the busy or wait_free list. 3427 * 3428 * @param hw Hardware context. 3429 * @param io Pointer to the IO object to move. 
3430 */ 3431 static void 3432 ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io) 3433 { 3434 if (io->xbusy) { 3435 /* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */ 3436 ocs_list_add_tail(&hw->io_wait_free, io); 3437 io->state = OCS_HW_IO_STATE_WAIT_FREE; 3438 } else { 3439 /* IO not busy, add to free list */ 3440 ocs_list_add_tail(&hw->io_free, io); 3441 io->state = OCS_HW_IO_STATE_FREE; 3442 } 3443 3444 /* BZ 161832 workaround */ 3445 if (hw->workaround.use_dif_sec_xri) { 3446 ocs_hw_check_sec_hio_list(hw); 3447 } 3448 } 3449 3450 /** 3451 * @ingroup io 3452 * @brief Free a HW IO object. Perform cleanup common to 3453 * port and host-owned IOs. 3454 * 3455 * @param hw Hardware context. 3456 * @param io Pointer to the HW IO object. 3457 */ 3458 static inline void 3459 ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io) 3460 { 3461 /* initialize IO fields */ 3462 ocs_hw_init_free_io(io); 3463 3464 /* Restore default SGL */ 3465 ocs_hw_io_restore_sgl(hw, io); 3466 } 3467 3468 /** 3469 * @ingroup io 3470 * @brief Free a HW IO object associated with a port-owned XRI. 3471 * 3472 * @param arg Pointer to the HW IO object. 3473 */ 3474 static void 3475 ocs_hw_io_free_port_owned(void *arg) 3476 { 3477 ocs_hw_io_t *io = (ocs_hw_io_t *)arg; 3478 ocs_hw_t *hw = io->hw; 3479 3480 /* 3481 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs 3482 * waiting for buffers. 3483 */ 3484 if (io->auto_xfer_rdy_dnrx) { 3485 ocs_lock(&hw->io_lock); 3486 /* take a reference count because we still own the IO until the buffer is posted */ 3487 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io); 3488 ocs_list_add_tail(&hw->io_port_dnrx, io); 3489 ocs_unlock(&hw->io_lock); 3490 } 3491 3492 /* perform common cleanup */ 3493 ocs_hw_io_free_common(hw, io); 3494 } 3495 3496 /** 3497 * @ingroup io 3498 * @brief Free a previously-allocated HW IO object. Called when 3499 * IO refcount goes to zero (host-owned IOs only). 3500 * 3501 * @param arg Pointer to the HW IO object. 3502 */ 3503 static void 3504 ocs_hw_io_free_internal(void *arg) 3505 { 3506 ocs_hw_io_t *io = (ocs_hw_io_t *)arg; 3507 ocs_hw_t *hw = io->hw; 3508 3509 /* perform common cleanup */ 3510 ocs_hw_io_free_common(hw, io); 3511 3512 ocs_lock(&hw->io_lock); 3513 /* remove from in-use list */ 3514 ocs_list_remove(&hw->io_inuse, io); 3515 ocs_hw_io_free_move_correct_list(hw, io); 3516 ocs_unlock(&hw->io_lock); 3517 } 3518 3519 /** 3520 * @ingroup io 3521 * @brief Free a previously-allocated HW IO object. 3522 * 3523 * @par Description 3524 * @n @b Note: This function applies to port and host owned XRIs. 3525 * 3526 * @param hw Hardware context. 3527 * @param io Pointer to the HW IO object. 3528 * 3529 * @return Returns a non-zero value if HW IO was freed, 0 if references 3530 * on the IO still exist, or a negative value if an error occurred. 3531 */ 3532 int32_t 3533 ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io) 3534 { 3535 /* just put refcount */ 3536 if (ocs_ref_read_count(&io->ref) <= 0) { 3537 ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n", 3538 io->indicator, io->reqtag); 3539 return -1; 3540 } 3541 3542 return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */ 3543 } 3544 3545 /** 3546 * @ingroup io 3547 * @brief Check if given HW IO is in-use 3548 * 3549 * @par Description 3550 * This function returns TRUE if the given HW IO has been 3551 * allocated and is in-use, and FALSE otherwise. It applies to 3552 * port and host owned XRIs. 
3553 * 3554 * @param hw Hardware context. 3555 * @param io Pointer to the HW IO object. 3556 * 3557 * @return TRUE if an IO is in use, or FALSE otherwise. 3558 */ 3559 uint8_t 3560 ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io) 3561 { 3562 return (ocs_ref_read_count(&io->ref) > 0); 3563 } 3564 3565 /** 3566 * @brief Write a HW IO to a work queue. 3567 * 3568 * @par Description 3569 * A HW IO is written to a work queue. 3570 * 3571 * @param wq Pointer to work queue. 3572 * @param wqe Pointer to the WQE to write. 3573 * 3574 * @n @b Note: Assumes the SLI-4 queue lock is held. 3575 * 3576 * @return Returns 0 on success, or a negative error code value on failure. 3577 */ 3578 static int32_t 3579 _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe) 3580 { 3581 int32_t rc; 3582 int32_t queue_rc; 3583 3584 /* Every so often, set the wqec bit to generate consumed completions */ 3585 if (wq->wqec_count) { 3586 wq->wqec_count--; 3587 } 3588 if (wq->wqec_count == 0) { 3589 sli4_generic_wqe_t *genwqe = (void *)wqe->wqebuf; 3590 genwqe->wqec = 1; 3591 wq->wqec_count = wq->wqec_set_count; 3592 } 3593 3594 /* Decrement WQ free count */ 3595 wq->free_count--; 3596 3597 queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf); 3598 3599 if (queue_rc < 0) { 3600 rc = -1; 3601 } else { 3602 rc = 0; 3603 ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc); 3604 } 3605 3606 return rc; 3607 } 3608 3609 /** 3610 * @brief Write a HW IO to a work queue. 3611 * 3612 * @par Description 3613 * A HW IO is written to a work queue, or queued on the pending list if the WQ is full or already has pending entries. 3614 * 3615 * @param wq Pointer to work queue. 3616 * @param wqe Pointer to the WQE to write. 3617 * 3618 * @n @b Note: Takes the SLI-4 queue lock. 3619 * 3620 * @return Returns 0 on success, or a negative error code value on failure. 3621 */ 3622 int32_t 3623 hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe) 3624 { 3625 int32_t rc = 0; 3626 3627 sli_queue_lock(wq->queue); 3628 if (!ocs_list_empty(&wq->pending_list)) { 3629 ocs_list_add_tail(&wq->pending_list, wqe); 3630 OCS_STAT(wq->wq_pending_count++;) 3631 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) { 3632 rc = _hw_wq_write(wq, wqe); 3633 if (rc < 0) { 3634 break; 3635 } 3636 if (wqe->abort_wqe_submit_needed) { 3637 wqe->abort_wqe_submit_needed = 0; 3638 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI, 3639 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT); 3640 ocs_list_add_tail(&wq->pending_list, wqe); 3641 OCS_STAT(wq->wq_pending_count++;) 3642 } 3643 } 3644 } else { 3645 if (wq->free_count > 0) { 3646 rc = _hw_wq_write(wq, wqe); 3647 } else { 3648 ocs_list_add_tail(&wq->pending_list, wqe); 3649 OCS_STAT(wq->wq_pending_count++;) 3650 } 3651 } 3652 3653 sli_queue_unlock(wq->queue); 3654 3655 return rc; 3656 3657 } 3658 3659 /** 3660 * @brief Update free count and submit any pending HW IOs. 3661 * 3662 * @par Description 3663 * The WQ free count is updated, and any pending HW IOs are submitted that 3664 * will fit in the queue. 3665 * 3666 * @param wq Pointer to work queue. 3667 * @param update_free_count Value added to the WQ's free count. 3668 * 3669 * @return None. 
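 * @n @b Note: Credits normally come from consumed-WQE completions (the WQEC
 * handling in _hw_wq_write() requests one every wqec_set_count writes), so a
 * typical caller passes the number of WQEs reported consumed, for example
 * <tt>hw_wq_submit_pending(wq, wq->wqec_set_count)</tt>.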
3670 */ 3671 static void 3672 hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count) 3673 { 3674 ocs_hw_wqe_t *wqe; 3675 3676 sli_queue_lock(wq->queue); 3677 3678 /* Update free count with value passed in */ 3679 wq->free_count += update_free_count; 3680 3681 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) { 3682 _hw_wq_write(wq, wqe); 3683 3684 if (wqe->abort_wqe_submit_needed) { 3685 wqe->abort_wqe_submit_needed = 0; 3686 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI, 3687 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT); 3688 ocs_list_add_tail(&wq->pending_list, wqe); 3689 OCS_STAT(wq->wq_pending_count++;) 3690 } 3691 } 3692 3693 sli_queue_unlock(wq->queue); 3694 } 3695 3696 /** 3697 * @brief Check to see if there are any BZ 161832 workaround waiting IOs 3698 * 3699 * @par Description 3700 * Checks hw->sec_hio_wait_list, if an IO is waiting for a HW IO, then try 3701 * to allocate a secondary HW io, and dispatch it. 3702 * 3703 * @n @b Note: hw->io_lock MUST be taken when called. 3704 * 3705 * @param hw pointer to HW object 3706 * 3707 * @return none 3708 */ 3709 static void 3710 ocs_hw_check_sec_hio_list(ocs_hw_t *hw) 3711 { 3712 ocs_hw_io_t *io; 3713 ocs_hw_io_t *sec_io; 3714 int rc = 0; 3715 3716 while (!ocs_list_empty(&hw->sec_hio_wait_list)) { 3717 uint16_t flags; 3718 3719 sec_io = _ocs_hw_io_alloc(hw); 3720 if (sec_io == NULL) { 3721 break; 3722 } 3723 3724 io = ocs_list_remove_head(&hw->sec_hio_wait_list); 3725 ocs_list_add_tail(&hw->io_inuse, io); 3726 io->state = OCS_HW_IO_STATE_INUSE; 3727 io->sec_hio = sec_io; 3728 3729 /* mark secondary XRI for second and subsequent data phase as quarantine */ 3730 if (io->xbusy) { 3731 sec_io->quarantine = TRUE; 3732 } 3733 3734 flags = io->sec_iparam.fcp_tgt.flags; 3735 if (io->xbusy) { 3736 flags |= SLI4_IO_CONTINUATION; 3737 } else { 3738 flags &= ~SLI4_IO_CONTINUATION; 3739 } 3740 3741 io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout; 3742 3743 /* Complete (continue) TRECV IO */ 3744 if (io->xbusy) { 3745 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, 3746 io->first_data_sge, 3747 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator, 3748 io->reqtag, SLI4_CQ_DEFAULT, 3749 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode, 3750 flags, 3751 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) { 3752 ocs_log_test(hw->os, "TRECEIVE WQE error\n"); 3753 break; 3754 } 3755 } else { 3756 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, 3757 io->first_data_sge, 3758 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, 3759 io->reqtag, SLI4_CQ_DEFAULT, 3760 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode, 3761 flags, 3762 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, 3763 io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) { 3764 ocs_log_test(hw->os, "TRECEIVE WQE error\n"); 3765 break; 3766 } 3767 } 3768 3769 if (io->wq == NULL) { 3770 io->wq = ocs_hw_queue_next_wq(hw, io); 3771 ocs_hw_assert(io->wq != NULL); 3772 } 3773 io->xbusy = TRUE; 3774 3775 /* 3776 * Add IO to active io wqe list before submitting, in case the 3777 * wcqe processing preempts this thread. 
3778 */ 3779 ocs_hw_add_io_timed_wqe(hw, io); 3780 rc = hw_wq_write(io->wq, &io->wqe); 3781 if (rc >= 0) { 3782 /* non-negative return is success */ 3783 rc = 0; 3784 } else { 3785 /* failed to write wqe, remove from active wqe list */ 3786 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc); 3787 io->xbusy = FALSE; 3788 ocs_hw_remove_io_timed_wqe(hw, io); 3789 } 3790 } 3791 } 3792 3793 /** 3794 * @ingroup io 3795 * @brief Send a Single Request/Response Sequence (SRRS). 3796 * 3797 * @par Description 3798 * This routine supports communication sequences consisting of a single 3799 * request and single response between two endpoints. Examples include: 3800 * - Sending an ELS request. 3801 * - Sending an ELS response - To send an ELS response, the caller must provide 3802 * the OX_ID from the received request. 3803 * - Sending an FC Common Transport (FC-CT) request - To send an FC-CT request, 3804 * the caller must provide the R_CTL, TYPE, and DF_CTL 3805 * values to place in the FC frame header. 3806 * . 3807 * @n @b Note: The caller is expected to provide both send and receive 3808 * buffers for requests. In the case of sending a response, no receive buffer 3809 * is necessary and the caller may pass in a NULL pointer. 3810 * 3811 * @param hw Hardware context. 3812 * @param type Type of sequence (ELS request/response, FC-CT). 3813 * @param io Previously-allocated HW IO object. 3814 * @param send DMA memory holding data to send (for example, ELS request, BLS response). 3815 * @param len Length, in bytes, of data to send. 3816 * @param receive Optional DMA memory to hold a response. 3817 * @param rnode Destination of data (that is, a remote node). 3818 * @param iparam IO parameters (ELS response and FC-CT). 3819 * @param cb Function call upon completion of sending the data (may be NULL). 3820 * @param arg Argument to pass to IO completion function. 3821 * 3822 * @return Returns 0 on success, or a non-zero value on failure.
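 *
 * @par Example
 * A minimal sketch of sending an ELS request; the DMA buffers, request
 * length, and the els_done completion handler are hypothetical and assumed
 * to be set up by the caller (els_done must match ocs_hw_srrs_cb_t):
 * @code
 * ocs_hw_io_param_t iparam = {0};
 *
 * iparam.els.timeout = 30;        // assumption: ELS timeout, in seconds
 *
 * if (ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, io, &req_dma, req_len,
 *                      &rsp_dma, rnode, &iparam, els_done, NULL) != OCS_HW_RTN_SUCCESS) {
 *         // the WQE could not be formatted or queued
 * }
 * @endcode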
3823 */ 3824 ocs_hw_rtn_e 3825 ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io, 3826 ocs_dma_t *send, uint32_t len, ocs_dma_t *receive, 3827 ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam, 3828 ocs_hw_srrs_cb_t cb, void *arg) 3829 { 3830 sli4_sge_t *sge = NULL; 3831 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 3832 uint16_t local_flags = 0; 3833 3834 if (!hw || !io || !rnode || !iparam) { 3835 ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n", 3836 hw, io, send, receive, rnode, iparam); 3837 return OCS_HW_RTN_ERROR; 3838 } 3839 3840 if (hw->state != OCS_HW_STATE_ACTIVE) { 3841 ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state); 3842 return OCS_HW_RTN_ERROR; 3843 } 3844 3845 if (ocs_hw_is_xri_port_owned(hw, io->indicator)) { 3846 /* We must set the XC bit for port owned XRIs */ 3847 local_flags |= SLI4_IO_CONTINUATION; 3848 } 3849 io->rnode = rnode; 3850 io->type = type; 3851 io->done = cb; 3852 io->arg = arg; 3853 3854 sge = io->sgl->virt; 3855 3856 /* clear both SGE */ 3857 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t)); 3858 3859 if (send) { 3860 sge[0].buffer_address_high = ocs_addr32_hi(send->phys); 3861 sge[0].buffer_address_low = ocs_addr32_lo(send->phys); 3862 sge[0].sge_type = SLI4_SGE_TYPE_DATA; 3863 sge[0].buffer_length = len; 3864 } 3865 3866 if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) { 3867 sge[1].buffer_address_high = ocs_addr32_hi(receive->phys); 3868 sge[1].buffer_address_low = ocs_addr32_lo(receive->phys); 3869 sge[1].sge_type = SLI4_SGE_TYPE_DATA; 3870 sge[1].buffer_length = receive->size; 3871 sge[1].last = TRUE; 3872 } else { 3873 sge[0].last = TRUE; 3874 } 3875 3876 switch (type) { 3877 case OCS_HW_ELS_REQ: 3878 if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, 3879 *((uint8_t *)(send->virt)), /* req_type */ 3880 len, receive->size, 3881 iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) { 3882 ocs_log_err(hw->os, "REQ WQE error\n"); 3883 rc = OCS_HW_RTN_ERROR; 3884 } 3885 break; 3886 case OCS_HW_ELS_RSP: 3887 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len, 3888 io->indicator, io->reqtag, SLI4_CQ_DEFAULT, 3889 iparam->els.ox_id, 3890 rnode, local_flags, UINT32_MAX)) { 3891 ocs_log_err(hw->os, "RSP WQE error\n"); 3892 rc = OCS_HW_RTN_ERROR; 3893 } 3894 break; 3895 case OCS_HW_ELS_RSP_SID: 3896 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len, 3897 io->indicator, io->reqtag, SLI4_CQ_DEFAULT, 3898 iparam->els_sid.ox_id, 3899 rnode, local_flags, iparam->els_sid.s_id)) { 3900 ocs_log_err(hw->os, "RSP (SID) WQE error\n"); 3901 rc = OCS_HW_RTN_ERROR; 3902 } 3903 break; 3904 case OCS_HW_FC_CT: 3905 if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len, 3906 receive->size, iparam->fc_ct.timeout, io->indicator, 3907 io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl, 3908 iparam->fc_ct.type, iparam->fc_ct.df_ctl)) { 3909 ocs_log_err(hw->os, "GEN WQE error\n"); 3910 rc = OCS_HW_RTN_ERROR; 3911 } 3912 break; 3913 case OCS_HW_FC_CT_RSP: 3914 if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len, 3915 iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator, 3916 io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl, 3917 iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) { 3918 ocs_log_err(hw->os, "XMIT SEQ WQE error\n"); 3919 
rc = OCS_HW_RTN_ERROR; 3920 } 3921 break; 3922 case OCS_HW_BLS_ACC: 3923 case OCS_HW_BLS_RJT: 3924 { 3925 sli_bls_payload_t bls; 3926 3927 if (OCS_HW_BLS_ACC == type) { 3928 bls.type = SLI_BLS_ACC; 3929 ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc)); 3930 } else { 3931 bls.type = SLI_BLS_RJT; 3932 ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt)); 3933 } 3934 3935 bls.ox_id = iparam->bls.ox_id; 3936 bls.rx_id = iparam->bls.rx_id; 3937 3938 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls, 3939 io->indicator, io->reqtag, 3940 SLI4_CQ_DEFAULT, 3941 rnode, UINT32_MAX)) { 3942 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n"); 3943 rc = OCS_HW_RTN_ERROR; 3944 } 3945 break; 3946 } 3947 case OCS_HW_BLS_ACC_SID: 3948 { 3949 sli_bls_payload_t bls; 3950 3951 bls.type = SLI_BLS_ACC; 3952 ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc)); 3953 3954 bls.ox_id = iparam->bls_sid.ox_id; 3955 bls.rx_id = iparam->bls_sid.rx_id; 3956 3957 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls, 3958 io->indicator, io->reqtag, 3959 SLI4_CQ_DEFAULT, 3960 rnode, iparam->bls_sid.s_id)) { 3961 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n"); 3962 rc = OCS_HW_RTN_ERROR; 3963 } 3964 break; 3965 } 3966 case OCS_HW_BCAST: 3967 if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len, 3968 iparam->bcast.timeout, io->indicator, io->reqtag, 3969 SLI4_CQ_DEFAULT, rnode, 3970 iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) { 3971 ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n"); 3972 rc = OCS_HW_RTN_ERROR; 3973 } 3974 break; 3975 default: 3976 ocs_log_err(hw->os, "bad SRRS type %#x\n", type); 3977 rc = OCS_HW_RTN_ERROR; 3978 } 3979 3980 if (OCS_HW_RTN_SUCCESS == rc) { 3981 if (io->wq == NULL) { 3982 io->wq = ocs_hw_queue_next_wq(hw, io); 3983 ocs_hw_assert(io->wq != NULL); 3984 } 3985 io->xbusy = TRUE; 3986 3987 /* 3988 * Add IO to active io wqe list before submitting, in case the 3989 * wcqe processing preempts this thread. 3990 */ 3991 OCS_STAT(io->wq->use_count++); 3992 ocs_hw_add_io_timed_wqe(hw, io); 3993 rc = hw_wq_write(io->wq, &io->wqe); 3994 if (rc >= 0) { 3995 /* non-negative return is success */ 3996 rc = 0; 3997 } else { 3998 /* failed to write wqe, remove from active wqe list */ 3999 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc); 4000 io->xbusy = FALSE; 4001 ocs_hw_remove_io_timed_wqe(hw, io); 4002 } 4003 } 4004 4005 return rc; 4006 } 4007 4008 /** 4009 * @ingroup io 4010 * @brief Send a read, write, or response IO. 4011 * 4012 * @par Description 4013 * This routine supports sending a higher-level IO (for example, FCP) between two endpoints 4014 * as a target or initiator. Examples include: 4015 * - Sending read data and good response (target). 4016 * - Sending a response (target with no data or after receiving write data). 4017 * . 4018 * This routine assumes all IOs use the SGL associated with the HW IO. Prior to 4019 * calling this routine, the data should be loaded using ocs_hw_io_add_sge(). 4020 * 4021 * @param hw Hardware context. 4022 * @param type Type of IO (target read, target response, and so on). 4023 * @param io Previously-allocated HW IO object. 4024 * @param len Length, in bytes, of data to send. 4025 * @param iparam IO parameters. 4026 * @param rnode Destination of data (that is, a remote node). 4027 * @param cb Function call upon completion of sending data (may be NULL). 
4028 * @param arg Argument to pass to IO completion function. 4029 * 4030 * @return Returns 0 on success, or a non-zero value on failure. 4031 * 4032 * @todo 4033 * - Support specifying relative offset. 4034 * - Use a WQ other than 0. 4035 */ 4036 ocs_hw_rtn_e 4037 ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io, 4038 uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode, 4039 void *cb, void *arg) 4040 { 4041 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 4042 uint32_t rpi; 4043 uint8_t send_wqe = TRUE; 4044 4045 CPUTRACE(""); 4046 4047 if (!hw || !io || !rnode || !iparam) { 4048 ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n", 4049 hw, io, iparam, rnode); 4050 return OCS_HW_RTN_ERROR; 4051 } 4052 4053 if (hw->state != OCS_HW_STATE_ACTIVE) { 4054 ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state); 4055 return OCS_HW_RTN_ERROR; 4056 } 4057 4058 rpi = rnode->indicator; 4059 4060 if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) { 4061 rpi = hw->workaround.unregistered_rid; 4062 ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi); 4063 } 4064 4065 /* 4066 * Save state needed during later stages 4067 */ 4068 io->rnode = rnode; 4069 io->type = type; 4070 io->done = cb; 4071 io->arg = arg; 4072 4073 /* 4074 * Format the work queue entry used to send the IO 4075 */ 4076 switch (type) { 4077 case OCS_HW_IO_INITIATOR_READ: 4078 /* 4079 * If the use_dif_quarantine workaround is in effect and DIF separate mode 4080 * is used, then mark the initiator read IO for quarantine 4081 */ 4082 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) && 4083 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) { 4084 io->quarantine = TRUE; 4085 } 4086 4087 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size, 4088 iparam->fcp_ini.rsp); 4089 4090 if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len, 4091 io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode, 4092 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size, 4093 iparam->fcp_ini.timeout)) { 4094 ocs_log_err(hw->os, "IREAD WQE error\n"); 4095 rc = OCS_HW_RTN_ERROR; 4096 } 4097 break; 4098 case OCS_HW_IO_INITIATOR_WRITE: 4099 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size, 4100 iparam->fcp_ini.rsp); 4101 4102 if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, 4103 len, iparam->fcp_ini.first_burst, 4104 io->indicator, io->reqtag, 4105 SLI4_CQ_DEFAULT, rpi, rnode, 4106 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size, 4107 iparam->fcp_ini.timeout)) { 4108 ocs_log_err(hw->os, "IWRITE WQE error\n"); 4109 rc = OCS_HW_RTN_ERROR; 4110 } 4111 break; 4112 case OCS_HW_IO_INITIATOR_NODATA: 4113 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size, 4114 iparam->fcp_ini.rsp); 4115 4116 if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, 4117 io->indicator, io->reqtag, SLI4_CQ_DEFAULT, 4118 rpi, rnode, iparam->fcp_ini.timeout)) { 4119 ocs_log_err(hw->os, "ICMND WQE error\n"); 4120 rc = OCS_HW_RTN_ERROR; 4121 } 4122 break; 4123 case OCS_HW_IO_TARGET_WRITE: { 4124 uint16_t flags = iparam->fcp_tgt.flags; 4125 fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt; 4126 4127 /* 4128 * Fill in the XFER_RDY for IF_TYPE 0 devices 4129 */ 4130 *((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset); 4131 *((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
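		/* DATA_RO and BURST_LEN are carried big endian on the wire, hence the ocs_htobe32() conversions */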
4132 *((uint32_t *)xfer->rsvd) = 0; 4133 4134 if (io->xbusy) { 4135 flags |= SLI4_IO_CONTINUATION; 4136 } else { 4137 flags &= ~SLI4_IO_CONTINUATION; 4138 } 4139 4140 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout; 4141 4142 /* 4143 * If use_dif_quarantine workaround is in effect, and this is a DIF enabled IO 4144 * then mark the target write IO for quarantine 4145 */ 4146 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) && 4147 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) { 4148 io->quarantine = TRUE; 4149 } 4150 4151 /* 4152 * BZ 161832 Workaround: 4153 * Check for use_dif_sec_xri workaround. Note, even though the first data phase 4154 * doesn't really need a secondary XRI, we allocate one anyway, as this avoids the 4155 * potential for deadlock where all XRIs are allocated as primaries to IOs that 4156 * are on hw->sec_hio_wait_list. If this secondary XRI is not for the first 4157 * data phase, it is marked for quarantine. 4158 */ 4159 if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) { 4160 /* 4161 * If we have allocated a chained SGL for skyhawk, then 4162 * we can re-use this for the sec_hio. 4163 */ 4164 if (io->ovfl_io != NULL) { 4165 io->sec_hio = io->ovfl_io; 4166 io->sec_hio->quarantine = TRUE; 4167 } else { 4168 io->sec_hio = ocs_hw_io_alloc(hw); 4169 } 4170 if (io->sec_hio == NULL) { 4171 /* Failed to allocate, so save full request context and put 4172 * this IO on the wait list 4173 */ 4174 io->sec_iparam = *iparam; 4175 io->sec_len = len; 4176 ocs_lock(&hw->io_lock); 4177 ocs_list_remove(&hw->io_inuse, io); 4178 ocs_list_add_tail(&hw->sec_hio_wait_list, io); 4179 io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO; 4180 hw->sec_hio_wait_count++; 4181 ocs_unlock(&hw->io_lock); 4182 send_wqe = FALSE; 4183 /* Done */ 4184 break; 4185 } 4186 /* We quarantine the secondary IO if this is the second or subsequent data phase */ 4187 if (io->xbusy) { 4188 io->sec_hio->quarantine = TRUE; 4189 } 4190 } 4191 4192 /* 4193 * If not the first data phase, and io->sec_hio has been allocated, then issue 4194 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE 4195 */ 4196 if (io->xbusy && (io->sec_hio != NULL)) { 4197 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, 4198 iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator, 4199 io->reqtag, SLI4_CQ_DEFAULT, 4200 iparam->fcp_tgt.ox_id, rpi, rnode, 4201 flags, 4202 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size, 4203 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) { 4204 ocs_log_err(hw->os, "TRECEIVE WQE error\n"); 4205 rc = OCS_HW_RTN_ERROR; 4206 } 4207 } else { 4208 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, 4209 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag, 4210 SLI4_CQ_DEFAULT, 4211 iparam->fcp_tgt.ox_id, rpi, rnode, 4212 flags, 4213 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size, 4214 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) { 4215 ocs_log_err(hw->os, "TRECEIVE WQE error\n"); 4216 rc = OCS_HW_RTN_ERROR; 4217 } 4218 } 4219 break; 4220 } 4221 case OCS_HW_IO_TARGET_READ: { 4222 uint16_t flags = iparam->fcp_tgt.flags; 4223 4224 if (io->xbusy) { 4225 flags |= SLI4_IO_CONTINUATION; 4226 } else { 4227 flags &= ~SLI4_IO_CONTINUATION; 4228 } 4229 4230 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout; 4231 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, 4232 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag, 4233 SLI4_CQ_DEFAULT, 4234 iparam->fcp_tgt.ox_id, rpi, rnode, 4235 flags, 4236 iparam->fcp_tgt.dif_oper, 4237 iparam->fcp_tgt.blk_size, 4238 iparam->fcp_tgt.cs_ctl, 4239 iparam->fcp_tgt.app_id)) { 4240 ocs_log_err(hw->os, "TSEND WQE error\n"); 4241 rc = OCS_HW_RTN_ERROR; 4242 } else if (hw->workaround.retain_tsend_io_length) { 4243 io->length = len; 4244 } 4245 break; 4246 } 4247 case OCS_HW_IO_TARGET_RSP: { 4248 uint16_t flags = iparam->fcp_tgt.flags; 4249 4250 if (io->xbusy) { 4251 flags |= SLI4_IO_CONTINUATION; 4252 } else { 4253 flags &= ~SLI4_IO_CONTINUATION; 4254 } 4255 4256 /* post a new auto xfer ready buffer */ 4257 if (hw->auto_xfer_rdy_enabled && io->is_port_owned) { 4258 if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) { 4259 flags |= SLI4_IO_DNRX; 4260 } 4261 } 4262 4263 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout; 4264 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, 4265 &io->def_sgl, 4266 len, 4267 io->indicator, io->reqtag, 4268 SLI4_CQ_DEFAULT, 4269 iparam->fcp_tgt.ox_id, 4270 rpi, rnode, 4271 flags, iparam->fcp_tgt.cs_ctl, 4272 io->is_port_owned, 4273 iparam->fcp_tgt.app_id)) { 4274 ocs_log_err(hw->os, "TRSP WQE error\n"); 4275 rc = OCS_HW_RTN_ERROR; 4276 } 4277 4278 break; 4279 } 4280 default: 4281 ocs_log_err(hw->os, "unsupported IO type %#x\n", type); 4282 rc = OCS_HW_RTN_ERROR; 4283 } 4284 4285 if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) { 4286 if (io->wq == NULL) { 4287 io->wq = ocs_hw_queue_next_wq(hw, io); 4288 ocs_hw_assert(io->wq != NULL); 4289 } 4290 4291 io->xbusy = TRUE; 4292 4293 /* 4294 * Add IO to active io wqe list before submitting, in case the 4295 * wcqe processing preempts this thread. 4296 */ 4297 OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++); 4298 OCS_STAT(io->wq->use_count++); 4299 ocs_hw_add_io_timed_wqe(hw, io); 4300 rc = hw_wq_write(io->wq, &io->wqe); 4301 if (rc >= 0) { 4302 /* non-negative return is success */ 4303 rc = 0; 4304 } else { 4305 /* failed to write wqe, remove from active wqe list */ 4306 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc); 4307 io->xbusy = FALSE; 4308 ocs_hw_remove_io_timed_wqe(hw, io); 4309 } 4310 } 4311 4312 return rc; 4313 } 4314 4315 /** 4316 * @brief Send a raw frame 4317 * 4318 * @par Description 4319 * Using the SEND_FRAME_WQE, a frame consisting of header and payload is sent. 4320 * 4321 * @param hw Pointer to HW object. 4322 * @param hdr Pointer to a little endian formatted FC header. 4323 * @param sof Value to use as the frame SOF. 4324 * @param eof Value to use as the frame EOF. 4325 * @param payload Pointer to payload DMA buffer. 4326 * @param ctx Pointer to caller provided send frame context. 4327 * @param callback Callback function. 4328 * @param arg Callback function argument. 4329 * 4330 * @return Returns 0 on success, or a negative error code value on failure. 
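 *
 * @par Example
 * A minimal sketch; the header contents, payload DMA buffer, and the
 * send_frame_done callback are hypothetical, and the SOF/EOF literals below
 * are the usual SOFi3 (0x2e) and EOFn (0x41) code points:
 * @code
 * static void
 * send_frame_done(void *arg, uint8_t *cqe, int32_t status)
 * {
 *         // frame transmission completed (or failed, per status)
 * }
 *
 * ocs_hw_send_frame_context_t ctx = {0};
 * fc_header_le_t hdr = {0};
 *
 * // ... fill hdr (R_CTL, D_ID, S_ID, TYPE, F_CTL, OX_ID/RX_ID) ...
 *
 * if (ocs_hw_send_frame(hw, &hdr, 0x2e, 0x41, &payload_dma, &ctx,
 *                       send_frame_done, NULL) != OCS_HW_RTN_SUCCESS) {
 *         // request tag allocation or WQE submission failed
 * }
 * @endcode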
4331 */ 4332 ocs_hw_rtn_e 4333 ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload, 4334 ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg) 4335 { 4336 int32_t rc; 4337 ocs_hw_wqe_t *wqe; 4338 uint32_t xri; 4339 hw_wq_t *wq; 4340 4341 wqe = &ctx->wqe; 4342 4343 /* populate the callback object */ 4344 ctx->hw = hw; 4345 4346 /* Fetch and populate request tag */ 4347 ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg); 4348 if (ctx->wqcb == NULL) { 4349 ocs_log_err(hw->os, "can't allocate request tag\n"); 4350 return OCS_HW_RTN_NO_RESOURCES; 4351 } 4352 4353 /* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */ 4354 wq = ocs_varray_iter_next(hw->wq_class_array[1]); 4355 if (wq == NULL) { 4356 wq = hw->hw_wq[0]; 4357 } 4358 4359 /* Set XRI and RX_ID in the header based on which WQ, and which send_frame_io we are using */ 4360 xri = wq->send_frame_io->indicator; 4361 4362 /* Build the send frame WQE */ 4363 rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload, 4364 payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index); 4365 if (rc) { 4366 ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc); 4367 return OCS_HW_RTN_ERROR; 4368 } 4369 4370 /* Write to WQ */ 4371 rc = hw_wq_write(wq, wqe); 4372 if (rc) { 4373 ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc); 4374 return OCS_HW_RTN_ERROR; 4375 } 4376 4377 OCS_STAT(wq->use_count++); 4378 4379 return OCS_HW_RTN_SUCCESS; 4380 } 4381 4382 ocs_hw_rtn_e 4383 ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count) 4384 { 4385 if (sli_get_sgl_preregister(&hw->sli)) { 4386 ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n"); 4387 return OCS_HW_RTN_ERROR; 4388 } 4389 io->ovfl_sgl = sgl; 4390 io->ovfl_sgl_count = sgl_count; 4391 io->ovfl_io = NULL; 4392 4393 return OCS_HW_RTN_SUCCESS; 4394 } 4395 4396 static void 4397 ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io) 4398 { 4399 /* Restore the default */ 4400 io->sgl = &io->def_sgl; 4401 io->sgl_count = io->def_sgl_count; 4402 4403 /* 4404 * For skyhawk, we need to free the IO allocated for the chained 4405 * SGL. For all devices, clear the overflow fields on the IO. 4406 * 4407 * Note: For DIF IOs, we may be using the same XRI for the sec_hio and 4408 * the chained SGLs. If so, then we clear the ovfl_io field 4409 * when the sec_hio is freed. 4410 */ 4411 if (io->ovfl_io != NULL) { 4412 ocs_hw_io_free(hw, io->ovfl_io); 4413 io->ovfl_io = NULL; 4414 } 4415 4416 /* Clear the overflow SGL */ 4417 io->ovfl_sgl = NULL; 4418 io->ovfl_sgl_count = 0; 4419 io->ovfl_lsp = NULL; 4420 } 4421 4422 /** 4423 * @ingroup io 4424 * @brief Initialize the scatter gather list entries of an IO. 4425 * 4426 * @param hw Hardware context. 4427 * @param io Previously-allocated HW IO object. 4428 * @param type Type of IO (target read, target response, and so on). 4429 * 4430 * @return Returns 0 on success, or a non-zero value on failure. 4431 */ 4432 ocs_hw_rtn_e 4433 ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type) 4434 { 4435 sli4_sge_t *data = NULL; 4436 uint32_t i = 0; 4437 uint32_t skips = 0; 4438 4439 if (!hw || !io) { 4440 ocs_log_err(hw ? 
hw->os : NULL, "bad parameter hw=%p io=%p\n", 4441 hw, io); 4442 return OCS_HW_RTN_ERROR; 4443 } 4444 4445 /* Clear / reset the scatter-gather list */ 4446 io->sgl = &io->def_sgl; 4447 io->sgl_count = io->def_sgl_count; 4448 io->first_data_sge = 0; 4449 4450 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t)); 4451 io->n_sge = 0; 4452 io->sge_offset = 0; 4453 4454 io->type = type; 4455 4456 data = io->sgl->virt; 4457 4458 /* 4459 * Some IO types have underlying hardware requirements on the order 4460 * of SGEs. Process all special entries here. 4461 */ 4462 switch (type) { 4463 case OCS_HW_IO_INITIATOR_READ: 4464 case OCS_HW_IO_INITIATOR_WRITE: 4465 case OCS_HW_IO_INITIATOR_NODATA: 4466 /* 4467 * No skips, 2 special for initiator I/Os 4468 * The addresses and length are written later 4469 */ 4470 /* setup command pointer */ 4471 data->sge_type = SLI4_SGE_TYPE_DATA; 4472 data++; 4473 4474 /* setup response pointer */ 4475 data->sge_type = SLI4_SGE_TYPE_DATA; 4476 4477 if (OCS_HW_IO_INITIATOR_NODATA == type) { 4478 data->last = TRUE; 4479 } 4480 data++; 4481 4482 io->n_sge = 2; 4483 break; 4484 case OCS_HW_IO_TARGET_WRITE: 4485 #define OCS_TARGET_WRITE_SKIPS 2 4486 skips = OCS_TARGET_WRITE_SKIPS; 4487 4488 /* populate host resident XFER_RDY buffer */ 4489 data->sge_type = SLI4_SGE_TYPE_DATA; 4490 data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys); 4491 data->buffer_address_low = ocs_addr32_lo(io->xfer_rdy.phys); 4492 data->buffer_length = io->xfer_rdy.size; 4493 data++; 4494 4495 skips--; 4496 4497 io->n_sge = 1; 4498 break; 4499 case OCS_HW_IO_TARGET_READ: 4500 /* 4501 * For FCP_TSEND64, the first 2 entries are SKIP SGE's 4502 */ 4503 #define OCS_TARGET_READ_SKIPS 2 4504 skips = OCS_TARGET_READ_SKIPS; 4505 break; 4506 case OCS_HW_IO_TARGET_RSP: 4507 /* 4508 * No skips, etc. for FCP_TRSP64 4509 */ 4510 break; 4511 default: 4512 ocs_log_err(hw->os, "unsupported IO type %#x\n", type); 4513 return OCS_HW_RTN_ERROR; 4514 } 4515 4516 /* 4517 * Write skip entries 4518 */ 4519 for (i = 0; i < skips; i++) { 4520 data->sge_type = SLI4_SGE_TYPE_SKIP; 4521 data++; 4522 } 4523 4524 io->n_sge += skips; 4525 4526 /* 4527 * Set last 4528 */ 4529 data->last = TRUE; 4530 4531 return OCS_HW_RTN_SUCCESS; 4532 } 4533 4534 /** 4535 * @ingroup io 4536 * @brief Add a T10 PI seed scatter gather list entry. 4537 * 4538 * @param hw Hardware context. 4539 * @param io Previously-allocated HW IO object. 4540 * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF. 4541 * 4542 * @return Returns 0 on success, or a non-zero value on failure. 4543 */ 4544 ocs_hw_rtn_e 4545 ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info) 4546 { 4547 sli4_sge_t *data = NULL; 4548 sli4_diseed_sge_t *dif_seed; 4549 4550 /* If no dif_info, or dif_oper is disabled, then just return success */ 4551 if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) { 4552 return OCS_HW_RTN_SUCCESS; 4553 } 4554 4555 if (!hw || !io) { 4556 ocs_log_err(hw ? 
hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n", 4557 hw, io, dif_info); 4558 return OCS_HW_RTN_ERROR; 4559 } 4560 4561 data = io->sgl->virt; 4562 data += io->n_sge; 4563 4564 /* If we are doing T10 DIF add the DIF Seed SGE */ 4565 ocs_memset(data, 0, sizeof(sli4_diseed_sge_t)); 4566 dif_seed = (sli4_diseed_sge_t *)data; 4567 dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp; 4568 dif_seed->ref_tag_repl = dif_info->ref_tag_repl; 4569 dif_seed->app_tag_repl = dif_info->app_tag_repl; 4570 dif_seed->repl_app_tag = dif_info->repl_app_tag; 4571 if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) { 4572 dif_seed->atrt = dif_info->disable_app_ref_ffff; 4573 dif_seed->at = dif_info->disable_app_ffff; 4574 } 4575 dif_seed->sge_type = SLI4_SGE_TYPE_DISEED; 4576 /* Workaround for SKH (BZ157233) */ 4577 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) && 4578 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) { 4579 dif_seed->sge_type = SLI4_SGE_TYPE_SKIP; 4580 } 4581 4582 dif_seed->app_tag_cmp = dif_info->app_tag_cmp; 4583 dif_seed->dif_blk_size = dif_info->blk_size; 4584 dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag; 4585 dif_seed->check_app_tag = dif_info->check_app_tag; 4586 dif_seed->check_ref_tag = dif_info->check_ref_tag; 4587 dif_seed->check_crc = dif_info->check_guard; 4588 dif_seed->new_ref_tag = dif_info->repl_ref_tag; 4589 4590 switch(dif_info->dif_oper) { 4591 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC: 4592 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC; 4593 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC; 4594 break; 4595 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF: 4596 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF; 4597 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF; 4598 break; 4599 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM: 4600 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM; 4601 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM; 4602 break; 4603 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF: 4604 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF; 4605 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF; 4606 break; 4607 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC: 4608 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC; 4609 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC; 4610 break; 4611 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM: 4612 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM; 4613 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM; 4614 break; 4615 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM: 4616 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM; 4617 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM; 4618 break; 4619 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC: 4620 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC; 4621 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC; 4622 break; 4623 case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW: 4624 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW; 4625 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW; 4626 break; 4627 default: 4628 ocs_log_err(hw->os, "unsupported DIF operation %#x\n", 4629 dif_info->dif_oper); 4630 return OCS_HW_RTN_ERROR; 4631 } 4632 4633 /* 4634 * Set last, clear previous last 4635 */ 4636 data->last = TRUE; 4637 if (io->n_sge) { 4638 data[-1].last = FALSE; 4639 } 4640 4641 io->n_sge++; 4642 4643 return OCS_HW_RTN_SUCCESS; 4644 } 4645 4646 static ocs_hw_rtn_e 4647 ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io) 4648 { 4649 
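	/*
	 * Grow the IO's SGL by switching to an overflow SGL. On Skyhawk the
	 * extension is a chained SGL taken from a newly allocated HW IO;
	 * otherwise a link SGE (LSP) is written into the last slot of the
	 * current SGL to point at the overflow SGL.
	 */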
sli4_lsp_sge_t *lsp; 4650 4651 /* fail if we're already pointing to the overflow SGL */ 4652 if (io->sgl == io->ovfl_sgl) { 4653 return OCS_HW_RTN_ERROR; 4654 } 4655 4656 /* 4657 * For skyhawk, we can use another SGL to extend the SGL list. The 4658 * chained entry must not be in the first 4 entries. 4659 * 4660 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio. 4661 */ 4662 if (sli_get_sgl_preregister(&hw->sli) && 4663 io->def_sgl_count > 4 && 4664 io->ovfl_io == NULL && 4665 ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || 4666 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) { 4667 io->ovfl_io = ocs_hw_io_alloc(hw); 4668 if (io->ovfl_io != NULL) { 4669 /* 4670 * Note: We can't call ocs_hw_io_register_sgl() here 4671 * because it checks that SGLs are not pre-registered 4672 * and for skyhawk, preregistered SGLs are required. 4673 */ 4674 io->ovfl_sgl = &io->ovfl_io->def_sgl; 4675 io->ovfl_sgl_count = io->ovfl_io->def_sgl_count; 4676 } 4677 } 4678 4679 /* fail if we don't have an overflow SGL registered */ 4680 if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) { 4681 return OCS_HW_RTN_ERROR; 4682 } 4683 4684 /* 4685 * Overflow, we need to put a link SGE in the last location of the current SGL, after 4686 * copying the last SGE to the overflow SGL 4687 */ 4688 4689 ((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1]; 4690 4691 lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1]; 4692 ocs_memset(lsp, 0, sizeof(*lsp)); 4693 4694 if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || 4695 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) { 4696 sli_skh_chain_sge_build(&hw->sli, 4697 (sli4_sge_t*)lsp, 4698 io->ovfl_io->indicator, 4699 0, /* frag_num */ 4700 0); /* offset */ 4701 } else { 4702 lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys); 4703 lsp->buffer_address_low = ocs_addr32_lo(io->ovfl_sgl->phys); 4704 lsp->sge_type = SLI4_SGE_TYPE_LSP; 4705 lsp->last = 0; 4706 io->ovfl_lsp = lsp; 4707 io->ovfl_lsp->segment_length = sizeof(sli4_sge_t); 4708 } 4709 4710 /* Update the current SGL pointer and n_sge */ 4711 io->sgl = io->ovfl_sgl; 4712 io->sgl_count = io->ovfl_sgl_count; 4713 io->n_sge = 1; 4714 4715 return OCS_HW_RTN_SUCCESS; 4716 } 4717 4718 /** 4719 * @ingroup io 4720 * @brief Add a scatter gather list entry to an IO. 4721 * 4722 * @param hw Hardware context. 4723 * @param io Previously-allocated HW IO object. 4724 * @param addr Physical address. 4725 * @param length Length of memory pointed to by @c addr. 4726 * 4727 * @return Returns 0 on success, or a non-zero value on failure. 4728 */ 4729 ocs_hw_rtn_e 4730 ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length) 4731 { 4732 sli4_sge_t *data = NULL; 4733 4734 if (!hw || !io || !addr || !length) { 4735 ocs_log_err(hw ?
hw->os : NULL, 4736 "bad parameter hw=%p io=%p addr=%lx length=%u\n", 4737 hw, io, addr, length); 4738 return OCS_HW_RTN_ERROR; 4739 } 4740 4741 if ((length != 0) && (io->n_sge + 1) > io->sgl_count) { 4742 if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) { 4743 ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge); 4744 return OCS_HW_RTN_ERROR; 4745 } 4746 } 4747 4748 if (length > sli_get_max_sge(&hw->sli)) { 4749 ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n", 4750 length, sli_get_max_sge(&hw->sli)); 4751 return OCS_HW_RTN_ERROR; 4752 } 4753 4754 data = io->sgl->virt; 4755 data += io->n_sge; 4756 4757 data->sge_type = SLI4_SGE_TYPE_DATA; 4758 data->buffer_address_high = ocs_addr32_hi(addr); 4759 data->buffer_address_low = ocs_addr32_lo(addr); 4760 data->buffer_length = length; 4761 data->data_offset = io->sge_offset; 4762 /* 4763 * Always assume this is the last entry and mark as such. 4764 * If this is not the first entry unset the "last SGE" 4765 * indication for the previous entry 4766 */ 4767 data->last = TRUE; 4768 if (io->n_sge) { 4769 data[-1].last = FALSE; 4770 } 4771 4772 /* Set first_data_sge if not previously set */ 4773 if (io->first_data_sge == 0) { 4774 io->first_data_sge = io->n_sge; 4775 } 4776 4777 io->sge_offset += length; 4778 io->n_sge++; 4779 4780 /* Update the linked segment length (only executed after overflow has begun) */ 4781 if (io->ovfl_lsp != NULL) { 4782 io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t); 4783 } 4784 4785 return OCS_HW_RTN_SUCCESS; 4786 } 4787 4788 /** 4789 * @ingroup io 4790 * @brief Add a T10 DIF scatter gather list entry to an IO. 4791 * 4792 * @param hw Hardware context. 4793 * @param io Previously-allocated HW IO object. 4794 * @param addr DIF physical address. 4795 * 4796 * @return Returns 0 on success, or a non-zero value on failure. 4797 */ 4798 ocs_hw_rtn_e 4799 ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr) 4800 { 4801 sli4_dif_sge_t *data = NULL; 4802 4803 if (!hw || !io || !addr) { 4804 ocs_log_err(hw ? hw->os : NULL, 4805 "bad parameter hw=%p io=%p addr=%lx\n", 4806 hw, io, addr); 4807 return OCS_HW_RTN_ERROR; 4808 } 4809 4810 if ((io->n_sge + 1) > hw->config.n_sgl) { 4811 if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) { 4812 ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge); 4813 return OCS_HW_RTN_ERROR; 4814 } 4815 } 4816 4817 data = io->sgl->virt; 4818 data += io->n_sge; 4819 4820 data->sge_type = SLI4_SGE_TYPE_DIF; 4821 /* Workaround for SKH (BZ157233) */ 4822 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) && 4823 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) { 4824 data->sge_type = SLI4_SGE_TYPE_SKIP; 4825 } 4826 4827 data->buffer_address_high = ocs_addr32_hi(addr); 4828 data->buffer_address_low = ocs_addr32_lo(addr); 4829 4830 /* 4831 * Always assume this is the last entry and mark as such. 4832 * If this is not the first entry unset the "last SGE" 4833 * indication for the previous entry 4834 */ 4835 data->last = TRUE; 4836 if (io->n_sge) { 4837 data[-1].last = FALSE; 4838 } 4839 4840 io->n_sge++; 4841 4842 return OCS_HW_RTN_SUCCESS; 4843 } 4844 4845 /** 4846 * @ingroup io 4847 * @brief Abort a previously-started IO. 4848 * 4849 * @param hw Hardware context. 4850 * @param io_to_abort The IO to abort. 4851 * @param send_abts Boolean to have the hardware automatically 4852 * generate an ABTS. 4853 * @param cb Function call upon completion of the abort (may be NULL).
4854 * @param arg Argument to pass to abort completion function. 4855 * 4856 * @return Returns 0 on success, or a non-zero value on failure. 4857 */ 4858 ocs_hw_rtn_e 4859 ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg) 4860 { 4861 sli4_abort_type_e atype = SLI_ABORT_MAX; 4862 uint32_t id = 0, mask = 0; 4863 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 4864 hw_wq_callback_t *wqcb; 4865 4866 if (!hw || !io_to_abort) { 4867 ocs_log_err(hw ? hw->os : NULL, 4868 "bad parameter hw=%p io=%p\n", 4869 hw, io_to_abort); 4870 return OCS_HW_RTN_ERROR; 4871 } 4872 4873 if (hw->state != OCS_HW_STATE_ACTIVE) { 4874 ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n", 4875 hw->state); 4876 return OCS_HW_RTN_ERROR; 4877 } 4878 4879 /* take a reference on IO being aborted */ 4880 if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) { 4881 /* command no longer active */ 4882 ocs_log_test(hw ? hw->os : NULL, 4883 "io not active xri=0x%x tag=0x%x\n", 4884 io_to_abort->indicator, io_to_abort->reqtag); 4885 return OCS_HW_RTN_IO_NOT_ACTIVE; 4886 } 4887 4888 /* non-port owned XRI checks */ 4889 /* Must have a valid WQ reference */ 4890 if (io_to_abort->wq == NULL) { 4891 ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n", 4892 io_to_abort->indicator); 4893 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ 4894 return OCS_HW_RTN_IO_NOT_ACTIVE; 4895 } 4896 4897 /* Validation checks complete; now check to see if already being aborted */ 4898 ocs_lock(&hw->io_abort_lock); 4899 if (io_to_abort->abort_in_progress) { 4900 ocs_unlock(&hw->io_abort_lock); 4901 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ 4902 ocs_log_debug(hw ? hw->os : NULL, 4903 "io already being aborted xri=0x%x tag=0x%x\n", 4904 io_to_abort->indicator, io_to_abort->reqtag); 4905 return OCS_HW_RTN_IO_ABORT_IN_PROGRESS; 4906 } 4907 4908 /* 4909 * This IO is not already being aborted. Set flag so we won't try to 4910 * abort it again. After all, we only have one abort_done callback. 4911 */ 4912 io_to_abort->abort_in_progress = 1; 4913 ocs_unlock(&hw->io_abort_lock); 4914 4915 /* 4916 * If we got here, the possibilities are: 4917 * - host owned xri 4918 * - io_to_abort->wq_index != UINT32_MAX 4919 * - submit ABORT_WQE to same WQ 4920 * - port owned xri: 4921 * - rxri: io_to_abort->wq_index == UINT32_MAX 4922 * - submit ABORT_WQE to any WQ 4923 * - non-rxri 4924 * - io_to_abort->index != UINT32_MAX 4925 * - submit ABORT_WQE to same WQ 4926 * - io_to_abort->index == UINT32_MAX 4927 * - submit ABORT_WQE to any WQ 4928 */ 4929 io_to_abort->abort_done = cb; 4930 io_to_abort->abort_arg = arg; 4931 4932 atype = SLI_ABORT_XRI; 4933 id = io_to_abort->indicator; 4934 4935 /* Allocate a request tag for the abort portion of this IO */ 4936 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort); 4937 if (wqcb == NULL) { 4938 ocs_log_err(hw->os, "can't allocate request tag\n"); 4939 return OCS_HW_RTN_NO_RESOURCES; 4940 } 4941 io_to_abort->abort_reqtag = wqcb->instance_index; 4942 4943 /* 4944 * If the wqe is on the pending list, then set this wqe to be 4945 * aborted when the IO's wqe is removed from the list. 
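 * Submitting the ABORT_WQE immediately could otherwise let it reach the
 * hardware ahead of the WQE it is meant to abort.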
4946 */ 4947 if (io_to_abort->wq != NULL) { 4948 sli_queue_lock(io_to_abort->wq->queue); 4949 if (ocs_list_on_list(&io_to_abort->wqe.link)) { 4950 io_to_abort->wqe.abort_wqe_submit_needed = 1; 4951 io_to_abort->wqe.send_abts = send_abts; 4952 io_to_abort->wqe.id = id; 4953 io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag; 4954 sli_queue_unlock(io_to_abort->wq->queue); 4955 return 0; 4956 } 4957 sli_queue_unlock(io_to_abort->wq->queue); 4958 } 4959 4960 if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask, 4961 io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) { 4962 ocs_log_err(hw->os, "ABORT WQE error\n"); 4963 io_to_abort->abort_reqtag = UINT32_MAX; 4964 ocs_hw_reqtag_free(hw, wqcb); 4965 rc = OCS_HW_RTN_ERROR; 4966 } 4967 4968 if (OCS_HW_RTN_SUCCESS == rc) { 4969 if (io_to_abort->wq == NULL) { 4970 io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort); 4971 ocs_hw_assert(io_to_abort->wq != NULL); 4972 } 4973 /* ABORT_WQE does not actually utilize an XRI on the Port, 4974 * therefore, keep xbusy as-is to track the exchange's state, 4975 * not the ABORT_WQE's state 4976 */ 4977 rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe); 4978 if (rc >= 0) { 4979 /* non-negative return is success */ 4980 rc = 0; 4981 /* can't abort an abort so skip adding to timed wqe list */ 4982 } 4983 } 4984 4985 if (OCS_HW_RTN_SUCCESS != rc) { 4986 ocs_lock(&hw->io_abort_lock); 4987 io_to_abort->abort_in_progress = 0; 4988 ocs_unlock(&hw->io_abort_lock); 4989 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */ 4990 } 4991 return rc; 4992 } 4993 4994 /** 4995 * @ingroup io 4996 * @brief Return the OX_ID/RX_ID of the IO. 4997 * 4998 * @param hw Hardware context. 4999 * @param io HW IO object. 5000 * 5001 * @return Returns X_ID on success, or -1 on failure. 5002 */ 5003 int32_t 5004 ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io) 5005 { 5006 if (!hw || !io) { 5007 ocs_log_err(hw ? hw->os : NULL, 5008 "bad parameter hw=%p io=%p\n", hw, io); 5009 return -1; 5010 } 5011 5012 return io->indicator; 5013 } 5014 5015 typedef struct ocs_hw_fw_write_cb_arg { 5016 ocs_hw_fw_cb_t cb; 5017 void *arg; 5018 } ocs_hw_fw_write_cb_arg_t; 5019 5020 typedef struct ocs_hw_sfp_cb_arg { 5021 ocs_hw_sfp_cb_t cb; 5022 void *arg; 5023 ocs_dma_t payload; 5024 } ocs_hw_sfp_cb_arg_t; 5025 5026 typedef struct ocs_hw_temp_cb_arg { 5027 ocs_hw_temp_cb_t cb; 5028 void *arg; 5029 } ocs_hw_temp_cb_arg_t; 5030 5031 typedef struct ocs_hw_link_stat_cb_arg { 5032 ocs_hw_link_stat_cb_t cb; 5033 void *arg; 5034 } ocs_hw_link_stat_cb_arg_t; 5035 5036 typedef struct ocs_hw_host_stat_cb_arg { 5037 ocs_hw_host_stat_cb_t cb; 5038 void *arg; 5039 } ocs_hw_host_stat_cb_arg_t; 5040 5041 typedef struct ocs_hw_dump_get_cb_arg { 5042 ocs_hw_dump_get_cb_t cb; 5043 void *arg; 5044 void *mbox_cmd; 5045 } ocs_hw_dump_get_cb_arg_t; 5046 5047 typedef struct ocs_hw_dump_clear_cb_arg { 5048 ocs_hw_dump_clear_cb_t cb; 5049 void *arg; 5050 void *mbox_cmd; 5051 } ocs_hw_dump_clear_cb_arg_t; 5052 5053 /** 5054 * @brief Write a portion of a firmware image to the device. 5055 * 5056 * @par Description 5057 * Calls the correct firmware write function based on the device type. 5058 * 5059 * @param hw Hardware context. 5060 * @param dma DMA structure containing the firmware image chunk. 5061 * @param size Size of the firmware image chunk. 5062 * @param offset Offset, in bytes, from the beginning of the firmware image. 5063 * @param last True if this is the last chunk of the image.
5064 * Causes the image to be committed to flash. 5065 * @param cb Pointer to a callback function that is called when the command completes. 5066 * The callback function prototype is 5067 * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>. 5068 * @param arg Pointer to be passed to the callback function. 5069 * 5070 * @return Returns 0 on success, or a non-zero value on failure. 5071 */ 5072 ocs_hw_rtn_e 5073 ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg) 5074 { 5075 if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) { 5076 return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg); 5077 } else { 5078 /* firmware write is not supported for BE3/Skyhawk devices */ 5079 return -1; 5080 } 5081 } 5082 5083 /** 5084 * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer). 5085 * 5086 * @par Description 5087 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a 5088 * firmware image chunk, and then sends the command with ocs_hw_command(). On completion, 5089 * the callback function ocs_hw_cb_fw_write() gets called to free the mailbox 5090 * and to signal the caller that the write has completed. 5091 * 5092 * @param hw Hardware context. 5093 * @param dma DMA structure containing the firmware image chunk. 5094 * @param size Size of the firmware image chunk. 5095 * @param offset Offset, in bytes, from the beginning of the firmware image. 5096 * @param last True if this is the last chunk of the image. Causes the image to be committed to flash. 5097 * @param cb Pointer to a callback function that is called when the command completes. 5098 * The callback function prototype is 5099 * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>. 5100 * @param arg Pointer to be passed to the callback function. 5101 * 5102 * @return Returns 0 on success, or a non-zero value on failure. 5103 */ 5104 ocs_hw_rtn_e 5105 ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg) 5106 { 5107 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 5108 uint8_t *mbxdata; 5109 ocs_hw_fw_write_cb_arg_t *cb_arg; 5110 int noc = 0; /* No Commit bit - set to 1 for testing */ 5111 5112 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { 5113 ocs_log_test(hw->os, "Function only supported for I/F type 2\n"); 5114 return OCS_HW_RTN_ERROR; 5115 } 5116 5117 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 5118 if (mbxdata == NULL) { 5119 ocs_log_err(hw->os, "failed to malloc mbox\n"); 5120 return OCS_HW_RTN_NO_MEMORY; 5121 } 5122 5123 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT); 5124 if (cb_arg == NULL) { 5125 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 5126 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5127 return OCS_HW_RTN_NO_MEMORY; 5128 } 5129 5130 cb_arg->cb = cb; 5131 cb_arg->arg = arg; 5132 5133 if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last, 5134 size, offset, "/prg/", dma)) { 5135 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg); 5136 } 5137 5138 if (rc != OCS_HW_RTN_SUCCESS) { 5139 ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n"); 5140 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5141 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t)); 5142 } 5143 5144 return rc; 5145 5146 } 5147 5148 /** 5149 * @brief Called when the WRITE OBJECT command completes.
5150 * 5151 * @par Description 5152 * Get the number of bytes actually written out of the response, free the mailbox 5153 * that was malloc'd by ocs_hw_firmware_write(), 5154 * then call the callback and pass the status and bytes written. 5155 * 5156 * @param hw Hardware context. 5157 * @param status Status field from the mbox completion. 5158 * @param mqe Mailbox response structure. 5159 * @param arg Pointer to a callback function that signals the caller that the command is done. 5160 * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_written)</tt>. 5161 * 5162 * @return Returns 0. 5163 */ 5164 static int32_t 5165 ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 5166 { 5167 5168 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; 5169 sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed); 5170 ocs_hw_fw_write_cb_arg_t *cb_arg = arg; 5171 uint32_t bytes_written; 5172 uint16_t mbox_status; 5173 uint32_t change_status; 5174 5175 bytes_written = wr_obj_rsp->actual_write_length; 5176 mbox_status = mbox_rsp->hdr.status; 5177 change_status = wr_obj_rsp->change_status; 5178 5179 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 5180 5181 if (cb_arg) { 5182 if (cb_arg->cb) { 5183 if ((status == 0) && mbox_status) { 5184 status = mbox_status; 5185 } 5186 cb_arg->cb(status, bytes_written, change_status, cb_arg->arg); 5187 } 5188 5189 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t)); 5190 } 5191 5192 return 0; 5193 5194 } 5195 5196 /** 5197 * @brief Called when the READ_TRANSCEIVER_DATA command completes. 5198 * 5199 * @par Description 5200 * Get the number of bytes read out of the response, free the mailbox that was malloc'd 5201 * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes written. 5202 * 5203 * @param hw Hardware context. 5204 * @param status Status field from the mbox completion. 5205 * @param mqe Mailbox response structure. 5206 * @param arg Pointer to a callback function that signals the caller that the command is done. 5207 * The callback function prototype is 5208 * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t *data, void *arg)</tt>. 5209 * 5210 * @return Returns 0. 5211 */ 5212 static int32_t 5213 ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 5214 { 5215 5216 ocs_hw_sfp_cb_arg_t *cb_arg = arg; 5217 ocs_dma_t *payload = NULL; 5218 sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL; 5219 uint32_t bytes_written; 5220 5221 if (cb_arg) { 5222 payload = &(cb_arg->payload); 5223 if (cb_arg->cb) { 5224 mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt; 5225 bytes_written = mbox_rsp->hdr.response_length; 5226 if ((status == 0) && mbox_rsp->hdr.status) { 5227 status = mbox_rsp->hdr.status; 5228 } 5229 cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg); 5230 } 5231 5232 ocs_dma_free(hw->os, &cb_arg->payload); 5233 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t)); 5234 } 5235 5236 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 5237 return 0; 5238 } 5239 5240 /** 5241 * @ingroup io 5242 * @brief Function to retrieve the SFP information. 5243 * 5244 * @param hw Hardware context. 5245 * @param page The page of SFP data to retrieve (0xa0 or 0xa2). 5246 * @param cb Function call upon completion of sending the data (may be NULL). 5247 * @param arg Argument to pass to IO completion function. 
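 *
 * @par Example
 * A minimal sketch of reading the base identification page; the sfp_done
 * callback is hypothetical, and its signature mirrors the invocation in
 * ocs_hw_cb_sfp():
 * @code
 * static void
 * sfp_done(void *os, int32_t status, uint32_t bytes_read,
 *          uint32_t *page_data, void *arg)
 * {
 *         if (status == 0) {
 *                 // parse SFF-8472 bytes from page_data
 *         }
 * }
 *
 * ocs_hw_get_sfp(hw, 0xa0, sfp_done, NULL);
 * @endcode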
5248 * 5249 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY. 5250 */ 5251 ocs_hw_rtn_e 5252 ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg) 5253 { 5254 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 5255 ocs_hw_sfp_cb_arg_t *cb_arg; 5256 uint8_t *mbxdata; 5257 5258 /* mbxdata holds the header of the command */ 5259 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 5260 if (mbxdata == NULL) { 5261 ocs_log_err(hw->os, "failed to malloc mbox\n"); 5262 return OCS_HW_RTN_NO_MEMORY; 5263 } 5264 5265 /* cb_arg holds the data that will be passed to the callback on completion */ 5266 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT); 5267 if (cb_arg == NULL) { 5268 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 5269 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5270 return OCS_HW_RTN_NO_MEMORY; 5271 } 5272 5273 cb_arg->cb = cb; 5274 cb_arg->arg = arg; 5275 5276 /* payload holds the non-embedded portion */ 5277 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t), 5278 OCS_MIN_DMA_ALIGNMENT)) { 5279 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); 5280 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t)); 5281 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5282 return OCS_HW_RTN_NO_MEMORY; 5283 } 5284 5285 /* Send the HW command */ 5286 if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page, 5287 &cb_arg->payload)) { 5288 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg); 5289 } 5290 5291 if (rc != OCS_HW_RTN_SUCCESS) { 5292 ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n", 5293 rc); 5294 ocs_dma_free(hw->os, &cb_arg->payload); 5295 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t)); 5296 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5297 } 5298 5299 return rc; 5300 } 5301 5302 /** 5303 * @brief Function to retrieve the temperature information. 5304 * 5305 * @param hw Hardware context. 5306 * @param cb Function call upon completion of sending the data (may be NULL). 5307 * @param arg Argument to pass to IO completion function. 5308 * 5309 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY. 5310 */ 5311 ocs_hw_rtn_e 5312 ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg) 5313 { 5314 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 5315 ocs_hw_temp_cb_arg_t *cb_arg; 5316 uint8_t *mbxdata; 5317 5318 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 5319 if (mbxdata == NULL) { 5320 ocs_log_err(hw->os, "failed to malloc mbox"); 5321 return OCS_HW_RTN_NO_MEMORY; 5322 } 5323 5324 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT); 5325 if (cb_arg == NULL) { 5326 ocs_log_err(hw->os, "failed to malloc cb_arg"); 5327 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5328 return OCS_HW_RTN_NO_MEMORY; 5329 } 5330 5331 cb_arg->cb = cb; 5332 cb_arg->arg = arg; 5333 5334 if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 5335 SLI4_WKI_TAG_SAT_TEM)) { 5336 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg); 5337 } 5338 5339 if (rc != OCS_HW_RTN_SUCCESS) { 5340 ocs_log_test(hw->os, "DUMP_TYPE4 failed\n"); 5341 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5342 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t)); 5343 } 5344 5345 return rc; 5346 } 5347 5348 /** 5349 * @brief Called when the DUMP command completes. 
5350 * 5351 * @par Description 5352 * Get the temperature data out of the response, free the mailbox that was malloc'd 5353 * by ocs_hw_get_temperature(), then call the callback and pass the status and data. 5354 * 5355 * @param hw Hardware context. 5356 * @param status Status field from the mbox completion. 5357 * @param mqe Mailbox response structure. 5358 * @param arg Pointer to a callback function that signals the caller that the command is done. 5359 * The callback function prototype is defined by ocs_hw_temp_cb_t. 5360 * 5361 * @return Returns 0. 5362 */ 5363 static int32_t 5364 ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 5365 { 5366 5367 sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe; 5368 ocs_hw_temp_cb_arg_t *cb_arg = arg; 5369 uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */ 5370 uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6*/ 5371 uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */ 5372 uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */ 5373 uint32_t fan_off_thrshld = mbox_rsp->resp_data[4]; /* word 9 */ 5374 uint32_t fan_on_thrshld = mbox_rsp->resp_data[5]; /* word 10 */ 5375 5376 if (cb_arg) { 5377 if (cb_arg->cb) { 5378 if ((status == 0) && mbox_rsp->hdr.status) { 5379 status = mbox_rsp->hdr.status; 5380 } 5381 cb_arg->cb(status, 5382 curr_temp, 5383 crit_temp_thrshld, 5384 warn_temp_thrshld, 5385 norm_temp_thrshld, 5386 fan_off_thrshld, 5387 fan_on_thrshld, 5388 cb_arg->arg); 5389 } 5390 5391 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t)); 5392 } 5393 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 5394 5395 return 0; 5396 } 5397 5398 /** 5399 * @brief Function to retrieve the link statistics. 5400 * 5401 * @param hw Hardware context. 5402 * @param req_ext_counters If TRUE, then the extended counters will be requested. 5403 * @param clear_overflow_flags If TRUE, then overflow flags will be cleared. 5404 * @param clear_all_counters If TRUE, the counters will be cleared. 5405 * @param cb Function call upon completion of sending the data (may be NULL). 5406 * @param arg Argument to pass to IO completion function. 5407 * 5408 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY. 
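 *
 * @par Example
 * A minimal sketch of requesting the basic counters without clearing
 * anything; the link_stats_done callback is hypothetical, and its signature
 * mirrors the invocation in ocs_hw_cb_link_stat():
 * @code
 * static void
 * link_stats_done(int32_t status, uint32_t num_counters,
 *                 ocs_hw_link_stat_counts_t *counters, void *arg)
 * {
 *         if (status == 0) {
 *                 // e.g. inspect counters[OCS_HW_LINK_STAT_CRC_COUNT].counter
 *         }
 * }
 *
 * ocs_hw_get_link_stats(hw, FALSE, FALSE, FALSE, link_stats_done, NULL);
 * @endcode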
5409 */ 5410 ocs_hw_rtn_e 5411 ocs_hw_get_link_stats(ocs_hw_t *hw, 5412 uint8_t req_ext_counters, 5413 uint8_t clear_overflow_flags, 5414 uint8_t clear_all_counters, 5415 ocs_hw_link_stat_cb_t cb, 5416 void *arg) 5417 { 5418 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 5419 ocs_hw_link_stat_cb_arg_t *cb_arg; 5420 uint8_t *mbxdata; 5421 5422 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 5423 if (mbxdata == NULL) { 5424 ocs_log_err(hw->os, "failed to malloc mbox"); 5425 return OCS_HW_RTN_NO_MEMORY; 5426 } 5427 5428 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT); 5429 if (cb_arg == NULL) { 5430 ocs_log_err(hw->os, "failed to malloc cb_arg"); 5431 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5432 return OCS_HW_RTN_NO_MEMORY; 5433 } 5434 5435 cb_arg->cb = cb; 5436 cb_arg->arg = arg; 5437 5438 if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 5439 req_ext_counters, 5440 clear_overflow_flags, 5441 clear_all_counters)) { 5442 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg); 5443 } 5444 5445 if (rc != OCS_HW_RTN_SUCCESS) { 5446 ocs_log_test(hw->os, "READ_LINK_STATS failed\n"); 5447 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 5448 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t)); 5449 } 5450 5451 return rc; 5452 } 5453 5454 /** 5455 * @brief Called when the READ_LINK_STAT command completes. 5456 * 5457 * @par Description 5458 * Get the counters out of the response, free the mailbox that was malloc'd 5459 * by ocs_hw_get_link_stats(), then call the callback and pass the status and data. 5460 * 5461 * @param hw Hardware context. 5462 * @param status Status field from the mbox completion. 5463 * @param mqe Mailbox response structure. 5464 * @param arg Pointer to a callback function that signals the caller that the command is done. 5465 * The callback function prototype is defined by ocs_hw_link_stat_cb_t. 5466 * 5467 * @return Returns 0. 5468 */ 5469 static int32_t 5470 ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 5471 { 5472 5473 sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe; 5474 ocs_hw_link_stat_cb_arg_t *cb_arg = arg; 5475 ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX]; 5476 uint32_t num_counters = (mbox_rsp->gec ? 
20 : 13); 5477 5478 ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) * 5479 OCS_HW_LINK_STAT_MAX); 5480 5481 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of; 5482 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of; 5483 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of; 5484 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of; 5485 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of; 5486 counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of; 5487 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of; 5488 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of; 5489 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of; 5490 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of; 5491 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of; 5492 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of; 5493 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of; 5494 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of; 5495 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of; 5496 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of; 5497 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of; 5498 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of; 5499 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of; 5500 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of; 5501 5502 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count; 5503 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count; 5504 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count; 5505 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count; 5506 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count; 5507 counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count; 5508 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count; 5509 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count; 5510 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count; 5511 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit; 5512 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit; 5513 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit; 5514 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit; 5515 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count; 5516 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count; 5517 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count; 5518 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count; 5519 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count; 5520 
	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;

	if (cb_arg) {
		if (cb_arg->cb) {
			if ((status == 0) && mbox_rsp->hdr.status) {
				status = mbox_rsp->hdr.status;
			}
			cb_arg->cb(status,
				   num_counters,
				   counts,
				   cb_arg->arg);
		}

		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
	}
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);

	return 0;
}

/**
 * @brief Function to retrieve the link and host statistics.
 *
 * @param hw Hardware context.
 * @param cc Clear counters; if TRUE, all counters will be cleared.
 * @param cb Function to call upon completion of receiving the data.
 * @param arg Argument to pass to the completion callback.
 *
 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
 */
ocs_hw_rtn_e
ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
	ocs_hw_host_stat_cb_arg_t *cb_arg;
	uint8_t *mbxdata;

	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = arg;

	/* Send the HW command to get the host stats */
	if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
	}

	return rc;
}

/**
 * @brief Called when the READ_STATUS command completes.
 *
 * @par Description
 * Get the counters out of the response, free the mailbox that was malloc'd
 * by ocs_hw_get_host_stats(), then call the callback and pass
 * the status and data.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is defined by
 * ocs_hw_host_stat_cb_t.
 *
 * @return Returns 0.
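 *
 * @par Note
 * A consumer sketch (hypothetical; the indices are the ocs_hw_host_stat
 * values assigned below, and the counter field width follows
 * ocs_hw_host_stat_counts_t):
 * @code
 * uint32_t tx_kb = counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
 * uint32_t rx_kb = counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
 * @endcode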
5604 */ 5605 static int32_t 5606 ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 5607 { 5608 5609 sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe; 5610 ocs_hw_host_stat_cb_arg_t *cb_arg = arg; 5611 ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX]; 5612 uint32_t num_counters = OCS_HW_HOST_STAT_MAX; 5613 5614 ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) * 5615 OCS_HW_HOST_STAT_MAX); 5616 5617 counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count; 5618 counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count; 5619 counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count; 5620 counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count; 5621 counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count; 5622 counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count; 5623 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator; 5624 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder; 5625 counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count; 5626 counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count; 5627 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count; 5628 counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count; 5629 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count; 5630 counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count; 5631 5632 if (cb_arg) { 5633 if (cb_arg->cb) { 5634 if ((status == 0) && mbox_rsp->hdr.status) { 5635 status = mbox_rsp->hdr.status; 5636 } 5637 cb_arg->cb(status, 5638 num_counters, 5639 counts, 5640 cb_arg->arg); 5641 } 5642 5643 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t)); 5644 } 5645 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 5646 5647 return 0; 5648 } 5649 5650 /** 5651 * @brief HW link configuration enum to the CLP string value mapping. 5652 * 5653 * This structure provides a mapping from the ocs_hw_linkcfg_e 5654 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port 5655 * control) to the CLP string that is used 5656 * in the DMTF_CLP_CMD mailbox command. 5657 */ 5658 typedef struct ocs_hw_linkcfg_map_s { 5659 ocs_hw_linkcfg_e linkcfg; 5660 const char *clp_str; 5661 } ocs_hw_linkcfg_map_t; 5662 5663 /** 5664 * @brief Mapping from the HW linkcfg enum to the CLP command value 5665 * string. 5666 */ 5667 static ocs_hw_linkcfg_map_t linkcfg_map[] = { 5668 {OCS_HW_LINKCFG_4X10G, "ELX_4x10G"}, 5669 {OCS_HW_LINKCFG_1X40G, "ELX_1x40G"}, 5670 {OCS_HW_LINKCFG_2X16G, "ELX_2x16G"}, 5671 {OCS_HW_LINKCFG_4X8G, "ELX_4x8G"}, 5672 {OCS_HW_LINKCFG_4X1G, "ELX_4x1G"}, 5673 {OCS_HW_LINKCFG_2X10G, "ELX_2x10G"}, 5674 {OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}}; 5675 5676 /** 5677 * @brief HW link configuration enum to Skyhawk link config ID mapping. 
 *
 * This structure provides a mapping from the ocs_hw_linkcfg_e
 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
 * control) to the link config ID numbers used by Skyhawk.
 */
typedef struct ocs_hw_skyhawk_linkcfg_map_s {
	ocs_hw_linkcfg_e linkcfg;
	uint32_t config_id;
} ocs_hw_skyhawk_linkcfg_map_t;

/**
 * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs.
 */
static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = {
	{OCS_HW_LINKCFG_4X10G, 0x0a},
	{OCS_HW_LINKCFG_1X40G, 0x09},
};

/**
 * @brief Helper function for getting the HW linkcfg enum from the CLP
 * string value.
 *
 * @param clp_str CLP string value from OEMELX_LinkConfig.
 *
 * @return Returns the HW linkcfg enum corresponding to clp_str.
 */
static ocs_hw_linkcfg_e
ocs_hw_linkcfg_from_clp(const char *clp_str)
{
	uint32_t i;
	for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
		if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) {
			return linkcfg_map[i].linkcfg;
		}
	}
	return OCS_HW_LINKCFG_NA;
}

/**
 * @brief Helper function for getting the CLP string value from the HW
 * linkcfg enum.
 *
 * @param linkcfg HW linkcfg enum.
 *
 * @return Returns the OEMELX_LinkConfig CLP string value corresponding to
 * the given linkcfg.
 */
static const char *
ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
{
	uint32_t i;
	for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
		if (linkcfg_map[i].linkcfg == linkcfg) {
			return linkcfg_map[i].clp_str;
		}
	}
	return NULL;
}

/**
 * @brief Helper function for getting a Skyhawk link config ID from the HW
 * linkcfg enum.
 *
 * @param linkcfg HW linkcfg enum.
 *
 * @return Returns the Skyhawk link config ID corresponding to
 * the given linkcfg.
 */
static uint32_t
ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
{
	uint32_t i;
	for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
		if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) {
			return skyhawk_linkcfg_map[i].config_id;
		}
	}
	return 0;
}

/**
 * @brief Helper function for getting the HW linkcfg enum from a
 * Skyhawk config ID.
 *
 * @param config_id Skyhawk link config ID.
 *
 * @return Returns the HW linkcfg enum corresponding to config_id.
 */
static ocs_hw_linkcfg_e
ocs_hw_linkcfg_from_config_id(const uint32_t config_id)
{
	uint32_t i;
	for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
		if (skyhawk_linkcfg_map[i].config_id == config_id) {
			return skyhawk_linkcfg_map[i].linkcfg;
		}
	}
	return OCS_HW_LINKCFG_NA;
}

/**
 * @brief Link configuration callback argument.
 */
typedef struct ocs_hw_linkcfg_cb_arg_s {
	ocs_hw_port_control_cb_t cb;
	void *arg;
	uint32_t opts;
	int32_t status;
	ocs_dma_t dma_cmd;
	ocs_dma_t dma_resp;
	uint32_t result_len;
} ocs_hw_linkcfg_cb_arg_t;

/**
 * @brief Set link configuration.
 *
 * @param hw Hardware context.
 * @param value Link configuration enum to which the link configuration is
 * set.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
5799 * @param arg Callback argument. 5800 * 5801 * @return Returns OCS_HW_RTN_SUCCESS on success. 5802 */ 5803 static ocs_hw_rtn_e 5804 ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) 5805 { 5806 if (!sli_link_is_configurable(&hw->sli)) { 5807 ocs_log_debug(hw->os, "Function not supported\n"); 5808 return OCS_HW_RTN_ERROR; 5809 } 5810 5811 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) { 5812 return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg); 5813 } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || 5814 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) { 5815 return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg); 5816 } else { 5817 ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n"); 5818 return OCS_HW_RTN_ERROR; 5819 } 5820 } 5821 5822 /** 5823 * @brief Set link configuration for Lancer 5824 * 5825 * @param hw Hardware context. 5826 * @param value Link configuration enum to which the link configuration is 5827 * set. 5828 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). 5829 * @param cb Callback function to invoke following mbx command. 5830 * @param arg Callback argument. 5831 * 5832 * @return Returns OCS_HW_RTN_SUCCESS on success. 5833 */ 5834 static ocs_hw_rtn_e 5835 ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) 5836 { 5837 char cmd[OCS_HW_DMTF_CLP_CMD_MAX]; 5838 ocs_hw_linkcfg_cb_arg_t *cb_arg; 5839 const char *value_str = NULL; 5840 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 5841 5842 /* translate ocs_hw_linkcfg_e to CLP string */ 5843 value_str = ocs_hw_clp_from_linkcfg(value); 5844 5845 /* allocate memory for callback argument */ 5846 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT); 5847 if (cb_arg == NULL) { 5848 ocs_log_err(hw->os, "failed to malloc cb_arg"); 5849 return OCS_HW_RTN_NO_MEMORY; 5850 } 5851 5852 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str); 5853 /* allocate DMA for command */ 5854 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) { 5855 ocs_log_err(hw->os, "malloc failed\n"); 5856 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 5857 return OCS_HW_RTN_NO_MEMORY; 5858 } 5859 ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1); 5860 ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd)); 5861 5862 /* allocate DMA for response */ 5863 if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) { 5864 ocs_log_err(hw->os, "malloc failed\n"); 5865 ocs_dma_free(hw->os, &cb_arg->dma_cmd); 5866 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 5867 return OCS_HW_RTN_NO_MEMORY; 5868 } 5869 cb_arg->cb = cb; 5870 cb_arg->arg = arg; 5871 cb_arg->opts = opts; 5872 5873 rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp, 5874 opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg); 5875 5876 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) { 5877 /* if failed, or polling, free memory here; if success and not 5878 * polling, will free in callback function 5879 */ 5880 if (rc) { 5881 ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n", 5882 (char *)cb_arg->dma_cmd.virt); 5883 } 5884 ocs_dma_free(hw->os, &cb_arg->dma_cmd); 5885 ocs_dma_free(hw->os, &cb_arg->dma_resp); 5886 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 5887 } 5888 return rc; 5889 } 5890 5891 /** 5892 * @brief Callback for ocs_hw_set_linkcfg_skyhawk 5893 * 5894 * @param hw Hardware context. 
 * @param status Status from the SET_RECONFIG_LINK_ID command.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback argument.
 *
 * @return None.
 */
static void
ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;

	if (status) {
		ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
	}

	/* invoke callback */
	if (cb_arg->cb) {
		cb_arg->cb(status, 0, cb_arg->arg);
	}

	/* if polling, will free memory in calling function */
	if (cb_arg->opts != OCS_CMD_POLL) {
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
	}
}

/**
 * @brief Set link configuration for a Skyhawk.
 *
 * @param hw Hardware context.
 * @param value Link configuration enum to which the link configuration is
 * set.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
	uint8_t *mbxdata;
	ocs_hw_linkcfg_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint32_t config_id;

	config_id = ocs_hw_config_id_from_linkcfg(value);

	if (config_id == 0) {
		ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
		return OCS_HW_RTN_ERROR;
	}

	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* cb_arg holds the data that will be passed to the callback on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = arg;

	if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) {
		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
	} else if (opts == OCS_CMD_POLL) {
		/* if we're polling, we have to call the callback here */
		ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
	} else {
		/* We weren't polling, so the callback will free cb_arg when it runs */
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
	}

	return rc;
}

/**
 * @brief Get link configuration.
 *
 * @param hw Hardware context.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
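 *
 * @par Example
 * A minimal, hypothetical invocation sketch (assumes the
 * ocs_hw_port_control_cb_t prototype takes (status, value, arg), matching
 * the completion calls in the platform-specific handlers below; with
 * OCS_CMD_POLL the callback runs before this function returns):
 * @code
 * static void
 * my_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
 * {
 *	ocs_hw_linkcfg_e *result = arg;
 *
 *	if (status == 0) {
 *		*result = (ocs_hw_linkcfg_e)value;
 *	}
 * }
 *
 * ocs_hw_linkcfg_e cfg = OCS_HW_LINKCFG_NA;
 * ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, my_linkcfg_cb, &cfg);
 * @endcode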
5996 */ 5997 static ocs_hw_rtn_e 5998 ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) 5999 { 6000 if (!sli_link_is_configurable(&hw->sli)) { 6001 ocs_log_debug(hw->os, "Function not supported\n"); 6002 return OCS_HW_RTN_ERROR; 6003 } 6004 6005 if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) || 6006 (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))){ 6007 return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg); 6008 } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) || 6009 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) { 6010 return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg); 6011 } else { 6012 ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n"); 6013 return OCS_HW_RTN_ERROR; 6014 } 6015 } 6016 6017 /** 6018 * @brief Get link configuration for a Lancer 6019 * 6020 * @param hw Hardware context. 6021 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL). 6022 * @param cb Callback function to invoke following mbx command. 6023 * @param arg Callback argument. 6024 * 6025 * @return Returns OCS_HW_RTN_SUCCESS on success. 6026 */ 6027 static ocs_hw_rtn_e 6028 ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg) 6029 { 6030 char cmd[OCS_HW_DMTF_CLP_CMD_MAX]; 6031 ocs_hw_linkcfg_cb_arg_t *cb_arg; 6032 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 6033 6034 /* allocate memory for callback argument */ 6035 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT); 6036 if (cb_arg == NULL) { 6037 ocs_log_err(hw->os, "failed to malloc cb_arg"); 6038 return OCS_HW_RTN_NO_MEMORY; 6039 } 6040 6041 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig"); 6042 6043 /* allocate DMA for command */ 6044 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) { 6045 ocs_log_err(hw->os, "malloc failed\n"); 6046 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 6047 return OCS_HW_RTN_NO_MEMORY; 6048 } 6049 6050 /* copy CLP command to DMA command */ 6051 ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1); 6052 ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd)); 6053 6054 /* allocate DMA for response */ 6055 if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) { 6056 ocs_log_err(hw->os, "malloc failed\n"); 6057 ocs_dma_free(hw->os, &cb_arg->dma_cmd); 6058 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 6059 return OCS_HW_RTN_NO_MEMORY; 6060 } 6061 cb_arg->cb = cb; 6062 cb_arg->arg = arg; 6063 cb_arg->opts = opts; 6064 6065 rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp, 6066 opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg); 6067 6068 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) { 6069 /* if failed or polling, free memory here; if not polling and success, 6070 * will free in callback function 6071 */ 6072 if (rc) { 6073 ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n", 6074 (char *)cb_arg->dma_cmd.virt); 6075 } 6076 ocs_dma_free(hw->os, &cb_arg->dma_cmd); 6077 ocs_dma_free(hw->os, &cb_arg->dma_resp); 6078 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 6079 } 6080 return rc; 6081 } 6082 6083 /** 6084 * @brief Get the link configuration callback. 6085 * 6086 * @param hw Hardware context. 6087 * @param status Status from the RECONFIG_GET_LINK_INFO command. 6088 * @param mqe Mailbox response structure. 6089 * @param arg Pointer to a callback argument. 
 *
 * @return None.
 */
static void
ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
	sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
	ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;

	if (status) {
		ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
	} else {
		/* Call was successful */
		value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
	}

	/* invoke callback */
	if (cb_arg->cb) {
		cb_arg->cb(status, value, cb_arg->arg);
	}

	/* if polling, will free memory in calling function */
	if (cb_arg->opts != OCS_CMD_POLL) {
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
	}
}

/**
 * @brief Get link configuration for a Skyhawk.
 *
 * @param hw Hardware context.
 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
 * @param cb Callback function to invoke following mbx command.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
{
	uint8_t *mbxdata;
	ocs_hw_linkcfg_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* cb_arg holds the data that will be passed to the callback on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	}

	cb_arg->cb = cb;
	cb_arg->arg = arg;
	cb_arg->opts = opts;

	/* dma_mem holds the non-embedded portion */
	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
		return OCS_HW_RTN_NO_MEMORY;
	}

	if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
	}

	if (rc != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
	} else if (opts == OCS_CMD_POLL) {
		/* if we're polling, we have to call the callback here */
		ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
	} else {
		/* We weren't polling, so the callback will free cb_arg when it runs */
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
	}

	return rc;
}

/**
 * @brief Sets the DIF seed value.
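 *
 * @par Description
 * Sends a COMMON_SET_FEATURES (DIF seed) mailbox command by polling, using
 * the seed value currently stored in hw->config.dif_seed.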
 *
 * @param hw Hardware context.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_dif_seed(ocs_hw_t *hw)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint8_t buf[SLI4_BMBX_SIZE];
	sli4_req_common_set_features_dif_seed_t seed_param;

	ocs_memset(&seed_param, 0, sizeof(seed_param));
	seed_param.seed = hw->config.dif_seed;

	/* send set_features command */
	if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
					SLI4_SET_FEATURES_DIF_SEED,
					4,
					(uint32_t*)&seed_param)) {
		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
		if (rc) {
			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
		} else {
			ocs_log_debug(hw->os, "DIF seed set to 0x%x\n",
				      hw->config.dif_seed);
		}
	} else {
		ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
		rc = OCS_HW_RTN_ERROR;
	}
	return rc;
}

/**
 * @brief Sets the DIF mode value.
 *
 * @param hw Hardware context.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_dif_mode(ocs_hw_t *hw)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint8_t buf[SLI4_BMBX_SIZE];
	sli4_req_common_set_features_t10_pi_mem_model_t mode_param;

	ocs_memset(&mode_param, 0, sizeof(mode_param));
	mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1);

	/* send set_features command */
	if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
					SLI4_SET_FEATURES_DIF_MEMORY_MODE,
					sizeof(mode_param),
					(uint32_t*)&mode_param)) {
		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
		if (rc) {
			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
		} else {
			ocs_log_test(hw->os, "DIF mode set to %s\n",
				     (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate"));
		}
	} else {
		ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
		rc = OCS_HW_RTN_ERROR;
	}
	return rc;
}

static void
ocs_hw_watchdog_timer_cb(void *arg)
{
	ocs_hw_t *hw = (ocs_hw_t *)arg;

	ocs_hw_config_watchdog_timer(hw);
	return;
}

static void
ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	uint16_t timeout = hw->watchdog_timeout;

	if (status != 0) {
		ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status);
	} else {
		if (timeout != 0) {
			/* re-arm the timer 500ms before the timeout expires to keep the heartbeat alive */
			ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout * 1000 - 500));
		} else {
			ocs_del_timer(&hw->watchdog_timer);
		}
	}

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	return;
}

/**
 * @brief Set configuration parameters for the watchdog timer feature.
 *
 * @param hw Hardware context. The timeout, in seconds, is taken from
 * hw->watchdog_timeout.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
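 *
 * @par Note
 * The completion handler ocs_hw_cb_cfg_watchdog() re-arms a driver timer
 * 500ms before the configured timeout, so the next lowlevel_set_watchdog
 * command refreshes the watchdog before it can expire; a timeout of zero
 * cancels the timer instead.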
6294 */ 6295 static ocs_hw_rtn_e 6296 ocs_hw_config_watchdog_timer(ocs_hw_t *hw) 6297 { 6298 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 6299 uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 6300 6301 if (!buf) { 6302 ocs_log_err(hw->os, "no buffer for command\n"); 6303 return OCS_HW_RTN_NO_MEMORY; 6304 } 6305 6306 sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout); 6307 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL); 6308 if (rc) { 6309 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 6310 ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc); 6311 } 6312 return rc; 6313 } 6314 6315 /** 6316 * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature. 6317 * 6318 * @param hw Hardware context. 6319 * @param buf Pointer to a mailbox buffer area. 6320 * 6321 * @return Returns OCS_HW_RTN_SUCCESS on success. 6322 */ 6323 static ocs_hw_rtn_e 6324 ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf) 6325 { 6326 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 6327 sli4_req_common_set_features_xfer_rdy_t10pi_t param; 6328 6329 ocs_memset(¶m, 0, sizeof(param)); 6330 param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1); 6331 param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0); 6332 param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1); 6333 param.app_tag = hw->config.auto_xfer_rdy_app_tag_value; 6334 param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip; 6335 6336 switch (hw->config.auto_xfer_rdy_p_type) { 6337 case 1: 6338 param.p_type = 0; 6339 break; 6340 case 3: 6341 param.p_type = 2; 6342 break; 6343 default: 6344 ocs_log_err(hw->os, "unsupported p_type %d\n", 6345 hw->config.auto_xfer_rdy_p_type); 6346 return OCS_HW_RTN_ERROR; 6347 } 6348 6349 /* build the set_features command */ 6350 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, 6351 SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI, 6352 sizeof(param), 6353 ¶m); 6354 6355 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); 6356 if (rc) { 6357 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); 6358 } else { 6359 ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n", 6360 param.rtc, param.atv, param.p_type, 6361 param.app_tag, param.blk_size); 6362 } 6363 6364 return rc; 6365 } 6366 6367 /** 6368 * @brief enable sli port health check 6369 * 6370 * @param hw Hardware context. 6371 * @param buf Pointer to a mailbox buffer area. 6372 * @param query current status of the health check feature enabled/disabled 6373 * @param enable if 1: enable 0: disable 6374 * @param buf Pointer to a mailbox buffer area. 6375 * 6376 * @return Returns OCS_HW_RTN_SUCCESS on success. 
6377 */ 6378 static ocs_hw_rtn_e 6379 ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable) 6380 { 6381 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 6382 uint8_t buf[SLI4_BMBX_SIZE]; 6383 sli4_req_common_set_features_health_check_t param; 6384 6385 ocs_memset(¶m, 0, sizeof(param)); 6386 param.hck = enable; 6387 param.qry = query; 6388 6389 /* build the set_features command */ 6390 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, 6391 SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, 6392 sizeof(param), 6393 ¶m); 6394 6395 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); 6396 if (rc) { 6397 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc); 6398 } else { 6399 ocs_log_test(hw->os, "SLI Port Health Check is enabled \n"); 6400 } 6401 6402 return rc; 6403 } 6404 6405 /** 6406 * @brief Set FTD transfer hint feature 6407 * 6408 * @param hw Hardware context. 6409 * @param fdt_xfer_hint size in bytes where read requests are segmented. 6410 * 6411 * @return Returns OCS_HW_RTN_SUCCESS on success. 6412 */ 6413 static ocs_hw_rtn_e 6414 ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint) 6415 { 6416 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 6417 uint8_t buf[SLI4_BMBX_SIZE]; 6418 sli4_req_common_set_features_set_fdt_xfer_hint_t param; 6419 6420 ocs_memset(¶m, 0, sizeof(param)); 6421 param.fdt_xfer_hint = fdt_xfer_hint; 6422 /* build the set_features command */ 6423 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE, 6424 SLI4_SET_FEATURES_SET_FTD_XFER_HINT, 6425 sizeof(param), 6426 ¶m); 6427 6428 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL); 6429 if (rc) { 6430 ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc); 6431 } else { 6432 ocs_log_debug(hw->os, "Set FTD transfer hint to %d\n", param.fdt_xfer_hint); 6433 } 6434 6435 return rc; 6436 } 6437 6438 /** 6439 * @brief Get the link configuration callback. 6440 * 6441 * @param hw Hardware context. 6442 * @param status Status from the DMTF CLP command. 6443 * @param result_len Length, in bytes, of the DMTF CLP result. 6444 * @param arg Pointer to a callback argument. 6445 * 6446 * @return Returns OCS_HW_RTN_SUCCESS on success. 
 */
static void
ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
{
	int32_t rval;
	char retdata_str[64];
	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
	ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;

	if (status) {
		ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
	} else {
		/* parse CLP response to get return data */
		rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
						 sizeof(retdata_str),
						 cb_arg->dma_resp.virt,
						 result_len);

		if (rval <= 0) {
			ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
		} else {
			/* translate string into hw enum */
			linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
		}
	}

	/* invoke callback */
	if (cb_arg->cb) {
		cb_arg->cb(status, linkcfg, cb_arg->arg);
	}

	/* if polling, will free memory in calling function */
	if (cb_arg->opts != OCS_CMD_POLL) {
		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
		ocs_dma_free(hw->os, &cb_arg->dma_resp);
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
	}
}

/**
 * @brief Set the Lancer dump location.
 * @par Description
 * This function tells a Lancer chip to use a specific DMA
 * buffer as a dump location rather than the internal flash.
 *
 * @param hw Hardware context.
 * @param num_buffers The number of DMA buffers to hold the dump (1..n).
 * @param dump_buffers DMA buffers to hold the dump.
 * @param fdb If non-zero, the location applies to the function-specific
 * dump buffer; if zero, it applies to the chip-level dump and is valid
 * only on PCI function 0.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
ocs_hw_rtn_e
ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
{
	uint8_t bus, dev, func;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint8_t buf[SLI4_BMBX_SIZE];

	/*
	 * Make sure the FW is new enough to support this command. If the FW
	 * is too old, the FW will UE.
	 */
	if (hw->workaround.disable_dump_loc) {
		ocs_log_test(hw->os, "FW version is too old for this feature\n");
		return OCS_HW_RTN_ERROR;
	}

	/* This command is only valid for physical port 0 */
	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
	if (fdb == 0 && func != 0) {
		ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
			     func);
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * If a single buffer is used, then it may be passed as-is to the chip.
	 * For multiple buffers, we must allocate an SGL list and then pass the
	 * address of the list to the chip.
	 */
	if (num_buffers > 1) {
		uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
		sli4_sge_t *sge;
		uint32_t i;

		if (hw->dump_sges.size < sge_size) {
			ocs_dma_free(hw->os, &hw->dump_sges);
			if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
				ocs_log_err(hw->os, "SGE DMA allocation failed\n");
				return OCS_HW_RTN_NO_MEMORY;
			}
		}
		/* build the SGE list */
		ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
		hw->dump_sges.len = sge_size;
		sge = hw->dump_sges.virt;
		for (i = 0; i < num_buffers; i++) {
			sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
			sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
			sge[i].last = (i == num_buffers - 1 ? 1 : 0);
			sge[i].buffer_length = dump_buffers[i].size;
		}
		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
						      SLI4_BMBX_SIZE, FALSE, TRUE,
						      &hw->dump_sges, fdb);
	} else {
		dump_buffers->len = dump_buffers->size;
		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
						      SLI4_BMBX_SIZE, FALSE, FALSE,
						      dump_buffers, fdb);
	}

	if (rc) {
		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL,
				    NULL, NULL);
		if (rc) {
			ocs_log_err(hw->os, "ocs_hw_command returns %d\n",
				    rc);
		}
	} else {
		ocs_log_err(hw->os,
			    "sli_cmd_common_set_dump_location failed\n");
		rc = OCS_HW_RTN_ERROR;
	}

	return rc;
}

/**
 * @brief Set the Ethernet license.
 *
 * @par Description
 * This function sends the appropriate mailbox command (DMTF
 * CLP) to set the Ethernet license to the given license value.
 * Since it is used during the time of ocs_hw_init(), the mailbox
 * command is sent via polling (the BMBX route).
 *
 * @param hw Hardware context.
 * @param license 32-bit license value.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success.
 */
static ocs_hw_rtn_e
ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
	ocs_dma_t dma_cmd;
	ocs_dma_t dma_resp;

	/* only for lancer right now */
	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
		return OCS_HW_RTN_ERROR;
	}

	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
	/* allocate DMA for command */
	if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
		ocs_log_err(hw->os, "malloc failed\n");
		return OCS_HW_RTN_NO_MEMORY;
	}
	ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
	ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));

	/* allocate DMA for response */
	if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
		ocs_log_err(hw->os, "malloc failed\n");
		ocs_dma_free(hw->os, &dma_cmd);
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* send DMTF CLP command mbx and poll */
	if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
		ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
		rc = OCS_HW_RTN_ERROR;
	}

	ocs_dma_free(hw->os, &dma_cmd);
	ocs_dma_free(hw->os, &dma_resp);
	return rc;
}

/**
 * @brief Callback argument structure for the DMTF CLP commands.
 */
typedef struct ocs_hw_clp_cb_arg_s {
	ocs_hw_dmtf_clp_cb_t cb;
	ocs_dma_t *dma_resp;
	int32_t status;
	uint32_t opts;
	void *arg;
} ocs_hw_clp_cb_arg_t;

/**
 * @brief Execute the DMTF CLP command.
 *
 * @param hw Hardware context.
 * @param dma_cmd DMA buffer containing the CLP command.
 * @param dma_resp DMA buffer that will contain the response (if successful).
 * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
 * @param cb Callback function.
 * @param arg Callback argument.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or a non-zero value
 * on failure.
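 *
 * @par Example
 * The command buffer holds a NUL-terminated CLP text command; callers in
 * this file build strings such as:
 * @code
 * "show / OEMELX_LinkConfig"
 * "set / OEMELX_LinkConfig=ELX_2x16G"
 * @endcode
 * (The second string is illustrative; the actual value comes from
 * linkcfg_map.)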
6652 */ 6653 static ocs_hw_rtn_e 6654 ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg) 6655 { 6656 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 6657 ocs_hw_clp_cb_arg_t *cb_arg; 6658 uint8_t *mbxdata; 6659 6660 /* allocate DMA for mailbox */ 6661 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 6662 if (mbxdata == NULL) { 6663 ocs_log_err(hw->os, "failed to malloc mbox\n"); 6664 return OCS_HW_RTN_NO_MEMORY; 6665 } 6666 6667 /* allocate memory for callback argument */ 6668 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT); 6669 if (cb_arg == NULL) { 6670 ocs_log_err(hw->os, "failed to malloc cb_arg"); 6671 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 6672 return OCS_HW_RTN_NO_MEMORY; 6673 } 6674 6675 cb_arg->cb = cb; 6676 cb_arg->arg = arg; 6677 cb_arg->dma_resp = dma_resp; 6678 cb_arg->opts = opts; 6679 6680 /* Send the HW command */ 6681 if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 6682 dma_cmd, dma_resp)) { 6683 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg); 6684 6685 if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) { 6686 /* if we're polling, copy response and invoke callback to 6687 * parse result */ 6688 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE); 6689 ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg); 6690 6691 /* set rc to resulting or "parsed" status */ 6692 rc = cb_arg->status; 6693 } 6694 6695 /* if failed, or polling, free memory here */ 6696 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) { 6697 if (rc != OCS_HW_RTN_SUCCESS) { 6698 ocs_log_test(hw->os, "ocs_hw_command failed\n"); 6699 } 6700 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 6701 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 6702 } 6703 } else { 6704 ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n"); 6705 rc = OCS_HW_RTN_ERROR; 6706 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 6707 ocs_free(hw->os, cb_arg, sizeof(*cb_arg)); 6708 } 6709 6710 return rc; 6711 } 6712 6713 /** 6714 * @brief Called when the DMTF CLP command completes. 6715 * 6716 * @param hw Hardware context. 6717 * @param status Status field from the mbox completion. 6718 * @param mqe Mailbox response structure. 6719 * @param arg Pointer to a callback argument. 6720 * 6721 * @return None. 
 *
 */
static void
ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	int32_t cb_status = 0;
	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
	sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
	ocs_hw_clp_cb_arg_t *cb_arg = arg;
	uint32_t result_len = 0;
	int32_t stat_len;
	char stat_str[8];

	/* there are several status codes here, check them all and condense
	 * into a single callback status
	 */
	if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
		ocs_log_debug(hw->os, "status=x%x/x%x/x%x addl=x%x clp=x%x detail=x%x\n",
			      status,
			      mbox_rsp->hdr.status,
			      clp_rsp->hdr.status,
			      clp_rsp->hdr.additional_status,
			      clp_rsp->clp_status,
			      clp_rsp->clp_detailed_status);
		if (status) {
			cb_status = status;
		} else if (mbox_rsp->hdr.status) {
			cb_status = mbox_rsp->hdr.status;
		} else {
			cb_status = clp_rsp->clp_status;
		}
	} else {
		result_len = clp_rsp->resp_length;
	}

	if (cb_status) {
		goto ocs_hw_cb_dmtf_clp_done;
	}

	if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
		ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
			     cb_arg->dma_resp->size, result_len);
		cb_status = -1;
		goto ocs_hw_cb_dmtf_clp_done;
	}

	/* parse CLP response to get status */
	stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
					     sizeof(stat_str),
					     cb_arg->dma_resp->virt,
					     result_len);

	if (stat_len <= 0) {
		ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
		cb_status = -1;
		goto ocs_hw_cb_dmtf_clp_done;
	}

	if (ocs_strcmp(stat_str, "0") != 0) {
		ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
		cb_status = -1;
		goto ocs_hw_cb_dmtf_clp_done;
	}

ocs_hw_cb_dmtf_clp_done:

	/* save status in cb_arg for callers with NULL cb's + polling */
	cb_arg->status = cb_status;
	if (cb_arg->cb) {
		cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
	}
	/* if polling, caller will free memory */
	if (cb_arg->opts != OCS_CMD_POLL) {
		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	}
}

/**
 * @brief Parse the CLP result and get the value corresponding to the given
 * keyword.
 *
 * @param hw Hardware context.
 * @param keyword CLP keyword for which the value is returned.
 * @param value Location to which the resulting value is copied.
 * @param value_len Length of the value parameter.
 * @param resp Pointer to the response buffer that is searched
 * for the keyword and value.
 * @param resp_len Length of response buffer passed in.
 *
 * @return Returns the number of bytes written to the value
 * buffer on success, or a negative value on failure.
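 *
 * @par Example
 * A hypothetical call sketch, assuming the response buffer contains the
 * text "status=0\r\n":
 * @code
 * char val[8];
 * int32_t n;
 *
 * n = ocs_hw_clp_resp_get_value(hw, "status", val, sizeof(val),
 *				 resp, resp_len);
 * @endcode
 * On return, n is 2 (the value length plus the NUL terminator) and val
 * contains the string "0".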
 */
static int32_t
ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
{
	char *start = NULL;
	char *end = NULL;

	/* look for specified keyword in string */
	start = ocs_strstr(resp, keyword);
	if (start == NULL) {
		ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
			     keyword);
		return -1;
	}

	/* now look for '=' and go one past */
	start = ocs_strchr(start, '=');
	if (start == NULL) {
		ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
			     keyword);
		return -1;
	}
	start++;

	/* \r\n terminates value */
	end = ocs_strstr(start, "\r\n");
	if (end == NULL) {
		ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
			     keyword);
		return -1;
	}

	/* make sure given result array is big enough */
	if ((end - start + 1) > value_len) {
		ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
			     value_len, (end-start));
		return -1;
	}

	ocs_strncpy(value, start, (end - start));
	value[end-start] = '\0';
	return (end-start+1);
}

/**
 * @brief Cause chip to enter an unrecoverable error state.
 *
 * @par Description
 * Cause chip to enter an unrecoverable error state. This is
 * used when detecting unexpected FW behavior so that the FW can be
 * halted from the driver as soon as the error is detected.
 *
 * @param hw Hardware context.
 * @param dump Generate dump as part of reset.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 *
 */
ocs_hw_rtn_e
ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	if (sli_raise_ue(&hw->sli, dump) != 0) {
		rc = OCS_HW_RTN_ERROR;
	} else {
		if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
			hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
		}
	}

	return rc;
}

/**
 * @brief Called when the OBJECT_GET command completes.
 *
 * @par Description
 * Get the number of bytes actually written out of the response, free the mailbox
 * that was malloc'd by ocs_hw_dump_get(), then call the callback
 * and pass the status and bytes read.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_read)</tt>.
 *
 * @return Returns 0.
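 *
 * @par Note
 * The eof flag from the COMMON_READ_OBJECT response is forwarded to the
 * callback so the caller can tell when the final chunk of the dump has
 * been read.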
6903 */ 6904 static int32_t 6905 ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 6906 { 6907 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; 6908 sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed; 6909 ocs_hw_dump_get_cb_arg_t *cb_arg = arg; 6910 uint32_t bytes_read; 6911 uint8_t eof; 6912 6913 bytes_read = rd_obj_rsp->actual_read_length; 6914 eof = rd_obj_rsp->eof; 6915 6916 if (cb_arg) { 6917 if (cb_arg->cb) { 6918 if ((status == 0) && mbox_rsp->hdr.status) { 6919 status = mbox_rsp->hdr.status; 6920 } 6921 cb_arg->cb(status, bytes_read, eof, cb_arg->arg); 6922 } 6923 6924 ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE); 6925 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t)); 6926 } 6927 6928 return 0; 6929 } 6930 6931 /** 6932 * @brief Read a dump image to the host. 6933 * 6934 * @par Description 6935 * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a 6936 * dump image chunk, then sends the command with the ocs_hw_command(). On completion, 6937 * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox 6938 * and signal the caller that the read has completed. 6939 * 6940 * @param hw Hardware context. 6941 * @param dma DMA structure to transfer the dump chunk into. 6942 * @param size Size of the dump chunk. 6943 * @param offset Offset, in bytes, from the beginning of the dump. 6944 * @param cb Pointer to a callback function that is called when the command completes. 6945 * The callback function prototype is 6946 * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>. 6947 * @param arg Pointer to be passed to the callback function. 6948 * 6949 * @return Returns 0 on success, or a non-zero value on failure. 6950 */ 6951 ocs_hw_rtn_e 6952 ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg) 6953 { 6954 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 6955 uint8_t *mbxdata; 6956 ocs_hw_dump_get_cb_arg_t *cb_arg; 6957 uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? 
OCS_CMD_NOWAIT : OCS_CMD_POLL); 6958 6959 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { 6960 ocs_log_test(hw->os, "Function only supported for I/F type 2\n"); 6961 return OCS_HW_RTN_ERROR; 6962 } 6963 6964 if (1 != sli_dump_is_present(&hw->sli)) { 6965 ocs_log_test(hw->os, "No dump is present\n"); 6966 return OCS_HW_RTN_ERROR; 6967 } 6968 6969 if (1 == sli_reset_required(&hw->sli)) { 6970 ocs_log_test(hw->os, "device reset required\n"); 6971 return OCS_HW_RTN_ERROR; 6972 } 6973 6974 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 6975 if (mbxdata == NULL) { 6976 ocs_log_err(hw->os, "failed to malloc mbox\n"); 6977 return OCS_HW_RTN_NO_MEMORY; 6978 } 6979 6980 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT); 6981 if (cb_arg == NULL) { 6982 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 6983 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 6984 return OCS_HW_RTN_NO_MEMORY; 6985 } 6986 6987 cb_arg->cb = cb; 6988 cb_arg->arg = arg; 6989 cb_arg->mbox_cmd = mbxdata; 6990 6991 if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 6992 size, offset, "/dbg/dump.bin", dma)) { 6993 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg); 6994 if (rc == 0 && opts == OCS_CMD_POLL) { 6995 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE); 6996 rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg); 6997 } 6998 } 6999 7000 if (rc != OCS_HW_RTN_SUCCESS) { 7001 ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n"); 7002 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7003 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t)); 7004 } 7005 7006 return rc; 7007 } 7008 7009 /** 7010 * @brief Called when the OBJECT_DELETE command completes. 7011 * 7012 * @par Description 7013 * Free the mailbox that was malloc'd 7014 * by ocs_hw_dump_clear(), then call the callback and pass the status. 7015 * 7016 * @param hw Hardware context. 7017 * @param status Status field from the mbox completion. 7018 * @param mqe Mailbox response structure. 7019 * @param arg Pointer to a callback function that signals the caller that the command is done. 7020 * The callback function prototype is <tt>void cb(int32_t status, void *arg)</tt>. 7021 * 7022 * @return Returns 0. 7023 */ 7024 static int32_t 7025 ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 7026 { 7027 ocs_hw_dump_clear_cb_arg_t *cb_arg = arg; 7028 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; 7029 7030 if (cb_arg) { 7031 if (cb_arg->cb) { 7032 if ((status == 0) && mbox_rsp->hdr.status) { 7033 status = mbox_rsp->hdr.status; 7034 } 7035 cb_arg->cb(status, cb_arg->arg); 7036 } 7037 7038 ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE); 7039 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t)); 7040 } 7041 7042 return 0; 7043 } 7044 7045 /** 7046 * @brief Clear a dump image from the device. 7047 * 7048 * @par Description 7049 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear 7050 * the dump, then sends the command with ocs_hw_command(). On completion, 7051 * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox 7052 * and to signal the caller that the write has completed. 7053 * 7054 * @param hw Hardware context. 7055 * @param cb Pointer to a callback function that is called when the command completes. 7056 * The callback function prototype is 7057 * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>. 7058 * @param arg Pointer to be passed to the callback function. 
7059 * 7060 * @return Returns 0 on success, or a non-zero value on failure. 7061 */ 7062 ocs_hw_rtn_e 7063 ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg) 7064 { 7065 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 7066 uint8_t *mbxdata; 7067 ocs_hw_dump_clear_cb_arg_t *cb_arg; 7068 uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL); 7069 7070 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) { 7071 ocs_log_test(hw->os, "Function only supported for I/F type 2\n"); 7072 return OCS_HW_RTN_ERROR; 7073 } 7074 7075 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 7076 if (mbxdata == NULL) { 7077 ocs_log_err(hw->os, "failed to malloc mbox\n"); 7078 return OCS_HW_RTN_NO_MEMORY; 7079 } 7080 7081 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT); 7082 if (cb_arg == NULL) { 7083 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 7084 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7085 return OCS_HW_RTN_NO_MEMORY; 7086 } 7087 7088 cb_arg->cb = cb; 7089 cb_arg->arg = arg; 7090 cb_arg->mbox_cmd = mbxdata; 7091 7092 if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 7093 "/dbg/dump.bin")) { 7094 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg); 7095 if (rc == 0 && opts == OCS_CMD_POLL) { 7096 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE); 7097 rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg); 7098 } 7099 } 7100 7101 if (rc != OCS_HW_RTN_SUCCESS) { 7102 ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n"); 7103 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7104 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t)); 7105 } 7106 7107 return rc; 7108 } 7109 7110 typedef struct ocs_hw_get_port_protocol_cb_arg_s { 7111 ocs_get_port_protocol_cb_t cb; 7112 void *arg; 7113 uint32_t pci_func; 7114 ocs_dma_t payload; 7115 } ocs_hw_get_port_protocol_cb_arg_t; 7116 7117 /** 7118 * @brief Called for the completion of get_port_profile for a 7119 * user request. 7120 * 7121 * @param hw Hardware context. 7122 * @param status The status from the MQE. 7123 * @param mqe Pointer to mailbox command buffer. 7124 * @param arg Pointer to a callback argument. 7125 * 7126 * @return Returns 0 on success, or a non-zero value on failure. 
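 *
 * @par Note
 * The protocol is derived from the pf_type field of the PCIe resource
 * descriptor whose pf_number matches the requested PCI function: 0x02
 * maps to iSCSI, 0x04 to FCoE, and 0x10 to FC; anything else is reported
 * as OCS_HW_PORT_PROTOCOL_OTHER.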
7127 */ 7128 static int32_t 7129 ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status, 7130 uint8_t *mqe, void *arg) 7131 { 7132 ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg; 7133 ocs_dma_t *payload = &(cb_arg->payload); 7134 sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt; 7135 ocs_hw_port_protocol_e port_protocol; 7136 int num_descriptors; 7137 sli4_resource_descriptor_v1_t *desc_p; 7138 sli4_pcie_resource_descriptor_v1_t *pcie_desc_p; 7139 int i; 7140 7141 port_protocol = OCS_HW_PORT_PROTOCOL_OTHER; 7142 7143 num_descriptors = response->desc_count; 7144 desc_p = (sli4_resource_descriptor_v1_t *)response->desc; 7145 for (i=0; i<num_descriptors; i++) { 7146 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) { 7147 pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p; 7148 if (pcie_desc_p->pf_number == cb_arg->pci_func) { 7149 switch(pcie_desc_p->pf_type) { 7150 case 0x02: 7151 port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI; 7152 break; 7153 case 0x04: 7154 port_protocol = OCS_HW_PORT_PROTOCOL_FCOE; 7155 break; 7156 case 0x10: 7157 port_protocol = OCS_HW_PORT_PROTOCOL_FC; 7158 break; 7159 default: 7160 port_protocol = OCS_HW_PORT_PROTOCOL_OTHER; 7161 break; 7162 } 7163 } 7164 } 7165 7166 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length); 7167 } 7168 7169 if (cb_arg->cb) { 7170 cb_arg->cb(status, port_protocol, cb_arg->arg); 7171 } 7172 7173 ocs_dma_free(hw->os, &cb_arg->payload); 7174 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t)); 7175 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 7176 7177 return 0; 7178 } 7179 7180 /** 7181 * @ingroup io 7182 * @brief Get the current port protocol. 7183 * @par Description 7184 * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox. When the 7185 * command completes the provided mgmt callback function is 7186 * called. 7187 * 7188 * @param hw Hardware context. 7189 * @param pci_func PCI function to query for current protocol. 7190 * @param cb Callback function to be called when the command completes. 7191 * @param ul_arg An argument that is passed to the callback function. 7192 * 7193 * @return 7194 * - OCS_HW_RTN_SUCCESS on success. 7195 * - OCS_HW_RTN_NO_MEMORY if a malloc fails. 7196 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command 7197 * context. 7198 * - OCS_HW_RTN_ERROR on any other error. 
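 *
 * @par Example
 * A minimal usage sketch. The callback name is a hypothetical placeholder;
 * its signature is inferred from the completion call in
 * ocs_hw_get_port_protocol_cb():
 * @code
 * static void
 * my_port_protocol_cb(int32_t status, ocs_hw_port_protocol_e proto, void *arg)
 * {
 *         if ((status == 0) && (proto == OCS_HW_PORT_PROTOCOL_FC)) {
 *                 // this PCI function is currently configured for FC
 *         }
 * }
 *
 * ocs_hw_get_port_protocol(hw, pci_func, my_port_protocol_cb, NULL);
 * @endcode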
7199 */
7200 ocs_hw_rtn_e
7201 ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func,
7202	ocs_get_port_protocol_cb_t cb, void* ul_arg)
7203 {
7204	uint8_t *mbxdata;
7205	ocs_hw_get_port_protocol_cb_arg_t *cb_arg;
7206	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7207
7208	/* Only supported on Skyhawk */
7209	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7210		return OCS_HW_RTN_ERROR;
7211	}
7212
7213	/* mbxdata holds the header of the command */
7214	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7215	if (mbxdata == NULL) {
7216		ocs_log_err(hw->os, "failed to malloc mbox\n");
7217		return OCS_HW_RTN_NO_MEMORY;
7218	}
7219
7220	/* cb_arg holds the data that will be passed to the callback on completion */
7221	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7222	if (cb_arg == NULL) {
7223		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7224		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7225		return OCS_HW_RTN_NO_MEMORY;
7226	}
7227
7228	cb_arg->cb = cb;
7229	cb_arg->arg = ul_arg;
7230	cb_arg->pci_func = pci_func;
7231
7232	/* dma_mem holds the non-embedded portion */
7233	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7234		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7235		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7236		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7237		return OCS_HW_RTN_NO_MEMORY;
7238	}
7239
7240	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7241		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg);
7242	}
7243
7244	if (rc != OCS_HW_RTN_SUCCESS) {
7245		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7246		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7247		ocs_dma_free(hw->os, &cb_arg->payload);
7248		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7249	}
7250
7251	return rc;
7252
7253 }
7254
7255 typedef struct ocs_hw_set_port_protocol_cb_arg_s {
7256	ocs_set_port_protocol_cb_t cb;
7257	void *arg;
7258	ocs_dma_t payload;
7259	uint32_t new_protocol;
7260	uint32_t pci_func;
7261 } ocs_hw_set_port_protocol_cb_arg_t;
7262
7263 /**
7264  * @brief Called for the completion of set_port_protocol for a
7265  * user request.
7266  *
7267  * @par Description
7268  * This is the second of two callbacks for the set_port_protocol
7269  * function. The set operation is a read-modify-write. This
7270  * callback is called when the write (SET_PROFILE_CONFIG)
7271  * completes.
7272  *
7273  * @param hw Hardware context.
7274  * @param status The status from the MQE.
7275  * @param mqe Pointer to mailbox command buffer.
7276  * @param arg Pointer to a callback argument.
7277  *
7278  * @return Returns 0 on success, or a non-zero value otherwise.
7279  */
7280 static int32_t
7281 ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7282 {
7283	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7284
7285	if (cb_arg->cb) {
7286		cb_arg->cb(status, cb_arg->arg);
7287	}
7288
7289	ocs_dma_free(hw->os, &(cb_arg->payload));
7290	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7291	ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7292
7293	return 0;
7294 }
7295
7296 /**
7297  * @brief Called for the completion of set_port_protocol for a
7298  * user request.
7299  *
7300  * @par Description
7301  * This is the first of two callbacks for the set_port_protocol
7302  * function. The set operation is a read-modify-write. This
7303  * callback is called when the read completes
7304  * (GET_PROFILE_CONFIG).
It will update the resource
7305  * descriptors, then queue the write (SET_PROFILE_CONFIG).
7306  *
7307  * On entry, there are three memory areas that were allocated by
7308  * ocs_hw_set_port_protocol(). If a failure is detected in this
7309  * function, those must be freed. If this function succeeds,
7310  * it allocates three more areas.
7311  *
7312  * @param hw Hardware context.
7313  * @param status The status from the MQE.
7314  * @param mqe Pointer to mailbox command buffer.
7315  * @param arg Pointer to a callback argument.
7316  *
7317  * @return Returns 0 on success, or a non-zero value otherwise.
7318  */
7319 static int32_t
7320 ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7321 {
7322	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7323	ocs_dma_t *payload = &(cb_arg->payload);
7324	sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7325	int num_descriptors;
7326	sli4_resource_descriptor_v1_t *desc_p;
7327	sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7328	int i;
7329	ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg;
7330	ocs_hw_port_protocol_e new_protocol;
7331	uint8_t *dst;
7332	sli4_isap_resouce_descriptor_v1_t *isap_desc_p;
7333	uint8_t *mbxdata;
7334	int pci_descriptor_count;
7335	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7336	int num_fcoe_ports = 0;
7337	int num_iscsi_ports = 0;
7338
7339	new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol;
7340
7341	num_descriptors = response->desc_count;
7342
7343	/* Count PCI descriptors */
7344	pci_descriptor_count = 0;
7345	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7346	for (i=0; i<num_descriptors; i++) {
7347		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7348			++pci_descriptor_count;
7349		}
7350		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7351	}
7352
7353	/* mbxdata holds the header of the command */
7354	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7355	if (mbxdata == NULL) {
7356		ocs_log_err(hw->os, "failed to malloc mbox\n");
		/* free the three areas allocated by ocs_hw_set_port_protocol() */
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7357		return OCS_HW_RTN_NO_MEMORY;
7358	}
7359
7360	/* cb_arg holds the data that will be passed to the callback on completion */
7361	new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7362	if (new_cb_arg == NULL) {
7363		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7364		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		/* free the three areas allocated by ocs_hw_set_port_protocol() */
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7365		return OCS_HW_RTN_NO_MEMORY;
7366	}
7367
7368	new_cb_arg->cb = cb_arg->cb;
7369	new_cb_arg->arg = cb_arg->arg;
7370
7371	/* Allocate memory for the descriptors we're going to send. This is
7372	 * one for each PCI descriptor plus one ISAP descriptor.
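	 *
	 * For example (illustrative numbers only): with four PCIe function
	 * descriptors, the allocation below is
	 * sizeof(sli4_req_common_set_profile_config_t)
	 * + 4 * sizeof(sli4_pcie_resource_descriptor_v1_t)
	 * + sizeof(sli4_isap_resouce_descriptor_v1_t).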
	 */
7373	if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) +
7374			(pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) +
7375			sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) {
7376		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7377		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7378		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
		/* free the three areas allocated by ocs_hw_set_port_protocol() */
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7379		return OCS_HW_RTN_NO_MEMORY;
7380	}
7381
7382	sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7383			&new_cb_arg->payload,
7384			0, pci_descriptor_count+1, 1);
7385
7386	/* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */
7387	dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc);
7388
7389	/* Loop over all descriptors. If the descriptor is a PCIe descriptor, copy it
7390	 * to the SET_PROFILE_CONFIG command to be written back. If it's the descriptor
7391	 * that we're trying to change, also set its pf_type.
7392	 */
7393	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7394	for (i=0; i<num_descriptors; i++) {
7395		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7396			pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7397			if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7398				/* This is the PCIe descriptor for this OCS instance.
7399				 * Update it with the new pf_type */
7400				switch(new_protocol) {
7401				case OCS_HW_PORT_PROTOCOL_FC:
7402					pcie_desc_p->pf_type = SLI4_PROTOCOL_FC;
7403					break;
7404				case OCS_HW_PORT_PROTOCOL_FCOE:
7405					pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE;
7406					break;
7407				case OCS_HW_PORT_PROTOCOL_ISCSI:
7408					pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI;
7409					break;
7410				default:
7411					pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT;
7412					break;
7413				}
7414			}
7415
7416			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) {
7417				++num_fcoe_ports;
7418			}
7419			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) {
7420				++num_iscsi_ports;
7421			}
7422			ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t));
7423			dst += sizeof(sli4_pcie_resource_descriptor_v1_t);
7424		}
7425
7426		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7427	}
7428
7429	/* Create an ISAP resource descriptor */
7430	isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst;
7431	isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP;
7432	isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t);
7433	if (num_iscsi_ports > 0) {
7434		isap_desc_p->iscsi_tgt = 1;
7435		isap_desc_p->iscsi_ini = 1;
7436		isap_desc_p->iscsi_dif = 1;
7437	}
7438	if (num_fcoe_ports > 0) {
7439		isap_desc_p->fcoe_tgt = 1;
7440		isap_desc_p->fcoe_ini = 1;
7441		isap_desc_p->fcoe_dif = 1;
7442	}
7443
7444	/* At this point we're done with the memory allocated by ocs_hw_set_port_protocol() */
7445	ocs_dma_free(hw->os, &cb_arg->payload);
7446	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7447	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7448
7449	/* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */
7450	rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg);
7451	if (rc) {
7452		ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n");
7453		/* Call the upper level callback to report a failure */
7454		if (new_cb_arg->cb) {
7455			new_cb_arg->cb(rc, new_cb_arg->arg);
7456		}
7457
7458		/* Free the memory allocated by this function */
7459		ocs_dma_free(hw->os,
&new_cb_arg->payload);
7460		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7461		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7462	}
7463
7464	return rc;
7465 }
7466
7467 /**
7468  * @ingroup io
7469  * @brief Set the port protocol.
7470  * @par Description
7471  * Setting the port protocol is a read-modify-write operation.
7472  * This function submits a GET_PROFILE_CONFIG command to read
7473  * the current settings. The callback function will modify the
7474  * settings and issue the write.
7475  *
7476  * On successful completion, this function will have allocated
7477  * two regular memory areas and one DMA area, which the
7478  * callbacks free later.
7479  *
7480  * @param hw Hardware context.
7481  * @param new_protocol New protocol to use.
7482  * @param pci_func PCI function to configure.
7483  * @param cb Callback function to be called when the command completes.
7484  * @param ul_arg An argument that is passed to the callback function.
7485  *
7486  * @return
7487  * - OCS_HW_RTN_SUCCESS on success.
7488  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7489  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7490  * context.
7491  * - OCS_HW_RTN_ERROR on any other error.
7492  */
7493 ocs_hw_rtn_e
7494 ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol,
7495		uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg)
7496 {
7497	uint8_t *mbxdata;
7498	ocs_hw_set_port_protocol_cb_arg_t *cb_arg;
7499	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7500
7501	/* Only supported on Skyhawk */
7502	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7503		return OCS_HW_RTN_ERROR;
7504	}
7505
7506	/* mbxdata holds the header of the command */
7507	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7508	if (mbxdata == NULL) {
7509		ocs_log_err(hw->os, "failed to malloc mbox\n");
7510		return OCS_HW_RTN_NO_MEMORY;
7511	}
7512
7513	/* cb_arg holds the data that will be passed to the callback on completion */
7514	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7515	if (cb_arg == NULL) {
7516		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7517		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7518		return OCS_HW_RTN_NO_MEMORY;
7519	}
7520
7521	cb_arg->cb = cb;
7522	cb_arg->arg = ul_arg;
7523	cb_arg->new_protocol = new_protocol;
7524	cb_arg->pci_func = pci_func;
7525
7526	/* dma_mem holds the non-embedded portion */
7527	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7528		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7529		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7530		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7531		return OCS_HW_RTN_NO_MEMORY;
7532	}
7533
7534	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7535		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg);
7536	}
7537
7538	if (rc != OCS_HW_RTN_SUCCESS) {
7539		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7540		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7541		ocs_dma_free(hw->os, &cb_arg->payload);
7542		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7543	}
7544
7545	return rc;
7546 }
7547
7548 typedef struct ocs_hw_get_profile_list_cb_arg_s {
7549	ocs_get_profile_list_cb_t cb;
7550	void *arg;
7551	ocs_dma_t payload;
7552 } ocs_hw_get_profile_list_cb_arg_t;
7553
7554 /**
7555  * @brief Called for the completion of get_profile_list for a
7556  * user request.
7557  * @par Description
7558  * This function is called when the COMMON_GET_PROFILE_LIST
7559  * mailbox completes. The response will be in
7560  * ctx->non_embedded_mem.virt. This function parses the
7561  * response and creates an ocs_hw_profile_list, then calls the
7562  * mgmt_cb callback function and passes that list to it.
7563  *
7564  * @param hw Hardware context.
7565  * @param status The status from the MQE.
7566  * @param mqe Pointer to mailbox command buffer.
7567  * @param arg Pointer to a callback argument.
7568  *
7569  * @return Returns 0 on success, or a non-zero value on failure.
7570  */
7571 static int32_t
7572 ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7573 {
7574	ocs_hw_profile_list_t *list;
7575	ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg;
7576	ocs_dma_t *payload = &(cb_arg->payload);
7577	sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt;
7578	int i;
7579	int num_descriptors;
7580
7581	list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO);
7582	if (list == NULL) {
7583		ocs_log_err(hw->os, "failed to malloc list\n");
		/* free the areas allocated by ocs_hw_get_profile_list() */
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7584		return OCS_HW_RTN_NO_MEMORY;
7585	}
7586
7587	list->num_descriptors = response->profile_descriptor_count;
7588
7589	num_descriptors = list->num_descriptors;
7590	if (num_descriptors > OCS_HW_MAX_PROFILES) {
7591		num_descriptors = OCS_HW_MAX_PROFILES;
7592	}
7593
7594	for (i=0; i<num_descriptors; i++) {
7595		list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id;
7596		list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index;
7597		ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description);
7598	}
7599
7600	if (cb_arg->cb) {
7601		cb_arg->cb(status, list, cb_arg->arg);
7602	} else {
7603		ocs_free(hw->os, list, sizeof(*list));
7604	}
7605
7606	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7607	ocs_dma_free(hw->os, &cb_arg->payload);
7608	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7609
7610	return 0;
7611 }
7612
7613 /**
7614  * @ingroup io
7615  * @brief Get a list of available profiles.
7616  * @par Description
7617  * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox. When the
7618  * command completes, the provided mgmt callback function is
7619  * called.
7620  *
7621  * @param hw Hardware context.
7622  * @param cb Callback function to be called when the
7623  * command completes.
7624  * @param ul_arg An argument that is passed to the callback
7625  * function.
7626  *
7627  * @return
7628  * - OCS_HW_RTN_SUCCESS on success.
7629  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7630  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7631  * context.
7632  * - OCS_HW_RTN_ERROR on any other error.
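 *
 * @par Example
 * A minimal usage sketch; the callback name is a hypothetical placeholder.
 * Note that the callback takes ownership of the list and must free it:
 * @code
 * static void
 * my_profile_list_cb(int32_t status, ocs_hw_profile_list_t *list, void *arg)
 * {
 *         ocs_hw_t *hw = arg;
 *         uint32_t i;
 *         uint32_t n = list->num_descriptors;
 *
 *         if (n > OCS_HW_MAX_PROFILES) {
 *                 n = OCS_HW_MAX_PROFILES;  // only this many entries are populated
 *         }
 *         for (i = 0; (status == 0) && (i < n); i++) {
 *                 // inspect list->descriptors[i].profile_id / profile_description
 *         }
 *         ocs_free(hw->os, list, sizeof(*list));
 * }
 *
 * ocs_hw_get_profile_list(hw, my_profile_list_cb, hw);
 * @endcode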
7633 */ 7634 ocs_hw_rtn_e 7635 ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg) 7636 { 7637 uint8_t *mbxdata; 7638 ocs_hw_get_profile_list_cb_arg_t *cb_arg; 7639 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 7640 7641 /* Only supported on Skyhawk */ 7642 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { 7643 return OCS_HW_RTN_ERROR; 7644 } 7645 7646 /* mbxdata holds the header of the command */ 7647 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 7648 if (mbxdata == NULL) { 7649 ocs_log_err(hw->os, "failed to malloc mbox\n"); 7650 return OCS_HW_RTN_NO_MEMORY; 7651 } 7652 7653 /* cb_arg holds the data that will be passed to the callback on completion */ 7654 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT); 7655 if (cb_arg == NULL) { 7656 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 7657 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7658 return OCS_HW_RTN_NO_MEMORY; 7659 } 7660 7661 cb_arg->cb = cb; 7662 cb_arg->arg = ul_arg; 7663 7664 /* dma_mem holds the non-embedded portion */ 7665 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) { 7666 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n"); 7667 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7668 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); 7669 return OCS_HW_RTN_NO_MEMORY; 7670 } 7671 7672 if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) { 7673 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg); 7674 } 7675 7676 if (rc != OCS_HW_RTN_SUCCESS) { 7677 ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n"); 7678 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7679 ocs_dma_free(hw->os, &cb_arg->payload); 7680 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t)); 7681 } 7682 7683 return rc; 7684 } 7685 7686 typedef struct ocs_hw_get_active_profile_cb_arg_s { 7687 ocs_get_active_profile_cb_t cb; 7688 void *arg; 7689 } ocs_hw_get_active_profile_cb_arg_t; 7690 7691 /** 7692 * @brief Called for the completion of get_active_profile for a 7693 * user request. 7694 * 7695 * @param hw Hardware context. 7696 * @param status The status from the MQE 7697 * @param mqe Pointer to mailbox command buffer. 7698 * @param arg Pointer to a callback argument. 7699 * 7700 * @return Returns 0 on success, or a non-zero value on failure. 7701 */ 7702 static int32_t 7703 ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 7704 { 7705 ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg; 7706 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe; 7707 sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed; 7708 uint32_t active_profile; 7709 7710 active_profile = response->active_profile_id; 7711 7712 if (cb_arg->cb) { 7713 cb_arg->cb(status, active_profile, cb_arg->arg); 7714 } 7715 7716 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 7717 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t)); 7718 7719 return 0; 7720 } 7721 7722 /** 7723 * @ingroup io 7724 * @brief Get the currently active profile. 7725 * @par Description 7726 * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the 7727 * command completes the provided mgmt callback function is 7728 * called. 7729 * 7730 * @param hw Hardware context. 7731 * @param cb Callback function to be called when the 7732 * command completes. 
7733 * @param ul_arg An argument that is passed to the callback 7734 * function. 7735 * 7736 * @return 7737 * - OCS_HW_RTN_SUCCESS on success. 7738 * - OCS_HW_RTN_NO_MEMORY if a malloc fails. 7739 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command 7740 * context. 7741 * - OCS_HW_RTN_ERROR on any other error. 7742 */ 7743 int32_t 7744 ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg) 7745 { 7746 uint8_t *mbxdata; 7747 ocs_hw_get_active_profile_cb_arg_t *cb_arg; 7748 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 7749 7750 /* Only supported on Skyhawk */ 7751 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) { 7752 return OCS_HW_RTN_ERROR; 7753 } 7754 7755 /* mbxdata holds the header of the command */ 7756 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 7757 if (mbxdata == NULL) { 7758 ocs_log_err(hw->os, "failed to malloc mbox\n"); 7759 return OCS_HW_RTN_NO_MEMORY; 7760 } 7761 7762 /* cb_arg holds the data that will be passed to the callback on completion */ 7763 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT); 7764 if (cb_arg == NULL) { 7765 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 7766 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7767 return OCS_HW_RTN_NO_MEMORY; 7768 } 7769 7770 cb_arg->cb = cb; 7771 cb_arg->arg = ul_arg; 7772 7773 if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) { 7774 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg); 7775 } 7776 7777 if (rc != OCS_HW_RTN_SUCCESS) { 7778 ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n"); 7779 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7780 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t)); 7781 } 7782 7783 return rc; 7784 } 7785 7786 typedef struct ocs_hw_get_nvparms_cb_arg_s { 7787 ocs_get_nvparms_cb_t cb; 7788 void *arg; 7789 } ocs_hw_get_nvparms_cb_arg_t; 7790 7791 /** 7792 * @brief Called for the completion of get_nvparms for a 7793 * user request. 7794 * 7795 * @param hw Hardware context. 7796 * @param status The status from the MQE. 7797 * @param mqe Pointer to mailbox command buffer. 7798 * @param arg Pointer to a callback argument. 7799 * 7800 * @return 0 on success, non-zero otherwise 7801 */ 7802 static int32_t 7803 ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 7804 { 7805 ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg; 7806 sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe; 7807 7808 if (cb_arg->cb) { 7809 cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa, 7810 mbox_rsp->preferred_d_id, cb_arg->arg); 7811 } 7812 7813 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 7814 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t)); 7815 7816 return 0; 7817 } 7818 7819 /** 7820 * @ingroup io 7821 * @brief Read non-volatile parms. 7822 * @par Description 7823 * Issues a SLI-4 READ_NVPARMS mailbox. When the 7824 * command completes the provided mgmt callback function is 7825 * called. 7826 * 7827 * @param hw Hardware context. 7828 * @param cb Callback function to be called when the 7829 * command completes. 7830 * @param ul_arg An argument that is passed to the callback 7831 * function. 7832 * 7833 * @return 7834 * - OCS_HW_RTN_SUCCESS on success. 7835 * - OCS_HW_RTN_NO_MEMORY if a malloc fails. 7836 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command 7837 * context. 7838 * - OCS_HW_RTN_ERROR on any other error. 
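 *
 * @par Example
 * A minimal usage sketch; the callback name is hypothetical and its
 * signature is inferred from the parameters passed by
 * ocs_hw_get_nvparms_cb():
 * @code
 * static void
 * my_nvparms_cb(int32_t status, uint8_t *wwpn, uint8_t *wwnn,
 *               uint8_t hard_alpa, uint32_t preferred_d_id, void *arg)
 * {
 *         if (status == 0) {
 *                 // wwpn/wwnn point at the names read from non-volatile storage
 *         }
 * }
 *
 * ocs_hw_get_nvparms(hw, my_nvparms_cb, NULL);
 * @endcode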
7839 */ 7840 int32_t 7841 ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg) 7842 { 7843 uint8_t *mbxdata; 7844 ocs_hw_get_nvparms_cb_arg_t *cb_arg; 7845 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 7846 7847 /* mbxdata holds the header of the command */ 7848 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 7849 if (mbxdata == NULL) { 7850 ocs_log_err(hw->os, "failed to malloc mbox\n"); 7851 return OCS_HW_RTN_NO_MEMORY; 7852 } 7853 7854 /* cb_arg holds the data that will be passed to the callback on completion */ 7855 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT); 7856 if (cb_arg == NULL) { 7857 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 7858 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7859 return OCS_HW_RTN_NO_MEMORY; 7860 } 7861 7862 cb_arg->cb = cb; 7863 cb_arg->arg = ul_arg; 7864 7865 if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) { 7866 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg); 7867 } 7868 7869 if (rc != OCS_HW_RTN_SUCCESS) { 7870 ocs_log_test(hw->os, "READ_NVPARMS failed\n"); 7871 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7872 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t)); 7873 } 7874 7875 return rc; 7876 } 7877 7878 typedef struct ocs_hw_set_nvparms_cb_arg_s { 7879 ocs_set_nvparms_cb_t cb; 7880 void *arg; 7881 } ocs_hw_set_nvparms_cb_arg_t; 7882 7883 /** 7884 * @brief Called for the completion of set_nvparms for a 7885 * user request. 7886 * 7887 * @param hw Hardware context. 7888 * @param status The status from the MQE. 7889 * @param mqe Pointer to mailbox command buffer. 7890 * @param arg Pointer to a callback argument. 7891 * 7892 * @return Returns 0 on success, or a non-zero value on failure. 7893 */ 7894 static int32_t 7895 ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 7896 { 7897 ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg; 7898 7899 if (cb_arg->cb) { 7900 cb_arg->cb(status, cb_arg->arg); 7901 } 7902 7903 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 7904 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t)); 7905 7906 return 0; 7907 } 7908 7909 /** 7910 * @ingroup io 7911 * @brief Write non-volatile parms. 7912 * @par Description 7913 * Issues a SLI-4 WRITE_NVPARMS mailbox. When the 7914 * command completes the provided mgmt callback function is 7915 * called. 7916 * 7917 * @param hw Hardware context. 7918 * @param cb Callback function to be called when the 7919 * command completes. 7920 * @param wwpn Port's WWPN in big-endian order, or NULL to use default. 7921 * @param wwnn Port's WWNN in big-endian order, or NULL to use default. 7922 * @param hard_alpa A hard AL_PA address setting used during loop 7923 * initialization. If no hard AL_PA is required, set to 0. 7924 * @param preferred_d_id A preferred D_ID address setting 7925 * that may be overridden with the CONFIG_LINK mailbox command. 7926 * If there is no preference, set to 0. 7927 * @param ul_arg An argument that is passed to the callback 7928 * function. 7929 * 7930 * @return 7931 * - OCS_HW_RTN_SUCCESS on success. 7932 * - OCS_HW_RTN_NO_MEMORY if a malloc fails. 7933 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command 7934 * context. 7935 * - OCS_HW_RTN_ERROR on any other error. 
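 *
 * @par Example
 * A minimal usage sketch; the callback name and AL_PA value are illustrative,
 * and the callback signature is inferred from the completion call in
 * ocs_hw_set_nvparms_cb():
 * @code
 * static void
 * my_set_nvparms_done(int32_t status, void *arg)
 * {
 *         // status == 0 means the parameters were written
 * }
 *
 * // keep the current WWPN/WWNN, request hard AL_PA 0x01, no preferred D_ID
 * if (ocs_hw_set_nvparms(hw, my_set_nvparms_done, NULL, NULL, 0x01, 0,
 *                        NULL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "failed to submit WRITE_NVPARMS\n");
 * }
 * @endcode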
7936 */ 7937 int32_t 7938 ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn, 7939 uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg) 7940 { 7941 uint8_t *mbxdata; 7942 ocs_hw_set_nvparms_cb_arg_t *cb_arg; 7943 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 7944 7945 /* mbxdata holds the header of the command */ 7946 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 7947 if (mbxdata == NULL) { 7948 ocs_log_err(hw->os, "failed to malloc mbox\n"); 7949 return OCS_HW_RTN_NO_MEMORY; 7950 } 7951 7952 /* cb_arg holds the data that will be passed to the callback on completion */ 7953 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT); 7954 if (cb_arg == NULL) { 7955 ocs_log_err(hw->os, "failed to malloc cb_arg\n"); 7956 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7957 return OCS_HW_RTN_NO_MEMORY; 7958 } 7959 7960 cb_arg->cb = cb; 7961 cb_arg->arg = ul_arg; 7962 7963 if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) { 7964 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg); 7965 } 7966 7967 if (rc != OCS_HW_RTN_SUCCESS) { 7968 ocs_log_test(hw->os, "SET_NVPARMS failed\n"); 7969 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE); 7970 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t)); 7971 } 7972 7973 return rc; 7974 } 7975 7976 /** 7977 * @brief Called to obtain the count for the specified type. 7978 * 7979 * @param hw Hardware context. 7980 * @param io_count_type IO count type (inuse, free, wait_free). 7981 * 7982 * @return Returns the number of IOs on the specified list type. 7983 */ 7984 uint32_t 7985 ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type) 7986 { 7987 ocs_hw_io_t *io = NULL; 7988 uint32_t count = 0; 7989 7990 ocs_lock(&hw->io_lock); 7991 7992 switch (io_count_type) { 7993 case OCS_HW_IO_INUSE_COUNT : 7994 ocs_list_foreach(&hw->io_inuse, io) { 7995 count++; 7996 } 7997 break; 7998 case OCS_HW_IO_FREE_COUNT : 7999 ocs_list_foreach(&hw->io_free, io) { 8000 count++; 8001 } 8002 break; 8003 case OCS_HW_IO_WAIT_FREE_COUNT : 8004 ocs_list_foreach(&hw->io_wait_free, io) { 8005 count++; 8006 } 8007 break; 8008 case OCS_HW_IO_PORT_OWNED_COUNT: 8009 ocs_list_foreach(&hw->io_port_owned, io) { 8010 count++; 8011 } 8012 break; 8013 case OCS_HW_IO_N_TOTAL_IO_COUNT : 8014 count = hw->config.n_io; 8015 break; 8016 } 8017 8018 ocs_unlock(&hw->io_lock); 8019 8020 return count; 8021 } 8022 8023 /** 8024 * @brief Called to obtain the count of produced RQs. 8025 * 8026 * @param hw Hardware context. 8027 * 8028 * @return Returns the number of RQs produced. 8029 */ 8030 uint32_t 8031 ocs_hw_get_rqes_produced_count(ocs_hw_t *hw) 8032 { 8033 uint32_t count = 0; 8034 uint32_t i; 8035 uint32_t j; 8036 8037 for (i = 0; i < hw->hw_rq_count; i++) { 8038 hw_rq_t *rq = hw->hw_rq[i]; 8039 if (rq->rq_tracker != NULL) { 8040 for (j = 0; j < rq->entry_count; j++) { 8041 if (rq->rq_tracker[j] != NULL) { 8042 count++; 8043 } 8044 } 8045 } 8046 } 8047 8048 return count; 8049 } 8050 8051 typedef struct ocs_hw_set_active_profile_cb_arg_s { 8052 ocs_set_active_profile_cb_t cb; 8053 void *arg; 8054 } ocs_hw_set_active_profile_cb_arg_t; 8055 8056 /** 8057 * @brief Called for the completion of set_active_profile for a 8058 * user request. 8059 * 8060 * @param hw Hardware context. 8061 * @param status The status from the MQE 8062 * @param mqe Pointer to mailbox command buffer. 8063 * @param arg Pointer to a callback argument. 
8064  *
8065  * @return Returns 0 on success, or a non-zero value on failure.
8066  */
8067 static int32_t
8068 ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
8069 {
8070	ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg;
8071
8072	if (cb_arg->cb) {
8073		cb_arg->cb(status, cb_arg->arg);
8074	}
8075
8076	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
8077	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8078
8079	return 0;
8080 }
8081
8082 /**
8083  * @ingroup io
8084  * @brief Set the currently active profile.
8085  * @par Description
8086  * Issues a SLI4 COMMON_SET_ACTIVE_PROFILE mailbox. When the
8087  * command completes, the provided mgmt callback function is
8088  * called.
8089  *
8090  * @param hw Hardware context.
8091  * @param profile_id Profile ID to activate.
8092  * @param cb Callback function to be called when the command completes.
8093  * @param ul_arg An argument that is passed to the callback function.
8094  *
8095  * @return
8096  * - OCS_HW_RTN_SUCCESS on success.
8097  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
8098  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
8099  * context.
8100  * - OCS_HW_RTN_ERROR on any other error.
8101  */
8102 int32_t
8103 ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg)
8104 {
8105	uint8_t *mbxdata;
8106	ocs_hw_set_active_profile_cb_arg_t *cb_arg;
8107	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
8108
8109	/* Only supported on Skyhawk */
8110	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
8111		return OCS_HW_RTN_ERROR;
8112	}
8113
8114	/* mbxdata holds the header of the command */
8115	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
8116	if (mbxdata == NULL) {
8117		ocs_log_err(hw->os, "failed to malloc mbox\n");
8118		return OCS_HW_RTN_NO_MEMORY;
8119	}
8120
8121	/* cb_arg holds the data that will be passed to the callback on completion */
8122	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT);
8123	if (cb_arg == NULL) {
8124		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
8125		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8126		return OCS_HW_RTN_NO_MEMORY;
8127	}
8128
8129	cb_arg->cb = cb;
8130	cb_arg->arg = ul_arg;
8131
8132	if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) {
8133		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
8134	}
8135
8136	if (rc != OCS_HW_RTN_SUCCESS) {
8137		ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
8138		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8139		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8140	}
8141
8142	return rc;
8143 }
8144
8145 /*
8146  * Private functions
8147  */
8148
8149 /**
8150  * @brief Update the queue hash with the ID and index.
8151  *
8152  * @param hash Pointer to hash table.
8153  * @param id ID that was created.
8154  * @param index The index into the hash object.
8155  */
8156 static void
8157 ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
8158 {
8159	uint32_t hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);
8160
8161	/*
8162	 * Since the hash is always bigger than the number of queues, we
8163	 * never have to worry about an infinite loop.
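	 *
	 * For example (illustrative values): with OCS_HW_Q_HASH_SIZE = 16,
	 * queue IDs 5 and 21 both hash to slot 5 (21 & 15 == 5); the later
	 * arrival simply probes forward and claims slot 6.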
8164 */ 8165 while(hash[hash_index].in_use) { 8166 hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1); 8167 } 8168 8169 /* not used, claim the entry */ 8170 hash[hash_index].id = id; 8171 hash[hash_index].in_use = 1; 8172 hash[hash_index].index = index; 8173 } 8174 8175 /** 8176 * @brief Find index given queue ID. 8177 * 8178 * @param hash Pointer to hash table. 8179 * @param id ID to find. 8180 * 8181 * @return Returns the index into the HW cq array or -1 if not found. 8182 */ 8183 int32_t 8184 ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id) 8185 { 8186 int32_t rc = -1; 8187 int32_t index = id & (OCS_HW_Q_HASH_SIZE - 1); 8188 8189 /* 8190 * Since the hash is always bigger than the maximum number of Qs, then we 8191 * never have to worry about an infinite loop. We will always find an 8192 * unused entry. 8193 */ 8194 do { 8195 if (hash[index].in_use && 8196 hash[index].id == id) { 8197 rc = hash[index].index; 8198 } else { 8199 index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1); 8200 } 8201 } while(rc == -1 && hash[index].in_use); 8202 8203 return rc; 8204 } 8205 8206 static int32_t 8207 ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain) 8208 { 8209 int32_t rc = OCS_HW_RTN_ERROR; 8210 uint16_t fcfi = UINT16_MAX; 8211 8212 if ((hw == NULL) || (domain == NULL)) { 8213 ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n", 8214 hw, domain); 8215 return OCS_HW_RTN_ERROR; 8216 } 8217 8218 fcfi = domain->fcf_indicator; 8219 8220 if (fcfi < SLI4_MAX_FCFI) { 8221 uint16_t fcf_index = UINT16_MAX; 8222 8223 ocs_log_debug(hw->os, "adding domain %p @ %#x\n", 8224 domain, fcfi); 8225 hw->domains[fcfi] = domain; 8226 8227 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ 8228 if (hw->workaround.override_fcfi) { 8229 if (hw->first_domain_idx < 0) { 8230 hw->first_domain_idx = fcfi; 8231 } 8232 } 8233 8234 fcf_index = domain->fcf; 8235 8236 if (fcf_index < SLI4_MAX_FCF_INDEX) { 8237 ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n", 8238 fcf_index, fcfi); 8239 hw->fcf_index_fcfi[fcf_index] = fcfi; 8240 rc = OCS_HW_RTN_SUCCESS; 8241 } else { 8242 ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n", 8243 fcf_index, SLI4_MAX_FCF_INDEX); 8244 hw->domains[fcfi] = NULL; 8245 } 8246 } else { 8247 ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n", 8248 fcfi, SLI4_MAX_FCFI); 8249 } 8250 8251 return rc; 8252 } 8253 8254 static int32_t 8255 ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain) 8256 { 8257 int32_t rc = OCS_HW_RTN_ERROR; 8258 uint16_t fcfi = UINT16_MAX; 8259 8260 if ((hw == NULL) || (domain == NULL)) { 8261 ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n", 8262 hw, domain); 8263 return OCS_HW_RTN_ERROR; 8264 } 8265 8266 fcfi = domain->fcf_indicator; 8267 8268 if (fcfi < SLI4_MAX_FCFI) { 8269 uint16_t fcf_index = UINT16_MAX; 8270 8271 ocs_log_debug(hw->os, "deleting domain %p @ %#x\n", 8272 domain, fcfi); 8273 8274 if (domain != hw->domains[fcfi]) { 8275 ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n", 8276 domain, hw->domains[fcfi]); 8277 return OCS_HW_RTN_ERROR; 8278 } 8279 8280 hw->domains[fcfi] = NULL; 8281 8282 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */ 8283 if (hw->workaround.override_fcfi) { 8284 if (hw->first_domain_idx == fcfi) { 8285 hw->first_domain_idx = -1; 8286 } 8287 } 8288 8289 fcf_index = domain->fcf; 8290 8291 if (fcf_index < SLI4_MAX_FCF_INDEX) { 8292 if (hw->fcf_index_fcfi[fcf_index] == fcfi) { 8293 hw->fcf_index_fcfi[fcf_index] = 0; 8294 rc = OCS_HW_RTN_SUCCESS; 8295 } else { 8296 ocs_log_test(hw->os, "indexed FCFI %#x 
doesn't match provided %#x @ %d\n",
8297			    hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index);
8298			}
8299		} else {
8300			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8301			    fcf_index, SLI4_MAX_FCF_INDEX);
8302		}
8303	} else {
8304		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8305		    fcfi, SLI4_MAX_FCFI);
8306	}
8307
8308	return rc;
8309 }
8310
8311 ocs_domain_t *
8312 ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi)
8313 {
8314
8315	if (hw == NULL) {
8316		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8317		return NULL;
8318	}
8319
8320	if (fcfi < SLI4_MAX_FCFI) {
8321		return hw->domains[fcfi];
8322	} else {
8323		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8324		    fcfi, SLI4_MAX_FCFI);
8325		return NULL;
8326	}
8327 }
8328
8329 static ocs_domain_t *
8330 ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index)
8331 {
8332
8333	if (hw == NULL) {
8334		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8335		return NULL;
8336	}
8337
8338	if (fcf_index < SLI4_MAX_FCF_INDEX) {
8339		return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]);
8340	} else {
8341		ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8342		    fcf_index, SLI4_MAX_FCF_INDEX);
8343		return NULL;
8344	}
8345 }
8346
8347 /**
8348  * @brief Quarantine an IO by taking a reference count and adding it to the
8349  * quarantine list. When the IO is popped from the list, the
8350  * count is released and the IO MAY be freed, depending on whether
8351  * it is still referenced elsewhere.
8352  *
8353  * @n @b Note: BZ 160124 - If this is a target write or an initiator read using
8354  * DIF, then we must add the XRI to a quarantine list until we receive
8355  * 4 more completions of this same type.
8356  *
8357  * @param hw Hardware context.
8358  * @param wq Pointer to the WQ associated with the IO object to quarantine.
8359  * @param io Pointer to the IO object to quarantine.
8360  */
8361 static void
8362 ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io)
8363 {
8364	ocs_quarantine_info_t *q_info = &wq->quarantine_info;
8365	uint32_t index;
8366	ocs_hw_io_t *free_io = NULL;

8368	/* return if the QX bit was clear */
8369	if (!io->quarantine) {
8370		return;
8371	}
8372
8373	/* increment the IO refcount to prevent it from being freed before the quarantine is over */
8374	if (ocs_ref_get_unless_zero(&io->ref) == 0) {
8375		/* command no longer active */
8376		ocs_log_debug(hw ? hw->os : NULL,
8377			      "io not active xri=0x%x tag=0x%x\n",
8378			      io->indicator, io->reqtag);
8379		return;
8380	}
8381
8382	sli_queue_lock(wq->queue);
8383		index = q_info->quarantine_index;
8384		free_io = q_info->quarantine_ios[index];
8385		q_info->quarantine_ios[index] = io;
8386		q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH;
8387	sli_queue_unlock(wq->queue);
8388
8389	if (free_io != NULL) {
8390		ocs_ref_put(&free_io->ref); /* ocs_ref_get(): same function */
8391	}
8392 }
8393
8394 /**
8395  * @brief Process entries on the given completion queue.
8396  *
8397  * @param hw Hardware context.
8398  * @param cq Pointer to the HW completion queue object.
8399  *
8400  * @return None.
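 *
 * @par Example
 * A sketch of a typical caller (an assumed shape of the EQ dispatch path,
 * not a definitive implementation): once the EQ handler has parsed a CQ ID
 * out of an event queue entry, it resolves the CQ through the queue hash
 * and processes it:
 * @code
 * int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
 *
 * if (index >= 0) {
 *         ocs_hw_cq_process(hw, hw->hw_cq[index]);
 * }
 * @endcode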
8401 */ 8402 void 8403 ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq) 8404 { 8405 uint8_t cqe[sizeof(sli4_mcqe_t)]; 8406 uint16_t rid = UINT16_MAX; 8407 sli4_qentry_e ctype; /* completion type */ 8408 int32_t status; 8409 uint32_t n_processed = 0; 8410 time_t tstart; 8411 time_t telapsed; 8412 8413 tstart = ocs_msectime(); 8414 8415 while (!sli_queue_read(&hw->sli, cq->queue, cqe)) { 8416 status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid); 8417 /* 8418 * The sign of status is significant. If status is: 8419 * == 0 : call completed correctly and the CQE indicated success 8420 * > 0 : call completed correctly and the CQE indicated an error 8421 * < 0 : call failed and no information is available about the CQE 8422 */ 8423 if (status < 0) { 8424 if (status == -2) { 8425 /* Notification that an entry was consumed, but not completed */ 8426 continue; 8427 } 8428 8429 break; 8430 } 8431 8432 switch (ctype) { 8433 case SLI_QENTRY_ASYNC: 8434 CPUTRACE("async"); 8435 sli_cqe_async(&hw->sli, cqe); 8436 break; 8437 case SLI_QENTRY_MQ: 8438 /* 8439 * Process MQ entry. Note there is no way to determine 8440 * the MQ_ID from the completion entry. 8441 */ 8442 CPUTRACE("mq"); 8443 ocs_hw_mq_process(hw, status, hw->mq); 8444 break; 8445 case SLI_QENTRY_OPT_WRITE_CMD: 8446 ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe); 8447 break; 8448 case SLI_QENTRY_OPT_WRITE_DATA: 8449 ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe); 8450 break; 8451 case SLI_QENTRY_WQ: 8452 CPUTRACE("wq"); 8453 ocs_hw_wq_process(hw, cq, cqe, status, rid); 8454 break; 8455 case SLI_QENTRY_WQ_RELEASE: { 8456 uint32_t wq_id = rid; 8457 int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id); 8458 8459 if (unlikely(index < 0)) { 8460 ocs_log_err(hw->os, "unknown idx=%#x rid=%#x\n", 8461 index, rid); 8462 break; 8463 } 8464 8465 hw_wq_t *wq = hw->hw_wq[index]; 8466 8467 /* Submit any HW IOs that are on the WQ pending list */ 8468 hw_wq_submit_pending(wq, wq->wqec_set_count); 8469 8470 break; 8471 } 8472 8473 case SLI_QENTRY_RQ: 8474 CPUTRACE("rq"); 8475 ocs_hw_rqpair_process_rq(hw, cq, cqe); 8476 break; 8477 case SLI_QENTRY_XABT: { 8478 CPUTRACE("xabt"); 8479 ocs_hw_xabt_process(hw, cq, cqe, rid); 8480 break; 8481 } 8482 default: 8483 ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid); 8484 break; 8485 } 8486 8487 n_processed++; 8488 if (n_processed == cq->queue->proc_limit) { 8489 break; 8490 } 8491 8492 if (cq->queue->n_posted >= (cq->queue->posted_limit)) { 8493 sli_queue_arm(&hw->sli, cq->queue, FALSE); 8494 } 8495 } 8496 8497 sli_queue_arm(&hw->sli, cq->queue, TRUE); 8498 8499 if (n_processed > cq->queue->max_num_processed) { 8500 cq->queue->max_num_processed = n_processed; 8501 } 8502 telapsed = ocs_msectime() - tstart; 8503 if (telapsed > cq->queue->max_process_time) { 8504 cq->queue->max_process_time = telapsed; 8505 } 8506 } 8507 8508 /** 8509 * @brief Process WQ completion queue entries. 8510 * 8511 * @param hw Hardware context. 8512 * @param cq Pointer to the HW completion queue object. 8513 * @param cqe Pointer to WQ completion queue. 8514 * @param status Completion status. 8515 * @param rid Resource ID (IO tag). 
8516 * 8517 * @return none 8518 */ 8519 void 8520 ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid) 8521 { 8522 hw_wq_callback_t *wqcb; 8523 8524 ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id, 8525 ((cq->queue->index - 1) & (cq->queue->length - 1))); 8526 8527 if(rid == OCS_HW_REQUE_XRI_REGTAG) { 8528 if(status) { 8529 ocs_log_err(hw->os, "reque xri failed, status = %d \n", status); 8530 } 8531 return; 8532 } 8533 8534 wqcb = ocs_hw_reqtag_get_instance(hw, rid); 8535 if (wqcb == NULL) { 8536 ocs_log_err(hw->os, "invalid request tag: x%x\n", rid); 8537 return; 8538 } 8539 8540 if (wqcb->callback == NULL) { 8541 ocs_log_err(hw->os, "wqcb callback is NULL\n"); 8542 return; 8543 } 8544 8545 (*wqcb->callback)(wqcb->arg, cqe, status); 8546 } 8547 8548 /** 8549 * @brief Process WQ completions for IO requests 8550 * 8551 * @param arg Generic callback argument 8552 * @param cqe Pointer to completion queue entry 8553 * @param status Completion status 8554 * 8555 * @par Description 8556 * @n @b Note: Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized 8557 * in ocs_hw_setup_io(), and don't need to be returned to the hw->wq_reqtag_pool. 8558 * 8559 * @return None. 8560 */ 8561 static void 8562 ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status) 8563 { 8564 ocs_hw_io_t *io = arg; 8565 ocs_hw_t *hw = io->hw; 8566 sli4_fc_wcqe_t *wcqe = (void *)cqe; 8567 uint32_t len = 0; 8568 uint32_t ext = 0; 8569 uint8_t out_of_order_axr_cmd = 0; 8570 uint8_t out_of_order_axr_data = 0; 8571 uint8_t lock_taken = 0; 8572 #if defined(OCS_DISC_SPIN_DELAY) 8573 uint32_t delay = 0; 8574 char prop_buf[32]; 8575 #endif 8576 8577 /* 8578 * For the primary IO, this will also be used for the 8579 * response. So it is important to only set/clear this 8580 * flag on the first data phase of the IO because 8581 * subsequent phases will be done on the secondary XRI. 8582 */ 8583 if (io->quarantine && io->quarantine_first_phase) { 8584 io->quarantine = (wcqe->qx == 1); 8585 ocs_hw_io_quarantine(hw, io->wq, io); 8586 } 8587 io->quarantine_first_phase = FALSE; 8588 8589 /* BZ 161832 - free secondary HW IO */ 8590 if (io->sec_hio != NULL && 8591 io->sec_hio->quarantine) { 8592 /* 8593 * If the quarantine flag is set on the 8594 * IO, then set it on the secondary IO 8595 * based on the quarantine XRI (QX) bit 8596 * sent by the FW. 8597 */ 8598 io->sec_hio->quarantine = (wcqe->qx == 1); 8599 /* use the primary io->wq because it is not set on the secondary IO. 
*/ 8600 ocs_hw_io_quarantine(hw, io->wq, io->sec_hio); 8601 } 8602 8603 ocs_hw_remove_io_timed_wqe(hw, io); 8604 8605 /* clear xbusy flag if WCQE[XB] is clear */ 8606 if (io->xbusy && wcqe->xb == 0) { 8607 io->xbusy = FALSE; 8608 } 8609 8610 /* get extended CQE status */ 8611 switch (io->type) { 8612 case OCS_HW_BLS_ACC: 8613 case OCS_HW_BLS_ACC_SID: 8614 break; 8615 case OCS_HW_ELS_REQ: 8616 sli_fc_els_did(&hw->sli, cqe, &ext); 8617 len = sli_fc_response_length(&hw->sli, cqe); 8618 break; 8619 case OCS_HW_ELS_RSP: 8620 case OCS_HW_ELS_RSP_SID: 8621 case OCS_HW_FC_CT_RSP: 8622 break; 8623 case OCS_HW_FC_CT: 8624 len = sli_fc_response_length(&hw->sli, cqe); 8625 break; 8626 case OCS_HW_IO_TARGET_WRITE: 8627 len = sli_fc_io_length(&hw->sli, cqe); 8628 #if defined(OCS_DISC_SPIN_DELAY) 8629 if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) { 8630 delay = ocs_strtoul(prop_buf, 0, 0); 8631 ocs_udelay(delay); 8632 } 8633 #endif 8634 break; 8635 case OCS_HW_IO_TARGET_READ: 8636 len = sli_fc_io_length(&hw->sli, cqe); 8637 /* 8638 * if_type == 2 seems to return 0 "total length placed" on 8639 * FCP_TSEND64_WQE completions. If this appears to happen, 8640 * use the CTIO data transfer length instead. 8641 */ 8642 if (hw->workaround.retain_tsend_io_length && !len && !status) { 8643 len = io->length; 8644 } 8645 8646 break; 8647 case OCS_HW_IO_TARGET_RSP: 8648 if(io->is_port_owned) { 8649 ocs_lock(&io->axr_lock); 8650 lock_taken = 1; 8651 if(io->axr_buf->call_axr_cmd) { 8652 out_of_order_axr_cmd = 1; 8653 } 8654 if(io->axr_buf->call_axr_data) { 8655 out_of_order_axr_data = 1; 8656 } 8657 } 8658 break; 8659 case OCS_HW_IO_INITIATOR_READ: 8660 len = sli_fc_io_length(&hw->sli, cqe); 8661 break; 8662 case OCS_HW_IO_INITIATOR_WRITE: 8663 len = sli_fc_io_length(&hw->sli, cqe); 8664 break; 8665 case OCS_HW_IO_INITIATOR_NODATA: 8666 break; 8667 case OCS_HW_IO_DNRX_REQUEUE: 8668 /* release the count for re-posting the buffer */ 8669 //ocs_hw_io_free(hw, io); 8670 break; 8671 default: 8672 ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n", 8673 io->type, io->indicator); 8674 break; 8675 } 8676 if (status) { 8677 ext = sli_fc_ext_status(&hw->sli, cqe); 8678 /* Emulate IAAB=0 for initiator WQEs only; i.e. automatically 8679 * abort exchange if an error occurred and exchange is still busy. 8680 */ 8681 if (hw->config.i_only_aab && 8682 (ocs_hw_iotype_is_originator(io->type)) && 8683 (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) { 8684 ocs_hw_rtn_e rc; 8685 8686 ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", 8687 io->indicator, io->reqtag); 8688 /* 8689 * Because the initiator will not issue another IO phase, then it is OK to issue the 8690 * callback on the abort completion, but for consistency with the target, wait for the 8691 * XRI_ABORTED CQE to issue the IO callback. 8692 */ 8693 rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL); 8694 8695 if (rc == OCS_HW_RTN_SUCCESS) { 8696 /* latch status to return after abort is complete */ 8697 io->status_saved = 1; 8698 io->saved_status = status; 8699 io->saved_ext = ext; 8700 io->saved_len = len; 8701 goto exit_ocs_hw_wq_process_io; 8702 } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) { 8703 /* 8704 * Already being aborted by someone else (ABTS 8705 * perhaps). Just fall through and return original 8706 * error. 
8707 */ 8708 ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n", 8709 io->indicator, io->reqtag); 8710 8711 } else { 8712 /* Failed to abort for some other reason, log error */ 8713 ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n", 8714 io->indicator, io->reqtag, rc); 8715 } 8716 } 8717 8718 /* 8719 * If we're not an originator IO, and XB is set, then issue abort for the IO from within the HW 8720 */ 8721 if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) { 8722 ocs_hw_rtn_e rc; 8723 8724 ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag); 8725 8726 /* 8727 * Because targets may send a response when the IO completes using the same XRI, we must 8728 * wait for the XRI_ABORTED CQE to issue the IO callback 8729 */ 8730 rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL); 8731 if (rc == OCS_HW_RTN_SUCCESS) { 8732 /* latch status to return after abort is complete */ 8733 io->status_saved = 1; 8734 io->saved_status = status; 8735 io->saved_ext = ext; 8736 io->saved_len = len; 8737 goto exit_ocs_hw_wq_process_io; 8738 } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) { 8739 /* 8740 * Already being aborted by someone else (ABTS 8741 * perhaps). Just fall through and return original 8742 * error. 8743 */ 8744 ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n", 8745 io->indicator, io->reqtag); 8746 8747 } else { 8748 /* Failed to abort for some other reason, log error */ 8749 ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n", 8750 io->indicator, io->reqtag, rc); 8751 } 8752 } 8753 } 8754 /* BZ 161832 - free secondary HW IO */ 8755 if (io->sec_hio != NULL) { 8756 ocs_hw_io_free(hw, io->sec_hio); 8757 io->sec_hio = NULL; 8758 } 8759 8760 if (io->done != NULL) { 8761 ocs_hw_done_t done = io->done; 8762 void *arg = io->arg; 8763 8764 io->done = NULL; 8765 8766 if (io->status_saved) { 8767 /* use latched status if exists */ 8768 status = io->saved_status; 8769 len = io->saved_len; 8770 ext = io->saved_ext; 8771 io->status_saved = 0; 8772 } 8773 8774 /* Restore default SGL */ 8775 ocs_hw_io_restore_sgl(hw, io); 8776 done(io, io->rnode, len, status, ext, arg); 8777 } 8778 8779 if(out_of_order_axr_cmd) { 8780 /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */ 8781 if (hw->config.bounce) { 8782 fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt; 8783 uint32_t s_id = fc_be24toh(hdr->s_id); 8784 uint32_t d_id = fc_be24toh(hdr->d_id); 8785 uint32_t ox_id = ocs_be16toh(hdr->ox_id); 8786 if (hw->callback.bounce != NULL) { 8787 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id); 8788 } 8789 }else { 8790 hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq); 8791 } 8792 8793 if(out_of_order_axr_data) { 8794 /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */ 8795 if (hw->config.bounce) { 8796 fc_header_t *hdr = io->axr_buf->seq.header->dma.virt; 8797 uint32_t s_id = fc_be24toh(hdr->s_id); 8798 uint32_t d_id = fc_be24toh(hdr->d_id); 8799 uint32_t ox_id = ocs_be16toh(hdr->ox_id); 8800 if (hw->callback.bounce != NULL) { 8801 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id); 8802 } 8803 }else { 8804 hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq); 8805 } 8806 } 8807 } 8808 8809 exit_ocs_hw_wq_process_io: 8810 if(lock_taken) { 8811 ocs_unlock(&io->axr_lock); 8812 } 8813 } 8814 8815 /** 8816 * @brief Process WQ completions for abort requests. 
8817  *
8818  * @param arg Generic callback argument.
8819  * @param cqe Pointer to completion queue entry.
8820  * @param status Completion status.
8821  *
8822  * @return None.
8823  */
8824 static void
8825 ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
8826 {
8827	ocs_hw_io_t *io = arg;
8828	ocs_hw_t *hw = io->hw;
8829	uint32_t ext = 0;
8830	uint32_t len = 0;
8831	hw_wq_callback_t *wqcb;
8832
8833	/*
8834	 * For IOs that were aborted internally, we may need to issue the callback here depending
8835	 * on whether an XRI_ABORTED CQE is expected or not. If the status is Local Reject/No XRI, then
8836	 * issue the callback now.
8837	 */
8838	ext = sli_fc_ext_status(&hw->sli, cqe);
8839	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
8840	    ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
8841	    io->done != NULL) {
8842		ocs_hw_done_t done = io->done;
8843		void *arg = io->arg;
8844
8845		io->done = NULL;
8846
8847		/*
8848		 * Use latched status as this is always saved for an internal abort
8849		 *
8850		 * Note: We won't have both a done and abort_done function, so don't worry about
8851		 * clobbering the len, status and ext fields.
8852		 */
8853		status = io->saved_status;
8854		len = io->saved_len;
8855		ext = io->saved_ext;
8856		io->status_saved = 0;
8857		done(io, io->rnode, len, status, ext, arg);
8858	}
8859
8860	if (io->abort_done != NULL) {
8861		ocs_hw_done_t done = io->abort_done;
8862		void *arg = io->abort_arg;
8863
8864		io->abort_done = NULL;
8865
8866		done(io, io->rnode, len, status, ext, arg);
8867	}
8868	ocs_lock(&hw->io_abort_lock);
8869	/* clear abort bit to indicate abort is complete */
8870	io->abort_in_progress = 0;
8871	ocs_unlock(&hw->io_abort_lock);
8872
8873	/* Free the WQ callback */
8874	ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
8875	wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
8876	ocs_hw_reqtag_free(hw, wqcb);
8877
8878	/*
8879	 * Call ocs_hw_io_free() because this releases the WQ reservation as
8880	 * well as doing the refcount put. Don't duplicate the code here.
8881	 */
8882	(void)ocs_hw_io_free(hw, io);
8883 }
8884
8885 /**
8886  * @brief Process XABT completions.
8887  *
8888  * @param hw Hardware context.
8889  * @param cq Pointer to the HW completion queue object.
8890  * @param cqe Pointer to the completion queue entry.
8891  * @param rid Resource ID (IO tag).
8892  *
8893  *
8894  * @return None.
8895  */
8896 void
8897 ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid)
8898 {
8899	/* search IOs wait free list */
8900	ocs_hw_io_t *io = NULL;
8901
8902	io = ocs_hw_io_lookup(hw, rid);
8903
8904	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
8905			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
8906	if (io == NULL) {
8907		/* IO lookup failure should never happen */
8908		ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid);
8909		return;
8910	}
8911
8912	if (!io->xbusy) {
8913		ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
8914	} else {
8915		/* mark IO as no longer busy */
8916		io->xbusy = FALSE;
8917	}
8918
8919	if (io->is_port_owned) {
8920		ocs_lock(&hw->io_lock);
8921		/* Take a reference so that the callback below does not free the IO before the requeue */
8922		ocs_ref_get(&io->ref);
8923		ocs_unlock(&hw->io_lock);
8924	}
8925
8926	/* For IOs that were aborted internally, we need to issue any pending callback here.
*/ 8927 if (io->done != NULL) { 8928 ocs_hw_done_t done = io->done; 8929 void *arg = io->arg; 8930 8931 /* Use latched status as this is always saved for an internal abort */ 8932 int32_t status = io->saved_status; 8933 uint32_t len = io->saved_len; 8934 uint32_t ext = io->saved_ext; 8935 8936 io->done = NULL; 8937 io->status_saved = 0; 8938 8939 done(io, io->rnode, len, status, ext, arg); 8940 } 8941 8942 /* Check to see if this is a port owned XRI */ 8943 if (io->is_port_owned) { 8944 ocs_lock(&hw->io_lock); 8945 ocs_hw_reque_xri(hw, io); 8946 ocs_unlock(&hw->io_lock); 8947 /* Not handling the reque xri completion, so free the io */ 8948 ocs_hw_io_free(hw, io); 8949 return; 8950 } 8951 8952 ocs_lock(&hw->io_lock); 8953 if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) { 8954 /* if on wait_free list, caller has already freed IO; 8955 * remove from wait_free list and add to free list. 8956 * if on in-use list, already marked as no longer busy; 8957 * just leave there and wait for caller to free. 8958 */ 8959 if (io->state == OCS_HW_IO_STATE_WAIT_FREE) { 8960 io->state = OCS_HW_IO_STATE_FREE; 8961 ocs_list_remove(&hw->io_wait_free, io); 8962 ocs_hw_io_free_move_correct_list(hw, io); 8963 } 8964 } 8965 ocs_unlock(&hw->io_lock); 8966 } 8967 8968 /** 8969 * @brief Adjust the number of WQs and CQs within the HW. 8970 * 8971 * @par Description 8972 * Calculates the number of WQs and associated CQs needed in the HW based on 8973 * the number of IOs. Calculates the starting CQ index for each WQ, RQ and 8974 * MQ. 8975 * 8976 * @param hw Hardware context allocated by the caller. 8977 */ 8978 static void 8979 ocs_hw_adjust_wqs(ocs_hw_t *hw) 8980 { 8981 uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ); 8982 uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ]; 8983 uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ]; 8984 8985 /* 8986 * possibly adjust the size of the WQs so that the CQ is twice as 8987 * big as the WQ to allow for 2 completions per IO. This allows us to 8988 * handle multi-phase as well as aborts. 8989 */ 8990 if (max_cq_entries < max_wq_entries * 2) { 8991 max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2; 8992 } 8993 8994 /* 8995 * Calculate the number of WQs to use based on the number of IOs. 8996 * 8997 * Note: We need to reserve room for aborts which must be sent down 8998 * the same WQ as the IO. So we allocate enough WQ space to 8999 * handle 2 times the number of IOs. Half of the space will be 9000 * used for normal IOs and the other half is reserved for aborts; the expression below rounds the division up to a whole number of WQs. 9001 */ 9002 hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries; 9003 9004 /* 9005 * For performance reasons, it is best to use a minimum of 4 WQs 9006 * for BE3 and Skyhawk. 9007 */ 9008 if (hw->config.n_wq < 4 && 9009 SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) { 9010 hw->config.n_wq = 4; 9011 } 9012 9013 /* 9014 * For dual-chute support, we need to have at least one WQ per chute.
9015 */ 9016 if (hw->config.n_wq < 2 && 9017 ocs_hw_get_num_chutes(hw) > 1) { 9018 hw->config.n_wq = 2; 9019 } 9020 9021 /* make sure we haven't exceeded the max supported in the HW */ 9022 if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) { 9023 hw->config.n_wq = OCS_HW_MAX_NUM_WQ; 9024 } 9025 9026 /* make sure we haven't exceeded the chip maximum */ 9027 if (hw->config.n_wq > max_wq_num) { 9028 hw->config.n_wq = max_wq_num; 9029 } 9030 9031 /* 9032 * When a Queue Topology string is in use, divide the WQ count by the number of chutes 9033 */ 9034 hw->config.n_wq /= ocs_hw_get_num_chutes(hw); 9035 } 9036 9037 static int32_t 9038 ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size) 9039 { 9040 ocs_command_ctx_t *ctx = NULL; 9041 9042 ocs_lock(&hw->cmd_lock); 9043 if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) { 9044 ocs_log_err(hw->os, "XXX no command context?!?\n"); 9045 ocs_unlock(&hw->cmd_lock); 9046 return -1; 9047 } 9048 9049 hw->cmd_head_count--; 9050 9051 /* Post any pending requests */ 9052 ocs_hw_cmd_submit_pending(hw); 9053 9054 ocs_unlock(&hw->cmd_lock); 9055 9056 if (ctx->cb) { 9057 if (ctx->buf) { 9058 ocs_memcpy(ctx->buf, mqe, size); 9059 } 9060 ctx->cb(hw, status, ctx->buf, ctx->arg); 9061 } 9062 9063 ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t)); 9064 ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t)); 9065 9066 return 0; 9067 } 9068 9069 /** 9070 * @brief Process entries on the given mailbox queue. 9071 * 9072 * @param hw Hardware context. 9073 * @param status CQE status. 9074 * @param mq Pointer to the mailbox queue object. 9075 * 9076 * @return Returns 0 on success, or a non-zero value on failure. 9077 */ 9078 static int32_t 9079 ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq) 9080 { 9081 uint8_t mqe[SLI4_BMBX_SIZE]; 9082 9083 if (!sli_queue_read(&hw->sli, mq, mqe)) { 9084 ocs_hw_command_process(hw, status, mqe, mq->size); 9085 } 9086 9087 return 0; 9088 } 9089 9090 /** 9091 * @brief Read a FCF table entry. 9092 * 9093 * @param hw Hardware context. 9094 * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first 9095 * read and the next_index field from the FCOE_READ_FCF_TABLE command 9096 * for subsequent reads. 9097 * 9098 * @return Returns 0 on success, or a non-zero value on failure. 9099 */ 9100 static ocs_hw_rtn_e 9101 ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index) 9102 { 9103 uint8_t *buf = NULL; 9104 int32_t rc = OCS_HW_RTN_ERROR; 9105 9106 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 9107 if (!buf) { 9108 ocs_log_err(hw->os, "no buffer for command\n"); 9109 return OCS_HW_RTN_NO_MEMORY; 9110 } 9111 9112 if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem, 9113 index)) { 9114 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem); 9115 } 9116 9117 if (rc != OCS_HW_RTN_SUCCESS) { 9118 ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n"); 9119 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 9120 } 9121 9122 return rc; 9123 } 9124 9125 /** 9126 * @brief Callback function for the FCOE_READ_FCF_TABLE command. 9127 * 9128 * @par Description 9129 * Note that the caller has allocated: 9130 * - DMA memory to hold the table contents 9131 * - DMA memory structure 9132 * - Command/results buffer 9133 * . 9134 * Of these, only the command/results buffer is freed here; the table DMA memory (hw->fcf_dmem) is owned by the HW and persists across reads. 9135 * 9136 * @param hw Hardware context. 9137 * @param status Hardware status. 9138 * @param mqe Pointer to the mailbox command/results buffer. 9139 * @param arg Pointer to the DMA memory structure.
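 *
 * @par Example
 * A short sketch of the read/chain pattern this callback completes (both
 * calls are to functions in this file):
 *
 * @code
 * // start walking the FCF table from the beginning...
 * ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
 * // ...this callback then re-arms the read with read_fcf->next_index
 * // until an entry reports SLI4_FCOE_FCF_TABLE_LAST.
 * @endcode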
9140 * 9141 * @return Returns 0 on success, or a non-zero value on failure. 9142 */ 9143 static int32_t 9144 ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 9145 { 9146 ocs_dma_t *dma = arg; 9147 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 9148 9149 if (status || hdr->status) { 9150 ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n", 9151 status, hdr->status); 9152 } else if (dma->virt) { 9153 sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt; 9154 9155 /* if FC or FCOE and FCF entry valid, process it */ 9156 if (read_fcf->fcf_entry.fc || 9157 (read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) { 9158 if (hw->callback.domain != NULL) { 9159 ocs_domain_record_t drec = {0}; 9160 9161 if (read_fcf->fcf_entry.fc) { 9162 /* 9163 * This is a pseudo FCF entry. Create a domain 9164 * record based on the read topology information 9165 */ 9166 drec.speed = hw->link.speed; 9167 drec.fc_id = hw->link.fc_id; 9168 drec.is_fc = TRUE; 9169 if (SLI_LINK_TOPO_LOOP == hw->link.topology) { 9170 drec.is_loop = TRUE; 9171 ocs_memcpy(drec.map.loop, hw->link.loop_map, 9172 sizeof(drec.map.loop)); 9173 } else if (SLI_LINK_TOPO_NPORT == hw->link.topology) { 9174 drec.is_nport = TRUE; 9175 } 9176 } else { 9177 drec.index = read_fcf->fcf_entry.fcf_index; 9178 drec.priority = read_fcf->fcf_entry.fip_priority; 9179 9180 /* copy address, wwn and vlan_bitmap */ 9181 ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address, 9182 sizeof(drec.address)); 9183 ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id, 9184 sizeof(drec.wwn)); 9185 ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap, 9186 sizeof(drec.map.vlan)); 9187 9188 drec.is_ethernet = TRUE; 9189 drec.is_nport = TRUE; 9190 } 9191 9192 hw->callback.domain(hw->args.domain, 9193 OCS_HW_DOMAIN_FOUND, 9194 &drec); 9195 } 9196 } else { 9197 /* if FCOE and FCF is not valid, ignore it */ 9198 ocs_log_test(hw->os, "ignore invalid FCF entry\n"); 9199 } 9200 9201 if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) { 9202 ocs_hw_read_fcf(hw, read_fcf->next_index); 9203 } 9204 } 9205 9206 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 9207 //ocs_dma_free(hw->os, dma); 9208 //ocs_free(hw->os, dma, sizeof(ocs_dma_t)); 9209 9210 return 0; 9211 } 9212 9213 /** 9214 * @brief Callback function for the SLI link events. 9215 * 9216 * @par Description 9217 * This function allocates memory which must be freed in its callback. 9218 * 9219 * @param ctx Hardware context pointer (that is, ocs_hw_t *). 9220 * @param e Event structure pointer (that is, sli4_link_event_t *). 9221 * 9222 * @return Returns 0 on success, or a non-zero value on failure. 
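 *
 * @par Registration sketch
 * Link events ultimately surface to the consumer as domain notifications
 * (OCS_HW_DOMAIN_FOUND/LOST) through the hw callback fields. A minimal
 * sketch, assuming the consumer wires the fields directly (the driver may
 * instead provide a registration helper; my_domain_cb and my_arg are
 * hypothetical):
 *
 * @code
 * hw->callback.domain = my_domain_cb;  // invoked with hw->args.domain
 * hw->args.domain = my_arg;
 * @endcode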
9223 */ 9224 static int32_t 9225 ocs_hw_cb_link(void *ctx, void *e) 9226 { 9227 ocs_hw_t *hw = ctx; 9228 sli4_link_event_t *event = e; 9229 ocs_domain_t *d = NULL; 9230 uint32_t i = 0; 9231 int32_t rc = OCS_HW_RTN_ERROR; 9232 ocs_t *ocs = hw->os; 9233 9234 ocs_hw_link_event_init(hw); 9235 9236 switch (event->status) { 9237 case SLI_LINK_STATUS_UP: 9238 9239 hw->link = *event; 9240 9241 if (SLI_LINK_TOPO_NPORT == event->topology) { 9242 device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed); 9243 ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST); 9244 } else if (SLI_LINK_TOPO_LOOP == event->topology) { 9245 uint8_t *buf = NULL; 9246 device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed); 9247 9248 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 9249 if (!buf) { 9250 ocs_log_err(hw->os, "no buffer for command\n"); 9251 break; 9252 } 9253 9254 if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) { 9255 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL); 9256 } 9257 9258 if (rc != OCS_HW_RTN_SUCCESS) { 9259 ocs_log_test(hw->os, "READ_TOPOLOGY failed\n"); 9260 ocs_free(hw->os, buf, SLI4_BMBX_SIZE); 9261 } 9262 } else { 9263 device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n", 9264 event->topology, event->speed); 9265 } 9266 break; 9267 case SLI_LINK_STATUS_DOWN: 9268 device_printf(ocs->dev, "Link Down\n"); 9269 9270 hw->link.status = event->status; 9271 9272 for (i = 0; i < SLI4_MAX_FCFI; i++) { 9273 d = hw->domains[i]; 9274 if (d != NULL && 9275 hw->callback.domain != NULL) { 9276 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d); 9277 } 9278 } 9279 break; 9280 default: 9281 ocs_log_test(hw->os, "unhandled link status %#x\n", event->status); 9282 break; 9283 } 9284 9285 return 0; 9286 } 9287 9288 static int32_t 9289 ocs_hw_cb_fip(void *ctx, void *e) 9290 { 9291 ocs_hw_t *hw = ctx; 9292 ocs_domain_t *domain = NULL; 9293 sli4_fip_event_t *event = e; 9294 9295 ocs_hw_assert(event); 9296 ocs_hw_assert(hw); 9297 9298 /* Find the associated domain object */ 9299 if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) { 9300 ocs_domain_t *d = NULL; 9301 uint32_t i = 0; 9302 9303 /* Clear VLINK is different from the other FIP events as it passes back 9304 * a VPI instead of a FCF index. 
Check all attached SLI ports for a 9305 * matching VPI */ 9306 for (i = 0; i < SLI4_MAX_FCFI; i++) { 9307 d = hw->domains[i]; 9308 if (d != NULL) { 9309 ocs_sport_t *sport = NULL; 9310 9311 ocs_list_foreach(&d->sport_list, sport) { 9312 if (sport->indicator == event->index) { 9313 domain = d; 9314 break; 9315 } 9316 } 9317 9318 if (domain != NULL) { 9319 break; 9320 } 9321 } 9322 } 9323 } else { 9324 domain = ocs_hw_domain_get_indexed(hw, event->index); 9325 } 9326 9327 switch (event->type) { 9328 case SLI4_FCOE_FIP_FCF_DISCOVERED: 9329 ocs_hw_read_fcf(hw, event->index); 9330 break; 9331 case SLI4_FCOE_FIP_FCF_DEAD: 9332 if (domain != NULL && 9333 hw->callback.domain != NULL) { 9334 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); 9335 } 9336 break; 9337 case SLI4_FCOE_FIP_FCF_CLEAR_VLINK: 9338 if (domain != NULL && 9339 hw->callback.domain != NULL) { 9340 /* 9341 * We will want to issue rediscover FCF when this domain is free'd in order 9342 * to invalidate the FCF table 9343 */ 9344 domain->req_rediscover_fcf = TRUE; 9345 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); 9346 } 9347 break; 9348 case SLI4_FCOE_FIP_FCF_MODIFIED: 9349 if (domain != NULL && 9350 hw->callback.domain != NULL) { 9351 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain); 9352 } 9353 9354 ocs_hw_read_fcf(hw, event->index); 9355 break; 9356 default: 9357 ocs_log_test(hw->os, "unsupported event %#x\n", event->type); 9358 } 9359 9360 return 0; 9361 } 9362 9363 static int32_t 9364 ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 9365 { 9366 ocs_remote_node_t *rnode = arg; 9367 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 9368 ocs_hw_remote_node_event_e evt = 0; 9369 9370 if (status || hdr->status) { 9371 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, 9372 hdr->status); 9373 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1); 9374 rnode->attached = FALSE; 9375 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0); 9376 evt = OCS_HW_NODE_ATTACH_FAIL; 9377 } else { 9378 rnode->attached = TRUE; 9379 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1); 9380 evt = OCS_HW_NODE_ATTACH_OK; 9381 } 9382 9383 if (hw->callback.rnode != NULL) { 9384 hw->callback.rnode(hw->args.rnode, evt, rnode); 9385 } 9386 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 9387 9388 return 0; 9389 } 9390 9391 static int32_t 9392 ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 9393 { 9394 ocs_remote_node_t *rnode = arg; 9395 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 9396 ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL; 9397 int32_t rc = 0; 9398 9399 if (status || hdr->status) { 9400 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, 9401 hdr->status); 9402 9403 /* 9404 * In certain cases, a non-zero MQE status is OK (all must be true): 9405 * - node is attached 9406 * - if High Login Mode is enabled, node is part of a node group 9407 * - status is 0x1400 9408 */ 9409 if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) || 9410 (hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) { 9411 rc = -1; 9412 } 9413 } 9414 9415 if (rc == 0) { 9416 rnode->node_group = FALSE; 9417 rnode->attached = FALSE; 9418 9419 if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) { 9420 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0); 9421 } 9422 9423 evt = OCS_HW_NODE_FREE_OK; 9424 } 9425 9426 if (hw->callback.rnode != NULL) { 9427 
hw->callback.rnode(hw->args.rnode, evt, rnode); 9428 } 9429 9430 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 9431 9432 return rc; 9433 } 9434 9435 static int32_t 9436 ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 9437 { 9438 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 9439 ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL; 9440 int32_t rc = 0; 9441 uint32_t i; 9442 9443 if (status || hdr->status) { 9444 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, 9445 hdr->status); 9446 } else { 9447 evt = OCS_HW_NODE_FREE_ALL_OK; 9448 } 9449 9450 if (evt == OCS_HW_NODE_FREE_ALL_OK) { 9451 for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) { 9452 ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0); 9453 } 9454 9455 if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) { 9456 ocs_log_test(hw->os, "FCOE_RPI free all failure\n"); 9457 rc = -1; 9458 } 9459 } 9460 9461 if (hw->callback.rnode != NULL) { 9462 hw->callback.rnode(hw->args.rnode, evt, NULL); 9463 } 9464 9465 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 9466 9467 return rc; 9468 } 9469 9470 /** 9471 * @brief Initialize the pool of HW IO objects. 9472 * 9473 * @param hw Hardware context. 9474 * 9475 * @return Returns 0 on success, or a non-zero value on failure. 9476 */ 9477 static ocs_hw_rtn_e 9478 ocs_hw_setup_io(ocs_hw_t *hw) 9479 { 9480 uint32_t i = 0; 9481 ocs_hw_io_t *io = NULL; 9482 uintptr_t xfer_virt = 0; 9483 uintptr_t xfer_phys = 0; 9484 uint32_t index; 9485 uint8_t new_alloc = TRUE; 9486 9487 if (NULL == hw->io) { 9488 hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT); 9489 9490 if (NULL == hw->io) { 9491 ocs_log_err(hw->os, "IO pointer memory allocation failed, %d IOs at size %zu\n", 9492 hw->config.n_io, 9493 sizeof(ocs_hw_io_t *)); 9494 return OCS_HW_RTN_NO_MEMORY; 9495 } 9496 for (i = 0; i < hw->config.n_io; i++) { 9497 hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t), 9498 OCS_M_ZERO | OCS_M_NOWAIT); 9499 if (hw->io[i] == NULL) { 9500 ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i); 9501 goto error; 9502 } 9503 } 9504 9505 /* Create WQE buffs for IO */ 9506 hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size, 9507 OCS_M_ZERO | OCS_M_NOWAIT); 9508 if (NULL == hw->wqe_buffs) { 9509 ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *)); 9510 ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d IOs at size %zu\n", 9511 __func__, hw->config.n_io, hw->sli.config.wqe_size); 9512 return OCS_HW_RTN_NO_MEMORY; 9513 } 9514 9515 } else { 9516 /* re-use existing IOs, including SGLs */ 9517 new_alloc = FALSE; 9518 } 9519 9520 if (new_alloc) { 9521 if (ocs_dma_alloc(hw->os, &hw->xfer_rdy, 9522 sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io, 9523 4/*XXX what does this need to be?
*/)) { 9524 ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n"); 9525 return OCS_HW_RTN_NO_MEMORY; 9526 } 9527 } 9528 xfer_virt = (uintptr_t)hw->xfer_rdy.virt; 9529 xfer_phys = hw->xfer_rdy.phys; 9530 9531 for (i = 0; i < hw->config.n_io; i++) { 9532 hw_wq_callback_t *wqcb; 9533 9534 io = hw->io[i]; 9535 9536 /* initialize IO fields */ 9537 io->hw = hw; 9538 9539 /* Assign a WQE buff */ 9540 io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size]; 9541 9542 /* Allocate the request tag for this IO */ 9543 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io); 9544 if (wqcb == NULL) { 9545 ocs_log_err(hw->os, "can't allocate request tag\n"); 9546 return OCS_HW_RTN_NO_RESOURCES; 9547 } 9548 io->reqtag = wqcb->instance_index; 9549 9550 /* Now for the fields that are initialized on each free */ 9551 ocs_hw_init_free_io(io); 9552 9553 /* The XB flag isn't cleared on IO free, so initialize it to zero here */ 9554 io->xbusy = 0; 9555 9556 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) { 9557 ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i); 9558 return OCS_HW_RTN_NO_MEMORY; 9559 } 9560 9561 if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) { 9562 ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i); 9563 ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t)); 9564 return OCS_HW_RTN_NO_MEMORY; 9565 } 9566 io->def_sgl_count = hw->config.n_sgl; 9567 io->sgl = &io->def_sgl; 9568 io->sgl_count = io->def_sgl_count; 9569 9570 if (hw->xfer_rdy.size) { 9571 io->xfer_rdy.virt = (void *)xfer_virt; 9572 io->xfer_rdy.phys = xfer_phys; 9573 io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t); 9574 9575 xfer_virt += sizeof(fcp_xfer_rdy_iu_t); 9576 xfer_phys += sizeof(fcp_xfer_rdy_iu_t); 9577 } 9578 } 9579 9580 return OCS_HW_RTN_SUCCESS; 9581 error: 9582 for (i = 0; i < hw->config.n_io && hw->io[i]; i++) { 9583 ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t)); 9584 hw->io[i] = NULL; 9585 } 9586 9587 return OCS_HW_RTN_NO_MEMORY; 9588 } 9589 9590 static ocs_hw_rtn_e 9591 ocs_hw_init_io(ocs_hw_t *hw) 9592 { 9593 uint32_t i = 0, io_index = 0; 9594 uint32_t prereg = 0; 9595 ocs_hw_io_t *io = NULL; 9596 uint8_t cmd[SLI4_BMBX_SIZE]; 9597 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; 9598 uint32_t nremaining; 9599 uint32_t n = 0; 9600 uint32_t sgls_per_request = 256; 9601 ocs_dma_t **sgls = NULL; 9602 ocs_dma_t reqbuf = { 0 }; 9603 9604 prereg = sli_get_sgl_preregister(&hw->sli); 9605 9606 if (prereg) { 9607 sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT); 9608 if (sgls == NULL) { 9609 ocs_log_err(hw->os, "ocs_malloc sgls failed\n"); 9610 return OCS_HW_RTN_NO_MEMORY; 9611 } 9612 9613 rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT); 9614 if (rc) { 9615 ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n"); 9616 ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request); 9617 return OCS_HW_RTN_NO_MEMORY; 9618 } 9619 } 9620 9621 io = hw->io[io_index]; 9622 for (nremaining = hw->config.n_io; nremaining; nremaining -= n) { 9623 if (prereg) { 9624 /* Copy address of SGL's into local sgls[] array, break out if the xri 9625 * is not contiguous. 
9626 */ 9627 for (n = 0; n < MIN(sgls_per_request, nremaining); n++) { 9628 /* Check that we have contiguous xri values */ 9629 if (n > 0) { 9630 if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) { 9631 break; 9632 } 9633 } 9634 sgls[n] = hw->io[io_index + n]->sgl; 9635 } 9636 9637 if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd), 9638 io->indicator, n, sgls, NULL, &reqbuf)) { 9639 if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) { 9640 rc = OCS_HW_RTN_ERROR; 9641 ocs_log_err(hw->os, "SGL post failed\n"); 9642 break; 9643 } 9644 } 9645 } else { 9646 n = nremaining; 9647 } 9648 9649 /* Add to tail if successful */ 9650 for (i = 0; i < n; i++) { 9651 io->is_port_owned = 0; 9652 io->state = OCS_HW_IO_STATE_FREE; 9653 ocs_list_add_tail(&hw->io_free, io); 9654 io = hw->io[io_index+1]; 9655 io_index++; 9656 } 9657 } 9658 9659 if (prereg) { 9660 ocs_dma_free(hw->os, &reqbuf); 9661 ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request); 9662 } 9663 9664 return rc; 9665 } 9666 9667 static int32_t 9668 ocs_hw_flush(ocs_hw_t *hw) 9669 { 9670 uint32_t i = 0; 9671 9672 /* Process any remaining completions */ 9673 for (i = 0; i < hw->eq_count; i++) { 9674 ocs_hw_process(hw, i, ~0); 9675 } 9676 9677 return 0; 9678 } 9679 9680 static int32_t 9681 ocs_hw_command_cancel(ocs_hw_t *hw) 9682 { 9683 9684 ocs_lock(&hw->cmd_lock); 9685 9686 /* 9687 * Manually clean up remaining commands. Note: since this calls 9688 * ocs_hw_command_process(), we'll also process the cmd_pending 9689 * list, so no need to manually clean that out. 9690 */ 9691 while (!ocs_list_empty(&hw->cmd_head)) { 9692 uint8_t mqe[SLI4_BMBX_SIZE] = { 0 }; 9693 ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head); 9694 9695 ocs_log_test(hw->os, "hung command %08x\n", 9696 NULL == ctx ? UINT32_MAX : 9697 (NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf))); 9698 ocs_unlock(&hw->cmd_lock); 9699 ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE); 9700 ocs_lock(&hw->cmd_lock); 9701 } 9702 9703 ocs_unlock(&hw->cmd_lock); 9704 9705 return 0; 9706 } 9707 9708 /** 9709 * @brief Find IO given indicator (xri). 9710 * 9711 * @param hw HW context. 9712 * @param xri XRI (indicator) to look for. 9713 * 9714 * @return Returns io if found, NULL otherwise. 9715 */ 9716 ocs_hw_io_t * 9717 ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri) 9718 { 9719 uint32_t ioindex; 9720 ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0]; 9721 return hw->io[ioindex]; 9722 } 9723 9724 /** 9725 * @brief Issue any pending callbacks for an IO and remove it from the timer and pending lists. 9726 * 9727 * @param hw HW context. 9728 * @param io Pointer to the IO to cleanup.
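 *
 * @note The done/abort_done callbacks are invoked with hw->io_lock dropped
 * and re-taken around each call (see the unlock/lock pairs below),
 * presumably so a handler may call back into the HW without deadlocking on
 * io_lock.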
9729 */ 9730 static void 9731 ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io) 9732 { 9733 ocs_hw_done_t done = io->done; 9734 ocs_hw_done_t abort_done = io->abort_done; 9735 9736 /* first check the timed WQE list and remove the IO if there */ 9737 if (ocs_list_on_list(&io->wqe_link)) { 9738 ocs_list_remove(&hw->io_timed_wqe, io); 9739 } 9740 9741 /* Remove from WQ pending list */ 9742 if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) { 9743 ocs_list_remove(&io->wq->pending_list, io); 9744 } 9745 9746 if (io->done) { 9747 void *arg = io->arg; 9748 9749 io->done = NULL; 9750 ocs_unlock(&hw->io_lock); 9751 done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg); 9752 ocs_lock(&hw->io_lock); 9753 } 9754 9755 if (io->abort_done != NULL) { 9756 void *abort_arg = io->abort_arg; 9757 9758 io->abort_done = NULL; 9759 ocs_unlock(&hw->io_lock); 9760 abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg); 9761 ocs_lock(&hw->io_lock); 9762 } 9763 } 9764 9765 static int32_t 9766 ocs_hw_io_cancel(ocs_hw_t *hw) 9767 { 9768 ocs_hw_io_t *io = NULL; 9769 ocs_hw_io_t *tmp_io = NULL; 9770 uint32_t iters = 100; /* One second limit */ 9771 9772 /* 9773 * Manually clean up outstanding IO. 9774 * Only walk through list once: the backend will clean up any IOs when done/abort_done is called. 9775 */ 9776 ocs_lock(&hw->io_lock); 9777 ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) { 9778 ocs_hw_done_t done = io->done; 9779 ocs_hw_done_t abort_done = io->abort_done; 9780 9781 ocs_hw_io_cancel_cleanup(hw, io); 9782 9783 /* 9784 * Since this is called in a reset/shutdown 9785 * case, if there is no callback, then just 9786 * free the IO. 9787 * 9788 * Note: A port owned XRI cannot be on 9789 * the in use list. We cannot call 9790 * ocs_hw_io_free() because we already 9791 * hold the io_lock. 9792 */ 9793 if (done == NULL && 9794 abort_done == NULL) { 9795 /* 9796 * No callbacks are registered, so just 9797 * free the IO directly (see the note 9798 * above). 9799 */ 9800 ocs_hw_io_free_common(hw, io); 9801 ocs_list_remove(&hw->io_inuse, io); 9802 ocs_hw_io_free_move_correct_list(hw, io); 9803 } 9804 } 9805 9806 /* 9807 * For port owned XRIs, they are not on the in use list, so 9808 * walk through XRIs and issue any callbacks. 9809 */ 9810 ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) { 9811 /* check list and remove if there */ 9812 if (ocs_list_on_list(&io->dnrx_link)) { 9813 ocs_list_remove(&hw->io_port_dnrx, io); 9814 ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */ 9815 } 9816 ocs_hw_io_cancel_cleanup(hw, io); 9817 ocs_list_remove(&hw->io_port_owned, io); 9818 ocs_hw_io_free_common(hw, io); 9819 } 9820 ocs_unlock(&hw->io_lock); 9821 9822 /* Give time for the callbacks to complete */ 9823 do { 9824 ocs_udelay(10000); 9825 iters--; 9826 } while (!ocs_list_empty(&hw->io_inuse) && iters); 9827 9828 /* Leave a breadcrumb that cleanup is not yet complete.
*/ 9829 if (!ocs_list_empty(&hw->io_inuse)) { 9830 ocs_log_test(hw->os, "io_inuse list is not empty\n"); 9831 } 9832 9833 return 0; 9834 } 9835 9836 static int32_t 9837 ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size, 9838 ocs_dma_t *rsp) 9839 { 9840 sli4_sge_t *data = NULL; 9841 9842 if (!hw || !io) { 9843 ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io); 9844 return OCS_HW_RTN_ERROR; 9845 } 9846 9847 data = io->def_sgl.virt; 9848 9849 /* setup command pointer */ 9850 data->buffer_address_high = ocs_addr32_hi(cmnd->phys); 9851 data->buffer_address_low = ocs_addr32_lo(cmnd->phys); 9852 data->buffer_length = cmnd_size; 9853 data++; 9854 9855 /* setup response pointer */ 9856 data->buffer_address_high = ocs_addr32_hi(rsp->phys); 9857 data->buffer_address_low = ocs_addr32_lo(rsp->phys); 9858 data->buffer_length = rsp->size; 9859 9860 return 0; 9861 } 9862 9863 static int32_t 9864 __ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 9865 { 9866 sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe; 9867 9868 if (status || read_topo->hdr.status) { 9869 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", 9870 status, read_topo->hdr.status); 9871 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 9872 return -1; 9873 } 9874 9875 switch (read_topo->attention_type) { 9876 case SLI4_READ_TOPOLOGY_LINK_UP: 9877 hw->link.status = SLI_LINK_STATUS_UP; 9878 break; 9879 case SLI4_READ_TOPOLOGY_LINK_DOWN: 9880 hw->link.status = SLI_LINK_STATUS_DOWN; 9881 break; 9882 case SLI4_READ_TOPOLOGY_LINK_NO_ALPA: 9883 hw->link.status = SLI_LINK_STATUS_NO_ALPA; 9884 break; 9885 default: 9886 hw->link.status = SLI_LINK_STATUS_MAX; 9887 break; 9888 } 9889 9890 switch (read_topo->topology) { 9891 case SLI4_READ_TOPOLOGY_NPORT: 9892 hw->link.topology = SLI_LINK_TOPO_NPORT; 9893 break; 9894 case SLI4_READ_TOPOLOGY_FC_AL: 9895 hw->link.topology = SLI_LINK_TOPO_LOOP; 9896 if (SLI_LINK_STATUS_UP == hw->link.status) { 9897 hw->link.loop_map = hw->loop_map.virt; 9898 } 9899 hw->link.fc_id = read_topo->acquired_al_pa; 9900 break; 9901 default: 9902 hw->link.topology = SLI_LINK_TOPO_MAX; 9903 break; 9904 } 9905 9906 hw->link.medium = SLI_LINK_MEDIUM_FC; 9907 9908 switch (read_topo->link_current.link_speed) { 9909 case SLI4_READ_TOPOLOGY_SPEED_1G: 9910 hw->link.speed = 1 * 1000; 9911 break; 9912 case SLI4_READ_TOPOLOGY_SPEED_2G: 9913 hw->link.speed = 2 * 1000; 9914 break; 9915 case SLI4_READ_TOPOLOGY_SPEED_4G: 9916 hw->link.speed = 4 * 1000; 9917 break; 9918 case SLI4_READ_TOPOLOGY_SPEED_8G: 9919 hw->link.speed = 8 * 1000; 9920 break; 9921 case SLI4_READ_TOPOLOGY_SPEED_16G: 9922 hw->link.speed = 16 * 1000; 9923 hw->link.loop_map = NULL; 9924 break; 9925 case SLI4_READ_TOPOLOGY_SPEED_32G: 9926 hw->link.speed = 32 * 1000; 9927 hw->link.loop_map = NULL; 9928 break; 9929 } 9930 9931 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE); 9932 9933 ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST); 9934 9935 return 0; 9936 } 9937 9938 static int32_t 9939 __ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 9940 { 9941 ocs_sli_port_t *sport = ctx->app; 9942 ocs_hw_t *hw = sport->hw; 9943 9944 smtrace("port"); 9945 9946 switch (evt) { 9947 case OCS_EVT_EXIT: 9948 /* ignore */ 9949 break; 9950 9951 case OCS_EVT_HW_PORT_REQ_FREE: 9952 case OCS_EVT_HW_PORT_REQ_ATTACH: 9953 if (data != NULL) { 9954 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 9955 } 9956 /* fall through */ 9957 default: 9958 ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, 
ocs_sm_event_name(evt)); 9959 break; 9960 } 9961 9962 return 0; 9963 } 9964 9965 static void * 9966 __ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 9967 { 9968 ocs_sli_port_t *sport = ctx->app; 9969 ocs_hw_t *hw = sport->hw; 9970 9971 smtrace("port"); 9972 9973 switch (evt) { 9974 case OCS_EVT_ENTER: 9975 if (data != NULL) { 9976 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 9977 } 9978 if (hw->callback.port != NULL) { 9979 hw->callback.port(hw->args.port, 9980 OCS_HW_PORT_FREE_FAIL, sport); 9981 } 9982 break; 9983 default: 9984 break; 9985 } 9986 9987 return NULL; 9988 } 9989 9990 static void * 9991 __ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 9992 { 9993 ocs_sli_port_t *sport = ctx->app; 9994 ocs_hw_t *hw = sport->hw; 9995 9996 smtrace("port"); 9997 9998 switch (evt) { 9999 case OCS_EVT_ENTER: 10000 /* free SLI resource */ 10001 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) { 10002 ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id); 10003 } 10004 10005 /* free mailbox buffer */ 10006 if (data != NULL) { 10007 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10008 } 10009 if (hw->callback.port != NULL) { 10010 hw->callback.port(hw->args.port, 10011 OCS_HW_PORT_FREE_OK, sport); 10012 } 10013 break; 10014 default: 10015 break; 10016 } 10017 10018 return NULL; 10019 } 10020 10021 static void * 10022 __ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10023 { 10024 ocs_sli_port_t *sport = ctx->app; 10025 ocs_hw_t *hw = sport->hw; 10026 10027 smtrace("port"); 10028 10029 switch (evt) { 10030 case OCS_EVT_ENTER: 10031 /* free SLI resource */ 10032 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); 10033 10034 /* free mailbox buffer */ 10035 if (data != NULL) { 10036 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10037 } 10038 10039 if (hw->callback.port != NULL) { 10040 hw->callback.port(hw->args.port, 10041 OCS_HW_PORT_ATTACH_FAIL, sport); 10042 } 10043 if (sport->sm_free_req_pending) { 10044 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10045 } 10046 break; 10047 default: 10048 __ocs_hw_port_common(__func__, ctx, evt, data); 10049 break; 10050 } 10051 10052 return NULL; 10053 } 10054 10055 static void * 10056 __ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10057 { 10058 ocs_sli_port_t *sport = ctx->app; 10059 ocs_hw_t *hw = sport->hw; 10060 uint8_t *cmd = NULL; 10061 10062 smtrace("port"); 10063 10064 switch (evt) { 10065 case OCS_EVT_ENTER: 10066 /* allocate memory and send unreg_vpi */ 10067 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 10068 if (!cmd) { 10069 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10070 break; 10071 } 10072 10073 if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator, 10074 SLI4_UNREG_TYPE_PORT)) { 10075 ocs_log_err(hw->os, "UNREG_VPI format failure\n"); 10076 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); 10077 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10078 break; 10079 } 10080 10081 if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { 10082 ocs_log_err(hw->os, "UNREG_VPI command failure\n"); 10083 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE); 10084 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10085 break; 10086 } 10087 break; 10088 case OCS_EVT_RESPONSE: 10089 ocs_sm_transition(ctx, __ocs_hw_port_freed, data); 10090 break; 10091 case OCS_EVT_ERROR: 10092 ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data); 10093 break; 10094 default: 
10095 __ocs_hw_port_common(__func__, ctx, evt, data); 10096 break; 10097 } 10098 10099 return NULL; 10100 } 10101 10102 static void * 10103 __ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10104 { 10105 ocs_sli_port_t *sport = ctx->app; 10106 ocs_hw_t *hw = sport->hw; 10107 10108 smtrace("port"); 10109 10110 switch (evt) { 10111 case OCS_EVT_ENTER: 10112 /* Forward to execute in mailbox completion processing context */ 10113 if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) { 10114 ocs_log_err(hw->os, "ocs_hw_async_call failed\n"); 10115 } 10116 break; 10117 case OCS_EVT_RESPONSE: 10118 ocs_sm_transition(ctx, __ocs_hw_port_freed, data); 10119 break; 10120 case OCS_EVT_ERROR: 10121 ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data); 10122 break; 10123 default: 10124 break; 10125 } 10126 10127 return NULL; 10128 } 10129 10130 static void * 10131 __ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10132 { 10133 ocs_sli_port_t *sport = ctx->app; 10134 ocs_hw_t *hw = sport->hw; 10135 10136 smtrace("port"); 10137 10138 switch (evt) { 10139 case OCS_EVT_ENTER: 10140 if (data != NULL) { 10141 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10142 } 10143 if (hw->callback.port != NULL) { 10144 hw->callback.port(hw->args.port, 10145 OCS_HW_PORT_ATTACH_OK, sport); 10146 } 10147 if (sport->sm_free_req_pending) { 10148 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10149 } 10150 break; 10151 case OCS_EVT_HW_PORT_REQ_FREE: 10152 /* virtual/physical port request free */ 10153 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10154 break; 10155 default: 10156 __ocs_hw_port_common(__func__, ctx, evt, data); 10157 break; 10158 } 10159 10160 return NULL; 10161 } 10162 10163 static void * 10164 __ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10165 { 10166 ocs_sli_port_t *sport = ctx->app; 10167 ocs_hw_t *hw = sport->hw; 10168 10169 smtrace("port"); 10170 10171 switch (evt) { 10172 case OCS_EVT_ENTER: 10173 if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) { 10174 ocs_log_err(hw->os, "REG_VPI format failure\n"); 10175 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10176 break; 10177 } 10178 10179 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { 10180 ocs_log_err(hw->os, "REG_VPI command failure\n"); 10181 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10182 break; 10183 } 10184 break; 10185 case OCS_EVT_RESPONSE: 10186 ocs_sm_transition(ctx, __ocs_hw_port_attached, data); 10187 break; 10188 case OCS_EVT_ERROR: 10189 ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data); 10190 break; 10191 case OCS_EVT_HW_PORT_REQ_FREE: 10192 /* Wait for attach response and then free */ 10193 sport->sm_free_req_pending = 1; 10194 break; 10195 default: 10196 __ocs_hw_port_common(__func__, ctx, evt, data); 10197 break; 10198 } 10199 10200 return NULL; 10201 } 10202 10203 static void * 10204 __ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10205 { 10206 ocs_sli_port_t *sport = ctx->app; 10207 ocs_hw_t *hw = sport->hw; 10208 10209 smtrace("port"); 10210 10211 switch (evt) { 10212 case OCS_EVT_ENTER: 10213 /* free SLI resource */ 10214 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); 10215 10216 /* free mailbox buffer */ 10217 if (data != NULL) { 10218 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10219 } 10220 break; 10221 default: 10222 __ocs_hw_port_common(__func__, ctx, evt, data); 10223 break; 10224 } 10225 10226 return NULL; 10227 } 
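/*
 * Usage sketch (illustrative only): the port state machine above and below
 * is driven by posting events to sport->ctx. The event names are from this
 * file; the call site and the mbxdata buffer are hypothetical:
 *
 *	// request an attach once the port is allocated
 *	// (mbxdata is a SLI4_BMBX_SIZE mailbox buffer)
 *	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, mbxdata);
 *	...
 *	// request a free; handled even while an attach is still in flight
 *	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
 *
 * __ocs_hw_port_allocated() below consumes these events and transitions to
 * __ocs_hw_port_attach_reg_vpi() or __ocs_hw_port_free_unreg_vpi().
 */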
10228 10229 static void * 10230 __ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10231 { 10232 ocs_sli_port_t *sport = ctx->app; 10233 ocs_hw_t *hw = sport->hw; 10234 10235 smtrace("port"); 10236 10237 switch (evt) { 10238 case OCS_EVT_ENTER: 10239 if (data != NULL) { 10240 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10241 } 10242 if (hw->callback.port != NULL) { 10243 hw->callback.port(hw->args.port, 10244 OCS_HW_PORT_ALLOC_OK, sport); 10245 } 10246 /* If there is a pending free request, then handle it now */ 10247 if (sport->sm_free_req_pending) { 10248 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10249 } 10250 break; 10251 case OCS_EVT_HW_PORT_REQ_ATTACH: 10252 /* virtual port requests attach */ 10253 ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data); 10254 break; 10255 case OCS_EVT_HW_PORT_ATTACH_OK: 10256 /* physical port attached (as part of attaching domain) */ 10257 ocs_sm_transition(ctx, __ocs_hw_port_attached, data); 10258 break; 10259 case OCS_EVT_HW_PORT_REQ_FREE: 10260 /* virtual port request free */ 10261 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) { 10262 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10263 } else { 10264 /* 10265 * Note: BE3/Skyhawk will respond with a status of 0x20 10266 * unless the reg_vpi has been issued, so we can 10267 * skip the unreg_vpi for these adapters. 10268 * 10269 * Send a nop to make sure that free doesn't occur in 10270 * same context 10271 */ 10272 ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL); 10273 } 10274 break; 10275 default: 10276 __ocs_hw_port_common(__func__, ctx, evt, data); 10277 break; 10278 } 10279 10280 return NULL; 10281 } 10282 10283 static void * 10284 __ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10285 { 10286 ocs_sli_port_t *sport = ctx->app; 10287 ocs_hw_t *hw = sport->hw; 10288 10289 smtrace("port"); 10290 10291 switch (evt) { 10292 case OCS_EVT_ENTER: 10293 /* free SLI resource */ 10294 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator); 10295 10296 /* free mailbox buffer */ 10297 if (data != NULL) { 10298 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10299 } 10300 10301 if (hw->callback.port != NULL) { 10302 hw->callback.port(hw->args.port, 10303 OCS_HW_PORT_ALLOC_FAIL, sport); 10304 } 10305 10306 /* If there is a pending free request, then handle it now */ 10307 if (sport->sm_free_req_pending) { 10308 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL); 10309 } 10310 break; 10311 default: 10312 __ocs_hw_port_common(__func__, ctx, evt, data); 10313 break; 10314 } 10315 10316 return NULL; 10317 } 10318 10319 static void * 10320 __ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10321 { 10322 ocs_sli_port_t *sport = ctx->app; 10323 ocs_hw_t *hw = sport->hw; 10324 uint8_t *payload = NULL; 10325 10326 smtrace("port"); 10327 10328 switch (evt) { 10329 case OCS_EVT_ENTER: 10330 /* allocate memory for the service parameters */ 10331 if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) { 10332 ocs_log_err(hw->os, "Failed to allocate DMA memory\n"); 10333 ocs_sm_transition(ctx, __ocs_hw_port_done, data); 10334 break; 10335 } 10336 10337 if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE, 10338 &sport->dma, sport->indicator)) { 10339 ocs_log_err(hw->os, "READ_SPARM64 allocation failure\n"); 10340 ocs_dma_free(hw->os, &sport->dma); 10341 ocs_sm_transition(ctx, __ocs_hw_port_done, data); 10342 break; 10343 } 10344 10345 if (ocs_hw_command(hw, data, 
OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { 10346 ocs_log_err(hw->os, "READ_SPARM64 command failure\n"); 10347 ocs_dma_free(hw->os, &sport->dma); 10348 ocs_sm_transition(ctx, __ocs_hw_port_done, data); 10349 break; 10350 } 10351 break; 10352 case OCS_EVT_RESPONSE: 10353 payload = sport->dma.virt; 10354 10355 ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload); 10356 10357 ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, 10358 sizeof(sport->sli_wwpn)); 10359 ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, 10360 sizeof(sport->sli_wwnn)); 10361 10362 ocs_dma_free(hw->os, &sport->dma); 10363 ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data); 10364 break; 10365 case OCS_EVT_ERROR: 10366 ocs_dma_free(hw->os, &sport->dma); 10367 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data); 10368 break; 10369 case OCS_EVT_HW_PORT_REQ_FREE: 10370 /* Wait for attach response and then free */ 10371 sport->sm_free_req_pending = 1; 10372 break; 10373 case OCS_EVT_EXIT: 10374 break; 10375 default: 10376 __ocs_hw_port_common(__func__, ctx, evt, data); 10377 break; 10378 } 10379 10380 return NULL; 10381 } 10382 10383 static void * 10384 __ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10385 { 10386 ocs_sli_port_t *sport = ctx->app; 10387 10388 smtrace("port"); 10389 10390 switch (evt) { 10391 case OCS_EVT_ENTER: 10392 /* no-op */ 10393 break; 10394 case OCS_EVT_HW_PORT_ALLOC_OK: 10395 ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL); 10396 break; 10397 case OCS_EVT_HW_PORT_ALLOC_FAIL: 10398 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL); 10399 break; 10400 case OCS_EVT_HW_PORT_REQ_FREE: 10401 /* Wait for attach response and then free */ 10402 sport->sm_free_req_pending = 1; 10403 break; 10404 default: 10405 __ocs_hw_port_common(__func__, ctx, evt, data); 10406 break; 10407 } 10408 10409 return NULL; 10410 } 10411 10412 static void * 10413 __ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10414 { 10415 ocs_sli_port_t *sport = ctx->app; 10416 ocs_hw_t *hw = sport->hw; 10417 10418 smtrace("port"); 10419 10420 switch (evt) { 10421 case OCS_EVT_ENTER: 10422 /* If there is a pending free request, then handle it now */ 10423 if (sport->sm_free_req_pending) { 10424 ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL); 10425 return NULL; 10426 } 10427 10428 /* TODO XXX transitioning to done only works if this is called 10429 * directly from ocs_hw_port_alloc BUT not if called from 10430 * read_sparm64. 
In the latter case, we actually want to go 10431 * through report_ok/fail 10432 */ 10433 if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE, 10434 sport->indicator, sport->domain->indicator)) { 10435 ocs_log_err(hw->os, "INIT_VPI allocation failure\n"); 10436 ocs_sm_transition(ctx, __ocs_hw_port_done, data); 10437 break; 10438 } 10439 10440 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) { 10441 ocs_log_err(hw->os, "INIT_VPI command failure\n"); 10442 ocs_sm_transition(ctx, __ocs_hw_port_done, data); 10443 break; 10444 } 10445 break; 10446 case OCS_EVT_RESPONSE: 10447 ocs_sm_transition(ctx, __ocs_hw_port_allocated, data); 10448 break; 10449 case OCS_EVT_ERROR: 10450 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data); 10451 break; 10452 case OCS_EVT_HW_PORT_REQ_FREE: 10453 /* Wait for attach response and then free */ 10454 sport->sm_free_req_pending = 1; 10455 break; 10456 case OCS_EVT_EXIT: 10457 break; 10458 default: 10459 __ocs_hw_port_common(__func__, ctx, evt, data); 10460 break; 10461 } 10462 10463 return NULL; 10464 } 10465 10466 static int32_t 10467 __ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 10468 { 10469 ocs_sli_port_t *sport = arg; 10470 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 10471 ocs_sm_event_t evt; 10472 10473 if (status || hdr->status) { 10474 ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n", 10475 sport->indicator, status, hdr->status); 10476 evt = OCS_EVT_ERROR; 10477 } else { 10478 evt = OCS_EVT_RESPONSE; 10479 } 10480 10481 ocs_sm_post_event(&sport->ctx, evt, mqe); 10482 10483 return 0; 10484 } 10485 10486 static int32_t 10487 __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 10488 { 10489 ocs_sli_port_t *sport = arg; 10490 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 10491 ocs_sm_event_t evt; 10492 uint8_t *mqecpy; 10493 10494 if (status || hdr->status) { 10495 ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n", 10496 sport->indicator, status, hdr->status); 10497 evt = OCS_EVT_ERROR; 10498 } else { 10499 evt = OCS_EVT_RESPONSE; 10500 } 10501 10502 /* 10503 * In this case we have to malloc a mailbox command buffer, as it is reused 10504 * in the state machine post event call, and eventually freed 10505 */ 10506 mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 10507 if (mqecpy == NULL) { 10508 ocs_log_err(hw->os, "malloc mqecpy failed\n"); 10509 return -1; 10510 } 10511 ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE); 10512 10513 ocs_sm_post_event(&sport->ctx, evt, mqecpy); 10514 10515 return 0; 10516 } 10517 10518 /*************************************************************************** 10519 * Domain state machine 10520 */ 10521 10522 static int32_t 10523 __ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10524 { 10525 ocs_domain_t *domain = ctx->app; 10526 ocs_hw_t *hw = domain->hw; 10527 10528 smtrace("domain"); 10529 10530 switch (evt) { 10531 case OCS_EVT_EXIT: 10532 /* ignore */ 10533 break; 10534 10535 default: 10536 ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt)); 10537 break; 10538 } 10539 10540 return 0; 10541 } 10542 10543 static void * 10544 __ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10545 { 10546 ocs_domain_t *domain = ctx->app; 10547 ocs_hw_t *hw = domain->hw; 10548 10549 smtrace("domain"); 10550 10551 switch (evt) { 10552 case OCS_EVT_ENTER: 10553 /* free
command buffer */ 10554 if (data != NULL) { 10555 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10556 } 10557 /* free SLI resources */ 10558 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator); 10559 /* TODO how to free FCFI (or do we at all)? */ 10560 10561 if (hw->callback.domain != NULL) { 10562 hw->callback.domain(hw->args.domain, 10563 OCS_HW_DOMAIN_ALLOC_FAIL, 10564 domain); 10565 } 10566 break; 10567 default: 10568 __ocs_hw_domain_common(__func__, ctx, evt, data); 10569 break; 10570 } 10571 10572 return NULL; 10573 } 10574 10575 static void * 10576 __ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10577 { 10578 ocs_domain_t *domain = ctx->app; 10579 ocs_hw_t *hw = domain->hw; 10580 10581 smtrace("domain"); 10582 10583 switch (evt) { 10584 case OCS_EVT_ENTER: 10585 /* free mailbox buffer and send alloc ok to physical sport */ 10586 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10587 ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL); 10588 10589 /* now inform registered callbacks */ 10590 if (hw->callback.domain != NULL) { 10591 hw->callback.domain(hw->args.domain, 10592 OCS_HW_DOMAIN_ATTACH_OK, 10593 domain); 10594 } 10595 break; 10596 case OCS_EVT_HW_DOMAIN_REQ_FREE: 10597 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL); 10598 break; 10599 default: 10600 __ocs_hw_domain_common(__func__, ctx, evt, data); 10601 break; 10602 } 10603 10604 return NULL; 10605 } 10606 10607 static void * 10608 __ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10609 { 10610 ocs_domain_t *domain = ctx->app; 10611 ocs_hw_t *hw = domain->hw; 10612 10613 smtrace("domain"); 10614 10615 switch (evt) { 10616 case OCS_EVT_ENTER: 10617 if (data != NULL) { 10618 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10619 } 10620 /* free SLI resources */ 10621 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator); 10622 /* TODO how to free FCFI (or do we at all)? 
*/ 10623 10624 if (hw->callback.domain != NULL) { 10625 hw->callback.domain(hw->args.domain, 10626 OCS_HW_DOMAIN_ATTACH_FAIL, 10627 domain); 10628 } 10629 break; 10630 case OCS_EVT_EXIT: 10631 break; 10632 default: 10633 __ocs_hw_domain_common(__func__, ctx, evt, data); 10634 break; 10635 } 10636 10637 return NULL; 10638 } 10639 10640 static void * 10641 __ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10642 { 10643 ocs_domain_t *domain = ctx->app; 10644 ocs_hw_t *hw = domain->hw; 10645 10646 smtrace("domain"); 10647 10648 switch (evt) { 10649 case OCS_EVT_ENTER: 10650 10651 ocs_display_sparams("", "reg vpi", 0, NULL, domain->dma.virt); 10652 10653 if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) { 10654 ocs_log_err(hw->os, "REG_VFI format failure\n"); 10655 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10656 break; 10657 } 10658 10659 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 10660 ocs_log_err(hw->os, "REG_VFI command failure\n"); 10661 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10662 break; 10663 } 10664 break; 10665 case OCS_EVT_RESPONSE: 10666 ocs_sm_transition(ctx, __ocs_hw_domain_attached, data); 10667 break; 10668 case OCS_EVT_ERROR: 10669 ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data); 10670 break; 10671 default: 10672 __ocs_hw_domain_common(__func__, ctx, evt, data); 10673 break; 10674 } 10675 10676 return NULL; 10677 } 10678 10679 static void * 10680 __ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10681 { 10682 ocs_domain_t *domain = ctx->app; 10683 ocs_hw_t *hw = domain->hw; 10684 10685 smtrace("domain"); 10686 10687 switch (evt) { 10688 case OCS_EVT_ENTER: 10689 /* free mailbox buffer and send alloc ok to physical sport */ 10690 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 10691 ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL); 10692 10693 ocs_hw_domain_add(hw, domain); 10694 10695 /* now inform registered callbacks */ 10696 if (hw->callback.domain != NULL) { 10697 hw->callback.domain(hw->args.domain, 10698 OCS_HW_DOMAIN_ALLOC_OK, 10699 domain); 10700 } 10701 break; 10702 case OCS_EVT_HW_DOMAIN_REQ_ATTACH: 10703 ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data); 10704 break; 10705 case OCS_EVT_HW_DOMAIN_REQ_FREE: 10706 /* unreg_fcfi/vfi */ 10707 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) { 10708 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL); 10709 } else { 10710 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL); 10711 } 10712 break; 10713 default: 10714 __ocs_hw_domain_common(__func__, ctx, evt, data); 10715 break; 10716 } 10717 10718 return NULL; 10719 } 10720 10721 static void * 10722 __ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10723 { 10724 ocs_domain_t *domain = ctx->app; 10725 ocs_hw_t *hw = domain->hw; 10726 10727 smtrace("domain"); 10728 10729 switch (evt) { 10730 case OCS_EVT_ENTER: 10731 if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE, 10732 &domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) { 10733 ocs_log_err(hw->os, "READ_SPARM64 format failure\n"); 10734 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10735 break; 10736 } 10737 10738 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 10739 ocs_log_err(hw->os, "READ_SPARM64 command failure\n"); 10740 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10741 break; 10742 } 10743 break; 10744 case OCS_EVT_EXIT: 10745 break; 10746 case OCS_EVT_RESPONSE: 
10747 ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt); 10748 10749 ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data); 10750 break; 10751 case OCS_EVT_ERROR: 10752 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data); 10753 break; 10754 default: 10755 __ocs_hw_domain_common(__func__, ctx, evt, data); 10756 break; 10757 } 10758 10759 return NULL; 10760 } 10761 10762 static void * 10763 __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10764 { 10765 ocs_domain_t *domain = ctx->app; 10766 ocs_sli_port_t *sport = domain->sport; 10767 ocs_hw_t *hw = domain->hw; 10768 10769 smtrace("domain"); 10770 10771 switch (evt) { 10772 case OCS_EVT_ENTER: 10773 if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator, 10774 domain->fcf_indicator, sport->indicator)) { 10775 ocs_log_err(hw->os, "INIT_VFI format failure\n"); 10776 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10777 break; 10778 } 10779 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 10780 ocs_log_err(hw->os, "INIT_VFI command failure\n"); 10781 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10782 break; 10783 } 10784 break; 10785 case OCS_EVT_EXIT: 10786 break; 10787 case OCS_EVT_RESPONSE: 10788 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data); 10789 break; 10790 case OCS_EVT_ERROR: 10791 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data); 10792 break; 10793 default: 10794 __ocs_hw_domain_common(__func__, ctx, evt, data); 10795 break; 10796 } 10797 10798 return NULL; 10799 } 10800 10801 static void * 10802 __ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10803 { 10804 ocs_domain_t *domain = ctx->app; 10805 ocs_hw_t *hw = domain->hw; 10806 10807 smtrace("domain"); 10808 10809 switch (evt) { 10810 case OCS_EVT_ENTER: { 10811 sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG]; 10812 uint32_t i; 10813 10814 /* Set the filter match/mask values from hw's filter_def values */ 10815 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { 10816 rq_cfg[i].rq_id = 0xffff; 10817 rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i]; 10818 rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8); 10819 rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16); 10820 rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24); 10821 } 10822 10823 /* Set the rq_id for each, in order of RQ definition */ 10824 for (i = 0; i < hw->hw_rq_count; i++) { 10825 if (i >= ARRAY_SIZE(rq_cfg)) { 10826 ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n"); 10827 break; 10828 } 10829 rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id; 10830 } 10831 10832 if (!data) { 10833 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10834 break; 10835 } 10836 10837 if (hw->hw_mrq_count) { 10838 if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 10839 domain->vlan_id, domain->fcf)) { 10840 ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n"); 10841 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10842 break; 10843 } 10844 10845 } else { 10846 if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf, 10847 rq_cfg, domain->vlan_id)) { 10848 ocs_log_err(hw->os, "REG_FCFI format failure\n"); 10849 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10850 break; 10851 } 10852 } 10853 10854 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 10855 ocs_log_err(hw->os, "REG_FCFI command failure\n"); 10856 
ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10857 break; 10858 } 10859 break; 10860 } 10861 case OCS_EVT_EXIT: 10862 break; 10863 case OCS_EVT_RESPONSE: 10864 if (!data) { 10865 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 10866 break; 10867 } 10868 10869 domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi; 10870 10871 /* 10872 * IF_TYPE 0 devices do not support explicit VFI and VPI initialization 10873 * and instead rely on implicit initialization during VFI registration. 10874 * Short circuit normal processing here for those devices. 10875 */ 10876 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) { 10877 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data); 10878 } else { 10879 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data); 10880 } 10881 break; 10882 case OCS_EVT_ERROR: 10883 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data); 10884 break; 10885 default: 10886 __ocs_hw_domain_common(__func__, ctx, evt, data); 10887 break; 10888 } 10889 10890 return NULL; 10891 } 10892 10893 static void * 10894 __ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10895 { 10896 ocs_domain_t *domain = ctx->app; 10897 ocs_hw_t *hw = domain->hw; 10898 10899 smtrace("domain"); 10900 10901 switch (evt) { 10902 case OCS_EVT_ENTER: 10903 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) { 10904 /* 10905 * For FC, the HW already registered an FCFI. 10906 * Copy FCF information into the domain and jump to INIT_VFI 10907 */ 10908 domain->fcf_indicator = hw->fcf_indicator; 10909 ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data); 10910 } else { 10911 ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data); 10912 } 10913 break; 10914 default: 10915 __ocs_hw_domain_common(__func__, ctx, evt, data); 10916 break; 10917 } 10918 10919 return NULL; 10920 } 10921 10922 static void * 10923 __ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10924 { 10925 ocs_domain_t *domain = ctx->app; 10926 10927 smtrace("domain"); 10928 10929 switch (evt) { 10930 case OCS_EVT_ENTER: 10931 if (domain != NULL) { 10932 ocs_hw_t *hw = domain->hw; 10933 10934 ocs_hw_domain_del(hw, domain); 10935 10936 if (hw->callback.domain != NULL) { 10937 hw->callback.domain(hw->args.domain, 10938 OCS_HW_DOMAIN_FREE_FAIL, 10939 domain); 10940 } 10941 } 10942 10943 /* free command buffer */ 10944 if (data != NULL) { 10945 ocs_free(domain != NULL ?
domain->hw->os : NULL, data, SLI4_BMBX_SIZE); 10946 } 10947 break; 10948 case OCS_EVT_EXIT: 10949 break; 10950 default: 10951 __ocs_hw_domain_common(__func__, ctx, evt, data); 10952 break; 10953 } 10954 10955 return NULL; 10956 } 10957 10958 static void * 10959 __ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 10960 { 10961 ocs_domain_t *domain = ctx->app; 10962 10963 smtrace("domain"); 10964 10965 switch (evt) { 10966 case OCS_EVT_ENTER: 10967 /* Free DMA and mailbox buffer */ 10968 if (domain != NULL) { 10969 ocs_hw_t *hw = domain->hw; 10970 10971 /* free VFI resource */ 10972 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, 10973 domain->indicator); 10974 10975 ocs_hw_domain_del(hw, domain); 10976 10977 /* inform registered callbacks */ 10978 if (hw->callback.domain != NULL) { 10979 hw->callback.domain(hw->args.domain, 10980 OCS_HW_DOMAIN_FREE_OK, 10981 domain); 10982 } 10983 } 10984 if (data != NULL) { 10985 ocs_free(NULL, data, SLI4_BMBX_SIZE); 10986 } 10987 break; 10988 case OCS_EVT_EXIT: 10989 break; 10990 default: 10991 __ocs_hw_domain_common(__func__, ctx, evt, data); 10992 break; 10993 } 10994 10995 return NULL; 10996 } 10997 10998 static void * 10999 __ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 11000 { 11001 ocs_domain_t *domain = ctx->app; 11002 ocs_hw_t *hw = domain->hw; 11003 11004 smtrace("domain"); 11005 11006 switch (evt) { 11007 case OCS_EVT_ENTER: 11008 /* if we're in the middle of a teardown, skip sending rediscover */ 11009 if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) { 11010 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); 11011 break; 11012 } 11013 if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) { 11014 ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n"); 11015 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11016 break; 11017 } 11018 11019 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 11020 ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n"); 11021 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11022 } 11023 break; 11024 case OCS_EVT_RESPONSE: 11025 case OCS_EVT_ERROR: 11026 /* REDISCOVER_FCF can fail if none exist */ 11027 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); 11028 break; 11029 case OCS_EVT_EXIT: 11030 break; 11031 default: 11032 __ocs_hw_domain_common(__func__, ctx, evt, data); 11033 break; 11034 } 11035 11036 return NULL; 11037 } 11038 11039 static void * 11040 __ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 11041 { 11042 ocs_domain_t *domain = ctx->app; 11043 ocs_hw_t *hw = domain->hw; 11044 11045 smtrace("domain"); 11046 11047 switch (evt) { 11048 case OCS_EVT_ENTER: 11049 if (data == NULL) { 11050 data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 11051 if (!data) { 11052 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11053 break; 11054 } 11055 } 11056 11057 if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) { 11058 ocs_log_err(hw->os, "UNREG_FCFI format failure\n"); 11059 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 11060 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11061 break; 11062 } 11063 11064 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 11065 ocs_log_err(hw->os, "UNREG_FCFI command failure\n"); 11066 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 11067 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11068 break; 11069 } 11070 break; 11071 case OCS_EVT_RESPONSE: 11072 if (domain->req_rediscover_fcf) { 
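			/*
			 * an FCF rediscover was requested; clear the flag and
			 * kick off REDISCOVER_FCF before reporting the domain
			 * as freed
			 */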
11073 domain->req_rediscover_fcf = FALSE; 11074 ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data); 11075 } else { 11076 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); 11077 } 11078 break; 11079 case OCS_EVT_ERROR: 11080 ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data); 11081 break; 11082 case OCS_EVT_EXIT: 11083 break; 11084 default: 11085 __ocs_hw_domain_common(__func__, ctx, evt, data); 11086 break; 11087 } 11088 11089 return NULL; 11090 } 11091 11092 static void * 11093 __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) 11094 { 11095 ocs_domain_t *domain = ctx->app; 11096 ocs_hw_t *hw = domain->hw; 11097 uint8_t is_fc = FALSE; 11098 11099 smtrace("domain"); 11100 11101 is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC); 11102 11103 switch (evt) { 11104 case OCS_EVT_ENTER: 11105 if (data == NULL) { 11106 data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); 11107 if (!data) { 11108 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11109 break; 11110 } 11111 } 11112 11113 if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain, 11114 SLI4_UNREG_TYPE_DOMAIN)) { 11115 ocs_log_err(hw->os, "UNREG_VFI format failure\n"); 11116 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 11117 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11118 break; 11119 } 11120 11121 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) { 11122 ocs_log_err(hw->os, "UNREG_VFI command failure\n"); 11123 ocs_free(hw->os, data, SLI4_BMBX_SIZE); 11124 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL); 11125 break; 11126 } 11127 break; 11128 case OCS_EVT_ERROR: 11129 if (is_fc) { 11130 ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data); 11131 } else { 11132 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data); 11133 } 11134 break; 11135 case OCS_EVT_RESPONSE: 11136 if (is_fc) { 11137 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data); 11138 } else { 11139 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data); 11140 } 11141 break; 11142 default: 11143 __ocs_hw_domain_common(__func__, ctx, evt, data); 11144 break; 11145 } 11146 11147 return NULL; 11148 } 11149 11150 /* callback for domain alloc/attach/free */ 11151 static int32_t 11152 __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 11153 { 11154 ocs_domain_t *domain = arg; 11155 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 11156 ocs_sm_event_t evt; 11157 11158 if (status || hdr->status) { 11159 ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n", 11160 domain->indicator, status, hdr->status); 11161 evt = OCS_EVT_ERROR; 11162 } else { 11163 evt = OCS_EVT_RESPONSE; 11164 } 11165 11166 ocs_sm_post_event(&domain->sm, evt, mqe); 11167 11168 return 0; 11169 } 11170 11171 static int32_t 11172 target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg) 11173 { 11174 ocs_hw_io_t *io = NULL; 11175 ocs_hw_io_t *io_next = NULL; 11176 uint64_t ticks_current = ocs_get_os_ticks(); 11177 uint32_t sec_elapsed; 11178 ocs_hw_rtn_e rc; 11179 11180 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe; 11181 11182 if (status || hdr->status) { 11183 ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n", 11184 status, hdr->status); 11185 /* go ahead and proceed with wqe timer checks... 
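	 * even on a bad NOP status; the IO timeout scan below must still run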
*/ 11186 } 11187 11188 /* loop through active WQE list and check for timeouts */ 11189 ocs_lock(&hw->io_lock); 11190 ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) { 11191 sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq()); 11192 11193 /* 11194 * If elapsed time > timeout, abort it. No need to check type since 11195 * it wouldn't be on this list unless it was a target WQE 11196 */ 11197 if (sec_elapsed > io->tgt_wqe_timeout) { 11198 ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n", 11199 io->indicator, io->reqtag, io->type); 11200 11201 /* remove from active_wqe list so won't try to abort again */ 11202 ocs_list_remove(&hw->io_timed_wqe, io); 11203 11204 /* save status of "timed out" for when abort completes */ 11205 io->status_saved = 1; 11206 io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT; 11207 io->saved_ext = 0; 11208 io->saved_len = 0; 11209 11210 /* now abort outstanding IO */ 11211 rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL); 11212 if (rc) { 11213 ocs_log_test(hw->os, 11214 "abort failed xri=%#x tag=%#x rc=%d\n", 11215 io->indicator, io->reqtag, rc); 11216 } 11217 } 11218 /* 11219 * need to go through entire list since each IO could have a 11220 * different timeout value 11221 */ 11222 } 11223 ocs_unlock(&hw->io_lock); 11224 11225 /* if we're not in the middle of shutting down, schedule next timer */ 11226 if (!hw->active_wqe_timer_shutdown) { 11227 ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS); 11228 } 11229 hw->in_active_wqe_timer = FALSE; 11230 return 0; 11231 } 11232 11233 static void 11234 target_wqe_timer_cb(void *arg) 11235 { 11236 ocs_hw_t *hw = (ocs_hw_t *)arg; 11237 11238 /* delete existing timer; will kick off new timer after checking wqe timeouts */ 11239 hw->in_active_wqe_timer = TRUE; 11240 ocs_del_timer(&hw->wqe_timer); 11241 11242 /* Forward timer callback to execute in the mailbox completion processing context */ 11243 if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) { 11244 ocs_log_test(hw->os, "ocs_hw_async_call failed\n"); 11245 } 11246 } 11247 11248 static void 11249 shutdown_target_wqe_timer(ocs_hw_t *hw) 11250 { 11251 uint32_t iters = 100; 11252 11253 if (hw->config.emulate_tgt_wqe_timeout) { 11254 /* request active wqe timer shutdown, then wait for it to complete */ 11255 hw->active_wqe_timer_shutdown = TRUE; 11256 11257 /* delete WQE timer and wait for timer handler to complete (if necessary) */ 11258 ocs_del_timer(&hw->wqe_timer); 11259 11260 /* now wait for timer handler to complete (if necessary) */ 11261 while (hw->in_active_wqe_timer && iters) { 11262 /* 11263 * if we happen to have just sent NOP mailbox command, make sure 11264 * completions are being processed 11265 */ 11266 ocs_hw_flush(hw); 11267 iters--; 11268 } 11269 11270 if (iters == 0) { 11271 ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n"); 11272 } 11273 } 11274 } 11275 11276 /** 11277 * @brief Determine if HW IO is owned by the port. 11278 * 11279 * @par Description 11280 * Determines if the given HW IO has been posted to the chip. 11281 * 11282 * @param hw Hardware context allocated by the caller. 11283 * @param io HW IO. 11284 * 11285 * @return Returns TRUE if given HW IO is port-owned. 11286 */ 11287 uint8_t 11288 ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io) 11289 { 11290 /* Check to see if this is a port owned XRI */ 11291 return io->is_port_owned; 11292 } 11293 11294 /** 11295 * @brief Return TRUE if exchange is port-owned. 
 *
 * @par Description
 * Test to see if the xri is a port-owned xri.
 *
 * @param hw Hardware context.
 * @param xri Exchange indicator.
 *
 * @return Returns TRUE if XRI is a port owned XRI.
 */

uint8_t
ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri)
{
	ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri);
	return (io == NULL ? FALSE : io->is_port_owned);
}

/**
 * @brief Returns XRIs from the port owned list to the host.
 *
 * @par Description
 * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes.
 *
 * @param hw Hardware context.
 * @param xri_base The starting XRI number.
 * @param xri_count The number of XRIs to free from the base.
 */
static void
ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count)
{
	ocs_hw_io_t	*io;
	uint32_t i;

	for (i = 0; i < xri_count; i++) {
		io = ocs_hw_io_lookup(hw, xri_base + i);

		/*
		 * if this is an auto xfer rdy XRI, then we need to release any
		 * buffer attached to the XRI before moving the XRI back to the free pool.
		 */
		if (hw->auto_xfer_rdy_enabled) {
			ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
		}

		ocs_lock(&hw->io_lock);
		ocs_list_remove(&hw->io_port_owned, io);
		io->is_port_owned = 0;
		ocs_list_add_tail(&hw->io_free, io);
		ocs_unlock(&hw->io_lock);
	}
}

/**
 * @brief Called when the POST_XRI command completes.
 *
 * @par Description
 * Free the mailbox command buffer and reclaim the XRIs on failure.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 *
 * @return Returns 0.
 */
static int32_t
ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_cmd_post_xri_t	*post_xri = (sli4_cmd_post_xri_t*)mqe;

	/* Reclaim the XRIs as host owned if the command fails */
	if (status != 0) {
		ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt=0x%x\n",
				status, post_xri->xri_base, post_xri->xri_count);
		ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count);
	}

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	return 0;
}

/**
 * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port.
 *
 * @param hw Hardware context.
 * @param xri_start The starting XRI to post.
 * @param num_to_post The number of XRIs to post.
 *
 * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS.
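 *
 * @par Example
 * This helper is normally driven through ocs_hw_xri_move_to_port_owned();
 * a minimal sketch of a hypothetical caller posting a batch of XRIs:
 * @code
 * uint32_t posted = ocs_hw_xri_move_to_port_owned(hw, 16);
 * if (posted < 16) {
 *         ocs_log_test(hw->os, "only %d of 16 XRIs posted\n", posted);
 * }
 * @endcode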
11385 */ 11386 11387 static ocs_hw_rtn_e 11388 ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post) 11389 { 11390 uint8_t *post_xri; 11391 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR; 11392 11393 /* Since we need to allocate for mailbox queue, just always allocate */ 11394 post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT); 11395 if (post_xri == NULL) { 11396 ocs_log_err(hw->os, "no buffer for command\n"); 11397 return OCS_HW_RTN_NO_MEMORY; 11398 } 11399 11400 /* Register the XRIs */ 11401 if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE, 11402 xri_start, num_to_post)) { 11403 rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL); 11404 if (rc != OCS_HW_RTN_SUCCESS) { 11405 ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE); 11406 ocs_log_err(hw->os, "post_xri failed\n"); 11407 } 11408 } 11409 return rc; 11410 } 11411 11412 /** 11413 * @brief Move XRIs from the host-controlled pool to the port. 11414 * 11415 * @par Description 11416 * Removes IOs from the free list and moves them to the port. 11417 * 11418 * @param hw Hardware context. 11419 * @param num_xri The number of XRIs being requested to move to the chip. 11420 * 11421 * @return Returns the number of XRIs that were moved. 11422 */ 11423 11424 uint32_t 11425 ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri) 11426 { 11427 ocs_hw_io_t *io; 11428 uint32_t i; 11429 uint32_t num_posted = 0; 11430 11431 /* 11432 * Note: We cannot use ocs_hw_io_alloc() because that would place the 11433 * IO on the io_inuse list. We need to move from the io_free to 11434 * the io_port_owned list. 11435 */ 11436 ocs_lock(&hw->io_lock); 11437 11438 for (i = 0; i < num_xri; i++) { 11439 if (NULL != (io = ocs_list_remove_head(&hw->io_free))) { 11440 ocs_hw_rtn_e rc; 11441 11442 /* 11443 * if this is an auto xfer rdy XRI, then we need to attach a 11444 * buffer to the XRI before submitting it to the chip. If a 11445 * buffer is unavailable, then we cannot post it, so return it 11446 * to the free pool. 11447 */ 11448 if (hw->auto_xfer_rdy_enabled) { 11449 /* Note: uses the IO lock to get the auto xfer rdy buffer */ 11450 ocs_unlock(&hw->io_lock); 11451 rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io); 11452 ocs_lock(&hw->io_lock); 11453 if (rc != OCS_HW_RTN_SUCCESS) { 11454 ocs_list_add_head(&hw->io_free, io); 11455 break; 11456 } 11457 } 11458 ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator); 11459 io->is_port_owned = 1; 11460 ocs_list_add_tail(&hw->io_port_owned, io); 11461 11462 /* Post XRI */ 11463 if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS ) { 11464 ocs_hw_reclaim_xri(hw, io->indicator, i); 11465 break; 11466 } 11467 num_posted++; 11468 } else { 11469 /* no more free XRIs */ 11470 break; 11471 } 11472 } 11473 ocs_unlock(&hw->io_lock); 11474 11475 return num_posted; 11476 } 11477 11478 /** 11479 * @brief Called when the RELEASE_XRI command completes. 11480 * 11481 * @par Description 11482 * Move the IOs back to the free pool on success. 11483 * 11484 * @param hw Hardware context. 11485 * @param status Status field from the mbox completion. 11486 * @param mqe Mailbox response structure. 11487 * @param arg Pointer to a callback function that signals the caller that the command is done. 11488 * 11489 * @return Returns 0. 
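 *
 * @par Note
 * Each xri_tbl entry in the RELEASE_XRI response carries two released XRI
 * tags (xri_tag0 and xri_tag1), so the completion handler below indexes the
 * table with i/2 and selects a tag using the low bit of i.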
 */
static int32_t
ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	sli4_cmd_release_xri_t	*release_xri = (sli4_cmd_release_xri_t*)mqe;
	uint8_t i;

	/* Reclaim the XRIs as host owned if the command fails */
	if (status != 0) {
		ocs_log_err(hw->os, "Status 0x%x\n", status);
	} else {
		for (i = 0; i < release_xri->released_xri_count; i++) {
			uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 :
					release_xri->xri_tbl[i/2].xri_tag1);
			ocs_hw_reclaim_xri(hw, xri, 1);
		}
	}

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	return 0;
}

/**
 * @brief Move XRIs from the port-controlled pool to the host.
 *
 * Requests XRIs from the FW to return to the host-owned pool.
 *
 * @param hw Hardware context.
 * @param num_xri The number of XRIs being requested to move from the chip.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
 */

ocs_hw_rtn_e
ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri)
{
	uint8_t	*release_xri;
	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;

	/* non-local buffer required for mailbox queue */
	release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
	if (release_xri == NULL) {
		ocs_log_err(hw->os, "no buffer for command\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	/* release the XRIs */
	if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) {
		rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "release_xri failed\n");
		}
	}
	/* If we are polling or an error occurred, then free the mailbox buffer */
	if (release_xri != NULL && rc != OCS_HW_RTN_SUCCESS) {
		ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE);
	}
	return rc;
}

/**
 * @brief Allocate an ocs_hw_rq_buffer_t array.
 *
 * @par Description
 * An ocs_hw_rq_buffer_t array is allocated, along with the required DMA memory.
 *
 * @param hw Pointer to HW object.
 * @param rqindex RQ index for this buffer.
 * @param count Count of buffers in array.
 * @param size Size of buffer.
 *
 * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array.
 */
static ocs_hw_rq_buffer_t *
ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size)
{
	ocs_t	*ocs = hw->os;
	ocs_hw_rq_buffer_t *rq_buf = NULL;
	ocs_hw_rq_buffer_t *prq;
	uint32_t i;

	if (count != 0) {
		rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO);
		if (rq_buf == NULL) {
			ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n");
			return NULL;
		}

		for (i = 0, prq = rq_buf; i < count; i++, prq++) {
			prq->rqindex = rqindex;
			if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) {
				ocs_log_err(hw->os, "DMA allocation failed\n");
				/* unwind the DMA buffers already allocated */
				while (prq-- > rq_buf) {
					ocs_dma_free(ocs, &prq->dma);
				}
				ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
				rq_buf = NULL;
				break;
			}
		}
	}
	return rq_buf;
}

/**
 * @brief Free an ocs_hw_rq_buffer_t array.
 *
 * @par Description
 * The ocs_hw_rq_buffer_t array is freed, along with allocated DMA memory.
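 * The count must match the count given to ocs_hw_rx_buffer_alloc() so that
 * every tracker's DMA buffer is released.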
11596 * 11597 * @param hw Pointer to HW object. 11598 * @param rq_buf Pointer to ocs_hw_rx_buffer_t array. 11599 * @param count Count of buffers in array. 11600 * 11601 * @return None. 11602 */ 11603 static void 11604 ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count) 11605 { 11606 ocs_t *ocs = hw->os; 11607 uint32_t i; 11608 ocs_hw_rq_buffer_t *prq; 11609 11610 if (rq_buf != NULL) { 11611 for (i = 0, prq = rq_buf; i < count; i++, prq++) { 11612 ocs_dma_free(ocs, &prq->dma); 11613 } 11614 ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count); 11615 } 11616 } 11617 11618 /** 11619 * @brief Allocate the RQ data buffers. 11620 * 11621 * @param hw Pointer to HW object. 11622 * 11623 * @return Returns 0 on success, or a non-zero value on failure. 11624 */ 11625 ocs_hw_rtn_e 11626 ocs_hw_rx_allocate(ocs_hw_t *hw) 11627 { 11628 ocs_t *ocs = hw->os; 11629 uint32_t i; 11630 int32_t rc = OCS_HW_RTN_SUCCESS; 11631 uint32_t rqindex = 0; 11632 hw_rq_t *rq; 11633 uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR; 11634 uint32_t payload_size = hw->config.rq_default_buffer_size; 11635 11636 rqindex = 0; 11637 11638 for (i = 0; i < hw->hw_rq_count; i++) { 11639 rq = hw->hw_rq[i]; 11640 11641 /* Allocate header buffers */ 11642 rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size); 11643 if (rq->hdr_buf == NULL) { 11644 ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n"); 11645 rc = OCS_HW_RTN_ERROR; 11646 break; 11647 } 11648 11649 ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header %4d by %4d bytes\n", i, rq->hdr->id, 11650 rq->entry_count, hdr_size); 11651 11652 rqindex++; 11653 11654 /* Allocate payload buffers */ 11655 rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size); 11656 if (rq->payload_buf == NULL) { 11657 ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc fb_buf failed\n"); 11658 rc = OCS_HW_RTN_ERROR; 11659 break; 11660 } 11661 ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id, 11662 rq->entry_count, payload_size); 11663 rqindex++; 11664 } 11665 11666 return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS; 11667 } 11668 11669 /** 11670 * @brief Post the RQ data buffers to the chip. 11671 * 11672 * @param hw Pointer to HW object. 11673 * 11674 * @return Returns 0 on success, or a non-zero value on failure. 11675 */ 11676 ocs_hw_rtn_e 11677 ocs_hw_rx_post(ocs_hw_t *hw) 11678 { 11679 uint32_t i; 11680 uint32_t idx; 11681 uint32_t rq_idx; 11682 int32_t rc = 0; 11683 11684 /* 11685 * In RQ pair mode, we MUST post the header and payload buffer at the 11686 * same time. 11687 */ 11688 for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) { 11689 hw_rq_t *rq = hw->hw_rq[rq_idx]; 11690 11691 for (i = 0; i < rq->entry_count-1; i++) { 11692 ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++); 11693 ocs_hw_assert(seq != NULL); 11694 11695 seq->header = &rq->hdr_buf[i]; 11696 11697 seq->payload = &rq->payload_buf[i]; 11698 11699 rc = ocs_hw_sequence_free(hw, seq); 11700 if (rc) { 11701 break; 11702 } 11703 } 11704 if (rc) { 11705 break; 11706 } 11707 } 11708 11709 return rc; 11710 } 11711 11712 /** 11713 * @brief Free the RQ data buffers. 11714 * 11715 * @param hw Pointer to HW object. 
 *
 */
void
ocs_hw_rx_free(ocs_hw_t *hw)
{
	hw_rq_t	*rq;
	uint32_t i;

	/* Free hw_rq buffers */
	for (i = 0; i < hw->hw_rq_count; i++) {
		rq = hw->hw_rq[i];
		if (rq != NULL) {
			ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count);
			rq->hdr_buf = NULL;
			ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count);
			rq->payload_buf = NULL;
		}
	}
}

/**
 * @brief HW async call context structure.
 */
typedef struct {
	ocs_hw_async_cb_t callback;
	void *arg;
	uint8_t cmd[SLI4_BMBX_SIZE];
} ocs_hw_async_call_ctx_t;

/**
 * @brief HW async callback handler
 *
 * @par Description
 * This function is called when the NOP mailbox command completes. The callback stored
 * in the requesting context is invoked.
 *
 * @param hw Pointer to HW object.
 * @param status Completion status.
 * @param mqe Pointer to mailbox completion queue entry.
 * @param arg Caller-provided argument.
 *
 * @return None.
 */
static void
ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_hw_async_call_ctx_t *ctx = arg;

	if (ctx != NULL) {
		if (ctx->callback != NULL) {
			(*ctx->callback)(hw, status, mqe, ctx->arg);
		}
		ocs_free(hw->os, ctx, sizeof(*ctx));
	}
}

/**
 * @brief Make an async callback using NOP mailbox command
 *
 * @par Description
 * Post a NOP mailbox command; the callback with argument is invoked upon completion
 * while in the event processing context.
 *
 * @param hw Pointer to HW object.
 * @param callback Pointer to callback function.
 * @param arg Caller-provided callback argument.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */
int32_t
ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg)
{
	ocs_hw_async_call_ctx_t *ctx;

	/*
	 * Allocate a callback context (which includes the mailbox command buffer);
	 * it must be persistent, as the mailbox command submission may be queued
	 * and executed later.
	 */
	ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT);
	if (ctx == NULL) {
		ocs_log_err(hw->os, "failed to malloc async call context\n");
		return OCS_HW_RTN_NO_MEMORY;
	}
	ctx->callback = callback;
	ctx->arg = arg;

	/* Build and send a NOP mailbox command */
	if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) {
		ocs_log_err(hw->os, "COMMON_NOP format failure\n");
		ocs_free(hw->os, ctx, sizeof(*ctx));
		return OCS_HW_RTN_ERROR;
	}

	if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) {
		ocs_log_err(hw->os, "COMMON_NOP command failure\n");
		ocs_free(hw->os, ctx, sizeof(*ctx));
		return OCS_HW_RTN_ERROR;
	}
	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Initialize the reqtag pool.
 *
 * @par Description
 * The WQ request tag pool is initialized.
 *
 * @param hw Pointer to HW object.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
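 *
 * @par Example
 * Typical pairing of tag allocation and release; my_done_cb, my_arg, and the
 * use of instance_index as the WQE request tag are illustrative:
 * @code
 * hw_wq_callback_t *wqcb = ocs_hw_reqtag_alloc(hw, my_done_cb, my_arg);
 * if (wqcb != NULL) {
 *         uint32_t tag = wqcb->instance_index;
 *         ocs_hw_reqtag_free(hw, wqcb);
 * }
 * @endcode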
11827 */ 11828 ocs_hw_rtn_e 11829 ocs_hw_reqtag_init(ocs_hw_t *hw) 11830 { 11831 if (hw->wq_reqtag_pool == NULL) { 11832 hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE); 11833 if (hw->wq_reqtag_pool == NULL) { 11834 ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n"); 11835 return OCS_HW_RTN_NO_MEMORY; 11836 } 11837 } 11838 ocs_hw_reqtag_reset(hw); 11839 return OCS_HW_RTN_SUCCESS; 11840 } 11841 11842 /** 11843 * @brief Allocate a WQ request tag. 11844 * 11845 * Allocate and populate a WQ request tag from the WQ request tag pool. 11846 * 11847 * @param hw Pointer to HW object. 11848 * @param callback Callback function. 11849 * @param arg Pointer to callback argument. 11850 * 11851 * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated. 11852 */ 11853 hw_wq_callback_t * 11854 ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg) 11855 { 11856 hw_wq_callback_t *wqcb; 11857 11858 ocs_hw_assert(callback != NULL); 11859 11860 wqcb = ocs_pool_get(hw->wq_reqtag_pool); 11861 if (wqcb != NULL) { 11862 ocs_hw_assert(wqcb->callback == NULL); 11863 wqcb->callback = callback; 11864 wqcb->arg = arg; 11865 } 11866 return wqcb; 11867 } 11868 11869 /** 11870 * @brief Free a WQ request tag. 11871 * 11872 * Free the passed in WQ request tag. 11873 * 11874 * @param hw Pointer to HW object. 11875 * @param wqcb Pointer to WQ request tag object to free. 11876 * 11877 * @return None. 11878 */ 11879 void 11880 ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb) 11881 { 11882 ocs_hw_assert(wqcb->callback != NULL); 11883 wqcb->callback = NULL; 11884 wqcb->arg = NULL; 11885 ocs_pool_put(hw->wq_reqtag_pool, wqcb); 11886 } 11887 11888 /** 11889 * @brief Return WQ request tag by index. 11890 * 11891 * @par Description 11892 * Return pointer to WQ request tag object given an index. 11893 * 11894 * @param hw Pointer to HW object. 11895 * @param instance_index Index of WQ request tag to return. 11896 * 11897 * @return Pointer to WQ request tag, or NULL. 11898 */ 11899 hw_wq_callback_t * 11900 ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index) 11901 { 11902 hw_wq_callback_t *wqcb; 11903 11904 wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index); 11905 if (wqcb == NULL) { 11906 ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index); 11907 } 11908 return wqcb; 11909 } 11910 11911 /** 11912 * @brief Reset the WQ request tag pool. 11913 * 11914 * @par Description 11915 * Reset the WQ request tag pool, returning all to the free list. 11916 * 11917 * @param hw pointer to HW object. 11918 * 11919 * @return None. 11920 */ 11921 void 11922 ocs_hw_reqtag_reset(ocs_hw_t *hw) 11923 { 11924 hw_wq_callback_t *wqcb; 11925 uint32_t i; 11926 11927 /* Remove all from freelist */ 11928 while(ocs_pool_get(hw->wq_reqtag_pool) != NULL) { 11929 ; 11930 } 11931 11932 /* Put them all back */ 11933 for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) { 11934 wqcb->instance_index = i; 11935 wqcb->callback = NULL; 11936 wqcb->arg = NULL; 11937 ocs_pool_put(hw->wq_reqtag_pool, wqcb); 11938 } 11939 } 11940 11941 /** 11942 * @brief Handle HW assertion 11943 * 11944 * HW assert, display diagnostic message, and abort. 
11945 * 11946 * @param cond string describing failing assertion condition 11947 * @param filename file name 11948 * @param linenum line number 11949 * 11950 * @return none 11951 */ 11952 void 11953 _ocs_hw_assert(const char *cond, const char *filename, int linenum) 11954 { 11955 ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond); 11956 ocs_abort(); 11957 /* no return */ 11958 } 11959 11960 /** 11961 * @brief Handle HW verify 11962 * 11963 * HW verify, display diagnostic message, dump stack and return. 11964 * 11965 * @param cond string describing failing verify condition 11966 * @param filename file name 11967 * @param linenum line number 11968 * 11969 * @return none 11970 */ 11971 void 11972 _ocs_hw_verify(const char *cond, const char *filename, int linenum) 11973 { 11974 ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond); 11975 ocs_print_stack(); 11976 } 11977 11978 /** 11979 * @brief Reque XRI 11980 * 11981 * @par Description 11982 * Reque XRI 11983 * 11984 * @param hw Pointer to HW object. 11985 * @param io Pointer to HW IO 11986 * 11987 * @return Return 0 if successful else returns -1 11988 */ 11989 int32_t 11990 ocs_hw_reque_xri( ocs_hw_t *hw, ocs_hw_io_t *io ) 11991 { 11992 int32_t rc = 0; 11993 11994 rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1); 11995 if (rc) { 11996 ocs_list_add_tail(&hw->io_port_dnrx, io); 11997 rc = -1; 11998 goto exit_ocs_hw_reque_xri; 11999 } 12000 12001 io->auto_xfer_rdy_dnrx = 0; 12002 io->type = OCS_HW_IO_DNRX_REQUEUE; 12003 if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) { 12004 /* Clear buffer from XRI */ 12005 ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf); 12006 io->axr_buf = NULL; 12007 12008 ocs_log_err(hw->os, "requeue_xri WQE error\n"); 12009 ocs_list_add_tail(&hw->io_port_dnrx, io); 12010 12011 rc = -1; 12012 goto exit_ocs_hw_reque_xri; 12013 } 12014 12015 if (io->wq == NULL) { 12016 io->wq = ocs_hw_queue_next_wq(hw, io); 12017 ocs_hw_assert(io->wq != NULL); 12018 } 12019 12020 /* 12021 * Add IO to active io wqe list before submitting, in case the 12022 * wcqe processing preempts this thread. 
	 */
	OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
	OCS_STAT(io->wq->use_count++);

	rc = hw_wq_write(io->wq, &io->wqe);
	if (rc < 0) {
		ocs_log_err(hw->os, "hw_wq_write reque xri failed: %d\n", rc);
		rc = -1;
	} else {
		rc = 0;
	}

exit_ocs_hw_reque_xri:
	return rc;
}

uint32_t
ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn)
{
	sli4_t *sli4 = &ocs->hw.sli;
	ocs_dma_t dma;
	uint8_t	*payload = NULL;

	int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan;

	/* allocate memory for the service parameters */
	if (ocs_dma_alloc(ocs, &dma, 112, 4)) {
		ocs_log_err(ocs, "Failed to allocate DMA memory\n");
		return 1;
	}

	if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
				&dma, indicator)) {
		ocs_log_err(ocs, "READ_SPARM64 allocation failure\n");
		ocs_dma_free(ocs, &dma);
		return 1;
	}

	if (sli_bmbx_command(sli4)) {
		ocs_log_err(ocs, "READ_SPARM64 command failure\n");
		ocs_dma_free(ocs, &dma);
		return 1;
	}

	payload = dma.virt;
	ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn));
	ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn));
	ocs_dma_free(ocs, &dma);
	return 0;
}

uint32_t
ocs_hw_get_config_persistent_topology(ocs_hw_t *hw)
{
	uint32_t topology = OCS_HW_TOPOLOGY_AUTO;
	sli4_t *sli = &hw->sli;

	if (!sli_persist_topology_enabled(sli))
		return topology;

	switch (sli->config.pt) {
	case SLI4_INIT_LINK_F_P2P_ONLY:
		topology = OCS_HW_TOPOLOGY_NPORT;
		break;
	case SLI4_INIT_LINK_F_FCAL_ONLY:
		topology = OCS_HW_TOPOLOGY_LOOP;
		break;
	default:
		break;
	}

	return topology;
}

/**
 * @brief Persistent topology configuration callback argument.
 */
typedef struct ocs_hw_persistent_topo_cb_arg {
	ocs_sem_t semaphore;
	int32_t status;
} ocs_hw_persistent_topo_cb_arg_t;

/**
 * @brief Called after the completion of a set persistent topology request
 *
 * @par Description
 * This is the completion callback for the set_persistent_topology request;
 * it runs when the COMMON_SET_FEATURES mailbox command completes.
 *
 * @param hw Hardware context.
 * @param status The status from the MQE.
 * @param mqe Pointer to mailbox command buffer.
 * @param arg Pointer to a callback argument.
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_hw_set_persistent_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	ocs_hw_persistent_topo_cb_arg_t *req = (ocs_hw_persistent_topo_cb_arg_t *)arg;

	req->status = status;

	ocs_sem_v(&req->semaphore);

	return 0;
}

/**
 * @brief Set persistent topology
 *
 * Sets the persistent topology (PT) feature using the COMMON_SET_FEATURES
 * command. If the mailbox command succeeds, the topology is also recorded in
 * the SLI config. PT stores the value to be placed in the link_flags of the
 * INIT_LINK command used to bring up the link.
 *
 * The SLI specification defines the following for PT:
 * When TF is set to 0:
 * 0 Reserved
 * 1 Attempt point-to-point initialization (direct attach or Fabric topology).
 * 2 Attempt FC-AL loop initialization.
 * 3 Reserved
 *
 * When TF is set to 1:
 * 0 Attempt FC-AL loop initialization; if it fails, attempt point-to-point initialization.
 * 1 Attempt point-to-point initialization; if it fails, attempt FC-AL loop initialization.
 * 2 Reserved
 * 3 Reserved
 *
 * Note: Topology failover is only available on Lancer G5. This command will fail
 * if TF is set to 1 on any other ASICs.
 *
 * @param hw Pointer to the HW object
 * @param topology topology value to be set, provided through
 *        elxsdkutil set-topology cmd
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_set_persistent_topology(ocs_hw_t *hw, uint32_t topology, uint32_t opts)
{
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	uint8_t buf[SLI4_BMBX_SIZE];
	sli4_req_common_set_features_persistent_topo_param_t param;
	ocs_hw_persistent_topo_cb_arg_t request;

	ocs_memset(&param, 0, sizeof(param));
	param.persistent_topo = topology;

	switch (topology) {
	case OCS_HW_TOPOLOGY_AUTO:
		if (sli_get_asic_type(&hw->sli) == SLI4_ASIC_TYPE_LANCER) {
			param.persistent_topo = SLI4_INIT_LINK_F_P2P_FAIL_OVER;
			param.topo_failover = 1;
		} else {
			param.persistent_topo = SLI4_INIT_LINK_F_P2P_ONLY;
			param.topo_failover = 0;
		}
		break;

	case OCS_HW_TOPOLOGY_NPORT:
		param.persistent_topo = SLI4_INIT_LINK_F_P2P_ONLY;
		param.topo_failover = 0;
		break;

	case OCS_HW_TOPOLOGY_LOOP:
		param.persistent_topo = SLI4_INIT_LINK_F_FCAL_ONLY;
		param.topo_failover = 0;
		break;

	default:
		ocs_log_err(hw->os, "unsupported topology %#x\n", topology);
		return -1;
	}

	ocs_sem_init(&request.semaphore, 0, "set_persistent_topo");

	/* build the set_features command */
	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
	    SLI4_SET_FEATURES_PERSISTENT_TOPOLOGY, sizeof(param), &param);

	if (opts == OCS_CMD_POLL) {
		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
		if (rc) {
			ocs_log_err(hw->os, "Failed to set persistent topology, rc: %#x\n", rc);
			return rc;
		}
	} else {
		/* there's no response for this feature command */
		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_set_persistent_topology_cb, &request);
		if (rc) {
			ocs_log_err(hw->os, "Failed to set persistent topology, rc: %#x\n", rc);
			return rc;
		}

		if (ocs_sem_p(&request.semaphore, OCS_SEM_FOREVER)) {
			ocs_log_err(hw->os, "ocs_sem_p failed\n");
			return -ENXIO;
		}

		if (request.status) {
			ocs_log_err(hw->os, "set persistent topology failed; status: %d\n", request.status);
			return -EFAULT;
		}
	}

	sli_config_persistent_topology(&hw->sli, &param);

	return rc;
}

/**
 * @page fc_hw_api_overview HW APIs
 * - @ref devInitShutdown
 * - @ref domain
 * - @ref port
 * - @ref node
 * - @ref io
 * - @ref interrupt
 *
 * <div class="overview">
 * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
 * message details, but the higher level code must still manage domains, ports,
 * IT nexuses, and IOs.
 * The HW API is designed to help the higher level manage these objects.<br><br>
 *
 * The HW uses function callbacks to notify the higher-level code of events
 * that are received from the chip. There are currently three types of
 * functions that may be registered:
 *
 * <ul><li>domain – This function is called whenever a domain event is generated
 * within the HW. Examples include the discovery of a new FCF, the disruption
 * of a connection to a domain, and allocation callbacks.</li>
 * <li>unsolicited – This function is called whenever new data is received in
 * the SLI-4 receive queue.</li>
 * <li>rnode – This function is called for remote node events, such as attach status
 * and allocation callbacks.</li></ul>
 *
 * Upper layer functions may be registered by using the ocs_hw_callback() function.
 *
 * <img src="elx_fc_hw.jpg" alt="FC/FCoE HW" title="FC/FCoE HW" align="right"/>
 * <h2>FC/FCoE HW API</h2>
 * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
 * interface for creating the necessary common objects and sending I/Os. It may be used
 * “as is” in customer implementations or it can serve as an example of typical interactions
 * between a driver and the SLI-4 hardware. The broad categories of functionality include:
 *
 * <ul><li>Setting up and tearing down the HW.</li>
 * <li>Allocating and using the common objects (SLI Port, domain, remote node).</li>
 * <li>Sending and receiving I/Os.</li></ul>
 *
 * <h3>HW Setup</h3>
 * To set up the HW:
 *
 * <ol>
 * <li>Set up the HW object using ocs_hw_setup().<br>
 * This step performs a basic configuration of the SLI-4 component and the HW to
 * enable querying the hardware for its capabilities. At this stage, the HW is not
 * capable of general operations (such as receiving events or sending I/Os).</li><br><br>
 * <li>Configure the HW according to the driver requirements.<br>
 * The HW provides functions to discover hardware capabilities (ocs_hw_get()), as
 * well as configures the amount of resources required (ocs_hw_set()). The driver
 * must also register callback functions (ocs_hw_callback()) to receive notification of
 * various asynchronous events.<br><br>
 * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
 * step creates the underlying queues, commits resources to the hardware, and
 * prepares the hardware for operation. At this point the hardware is operational, but
 * the port is not online, and cannot send or receive data.</li><br><br>
 * <li>Finally, the driver can bring the port online (ocs_hw_port_control()).<br>
 * When the link comes up, the HW determines if a domain is present and notifies the
 * driver using the domain callback function. This is the starting point of the driver's
 * interaction with the common objects.<br><br>
 * @b Note: For FCoE, there may be more than one domain available and, therefore,
 * more than one callback.</li>
 * </ol>
 *
 * <h3>Allocating and Using Common Objects</h3>
 * Common objects provide a mechanism through which the various OneCore Storage
 * driver components share and track information. These data structures are primarily
 * used to track SLI component information but can be extended by other components, if
 * needed.
The main objects are: 12305 * 12306 * <ul><li>DMA – the ocs_dma_t object describes a memory region suitable for direct 12307 * memory access (DMA) transactions.</li> 12308 * <li>SCSI domain – the ocs_domain_t object represents the SCSI domain, including 12309 * any infrastructure devices such as FC switches and FC forwarders. The domain 12310 * object contains both an FCFI and a VFI.</li> 12311 * <li>SLI Port (sport) – the ocs_sli_port_t object represents the connection between 12312 * the driver and the SCSI domain. The SLI Port object contains a VPI.</li> 12313 * <li>Remote node – the ocs_remote_node_t represents a connection between the SLI 12314 * Port and another device in the SCSI domain. The node object contains an RPI.</li></ul> 12315 * 12316 * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote 12317 * node common objects and establish the connections between them. The goal is to 12318 * connect the driver to the SCSI domain to exchange I/Os with other devices. These 12319 * common object connections are shown in the following figure, FC Driver Common Objects: 12320 * <img src="elx_fc_common_objects.jpg" 12321 * alt="FC Driver Common Objects" title="FC Driver Common Objects" align="center"/> 12322 * 12323 * The first step is to create a connection to the domain by allocating an SLI Port object. 12324 * The SLI Port object represents a particular FC ID and must be initialized with one. With 12325 * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying 12326 * a domain, the driver allocates a domain object and attaches to it using the previous SLI 12327 * port object.<br><br> 12328 * 12329 * @b Note: In some cases, the driver may need to negotiate service parameters (that is, 12330 * FLOGI) with the domain before attaching.<br><br> 12331 * 12332 * Once attached to the domain, the driver can discover and attach to other devices 12333 * (remote nodes). The exact discovery method depends on the driver, but it typically 12334 * includes using a position map, querying the fabric name server, or an out-of-band 12335 * method. In most cases, it is necessary to log in with devices before performing I/Os. 12336 * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must 12337 * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is 12338 * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port 12339 * before exchanging FCP I/O.<br><br> 12340 * 12341 * @b Note: The HW manages both the well known fabric address and the name server as 12342 * nodes in the domain. Therefore, the driver must allocate node objects prior to 12343 * communicating with either of these entities. 12344 * 12345 * <h3>Sending and Receiving I/Os</h3> 12346 * The HW provides separate interfaces for sending BLS/ ELS/ FC-CT and FCP, but the 12347 * commands are conceptually similar. Since the commands complete asynchronously, 12348 * the caller must provide a HW I/O object that maintains the I/O state, as well as 12349 * provide a callback function. The driver may use the same callback function for all I/O 12350 * operations, but each operation must use a unique HW I/O object. In the SLI-4 12351 * architecture, there is a direct association between the HW I/O object and the SGL used 12352 * to describe the data. 
Therefore, a driver typically performs the following operations:
 *
 * <ul><li>Allocates a HW I/O object (ocs_hw_io_alloc()).</li>
 * <li>Formats the SGL, specifying both the HW I/O object and the SGL
 * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).</li>
 * <li>Sends the HW I/O (ocs_hw_io_send()).</li></ul>
 *
 * <h3>HW Tear Down</h3>
 * To tear down the HW:
 *
 * <ol><li>Take the port offline (ocs_hw_port_control()) to prevent receiving further
 * data and events.</li>
 * <li>Destroy the HW object (ocs_hw_teardown()).</li>
 * <li>Free any memory used by the HW, such as buffers for unsolicited data.</li></ol>
 * <br>
 * </div><!-- overview -->
 *
 */

/**
 * This contains all hw runtime workaround code. Based on the asic type,
 * asic revision, and range of fw revisions, a particular workaround may be enabled.
 *
 * A workaround may consist of overriding a particular HW/SLI4 value that was initialized
 * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue
 * sizes). Or, if required, elements of the ocs_hw_workaround_t structure may be set to
 * control specific runtime behavior.
 *
 * It is intended that the controls in ocs_hw_workaround_t be defined functionally, so the
 * driver code reads "if (hw->workaround.enable_xxx) then ...", rather than
 * "if this is a BE3, then do xxx".
 *
 */

#define HW_FWREV_ZERO		(0ull)
#define HW_FWREV_MAX		(~0ull)

#define SLI4_ASIC_TYPE_ANY	0
#define SLI4_ASIC_REV_ANY	0

/**
 * @brief Internal definition of workarounds
 */

typedef enum {
	HW_WORKAROUND_TEST = 1,
	HW_WORKAROUND_MAX_QUEUE,	/**< Limits all queues */
	HW_WORKAROUND_MAX_RQ,		/**< Limits only the RQ */
	HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH,
	HW_WORKAROUND_WQE_COUNT_METHOD,
	HW_WORKAROUND_RQE_COUNT_METHOD,
	HW_WORKAROUND_USE_UNREGISTERD_RPI,
	HW_WORKAROUND_DISABLE_AR_TGT_DIF,	/**< Disable auto-response target DIF */
	HW_WORKAROUND_DISABLE_SET_DUMP_LOC,
	HW_WORKAROUND_USE_DIF_QUARANTINE,
	HW_WORKAROUND_USE_DIF_SEC_XRI,		/**< Use secondary xri for multiple data phases */
	HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB,	/**< FCFI reported in SRB not correct, use "first" registered domain */
	HW_WORKAROUND_FW_VERSION_TOO_LOW,	/**< The FW version is not the min version supported by this driver */
	HW_WORKAROUND_SGLC_MISREPORTED,		/**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */
	HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE,	/**< Don't use SEND_FRAME capable if FW version is too old */
} hw_workaround_e;

/**
 * @brief Internal workaround structure instance
 */

typedef struct {
	sli4_asic_type_e asic_type;
	sli4_asic_rev_e asic_rev;
	uint64_t fwrev_low;
	uint64_t fwrev_high;

	hw_workaround_e workaround;
	uint32_t value;
} hw_workaround_t;

static hw_workaround_t hw_workarounds[] = {
	{SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_TEST, 999},

	/* Bug: 127585: if_type == 2 returns 0 for total length placed on
	 * FCP_TSEND64_WQE completions.
	 * Note, original driver code enables this
	 * workaround for all asic types
	 */
	{SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0},

	/* Bug: unknown, Lancer A0 has mis-reported max queue depth */
	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_MAX_QUEUE, 2048},

	/* Bug: 143399, BE3 has mis-reported max RQ queue depth */
	{SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0),
		HW_WORKAROUND_MAX_RQ, 2048},

	/* Bug: 143399, skyhawk has mis-reported max RQ queue depth */
	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0),
		HW_WORKAROUND_MAX_RQ, 2048},

	/* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */
	{SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
		HW_WORKAROUND_WQE_COUNT_METHOD, 1},

	/* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */
	{SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
		HW_WORKAROUND_RQE_COUNT_METHOD, 1},

	/* Bug: 142968, BE3 UE with RPI == 0xffff */
	{SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_USE_UNREGISTERD_RPI, 0},

	/* Bug: unknown, Skyhawk won't support auto-response on target T10-PI */
	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0},

	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0),
		HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0},

	/* Bug: 160124, Skyhawk quarantine DIF XRIs */
	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_USE_DIF_QUARANTINE, 0},

	/* Bug: 161832, Skyhawk use secondary XRI for multiple data phase TRECV */
	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_USE_DIF_SEC_XRI, 0},

	/* Bug: xxxxxx, FCFI reported in SRB not correct */
	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0},
#if 0
	/* Bug: 165642, FW version check for driver */
	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER),
		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
#endif
	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK),
		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},

	/* Bug 177061, Lancer FW does not set the SGLC bit */
	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_SGLC_MISREPORTED, 0},

	/* BZ 181208/183914, enable this workaround for ALL revisions */
	{SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
		HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0},
};

/**
 * @brief Function prototypes
 */

static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w);

/**
 * @brief Parse the firmware version (name)
 *
 * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined
 * by the HW_FWREV() macro
 *
 * @param fwrev_string pointer to the firmware string
 *
 * @return packed firmware revision value
 */

static uint64_t
parse_fw_version(const char *fwrev_string)
{
	int v[4] = {0};
	const char *p;
	int i;

	for (p = fwrev_string, i = 0; *p && (i < 4); i++) {
		v[i] = ocs_strtoul(p, 0, 0);
		while (*p && *p != '.') {
			p++;
		}
		if (*p) {
			p++;
		}
	}

	/* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */
	if (v[2] == 9999) {
		return HW_FWREV_MAX;
	} else {
		return HW_FWREV(v[0], v[1], v[2], v[3]);
	}
}

/**
 * @brief Test for a workaround match
 *
 * Looks at the asic type, asic revision, and fw revision, and returns TRUE on a match.
 *
 * @param hw Pointer to the HW structure
 * @param w Pointer to a workaround structure entry
 *
 * @return Return TRUE for a match
 */

static int32_t
ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w)
{
	return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) &&
		    ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) &&
		    (w->fwrev_low <= hw->workaround.fwrev) &&
		    ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high)));
}

/**
 * @brief Setup HW runtime workarounds
 *
 * This function is called at the end of ocs_hw_setup() to set up any runtime
 * workarounds based on the HW/SLI setup.
 *
 * @param hw Pointer to HW structure
 *
 * @return none
 */

void
ocs_hw_workaround_setup(struct ocs_hw_s *hw)
{
	hw_workaround_t *w;
	sli4_t *sli4 = &hw->sli;
	uint32_t i;

	/* Initialize the workaround settings */
	ocs_memset(&hw->workaround, 0, sizeof(hw->workaround));

	/* If hw_war_version is non-null, then it's a value that was set by a module parameter
	 * (sorry for the break in abstraction, but workarounds are ...
well, workarounds) 12583 */ 12584 12585 if (hw->hw_war_version) { 12586 hw->workaround.fwrev = parse_fw_version(hw->hw_war_version); 12587 } else { 12588 hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]); 12589 } 12590 12591 /* Walk the workaround list, if a match is found, then handle it */ 12592 for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) { 12593 if (ocs_hw_workaround_match(hw, w)) { 12594 switch(w->workaround) { 12595 case HW_WORKAROUND_TEST: { 12596 ocs_log_debug(hw->os, "Override: test: %d\n", w->value); 12597 break; 12598 } 12599 12600 case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: { 12601 ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n"); 12602 hw->workaround.retain_tsend_io_length = 1; 12603 break; 12604 } 12605 case HW_WORKAROUND_MAX_QUEUE: { 12606 sli4_qtype_e q; 12607 12608 ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value); 12609 for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) { 12610 if (hw->num_qentries[q] > w->value) { 12611 hw->num_qentries[q] = w->value; 12612 } 12613 } 12614 break; 12615 } 12616 case HW_WORKAROUND_MAX_RQ: { 12617 ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value); 12618 if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) { 12619 hw->num_qentries[SLI_QTYPE_RQ] = w->value; 12620 } 12621 break; 12622 } 12623 case HW_WORKAROUND_WQE_COUNT_METHOD: { 12624 ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value); 12625 sli4->config.count_method[SLI_QTYPE_WQ] = w->value; 12626 sli_calc_max_qentries(sli4); 12627 break; 12628 } 12629 case HW_WORKAROUND_RQE_COUNT_METHOD: { 12630 ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value); 12631 sli4->config.count_method[SLI_QTYPE_RQ] = w->value; 12632 sli_calc_max_qentries(sli4); 12633 break; 12634 } 12635 case HW_WORKAROUND_USE_UNREGISTERD_RPI: 12636 ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n"); 12637 hw->workaround.use_unregistered_rpi = TRUE; 12638 /* 12639 * Allocate an RPI that is never registered, to be used in the case where 12640 * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF 12641 */ 12642 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid, 12643 &hw->workaround.unregistered_index)) { 12644 ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n"); 12645 hw->workaround.use_unregistered_rpi = FALSE; 12646 } 12647 break; 12648 case HW_WORKAROUND_DISABLE_AR_TGT_DIF: 12649 ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n"); 12650 hw->workaround.disable_ar_tgt_dif = TRUE; 12651 break; 12652 case HW_WORKAROUND_DISABLE_SET_DUMP_LOC: 12653 ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n"); 12654 hw->workaround.disable_dump_loc = TRUE; 12655 break; 12656 case HW_WORKAROUND_USE_DIF_QUARANTINE: 12657 ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n"); 12658 hw->workaround.use_dif_quarantine = TRUE; 12659 break; 12660 case HW_WORKAROUND_USE_DIF_SEC_XRI: 12661 ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n"); 12662 hw->workaround.use_dif_sec_xri = TRUE; 12663 break; 12664 case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB: 12665 ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n"); 12666 hw->workaround.override_fcfi = TRUE; 12667 break; 12668 12669 case HW_WORKAROUND_FW_VERSION_TOO_LOW: 12670 ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n"); 12671 
hw->workaround.fw_version_too_low = TRUE; 12672 break; 12673 case HW_WORKAROUND_SGLC_MISREPORTED: 12674 ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n"); 12675 hw->workaround.sglc_misreported = TRUE; 12676 break; 12677 case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE: 12678 ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n"); 12679 hw->workaround.ignore_send_frame = TRUE; 12680 break; 12681 } /* switch(w->workaround) */ 12682 } 12683 } 12684 } 12685
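
/*
 * Example (illustrative): how a workaround entry is selected.
 *
 * parse_fw_version("4.2.313.0") packs the revision as HW_FWREV(4,2,313,0).
 * For a BE3 at that revision, the entry
 *
 *   {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
 *    HW_WORKAROUND_WQE_COUNT_METHOD, 1}
 *
 * matches in ocs_hw_workaround_match(): the ASIC type matches, the revision
 * wildcard SLI4_ASIC_REV_ANY matches anything, and
 * HW_FWREV_ZERO <= fwrev < HW_FWREV(4,2,314,0), so the WQE count method
 * override is applied. Firmware 4.2.314.0 or later would no longer match.
 * An fwrev_high of HW_FWREV_MAX is treated as unbounded.
 */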