/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/**
 * @defgroup sli SLI-4 Base APIs
 */

/**
 * @file
 * All common (i.e. transport-independent) SLI-4 functions are implemented
 * in this file.
 */
#include "sli4.h"

#if defined(OCS_INCLUDE_DEBUG)
#include "ocs_utils.h"
#endif

#define SLI4_BMBX_DELAY_US		1000	/* 1 ms */
#define SLI4_INIT_PORT_DELAY_US		10000	/* 10 ms */

static int32_t sli_fw_init(sli4_t *);
static int32_t sli_fw_term(sli4_t *);
static int32_t sli_sliport_control(sli4_t *sli4, uint32_t endian);
static int32_t sli_cmd_fw_deinitialize(sli4_t *, void *, size_t);
static int32_t sli_cmd_fw_initialize(sli4_t *, void *, size_t);
static int32_t sli_queue_doorbell(sli4_t *, sli4_queue_t *);
static uint8_t sli_queue_entry_is_valid(sli4_queue_t *, uint8_t *, uint8_t);

const uint8_t sli4_fw_initialize[] = {
	0xff, 0x12, 0x34, 0xff,
	0xff, 0x56, 0x78, 0xff,
};

const uint8_t sli4_fw_deinitialize[] = {
	0xff, 0xaa, 0xbb, 0xff,
	0xff, 0xcc, 0xdd, 0xff,
};

typedef struct {
	uint32_t rev_id;
	uint32_t family;	/* generation */
	sli4_asic_type_e type;
	sli4_asic_rev_e rev;
} sli4_asic_entry_t;

sli4_asic_entry_t sli4_asic_table[] = {
	{ 0x00, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A0},
	{ 0x01, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A1},
	{ 0x02, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A2},
	{ 0x00, 4, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_A0},
	{ 0x00, 2, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_A0},
	{ 0x10, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_B0},
	{ 0x10, 0x04, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_B0},
	{ 0x11, 0x04, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_B1},
	{ 0x0, 0x0a, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0},
	{ 0x10, 0x0b, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_B0},
	{ 0x30, 0x0b, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_D0},
	{ 0x3, 0x0b, SLI4_ASIC_TYPE_LANCERG6, SLI4_ASIC_REV_A3},
	{ 0x0, 0x0c, SLI4_ASIC_TYPE_LANCERG6, SLI4_ASIC_REV_A0},
	{ 0x1, 0x0c, SLI4_ASIC_TYPE_LANCERG6, SLI4_ASIC_REV_A1},
	{ 0x3, 0x0c, SLI4_ASIC_TYPE_LANCERG6, SLI4_ASIC_REV_A3},
	{ 0x1, 0x0d, SLI4_ASIC_TYPE_LANCERG7, SLI4_ASIC_REV_A1},
	{ 0x10, 0x0d, SLI4_ASIC_TYPE_LANCERG7, SLI4_ASIC_REV_B0},
	{ 0x00, 0x05, SLI4_ASIC_TYPE_CORSAIR, SLI4_ASIC_REV_A0},
};

/*
 * @brief Convert queue type enum (SLI_QTYPE_*) into a string.
 */
const char *SLI_QNAME[] = {
	"Event Queue",
	"Completion Queue",
	"Mailbox Queue",
	"Work Queue",
	"Receive Queue",
	"Undefined"
};

/**
 * @brief Define the mapping of registers to their BAR and offset.
 *
 * @par Description
 * Although the SLI-4 specification defines a common set of registers, their locations
 * (both BAR and offset) depend on the interface type. This array maps a register
 * enum to an array of BAR/offset pairs indexed by the interface type. For
 * example, to access the bootstrap mailbox register on an interface type 0
 * device, code can refer to the offset using regmap[SLI4_REG_BMBX][0].offset.
 *
 * @b Note: A value of UINT32_MAX for either the register set (rset) or offset (off)
 * indicates an invalid mapping.
 */
const sli4_reg_t regmap[SLI4_REG_MAX][SLI4_MAX_IF_TYPES] = {
	/* SLI4_REG_BMBX */
	{
		{ 2, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_BMBX_REG },
	},
	/* SLI4_REG_EQCQ_DOORBELL */
	{
		{ 2, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
		{ 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 1, SLI4_IF6_EQ_DOORBELL_REG }
	},
	/* SLI4_REG_CQ_DOORBELL */
	{
		{ 2, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
		{ 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 1, SLI4_IF6_CQ_DOORBELL_REG }
	},
	/* SLI4_REG_FCOE_RQ_DOORBELL */
	{
		{ 2, SLI4_RQ_DOORBELL_REG }, { 0, SLI4_RQ_DOORBELL_REG },
		{ 0, SLI4_RQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 1, SLI4_IF6_RQ_DOORBELL_REG }
	},
	/* SLI4_REG_IO_WQ_DOORBELL */
	{
		{ 2, SLI4_IO_WQ_DOORBELL_REG }, { 0, SLI4_IO_WQ_DOORBELL_REG },
		{ 0, SLI4_IO_WQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 1, SLI4_IF6_WQ_DOORBELL_REG }
	},
	/* SLI4_REG_MQ_DOORBELL */
	{
		{ 2, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG },
		{ 0, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 1, SLI4_IF6_MQ_DOORBELL_REG }
	},
	/* SLI4_REG_PHYSDEV_CONTROL */
	{
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_PHSDEV_CONTROL_REG_236 }, { 0, SLI4_PHSDEV_CONTROL_REG_236 },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_PHSDEV_CONTROL_REG_236 }
	},
	/* SLI4_REG_SLIPORT_CONTROL */
	{
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_SLIPORT_CONTROL_REG }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_SLIPORT_CONTROL_REG },
	},
	/* SLI4_REG_SLIPORT_ERROR1 */
	{
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_SLIPORT_ERROR1 }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_SLIPORT_ERROR1 },
	},
	/* SLI4_REG_SLIPORT_ERROR2 */
	{
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_SLIPORT_ERROR2 }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_SLIPORT_ERROR2 },
	},
	/* SLI4_REG_SLIPORT_SEMAPHORE */
	{
		{ 1, SLI4_PORT_SEMAPHORE_REG_0 }, { 0, SLI4_PORT_SEMAPHORE_REG_1 },
		{ 0, SLI4_PORT_SEMAPHORE_REG_236 }, { 0, SLI4_PORT_SEMAPHORE_REG_236 },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_PORT_SEMAPHORE_REG_236 },
	},
	/* SLI4_REG_SLIPORT_STATUS */
	{
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_PORT_STATUS_REG_236 }, { 0, SLI4_PORT_STATUS_REG_236 },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ 0, SLI4_PORT_STATUS_REG_236 },
	},
	/* SLI4_REG_UERR_MASK_HI */
	{
		{ 0, SLI4_UERR_MASK_HIGH_REG }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }
	},
	/* SLI4_REG_UERR_MASK_LO */
	{
		{ 0, SLI4_UERR_MASK_LOW_REG }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }
	},
	/* SLI4_REG_UERR_STATUS_HI */
	{
		{ 0, SLI4_UERR_STATUS_HIGH_REG }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }
	},
	/* SLI4_REG_UERR_STATUS_LO */
	{
		{ 0, SLI4_UERR_STATUS_LOW_REG }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }
	},
	/* SLI4_REG_SW_UE_CSR1 */
	{
		{ 1, SLI4_SW_UE_CSR1}, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }
	},
	/* SLI4_REG_SW_UE_CSR2 */
	{
		{ 1, SLI4_SW_UE_CSR2}, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
		{ UINT32_MAX, UINT32_MAX }
	},
};

/**
 * @brief Read the given SLI register.
 *
 * @param sli Pointer to the SLI context.
 * @param reg Register name enum.
 *
 * @return Returns the register value.
 */
uint32_t
sli_reg_read(sli4_t *sli, sli4_regname_e reg)
{
	const sli4_reg_t *r = &(regmap[reg][sli->if_type]);

	if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) {
		ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type);
		return UINT32_MAX;
	}

	return ocs_reg_read32(sli->os, r->rset, r->off);
}

/**
 * @brief Write the value to the given SLI register.
 *
 * @param sli Pointer to the SLI context.
 * @param reg Register name enum.
 * @param val Value to write.
 *
 * @return None.
 */
void
sli_reg_write(sli4_t *sli, sli4_regname_e reg, uint32_t val)
{
	const sli4_reg_t *r = &(regmap[reg][sli->if_type]);

	if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) {
		ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type);
		return;
	}

	ocs_reg_write32(sli->os, r->rset, r->off, val);
}
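/*
 * Example (illustrative sketch only, not part of the driver): the regmap
 * table lets callers name a register and have the BAR/offset resolved for
 * the current interface type. Checking the bootstrap mailbox "ready" bit
 * could look like the following; the local variable names are hypothetical.
 *
 * @code
 *	uint32_t val = sli_reg_read(sli4, SLI4_REG_BMBX);
 *	int ready = (val & SLI4_BMBX_RDY) != 0;
 * @endcode
 */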
/**
 * @brief Check if the SLI_INTF register is valid.
 *
 * @param val 32-bit SLI_INTF register value.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static uint8_t
sli_intf_valid_check(uint32_t val)
{
	return ((val >> SLI4_INTF_VALID_SHIFT) & SLI4_INTF_VALID_MASK) != SLI4_INTF_VALID;
}

/**
 * @brief Retrieve the SLI revision level.
 *
 * @param val 32-bit SLI_INTF register value.
 *
 * @return Returns the SLI revision level.
 */
static uint8_t
sli_intf_sli_revision(uint32_t val)
{
	return ((val >> SLI4_INTF_SLI_REVISION_SHIFT) & SLI4_INTF_SLI_REVISION_MASK);
}

static uint8_t
sli_intf_sli_family(uint32_t val)
{
	return ((val >> SLI4_INTF_SLI_FAMILY_SHIFT) & SLI4_INTF_SLI_FAMILY_MASK);
}

/**
 * @brief Retrieve the SLI interface type.
 *
 * @param val 32-bit SLI_INTF register value.
 *
 * @return Returns the SLI interface type.
 */
static uint8_t
sli_intf_if_type(uint32_t val)
{
	return ((val >> SLI4_INTF_IF_TYPE_SHIFT) & SLI4_INTF_IF_TYPE_MASK);
}

/**
 * @brief Retrieve PCI revision ID.
 *
 * @param val 32-bit PCI CLASS_REVISION register value.
 *
 * @return Returns the PCI revision ID.
 */
static uint8_t
sli_pci_rev_id(uint32_t val)
{
	return ((val >> SLI4_PCI_REV_ID_SHIFT) & SLI4_PCI_REV_ID_MASK);
}

/**
 * @brief retrieve SLI ASIC generation
 *
 * @param val 32-bit SLI_ASIC_ID register value
 *
 * @return SLI ASIC generation
 */
static uint8_t
sli_asic_gen(uint32_t val)
{
	return ((val >> SLI4_ASIC_GEN_SHIFT) & SLI4_ASIC_GEN_MASK);
}

/**
 * @brief Wait for the bootstrap mailbox to report "ready".
 *
 * @param sli4 SLI context pointer.
 * @param msec Number of milliseconds to wait.
 *
 * @return Returns 0 if BMBX is ready, or non-zero otherwise (i.e. time out occurred).
 */
static int32_t
sli_bmbx_wait(sli4_t *sli4, uint32_t msec)
{
	uint32_t val = 0;

	do {
		ocs_udelay(SLI4_BMBX_DELAY_US);
		val = sli_reg_read(sli4, SLI4_REG_BMBX);
		msec--;
	} while (msec && !(val & SLI4_BMBX_RDY));

	return(!(val & SLI4_BMBX_RDY));
}

/**
 * @brief Write bootstrap mailbox.
 *
 * @param sli4 SLI context pointer.
 *
 * @return Returns 0 if command succeeded, or non-zero otherwise.
 */
static int32_t
sli_bmbx_write(sli4_t *sli4)
{
	uint32_t val = 0;

	/* write buffer location to bootstrap mailbox register */
	ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_PREWRITE);
	val = SLI4_BMBX_WRITE_HI(sli4->bmbx.phys);
	sli_reg_write(sli4, SLI4_REG_BMBX, val);

	if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
		ocs_log_crit(sli4->os, "BMBX WRITE_HI failed\n");
		return -1;
	}
	val = SLI4_BMBX_WRITE_LO(sli4->bmbx.phys);
	sli_reg_write(sli4, SLI4_REG_BMBX, val);

	/* wait for SLI Port to set ready bit */
	return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC/*XXX*/);
}
#if defined(OCS_INCLUDE_DEBUG)
/**
 * @ingroup sli
 * @brief Dump BMBX mailbox command.
 *
 * @par Description
 * Convenience function for dumping BMBX mailbox commands. Takes
 * into account which mailbox command is given since SLI_CONFIG
 * commands are special.
 *
 * @b Note: This function takes advantage of
 * the one-command-at-a-time nature of the BMBX to be able to
 * display non-embedded SLI_CONFIG commands. This will not work
 * for mailbox commands on the MQ. Luckily, all current non-emb
 * mailbox commands go through the BMBX.
 *
 * @param sli4 SLI context pointer.
 * @param mbx Pointer to mailbox command to dump.
 * @param prefix Prefix for dump label.
 *
 * @return None.
 */
static void
sli_dump_bmbx_command(sli4_t *sli4, void *mbx, const char *prefix)
{
	uint32_t size = 0;
	char label[64];
	uint32_t i;
	/* Mailbox diagnostic logging */
	sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mbx;

	if (!ocs_debug_is_enabled(OCS_DEBUG_ENABLE_MQ_DUMP)) {
		return;
	}

	if (hdr->command == SLI4_MBOX_COMMAND_SLI_CONFIG) {
		sli4_cmd_sli_config_t *sli_config = (sli4_cmd_sli_config_t *)hdr;
		sli4_req_hdr_t *sli_config_hdr;
		if (sli_config->emb) {
			ocs_snprintf(label, sizeof(label), "%s (emb)", prefix);

			/* if embedded, dump entire command */
			sli_config_hdr = (sli4_req_hdr_t *)sli_config->payload.embed;
			size = sizeof(*sli_config) - sizeof(sli_config->payload) +
				sli_config_hdr->request_length + (4*sizeof(uint32_t));
			ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
				   (uint8_t *)sli4->bmbx.virt, size);
		} else {
			sli4_sli_config_pmd_t *pmd;
			ocs_snprintf(label, sizeof(label), "%s (non-emb hdr)", prefix);

			/* if non-embedded, break up into two parts: SLI_CONFIG hdr
			   and the payload(s) */
			size = sizeof(*sli_config) - sizeof(sli_config->payload) + (12 * sli_config->pmd_count);
			ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
				   (uint8_t *)sli4->bmbx.virt, size);

			/* as sanity check, make sure first PMD matches what was saved */
			pmd = &sli_config->payload.mem;
			if ((pmd->address_high == ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys)) &&
			    (pmd->address_low == ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys))) {
				for (i = 0; i < sli_config->pmd_count; i++, pmd++) {
					sli_config_hdr = sli4->bmbx_non_emb_pmd->virt;
					ocs_snprintf(label, sizeof(label), "%s (non-emb pay[%d])",
						     prefix, i);
					ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
						   (uint8_t *)sli4->bmbx_non_emb_pmd->virt,
						   sli_config_hdr->request_length + (4*sizeof(uint32_t)));
				}
			} else {
				ocs_log_debug(sli4->os, "pmd addr does not match pmd:%x %x (%x %x)\n",
					      pmd->address_high, pmd->address_low,
					      ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys),
					      ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys));
			}
		}
	} else {
		/* not an SLI_CONFIG command, just display first 64 bytes, like we do
		   for MQEs */
		size = 64;
		ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, prefix,
			   (uint8_t *)mbx, size);
	}
}
#endif

/**
 * @ingroup sli
 * @brief Submit a command to the bootstrap mailbox and check the status.
 *
 * @param sli4 SLI context pointer.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
int32_t
sli_bmbx_command(sli4_t *sli4)
{
	void *cqe = (uint8_t *)sli4->bmbx.virt + SLI4_BMBX_SIZE;

#if defined(OCS_INCLUDE_DEBUG)
	sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmd");
#endif

	if (sli_fw_error_status(sli4) > 0) {
		ocs_log_crit(sli4->os, "Chip is in an error state - Mailbox "
			"command rejected status=%#x error1=%#x error2=%#x\n",
			sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS),
			sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1),
			sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2));
		return -1;
	}

	if (sli_bmbx_write(sli4)) {
		ocs_log_crit(sli4->os, "bootstrap mailbox write fail phys=%p reg=%#x\n",
			(void*)sli4->bmbx.phys,
			sli_reg_read(sli4, SLI4_REG_BMBX));
		return -1;
	}

	/* check completion queue entry status */
	ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_POSTREAD);
	if (((sli4_mcqe_t *)cqe)->val) {
#if defined(OCS_INCLUDE_DEBUG)
		sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmpl");
		ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "bmbx cqe", cqe, sizeof(sli4_mcqe_t));
#endif
		return sli_cqe_mq(cqe);
	} else {
		ocs_log_err(sli4->os, "invalid or wrong type\n");
		return -1;
	}
}
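/*
 * Example (illustrative sketch only): issuing one of the sli_cmd_* mailbox
 * command builders below through the bootstrap mailbox. The builder writes
 * the MQE into the BMBX buffer and sli_bmbx_command() submits it and checks
 * the inline completion. Error handling is abbreviated.
 *
 * @code
 *	if (sli_cmd_config_link(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
 *		if (sli_bmbx_command(sli4)) {
 *			ocs_log_err(sli4->os, "CONFIG_LINK failed\n");
 *		}
 *	}
 * @endcode
 */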
/****************************************************************************
 * Messages
 */

/**
 * @ingroup sli
 * @brief Write a CONFIG_LINK command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_config_link(sli4_t *sli4, void *buf, size_t size)
{
	sli4_cmd_config_link_t *config_link = buf;

	ocs_memset(buf, 0, size);

	config_link->hdr.command = SLI4_MBOX_COMMAND_CONFIG_LINK;

	/* Port interprets zero in a field as "use default value" */

	return sizeof(sli4_cmd_config_link_t);
}

/**
 * @ingroup sli
 * @brief Write a DOWN_LINK command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_down_link(sli4_t *sli4, void *buf, size_t size)
{
	sli4_mbox_command_header_t *hdr = buf;

	ocs_memset(buf, 0, size);

	hdr->command = SLI4_MBOX_COMMAND_DOWN_LINK;

	/* Port interprets zero in a field as "use default value" */

	return sizeof(sli4_mbox_command_header_t);
}

/**
 * @ingroup sli
 * @brief Write a DUMP Type 4 command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param wki The well known item ID.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_dump_type4(sli4_t *sli4, void *buf, size_t size, uint16_t wki)
{
	sli4_cmd_dump4_t *cmd = buf;

	ocs_memset(buf, 0, size);

	cmd->hdr.command = SLI4_MBOX_COMMAND_DUMP;
	cmd->type = 4;
	cmd->wki_selection = wki;
	return sizeof(sli4_cmd_dump4_t);
}
/**
 * @ingroup sli
 * @brief Write a COMMON_READ_TRANSCEIVER_DATA command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 * @param page_num The page of SFP data to retrieve (0xa0 or 0xa2).
 * @param dma DMA structure from which the data will be copied.
 *
 * @note This creates a Version 0 message.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_common_read_transceiver_data(sli4_t *sli4, void *buf, size_t size, uint32_t page_num,
				     ocs_dma_t *dma)
{
	sli4_req_common_read_transceiver_data_t *req = NULL;
	uint32_t sli_config_off = 0;
	uint32_t payload_size;

	if (dma == NULL) {
		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_common_read_transceiver_data_t),
				   sizeof(sli4_res_common_read_transceiver_data_t));
	} else {
		payload_size = dma->size;
	}

	if (sli4->port_type == SLI4_PORT_TYPE_FC) {
		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, dma);
	}

	if (dma == NULL) {
		req = (sli4_req_common_read_transceiver_data_t *)((uint8_t *)buf + sli_config_off);
	} else {
		req = (sli4_req_common_read_transceiver_data_t *)dma->virt;
		ocs_memset(req, 0, dma->size);
	}

	req->hdr.opcode = SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA;
	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
	req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);

	req->page_number = page_num;
	req->port = sli4->physical_port;

	return(sli_config_off + sizeof(sli4_req_common_read_transceiver_data_t));
}

/**
 * @ingroup sli
 * @brief Write a READ_LINK_STAT command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param req_ext_counters If TRUE, then the extended counters will be requested.
 * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
 * @param clear_all_counters If TRUE, the counters will be cleared.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_read_link_stats(sli4_t *sli4, void *buf, size_t size,
			uint8_t req_ext_counters,
			uint8_t clear_overflow_flags,
			uint8_t clear_all_counters)
{
	sli4_cmd_read_link_stats_t *cmd = buf;

	ocs_memset(buf, 0, size);

	cmd->hdr.command = SLI4_MBOX_COMMAND_READ_LNK_STAT;
	cmd->rec = req_ext_counters;
	cmd->clrc = clear_all_counters;
	cmd->clof = clear_overflow_flags;
	return sizeof(sli4_cmd_read_link_stats_t);
}

/**
 * @ingroup sli
 * @brief Write a READ_STATUS command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param clear_counters If TRUE, the counters will be cleared.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_read_status(sli4_t *sli4, void *buf, size_t size,
		    uint8_t clear_counters)
{
	sli4_cmd_read_status_t *cmd = buf;

	ocs_memset(buf, 0, size);

	cmd->hdr.command = SLI4_MBOX_COMMAND_READ_STATUS;
	cmd->cc = clear_counters;
	return sizeof(sli4_cmd_read_status_t);
}
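/*
 * Example (illustrative sketch only): requesting SFP page 0xa0 into a DMA
 * buffer and submitting the command through the bootstrap mailbox. The
 * "sfp_dma" object is hypothetical and assumed to have been allocated
 * elsewhere by the caller.
 *
 * @code
 *	if (sli_cmd_common_read_transceiver_data(sli4, sli4->bmbx.virt,
 *						 SLI4_BMBX_SIZE, 0xa0, &sfp_dma)) {
 *		(void)sli_bmbx_command(sli4);
 *	}
 * @endcode
 */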
/**
 * @brief Write a FW_DEINITIALIZE command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_fw_deinitialize(sli4_t *sli4, void *buf, size_t size)
{

	ocs_memset(buf, 0, size);
	ocs_memcpy(buf, sli4_fw_deinitialize, sizeof(sli4_fw_deinitialize));

	return sizeof(sli4_fw_deinitialize);
}

/**
 * @brief Write a FW_INITIALIZE command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_fw_initialize(sli4_t *sli4, void *buf, size_t size)
{

	ocs_memset(buf, 0, size);
	ocs_memcpy(buf, sli4_fw_initialize, sizeof(sli4_fw_initialize));

	return sizeof(sli4_fw_initialize);
}

/**
 * @ingroup sli
 * @brief Write an INIT_LINK command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param speed Link speed.
 * @param reset_alpa For native FC, this is the selective reset AL_PA.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_init_link(sli4_t *sli4, void *buf, size_t size, uint32_t speed, uint8_t reset_alpa)
{
	sli4_cmd_init_link_t *init_link = buf;

	ocs_memset(buf, 0, size);

	init_link->hdr.command = SLI4_MBOX_COMMAND_INIT_LINK;

	/* Most fields only have meaning for FC links */
	if (sli4->config.topology != SLI4_READ_CFG_TOPO_FCOE) {
		init_link->selective_reset_al_pa = reset_alpa;
		init_link->link_flags.loopback = FALSE;

		init_link->link_speed_selection_code = speed;
		switch (speed) {
		case FC_LINK_SPEED_1G:
		case FC_LINK_SPEED_2G:
		case FC_LINK_SPEED_4G:
		case FC_LINK_SPEED_8G:
		case FC_LINK_SPEED_16G:
		case FC_LINK_SPEED_32G:
			init_link->link_flags.fixed_speed = TRUE;
			break;
		case FC_LINK_SPEED_10G:
			ocs_log_test(sli4->os, "unsupported FC speed %d\n", speed);
			return 0;
		}

		switch (sli4->config.topology) {
		case SLI4_READ_CFG_TOPO_FC:
			/* Attempt P2P but failover to FC-AL */
			init_link->link_flags.enable_topology_failover = TRUE;

			if (sli_get_asic_type(sli4) == SLI4_ASIC_TYPE_LANCER)
				init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_FAIL_OVER;
			else
				init_link->link_flags.topology = SLI4_INIT_LINK_F_P2P_FAIL_OVER;

			break;
		case SLI4_READ_CFG_TOPO_FC_AL:
			init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_ONLY;
			if ((init_link->link_speed_selection_code == FC_LINK_SPEED_16G) ||
			    (init_link->link_speed_selection_code == FC_LINK_SPEED_32G)) {
				ocs_log_test(sli4->os, "unsupported FC-AL speed %d\n", speed);
				return 0;
			}
			break;
		case SLI4_READ_CFG_TOPO_FC_DA:
			init_link->link_flags.topology = FC_TOPOLOGY_P2P;
			break;
		default:
			ocs_log_test(sli4->os, "unsupported topology %#x\n", sli4->config.topology);
			return 0;
		}

		init_link->link_flags.unfair = FALSE;
		init_link->link_flags.skip_lirp_lilp = FALSE;
		init_link->link_flags.gen_loop_validity_check = FALSE;
		init_link->link_flags.skip_lisa = FALSE;
		init_link->link_flags.select_hightest_al_pa = FALSE;
	}

	return sizeof(sli4_cmd_init_link_t);
}
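/*
 * Example (illustrative sketch only): bringing the link up at a fixed 8G
 * data rate through the bootstrap mailbox. Per the code above, 16G and 32G
 * are rejected when the configured topology is FC-AL, and a topology of
 * SLI4_READ_CFG_TOPO_FC requests point-to-point with loop failover.
 *
 * @code
 *	if (sli_cmd_init_link(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *			      FC_LINK_SPEED_8G, 0)) {
 *		(void)sli_bmbx_command(sli4);
 *	}
 * @endcode
 */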
/**
 * @ingroup sli
 * @brief Write an INIT_VFI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param vfi VFI
 * @param fcfi FCFI
 * @param vpi VPI (Set to -1 if unused.)
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_init_vfi(sli4_t *sli4, void *buf, size_t size, uint16_t vfi,
		 uint16_t fcfi, uint16_t vpi)
{
	sli4_cmd_init_vfi_t *init_vfi = buf;

	ocs_memset(buf, 0, size);

	init_vfi->hdr.command = SLI4_MBOX_COMMAND_INIT_VFI;

	init_vfi->vfi = vfi;
	init_vfi->fcfi = fcfi;

	/*
	 * If the VPI is valid, initialize it at the same time as
	 * the VFI
	 */
	if (0xffff != vpi) {
		init_vfi->vp = TRUE;
		init_vfi->vpi = vpi;
	}

	return sizeof(sli4_cmd_init_vfi_t);
}

/**
 * @ingroup sli
 * @brief Write an INIT_VPI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param vpi VPI allocated.
 * @param vfi VFI associated with this VPI.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_init_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t vpi, uint16_t vfi)
{
	sli4_cmd_init_vpi_t *init_vpi = buf;

	ocs_memset(buf, 0, size);

	init_vpi->hdr.command = SLI4_MBOX_COMMAND_INIT_VPI;
	init_vpi->vpi = vpi;
	init_vpi->vfi = vfi;

	return sizeof(sli4_cmd_init_vpi_t);
}

/**
 * @ingroup sli
 * @brief Write a POST_XRI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param xri_base Starting XRI value for range of XRI given to SLI Port.
 * @param xri_count Number of XRIs provided to the SLI Port.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_post_xri(sli4_t *sli4, void *buf, size_t size, uint16_t xri_base, uint16_t xri_count)
{
	sli4_cmd_post_xri_t *post_xri = buf;

	ocs_memset(buf, 0, size);

	post_xri->hdr.command = SLI4_MBOX_COMMAND_POST_XRI;
	post_xri->xri_base = xri_base;
	post_xri->xri_count = xri_count;

	if (sli4->config.auto_xfer_rdy == 0) {
		post_xri->enx = TRUE;
		post_xri->val = TRUE;
	}

	return sizeof(sli4_cmd_post_xri_t);
}

/**
 * @ingroup sli
 * @brief Write a RELEASE_XRI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param num_xri The number of XRIs to be released.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_release_xri(sli4_t *sli4, void *buf, size_t size, uint8_t num_xri)
{
	sli4_cmd_release_xri_t *release_xri = buf;

	ocs_memset(buf, 0, size);

	release_xri->hdr.command = SLI4_MBOX_COMMAND_RELEASE_XRI;
	release_xri->xri_count = num_xri;

	return sizeof(sli4_cmd_release_xri_t);
}
/**
 * @brief Write a READ_CONFIG command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_read_config(sli4_t *sli4, void *buf, size_t size)
{
	sli4_cmd_read_config_t *read_config = buf;

	ocs_memset(buf, 0, size);

	read_config->hdr.command = SLI4_MBOX_COMMAND_READ_CONFIG;

	return sizeof(sli4_cmd_read_config_t);
}

/**
 * @brief Write a READ_NVPARMS command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_read_nvparms(sli4_t *sli4, void *buf, size_t size)
{
	sli4_cmd_read_nvparms_t *read_nvparms = buf;

	ocs_memset(buf, 0, size);

	read_nvparms->hdr.command = SLI4_MBOX_COMMAND_READ_NVPARMS;

	return sizeof(sli4_cmd_read_nvparms_t);
}

/**
 * @brief Write a WRITE_NVPARMS command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param wwpn WWPN to write - pointer to array of 8 uint8_t.
 * @param wwnn WWNN to write - pointer to array of 8 uint8_t.
 * @param hard_alpa Hard ALPA to write.
 * @param preferred_d_id Preferred D_ID to write.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_write_nvparms(sli4_t *sli4, void *buf, size_t size, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa,
		      uint32_t preferred_d_id)
{
	sli4_cmd_write_nvparms_t *write_nvparms = buf;

	ocs_memset(buf, 0, size);

	write_nvparms->hdr.command = SLI4_MBOX_COMMAND_WRITE_NVPARMS;
	ocs_memcpy(write_nvparms->wwpn, wwpn, 8);
	ocs_memcpy(write_nvparms->wwnn, wwnn, 8);
	write_nvparms->hard_alpa = hard_alpa;
	write_nvparms->preferred_d_id = preferred_d_id;

	return sizeof(sli4_cmd_write_nvparms_t);
}

/**
 * @brief Write a READ_REV command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param vpd Pointer to the DMA buffer used to receive the VPD data (optional).
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_read_rev(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *vpd)
{
	sli4_cmd_read_rev_t *read_rev = buf;

	ocs_memset(buf, 0, size);

	read_rev->hdr.command = SLI4_MBOX_COMMAND_READ_REV;

	if (vpd && vpd->size) {
		read_rev->vpd = TRUE;

		read_rev->available_length = vpd->size;

		read_rev->physical_address_low = ocs_addr32_lo(vpd->phys);
		read_rev->physical_address_high = ocs_addr32_hi(vpd->phys);
	}

	return sizeof(sli4_cmd_read_rev_t);
}
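/*
 * Example (illustrative sketch only): programming non-volatile parameters.
 * The WWPN/WWNN byte arrays below are placeholders, not real addresses; the
 * hard ALPA of 0x01 and preferred D_ID of 0 are likewise arbitrary example
 * values.
 *
 * @code
 *	uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *	uint8_t wwnn[8] = { 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	if (sli_cmd_write_nvparms(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *				  wwpn, wwnn, 0x01, 0)) {
 *		(void)sli_bmbx_command(sli4);
 *	}
 * @endcode
 */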
/**
 * @ingroup sli
 * @brief Write a READ_SPARM64 command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param dma DMA buffer for the service parameters.
 * @param vpi VPI used to determine the WWN.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_read_sparm64(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
		     uint16_t vpi)
{
	sli4_cmd_read_sparm64_t *read_sparm64 = buf;

	ocs_memset(buf, 0, size);

	if (SLI4_READ_SPARM64_VPI_SPECIAL == vpi) {
		ocs_log_test(sli4->os, "special VPI not supported!!!\n");
		return -1;
	}

	if (!dma || !dma->phys) {
		ocs_log_test(sli4->os, "bad DMA buffer\n");
		return -1;
	}

	read_sparm64->hdr.command = SLI4_MBOX_COMMAND_READ_SPARM64;

	read_sparm64->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64;
	read_sparm64->bde_64.buffer_length = dma->size;
	read_sparm64->bde_64.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
	read_sparm64->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);

	read_sparm64->vpi = vpi;

	return sizeof(sli4_cmd_read_sparm64_t);
}

/**
 * @ingroup sli
 * @brief Write a READ_TOPOLOGY command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param dma DMA buffer for loop map (optional).
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_read_topology(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
{
	sli4_cmd_read_topology_t *read_topo = buf;

	ocs_memset(buf, 0, size);

	read_topo->hdr.command = SLI4_MBOX_COMMAND_READ_TOPOLOGY;

	if (dma && dma->size) {
		if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
			ocs_log_test(sli4->os, "loop map buffer too small %jd\n",
				     dma->size);
			return 0;
		}

		ocs_memset(dma->virt, 0, dma->size);

		read_topo->bde_loop_map.bde_type = SLI4_BDE_TYPE_BDE_64;
		read_topo->bde_loop_map.buffer_length = dma->size;
		read_topo->bde_loop_map.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
		read_topo->bde_loop_map.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
	}

	return sizeof(sli4_cmd_read_topology_t);
}
/**
 * @ingroup sli
 * @brief Write a REG_FCFI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param index FCF index returned by READ_FCF_TABLE.
 * @param rq_cfg RQ_ID/R_CTL/TYPE routing information.
 * @param vlan_id VLAN ID tag.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_reg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t index, sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG], uint16_t vlan_id)
{
	sli4_cmd_reg_fcfi_t *reg_fcfi = buf;
	uint32_t i;

	ocs_memset(buf, 0, size);

	reg_fcfi->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI;

	reg_fcfi->fcf_index = index;

	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		switch(i) {
		case 0: reg_fcfi->rq_id_0 = rq_cfg[0].rq_id; break;
		case 1: reg_fcfi->rq_id_1 = rq_cfg[1].rq_id; break;
		case 2: reg_fcfi->rq_id_2 = rq_cfg[2].rq_id; break;
		case 3: reg_fcfi->rq_id_3 = rq_cfg[3].rq_id; break;
		}
		reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
		reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
		reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
		reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match;
	}

	if (vlan_id) {
		reg_fcfi->vv = TRUE;
		reg_fcfi->vlan_tag = vlan_id;
	}

	return sizeof(sli4_cmd_reg_fcfi_t);
}

/**
 * @brief Write REG_FCFI_MRQ to provided command buffer
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param mode Registration mode (for example, SLI4_CMD_REG_FCFI_SET_FCFI_MODE).
 * @param fcf_index FCF index returned by READ_FCF_TABLE.
 * @param vlan_id VLAN ID tag.
 * @param rq_selection_policy RQ selection policy.
 * @param mrq_bit_mask Bit mask selecting the filters that use MRQ pairs.
 * @param num_mrqs Number of MRQ pairs.
 * @param rq_cfg RQ_ID/R_CTL/TYPE routing information.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_reg_fcfi_mrq(sli4_t *sli4, void *buf, size_t size, uint8_t mode,
		     uint16_t fcf_index, uint16_t vlan_id, uint8_t rq_selection_policy,
		     uint8_t mrq_bit_mask, uint16_t num_mrqs,
		     sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG])
{
	sli4_cmd_reg_fcfi_mrq_t *reg_fcfi_mrq = buf;
	uint32_t i;

	ocs_memset(buf, 0, size);

	reg_fcfi_mrq->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI_MRQ;
	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		reg_fcfi_mrq->fcf_index = fcf_index;
		if (vlan_id) {
			reg_fcfi_mrq->vv = TRUE;
			reg_fcfi_mrq->vlan_tag = vlan_id;
		}
		goto done;
	}

	reg_fcfi_mrq->mode = mode;
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
		reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
		reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
		reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match;

		switch(i) {
		case 3: reg_fcfi_mrq->rq_id_3 = rq_cfg[i].rq_id; break;
		case 2: reg_fcfi_mrq->rq_id_2 = rq_cfg[i].rq_id; break;
		case 1: reg_fcfi_mrq->rq_id_1 = rq_cfg[i].rq_id; break;
		case 0: reg_fcfi_mrq->rq_id_0 = rq_cfg[i].rq_id; break;
		}
	}

	reg_fcfi_mrq->rq_selection_policy = rq_selection_policy;
	reg_fcfi_mrq->mrq_filter_bitmask = mrq_bit_mask;
	reg_fcfi_mrq->num_mrq_pairs = num_mrqs;
done:
	return sizeof(sli4_cmd_reg_fcfi_mrq_t);
}
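/*
 * Example (illustrative sketch only): building the rq_cfg[] routing table
 * passed to sli_cmd_reg_fcfi()/sli_cmd_reg_fcfi_mrq(). Entry 0 routes to an
 * already-created receive queue ("rq_id" and the FCF index of 0 are
 * hypothetical here). Marking the remaining entries with an rq_id of 0xffff
 * as "unused" is an assumption, and the r_ctl/type mask and match values are
 * left at zero because the frame-filtering policy is decided by the caller,
 * not by this file.
 *
 * @code
 *	sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
 *	uint32_t i;
 *
 *	ocs_memset(rq_cfg, 0, sizeof(rq_cfg));
 *	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
 *		rq_cfg[i].rq_id = 0xffff;
 *	}
 *	rq_cfg[0].rq_id = rq_id;
 *
 *	if (sli_cmd_reg_fcfi(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *			     0, rq_cfg, 0)) {
 *		(void)sli_bmbx_command(sli4);
 *	}
 * @endcode
 */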
/**
 * @ingroup sli
 * @brief Write a REG_RPI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param nport_id Remote F/N_Port_ID.
 * @param rpi Previously-allocated Remote Port Indicator.
 * @param vpi Previously-allocated Virtual Port Indicator.
 * @param dma DMA buffer that contains the remote port's service parameters.
 * @param update Boolean indicating an update to an existing RPI (TRUE)
 * or a new registration (FALSE).
 * @param enable_t10_pi Boolean indicating whether T10 PI should be enabled
 * for this remote port (sets the ETOW field).
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_reg_rpi(sli4_t *sli4, void *buf, size_t size, uint32_t nport_id, uint16_t rpi,
		uint16_t vpi, ocs_dma_t *dma, uint8_t update, uint8_t enable_t10_pi)
{
	sli4_cmd_reg_rpi_t *reg_rpi = buf;

	ocs_memset(buf, 0, size);

	reg_rpi->hdr.command = SLI4_MBOX_COMMAND_REG_RPI;

	reg_rpi->rpi = rpi;
	reg_rpi->remote_n_port_id = nport_id;
	reg_rpi->upd = update;
	reg_rpi->etow = enable_t10_pi;

	reg_rpi->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64;
	reg_rpi->bde_64.buffer_length = SLI4_REG_RPI_BUF_LEN;
	reg_rpi->bde_64.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
	reg_rpi->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);

	reg_rpi->vpi = vpi;

	return sizeof(sli4_cmd_reg_rpi_t);
}

/**
 * @ingroup sli
 * @brief Write a REG_VFI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param domain Pointer to the domain object.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_reg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain)
{
	sli4_cmd_reg_vfi_t *reg_vfi = buf;

	if (!sli4 || !buf || !domain) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	reg_vfi->hdr.command = SLI4_MBOX_COMMAND_REG_VFI;

	reg_vfi->vfi = domain->indicator;

	reg_vfi->fcfi = domain->fcf_indicator;

	/* TODO contents of domain->dma only valid if topo == FABRIC */
	reg_vfi->sparm.bde_type = SLI4_BDE_TYPE_BDE_64;
	reg_vfi->sparm.buffer_length = 0x70;
	reg_vfi->sparm.u.data.buffer_address_low = ocs_addr32_lo(domain->dma.phys);
	reg_vfi->sparm.u.data.buffer_address_high = ocs_addr32_hi(domain->dma.phys);

	reg_vfi->e_d_tov = sli4->config.e_d_tov;
	reg_vfi->r_a_tov = sli4->config.r_a_tov;

	reg_vfi->vp = TRUE;
	reg_vfi->vpi = domain->sport->indicator;
	ocs_memcpy(reg_vfi->wwpn, &domain->sport->sli_wwpn, sizeof(reg_vfi->wwpn));
	reg_vfi->local_n_port_id = domain->sport->fc_id;

	return sizeof(sli4_cmd_reg_vfi_t);
}
/**
 * @ingroup sli
 * @brief Write a REG_VPI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param sport Pointer to the SLI Port object.
 * @param update Boolean indicating whether to update the existing VPI (true)
 * or create a new VPI (false).
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_reg_vpi(sli4_t *sli4, void *buf, size_t size, ocs_sli_port_t *sport, uint8_t update)
{
	sli4_cmd_reg_vpi_t *reg_vpi = buf;

	if (!sli4 || !buf || !sport) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	reg_vpi->hdr.command = SLI4_MBOX_COMMAND_REG_VPI;

	reg_vpi->local_n_port_id = sport->fc_id;
	reg_vpi->upd = update != 0;
	ocs_memcpy(reg_vpi->wwpn, &sport->sli_wwpn, sizeof(reg_vpi->wwpn));
	reg_vpi->vpi = sport->indicator;
	reg_vpi->vfi = sport->domain->indicator;

	return sizeof(sli4_cmd_reg_vpi_t);
}

/**
 * @brief Write a REQUEST_FEATURES command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param mask Features to request.
 * @param query Use feature query mode (does not change FW).
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_request_features(sli4_t *sli4, void *buf, size_t size, sli4_features_t mask, uint8_t query)
{
	sli4_cmd_request_features_t *features = buf;

	ocs_memset(buf, 0, size);

	features->hdr.command = SLI4_MBOX_COMMAND_REQUEST_FEATURES;

	if (query) {
		features->qry = TRUE;
	}
	features->command.dword = mask.dword;

	return sizeof(sli4_cmd_request_features_t);
}

/**
 * @ingroup sli
 * @brief Write a SLI_CONFIG command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param length Length in bytes of attached command.
 * @param dma DMA buffer for non-embedded commands.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_sli_config(sli4_t *sli4, void *buf, size_t size, uint32_t length, ocs_dma_t *dma)
{
	sli4_cmd_sli_config_t *sli_config = NULL;

	if ((length > sizeof(sli_config->payload.embed)) && (dma == NULL)) {
		ocs_log_test(sli4->os, "length(%d) > payload(%ld)\n",
			     length, sizeof(sli_config->payload.embed));
		return -1;
	}

	sli_config = buf;

	ocs_memset(buf, 0, size);

	sli_config->hdr.command = SLI4_MBOX_COMMAND_SLI_CONFIG;
	if (NULL == dma) {
		sli_config->emb = TRUE;
		sli_config->payload_length = length;
	} else {
		sli_config->emb = FALSE;

		sli_config->pmd_count = 1;

		sli_config->payload.mem.address_low = ocs_addr32_lo(dma->phys);
		sli_config->payload.mem.address_high = ocs_addr32_hi(dma->phys);
		sli_config->payload.mem.length = dma->size;
		sli_config->payload_length = dma->size;
#if defined(OCS_INCLUDE_DEBUG)
		/* save pointer to DMA for BMBX dumping purposes */
		sli4->bmbx_non_emb_pmd = dma;
#endif
	}

	return offsetof(sli4_cmd_sli_config_t, payload.embed);
}
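/*
 * Example (illustrative sketch only): the pattern used throughout this file
 * for COMMON_* opcodes on FC ports. sli_cmd_sli_config() writes the
 * SLI_CONFIG wrapper and returns the offset at which the wrapped request
 * begins; with dma == NULL the request is embedded in the mailbox buffer,
 * otherwise only a page descriptor pointing at the DMA buffer is embedded.
 * Error handling is omitted.
 *
 * @code
 *	uint32_t payload = max(sizeof(sli4_req_common_function_reset_t),
 *			       sizeof(sli4_res_common_function_reset_t));
 *	int32_t off;
 *	sli4_req_common_function_reset_t *req;
 *
 *	off = sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *				 payload, NULL);
 *	req = (sli4_req_common_function_reset_t *)((uint8_t *)sli4->bmbx.virt + off);
 *	req->hdr.opcode = SLI4_OPC_COMMON_FUNCTION_RESET;
 *	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
 * @endcode
 */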
/**
 * @brief Initialize SLI Port control register.
 *
 * @param sli4 SLI context pointer.
 * @param endian Endian value to write.
 *
 * @return Returns 0 on success, or a negative error code value on failure.
 */

static int32_t
sli_sliport_control(sli4_t *sli4, uint32_t endian)
{
	uint32_t iter;
	int32_t rc;

	rc = -1;

	/* Initialize port, endian */
	sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL, endian | SLI4_SLIPORT_CONTROL_IP);

	for (iter = 0; iter < 3000; iter++) {
		ocs_udelay(SLI4_INIT_PORT_DELAY_US);
		if (sli_fw_ready(sli4) == 1) {
			rc = 0;
			break;
		}
	}

	if (rc != 0) {
		ocs_log_crit(sli4->os, "port failed to become ready after initialization\n");
	}

	return rc;
}

/**
 * @ingroup sli
 * @brief Write an UNREG_FCFI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param indicator Indicator value.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_unreg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator)
{
	sli4_cmd_unreg_fcfi_t *unreg_fcfi = buf;

	if (!sli4 || !buf) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	unreg_fcfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_FCFI;

	unreg_fcfi->fcfi = indicator;

	return sizeof(sli4_cmd_unreg_fcfi_t);
}

/**
 * @ingroup sli
 * @brief Write an UNREG_RPI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param indicator Indicator value.
 * @param which Type of unregister, such as node, port, domain, or FCF.
 * @param fc_id FC address.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_unreg_rpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, sli4_resource_e which,
		  uint32_t fc_id)
{
	sli4_cmd_unreg_rpi_t *unreg_rpi = buf;
	uint8_t index_indicator = 0;

	if (!sli4 || !buf) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	unreg_rpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_RPI;

	switch (which) {
	case SLI_RSRC_FCOE_RPI:
		index_indicator = SLI4_UNREG_RPI_II_RPI;
		if (fc_id != UINT32_MAX) {
			unreg_rpi->dp = TRUE;
			unreg_rpi->destination_n_port_id = fc_id & 0x00ffffff;
		}
		break;
	case SLI_RSRC_FCOE_VPI:
		index_indicator = SLI4_UNREG_RPI_II_VPI;
		break;
	case SLI_RSRC_FCOE_VFI:
		index_indicator = SLI4_UNREG_RPI_II_VFI;
		break;
	case SLI_RSRC_FCOE_FCFI:
		index_indicator = SLI4_UNREG_RPI_II_FCFI;
		break;
	default:
		ocs_log_test(sli4->os, "unknown type %#x\n", which);
		return 0;
	}

	unreg_rpi->ii = index_indicator;
	unreg_rpi->index = indicator;

	return sizeof(sli4_cmd_unreg_rpi_t);
}
/**
 * @ingroup sli
 * @brief Write an UNREG_VFI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param domain Pointer to the domain object.
 * @param which Type of unregister, such as domain, FCFI, or everything.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_unreg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain, uint32_t which)
{
	sli4_cmd_unreg_vfi_t *unreg_vfi = buf;

	if (!sli4 || !buf || !domain) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	unreg_vfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VFI;
	switch (which) {
	case SLI4_UNREG_TYPE_DOMAIN:
		unreg_vfi->index = domain->indicator;
		break;
	case SLI4_UNREG_TYPE_FCF:
		unreg_vfi->index = domain->fcf_indicator;
		break;
	case SLI4_UNREG_TYPE_ALL:
		unreg_vfi->index = UINT16_MAX;
		break;
	default:
		return 0;
	}

	if (SLI4_UNREG_TYPE_DOMAIN != which) {
		unreg_vfi->ii = SLI4_UNREG_VFI_II_FCFI;
	}

	return sizeof(sli4_cmd_unreg_vfi_t);
}

/**
 * @ingroup sli
 * @brief Write an UNREG_VPI command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param indicator Indicator value.
 * @param which Type of unregister: port, domain, FCFI, or everything.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_unreg_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, uint32_t which)
{
	sli4_cmd_unreg_vpi_t *unreg_vpi = buf;

	if (!sli4 || !buf) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	unreg_vpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VPI;
	unreg_vpi->index = indicator;
	switch (which) {
	case SLI4_UNREG_TYPE_PORT:
		unreg_vpi->ii = SLI4_UNREG_VPI_II_VPI;
		break;
	case SLI4_UNREG_TYPE_DOMAIN:
		unreg_vpi->ii = SLI4_UNREG_VPI_II_VFI;
		break;
	case SLI4_UNREG_TYPE_FCF:
		unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI;
		break;
	case SLI4_UNREG_TYPE_ALL:
		unreg_vpi->index = UINT16_MAX;	/* override indicator */
		unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI;
		break;
	default:
		return 0;
	}

	return sizeof(sli4_cmd_unreg_vpi_t);
}

/**
 * @ingroup sli
 * @brief Write a CONFIG_AUTO_XFER_RDY command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param max_burst_len If the write FCP_DL is less than this size,
 * then the SLI port will generate the auto XFER_RDY.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_config_auto_xfer_rdy(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len)
{
	sli4_cmd_config_auto_xfer_rdy_t *req = buf;

	if (!sli4 || !buf) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY;
	req->max_burst_len = max_burst_len;

	return sizeof(sli4_cmd_config_auto_xfer_rdy_t);
}
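/*
 * Example (illustrative sketch only): enabling automatic XFER_RDY generation
 * for write commands whose FCP_DL is below 64 KiB. The 65536-byte threshold
 * is an arbitrary example value, not a recommendation.
 *
 * @code
 *	if (sli_cmd_config_auto_xfer_rdy(sli4, sli4->bmbx.virt,
 *					 SLI4_BMBX_SIZE, 65536)) {
 *		(void)sli_bmbx_command(sli4);
 *	}
 * @endcode
 */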
/**
 * @ingroup sli
 * @brief Write a CONFIG_AUTO_XFER_RDY_HP command to the provided buffer.
 *
 * @param sli4 SLI context pointer.
 * @param buf Virtual pointer to the destination buffer.
 * @param size Buffer size, in bytes.
 * @param max_burst_len If the write FCP_DL is less than this size,
 * then the SLI port will generate the auto XFER_RDY.
 * @param esoc Enable start offset computation.
 * @param block_size Block size.
 *
 * @return Returns the number of bytes written.
 */
int32_t
sli_cmd_config_auto_xfer_rdy_hp(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len,
				uint32_t esoc, uint32_t block_size)
{
	sli4_cmd_config_auto_xfer_rdy_hp_t *req = buf;

	if (!sli4 || !buf) {
		return 0;
	}

	ocs_memset(buf, 0, size);

	req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY_HP;
	req->max_burst_len = max_burst_len;
	req->esoc = esoc;
	req->block_size = block_size;
	return sizeof(sli4_cmd_config_auto_xfer_rdy_hp_t);
}

/**
 * @brief Write a COMMON_FUNCTION_RESET command.
 *
 * @param sli4 SLI context.
 * @param buf Destination buffer for the command.
 * @param size Buffer size, in bytes.
 *
 * @return Returns the number of bytes written.
 */
static int32_t
sli_cmd_common_function_reset(sli4_t *sli4, void *buf, size_t size)
{
	sli4_req_common_function_reset_t *reset = NULL;
	uint32_t sli_config_off = 0;

	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
		uint32_t payload_size;

		/* Payload length must accommodate both request and response */
		payload_size = max(sizeof(sli4_req_common_function_reset_t),
				   sizeof(sli4_res_common_function_reset_t));

		sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
						    NULL);
	}
	reset = (sli4_req_common_function_reset_t *)((uint8_t *)buf + sli_config_off);

	reset->hdr.opcode = SLI4_OPC_COMMON_FUNCTION_RESET;
	reset->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;

	return(sli_config_off + sizeof(sli4_req_common_function_reset_t));
}
1769 */ 1770 static int32_t 1771 sli_cmd_common_create_cq(sli4_t *sli4, void *buf, size_t size, 1772 ocs_dma_t *qmem, uint16_t eq_id, uint16_t ignored) 1773 { 1774 sli4_req_common_create_cq_v0_t *cqv0 = NULL; 1775 sli4_req_common_create_cq_v2_t *cqv2 = NULL; 1776 uint32_t sli_config_off = 0; 1777 uint32_t p; 1778 uintptr_t addr; 1779 uint32_t if_type = sli4->if_type; 1780 uint32_t page_bytes = 0; 1781 uint32_t num_pages = 0; 1782 uint32_t cmd_size = 0; 1783 uint32_t page_size = 0; 1784 uint32_t n_cqe = 0; 1785 1786 /* First calculate number of pages and the mailbox cmd length */ 1787 switch (if_type) 1788 { 1789 case SLI4_IF_TYPE_BE3_SKH_PF: 1790 page_bytes = SLI_PAGE_SIZE; 1791 num_pages = sli_page_count(qmem->size, page_bytes); 1792 cmd_size = sizeof(sli4_req_common_create_cq_v0_t) + (8 * num_pages); 1793 break; 1794 case SLI4_IF_TYPE_LANCER_FC_ETH: 1795 case SLI4_IF_TYPE_LANCER_G7: 1796 n_cqe = qmem->size / SLI4_CQE_BYTES; 1797 switch (n_cqe) { 1798 case 256: 1799 case 512: 1800 case 1024: 1801 case 2048: 1802 page_size = 1; 1803 break; 1804 case 4096: 1805 page_size = 2; 1806 break; 1807 default: 1808 return 0; 1809 } 1810 page_bytes = page_size * SLI_PAGE_SIZE; 1811 num_pages = sli_page_count(qmem->size, page_bytes); 1812 cmd_size = sizeof(sli4_req_common_create_cq_v2_t) + (8 * num_pages); 1813 break; 1814 default: 1815 ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", if_type); 1816 return -1; 1817 } 1818 1819 /* now that we have the mailbox command size, we can set SLI_CONFIG fields */ 1820 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 1821 uint32_t payload_size; 1822 1823 /* Payload length must accommodate both request and response */ 1824 payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_t)); 1825 1826 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, 1827 NULL); 1828 } 1829 1830 switch (if_type) 1831 { 1832 case SLI4_IF_TYPE_BE3_SKH_PF: 1833 cqv0 = (sli4_req_common_create_cq_v0_t *)((uint8_t *)buf + sli_config_off); 1834 cqv0->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ; 1835 cqv0->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 1836 cqv0->hdr.version = 0; 1837 cqv0->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t); 1838 1839 /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */ 1840 cqv0->num_pages = num_pages; 1841 switch (cqv0->num_pages) { 1842 case 1: 1843 cqv0->cqecnt = SLI4_CQ_CNT_256; 1844 break; 1845 case 2: 1846 cqv0->cqecnt = SLI4_CQ_CNT_512; 1847 break; 1848 case 4: 1849 cqv0->cqecnt = SLI4_CQ_CNT_1024; 1850 break; 1851 default: 1852 ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv0->num_pages); 1853 return -1; 1854 } 1855 cqv0->evt = TRUE; 1856 cqv0->valid = TRUE; 1857 /* TODO cq->nodelay = ???; */ 1858 /* TODO cq->clswm = ???; */ 1859 cqv0->arm = FALSE; 1860 cqv0->eq_id = eq_id; 1861 1862 for (p = 0, addr = qmem->phys; 1863 p < cqv0->num_pages; 1864 p++, addr += page_bytes) { 1865 cqv0->page_physical_address[p].low = ocs_addr32_lo(addr); 1866 cqv0->page_physical_address[p].high = ocs_addr32_hi(addr); 1867 } 1868 1869 break; 1870 case SLI4_IF_TYPE_LANCER_FC_ETH: 1871 case SLI4_IF_TYPE_LANCER_G7: 1872 { 1873 cqv2 = (sli4_req_common_create_cq_v2_t *)((uint8_t *)buf + sli_config_off); 1874 cqv2->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ; 1875 cqv2->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 1876 cqv2->hdr.version = 2; 1877 cqv2->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t); 1878 1879 if (if_type == SLI4_IF_TYPE_LANCER_G7) 1880 cqv2->autovalid = TRUE; 1881 1882 cqv2->page_size = page_size; 1883 1884 /* valid values for number 
of pages: 1, 2, 4, 8 (sec 4.4.3) */ 1885 cqv2->num_pages = num_pages; 1886 if (!cqv2->num_pages || (cqv2->num_pages > SLI4_COMMON_CREATE_CQ_V2_MAX_PAGES)) { 1887 return 0; 1888 } 1889 1890 switch (cqv2->num_pages) { 1891 case 1: 1892 cqv2->cqecnt = SLI4_CQ_CNT_256; 1893 break; 1894 case 2: 1895 cqv2->cqecnt = SLI4_CQ_CNT_512; 1896 break; 1897 case 4: 1898 cqv2->cqecnt = SLI4_CQ_CNT_1024; 1899 break; 1900 case 8: 1901 cqv2->cqecnt = SLI4_CQ_CNT_LARGE; 1902 cqv2->cqe_count = n_cqe; 1903 break; 1904 default: 1905 ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv2->num_pages); 1906 return -1; 1907 } 1908 1909 cqv2->evt = TRUE; 1910 cqv2->valid = TRUE; 1911 /* TODO cq->nodelay = ???; */ 1912 /* TODO cq->clswm = ???; */ 1913 cqv2->arm = FALSE; 1914 cqv2->eq_id = eq_id; 1915 1916 for (p = 0, addr = qmem->phys; 1917 p < cqv2->num_pages; 1918 p++, addr += page_bytes) { 1919 cqv2->page_physical_address[p].low = ocs_addr32_lo(addr); 1920 cqv2->page_physical_address[p].high = ocs_addr32_hi(addr); 1921 } 1922 } 1923 break; 1924 } 1925 1926 return (sli_config_off + cmd_size); 1927 } 1928 1929 /** 1930 * @brief Write a COMMON_DESTROY_CQ command. 1931 * 1932 * @param sli4 SLI context. 1933 * @param buf Destination buffer for the command. 1934 * @param size Buffer size, in bytes. 1935 * @param cq_id CQ ID 1936 * 1937 * @note This creates a Version 0 message. 1938 * 1939 * @return Returns the number of bytes written. 1940 */ 1941 static int32_t 1942 sli_cmd_common_destroy_cq(sli4_t *sli4, void *buf, size_t size, uint16_t cq_id) 1943 { 1944 sli4_req_common_destroy_cq_t *cq = NULL; 1945 uint32_t sli_config_off = 0; 1946 1947 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 1948 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 1949 /* Payload length must accommodate both request and response */ 1950 max(sizeof(sli4_req_common_destroy_cq_t), 1951 sizeof(sli4_res_hdr_t)), 1952 NULL); 1953 } 1954 cq = (sli4_req_common_destroy_cq_t *)((uint8_t *)buf + sli_config_off); 1955 1956 cq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_CQ; 1957 cq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 1958 cq->hdr.request_length = sizeof(sli4_req_common_destroy_cq_t) - 1959 sizeof(sli4_req_hdr_t); 1960 cq->cq_id = cq_id; 1961 1962 return(sli_config_off + sizeof(sli4_req_common_destroy_cq_t)); 1963 } 1964 1965 /** 1966 * @brief Write a COMMON_MODIFY_EQ_DELAY command. 1967 * 1968 * @param sli4 SLI context. 1969 * @param buf Destination buffer for the command. 1970 * @param size Buffer size, in bytes. 1971 * @param q Queue object array. 1972 * @param num_q Queue object array count. 1973 * @param shift Phase shift for staggering interrupts. 1974 * @param delay_mult Delay multiplier for limiting interrupt frequency. 1975 * 1976 * @return Returns the number of bytes written. 
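 *
 * @par Example
 * A minimal usage sketch (illustrative only, not taken from the driver's call
 * flow). It assumes the caller owns the bootstrap mailbox, that "eqs" and
 * "num_eqs" describe previously created event queues, and that the shift and
 * delay values shown are arbitrary:
 * @code
 * if (sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                                    eqs, num_eqs, 0, 8)) {
 *         // submit through the bootstrap mailbox and check the SLI_CONFIG
 *         // wrapper status (helper defined later in this file)
 *         if (sli_bmbx_command(sli4) || sli_res_sli_config(sli4->bmbx.virt)) {
 *                 ocs_log_err(sli4->os, "MODIFY_EQ_DELAY failed\n");
 *         }
 * }
 * @endcode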
1977 */ 1978 static int32_t 1979 sli_cmd_common_modify_eq_delay(sli4_t *sli4, void *buf, size_t size, sli4_queue_t *q, int num_q, uint32_t shift, 1980 uint32_t delay_mult) 1981 { 1982 sli4_req_common_modify_eq_delay_t *modify_delay = NULL; 1983 uint32_t sli_config_off = 0; 1984 int i; 1985 1986 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 1987 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 1988 /* Payload length must accommodate both request and response */ 1989 max(sizeof(sli4_req_common_modify_eq_delay_t), sizeof(sli4_res_hdr_t)), 1990 NULL); 1991 } 1992 1993 modify_delay = (sli4_req_common_modify_eq_delay_t *)((uint8_t *)buf + sli_config_off); 1994 1995 modify_delay->hdr.opcode = SLI4_OPC_COMMON_MODIFY_EQ_DELAY; 1996 modify_delay->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 1997 modify_delay->hdr.request_length = sizeof(sli4_req_common_modify_eq_delay_t) - 1998 sizeof(sli4_req_hdr_t); 1999 2000 modify_delay->num_eq = num_q; 2001 2002 for (i = 0; i<num_q; i++) { 2003 modify_delay->eq_delay_record[i].eq_id = q[i].id; 2004 modify_delay->eq_delay_record[i].phase = shift; 2005 modify_delay->eq_delay_record[i].delay_multiplier = delay_mult; 2006 } 2007 2008 return(sli_config_off + sizeof(sli4_req_common_modify_eq_delay_t)); 2009 } 2010 2011 /** 2012 * @brief Write a COMMON_CREATE_EQ command. 2013 * 2014 * @param sli4 SLI context. 2015 * @param buf Destination buffer for the command. 2016 * @param size Buffer size, in bytes. 2017 * @param qmem DMA memory for the queue. 2018 * @param ignored1 Ignored (used for consistency among queue creation functions). 2019 * @param ignored2 Ignored (used for consistency among queue creation functions). 2020 * 2021 * @note Other queue creation routines use the last parameter to pass in 2022 * the associated Q_ID and ULP. EQ doesn't have an associated queue or ULP, 2023 * so these parameters are ignored 2024 * 2025 * @note This creates a Version 0 message 2026 * 2027 * @return Returns the number of bytes written. 
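 *
 * @par Example
 * As a sizing illustration (assuming the usual 4 KB SLI_PAGE_SIZE): a qmem
 * allocation of 4 * SLI_PAGE_SIZE gives num_pages = 4 and, with 4-byte EQEs,
 * a 4096-entry queue (SLI4_EQ_CNT_4096); a 2-page allocation would yield
 * 2048 entries (SLI4_EQ_CNT_2048).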
2028 */ 2029 static int32_t 2030 sli_cmd_common_create_eq(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem, 2031 uint16_t ignored1, uint16_t ignored2) 2032 { 2033 sli4_req_common_create_eq_t *eq = NULL; 2034 uint32_t sli_config_off = 0; 2035 uint32_t p; 2036 uintptr_t addr; 2037 2038 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2039 uint32_t payload_size; 2040 2041 /* Payload length must accommodate both request and response */ 2042 payload_size = max(sizeof(sli4_req_common_create_eq_t), 2043 sizeof(sli4_res_common_create_queue_t)); 2044 2045 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, 2046 NULL); 2047 } 2048 eq = (sli4_req_common_create_eq_t *)((uint8_t *)buf + sli_config_off); 2049 2050 eq->hdr.opcode = SLI4_OPC_COMMON_CREATE_EQ; 2051 eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2052 eq->hdr.request_length = sizeof(sli4_req_common_create_eq_t) - 2053 sizeof(sli4_req_hdr_t); 2054 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) { 2055 eq->hdr.version = 2; 2056 eq->autovalid = TRUE; 2057 } 2058 /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */ 2059 eq->num_pages = qmem->size / SLI_PAGE_SIZE; 2060 switch (eq->num_pages) { 2061 case 1: 2062 eq->eqesz = SLI4_EQE_SIZE_4; 2063 eq->count = SLI4_EQ_CNT_1024; 2064 break; 2065 case 2: 2066 eq->eqesz = SLI4_EQE_SIZE_4; 2067 eq->count = SLI4_EQ_CNT_2048; 2068 break; 2069 case 4: 2070 eq->eqesz = SLI4_EQE_SIZE_4; 2071 eq->count = SLI4_EQ_CNT_4096; 2072 break; 2073 default: 2074 ocs_log_test(sli4->os, "num_pages %d not valid\n", eq->num_pages); 2075 return -1; 2076 } 2077 eq->valid = TRUE; 2078 eq->arm = FALSE; 2079 eq->delay_multiplier = 32; 2080 2081 for (p = 0, addr = qmem->phys; 2082 p < eq->num_pages; 2083 p++, addr += SLI_PAGE_SIZE) { 2084 eq->page_address[p].low = ocs_addr32_lo(addr); 2085 eq->page_address[p].high = ocs_addr32_hi(addr); 2086 } 2087 2088 return(sli_config_off + sizeof(sli4_req_common_create_eq_t)); 2089 } 2090 2091 /** 2092 * @brief Write a COMMON_DESTROY_EQ command. 2093 * 2094 * @param sli4 SLI context. 2095 * @param buf Destination buffer for the command. 2096 * @param size Buffer size, in bytes. 2097 * @param eq_id Queue ID to destroy. 2098 * 2099 * @note Other queue creation routines use the last parameter to pass in 2100 * the associated Q_ID. EQ doesn't have an associated queue so this 2101 * parameter is ignored. 2102 * 2103 * @note This creates a Version 0 message. 2104 * 2105 * @return Returns the number of bytes written. 2106 */ 2107 static int32_t 2108 sli_cmd_common_destroy_eq(sli4_t *sli4, void *buf, size_t size, uint16_t eq_id) 2109 { 2110 sli4_req_common_destroy_eq_t *eq = NULL; 2111 uint32_t sli_config_off = 0; 2112 2113 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2114 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2115 /* Payload length must accommodate both request and response */ 2116 max(sizeof(sli4_req_common_destroy_eq_t), 2117 sizeof(sli4_res_hdr_t)), 2118 NULL); 2119 } 2120 eq = (sli4_req_common_destroy_eq_t *)((uint8_t *)buf + sli_config_off); 2121 2122 eq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_EQ; 2123 eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2124 eq->hdr.request_length = sizeof(sli4_req_common_destroy_eq_t) - 2125 sizeof(sli4_req_hdr_t); 2126 2127 eq->eq_id = eq_id; 2128 2129 return(sli_config_off + sizeof(sli4_req_common_destroy_eq_t)); 2130 } 2131 2132 /** 2133 * @brief Write a LOWLEVEL_SET_WATCHDOG command. 2134 * 2135 * @param sli4 SLI context. 2136 * @param buf Destination buffer for the command. 2137 * @param size Buffer size, in bytes. 
2138 * @param timeout watchdog timer timeout in seconds 2139 * 2140 * @return void 2141 */ 2142 void 2143 sli4_cmd_lowlevel_set_watchdog(sli4_t *sli4, void *buf, size_t size, uint16_t timeout) 2144 { 2145 2146 sli4_req_lowlevel_set_watchdog_t *req = NULL; 2147 uint32_t sli_config_off = 0; 2148 2149 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2150 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2151 /* Payload length must accommodate both request and response */ 2152 max(sizeof(sli4_req_lowlevel_set_watchdog_t), 2153 sizeof(sli4_res_lowlevel_set_watchdog_t)), 2154 NULL); 2155 } 2156 req = (sli4_req_lowlevel_set_watchdog_t *)((uint8_t *)buf + sli_config_off); 2157 2158 req->hdr.opcode = SLI4_OPC_LOWLEVEL_SET_WATCHDOG; 2159 req->hdr.subsystem = SLI4_SUBSYSTEM_LOWLEVEL; 2160 req->hdr.request_length = sizeof(sli4_req_lowlevel_set_watchdog_t) - sizeof(sli4_req_hdr_t); 2161 req->watchdog_timeout = timeout; 2162 2163 return; 2164 } 2165 2166 static int32_t 2167 sli_cmd_common_get_cntl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma) 2168 { 2169 sli4_req_hdr_t *hdr = NULL; 2170 uint32_t sli_config_off = 0; 2171 2172 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2173 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2174 sizeof(sli4_req_hdr_t), 2175 dma); 2176 } 2177 2178 if (dma == NULL) { 2179 return 0; 2180 } 2181 2182 ocs_memset(dma->virt, 0, dma->size); 2183 2184 hdr = dma->virt; 2185 2186 hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ATTRIBUTES; 2187 hdr->subsystem = SLI4_SUBSYSTEM_COMMON; 2188 hdr->request_length = dma->size; 2189 2190 return(sli_config_off + sizeof(sli4_req_hdr_t)); 2191 } 2192 2193 /** 2194 * @brief Write a COMMON_GET_CNTL_ADDL_ATTRIBUTES command. 2195 * 2196 * @param sli4 SLI context. 2197 * @param buf Destination buffer for the command. 2198 * @param size Buffer size, in bytes. 2199 * @param dma DMA structure from which the data will be copied. 2200 * 2201 * @note This creates a Version 0 message. 2202 * 2203 * @return Returns the number of bytes written. 2204 */ 2205 static int32_t 2206 sli_cmd_common_get_cntl_addl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma) 2207 { 2208 sli4_req_hdr_t *hdr = NULL; 2209 uint32_t sli_config_off = 0; 2210 2211 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2212 sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof(sli4_req_hdr_t), dma); 2213 } 2214 2215 if (dma == NULL) { 2216 return 0; 2217 } 2218 2219 ocs_memset(dma->virt, 0, dma->size); 2220 2221 hdr = dma->virt; 2222 2223 hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ADDL_ATTRIBUTES; 2224 hdr->subsystem = SLI4_SUBSYSTEM_COMMON; 2225 hdr->request_length = dma->size; 2226 2227 return(sli_config_off + sizeof(sli4_req_hdr_t)); 2228 } 2229 2230 /** 2231 * @brief Write a COMMON_CREATE_MQ_EXT command. 2232 * 2233 * @param sli4 SLI context. 2234 * @param buf Destination buffer for the command. 2235 * @param size Buffer size, in bytes. 2236 * @param qmem DMA memory for the queue. 2237 * @param cq_id Associated CQ_ID. 2238 * @param ignored This parameter carries the ULP which is only used for WQ and RQs 2239 * 2240 * @note This creates a Version 0 message. 2241 * 2242 * @return Returns the number of bytes written. 
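 *
 * @par Example
 * An illustrative sketch (not part of the original call flow) of creating an
 * MQ through the bootstrap mailbox and reading back the queue ID from the
 * embedded SLI_CONFIG response; "mq_mem", "cq_id" and "mq_id" are assumed to
 * be set up by the caller, and the response field names are as assumed from
 * sli4.h:
 * @code
 * if (sli_cmd_common_create_mq_ext(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                                  &mq_mem, cq_id, 0)) {
 *         if (!sli_bmbx_command(sli4)) {
 *                 sli4_res_common_create_queue_t *res =
 *                         (sli4_res_common_create_queue_t *)((uint8_t *)sli4->bmbx.virt +
 *                          offsetof(sli4_cmd_sli_config_t, payload.embed));
 *
 *                 if (!res->hdr.status) {
 *                         mq_id = res->q_id;   // queue ID assigned by the port
 *                 }
 *         }
 * }
 * @endcode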
2243 */ 2244 static int32_t 2245 sli_cmd_common_create_mq_ext(sli4_t *sli4, void *buf, size_t size, 2246 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ignored) 2247 { 2248 sli4_req_common_create_mq_ext_t *mq = NULL; 2249 uint32_t sli_config_off = 0; 2250 uint32_t p; 2251 uintptr_t addr; 2252 2253 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2254 uint32_t payload_size; 2255 2256 /* Payload length must accommodate both request and response */ 2257 payload_size = max(sizeof(sli4_req_common_create_mq_ext_t), 2258 sizeof(sli4_res_common_create_queue_t)); 2259 2260 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, 2261 NULL); 2262 } 2263 mq = (sli4_req_common_create_mq_ext_t *)((uint8_t *)buf + sli_config_off); 2264 2265 mq->hdr.opcode = SLI4_OPC_COMMON_CREATE_MQ_EXT; 2266 mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2267 mq->hdr.request_length = sizeof(sli4_req_common_create_mq_ext_t) - 2268 sizeof(sli4_req_hdr_t); 2269 /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */ 2270 mq->num_pages = qmem->size / SLI_PAGE_SIZE; 2271 switch (mq->num_pages) { 2272 case 1: 2273 mq->ring_size = SLI4_MQE_SIZE_16; 2274 break; 2275 case 2: 2276 mq->ring_size = SLI4_MQE_SIZE_32; 2277 break; 2278 case 4: 2279 mq->ring_size = SLI4_MQE_SIZE_64; 2280 break; 2281 case 8: 2282 mq->ring_size = SLI4_MQE_SIZE_128; 2283 break; 2284 default: 2285 ocs_log_test(sli4->os, "num_pages %d not valid\n", mq->num_pages); 2286 return -1; 2287 } 2288 2289 /* TODO break this down by sli4->config.topology */ 2290 mq->async_event_bitmap = SLI4_ASYNC_EVT_FC_FCOE; 2291 2292 if (sli4->config.mq_create_version) { 2293 mq->cq_id_v1 = cq_id; 2294 mq->hdr.version = 1; 2295 } 2296 else { 2297 mq->cq_id_v0 = cq_id; 2298 } 2299 mq->val = TRUE; 2300 2301 for (p = 0, addr = qmem->phys; 2302 p < mq->num_pages; 2303 p++, addr += SLI_PAGE_SIZE) { 2304 mq->page_physical_address[p].low = ocs_addr32_lo(addr); 2305 mq->page_physical_address[p].high = ocs_addr32_hi(addr); 2306 } 2307 2308 return(sli_config_off + sizeof(sli4_req_common_create_mq_ext_t)); 2309 } 2310 2311 /** 2312 * @brief Write a COMMON_DESTROY_MQ command. 2313 * 2314 * @param sli4 SLI context. 2315 * @param buf Destination buffer for the command. 2316 * @param size Buffer size, in bytes. 2317 * @param mq_id MQ ID 2318 * 2319 * @note This creates a Version 0 message. 2320 * 2321 * @return Returns the number of bytes written. 2322 */ 2323 static int32_t 2324 sli_cmd_common_destroy_mq(sli4_t *sli4, void *buf, size_t size, uint16_t mq_id) 2325 { 2326 sli4_req_common_destroy_mq_t *mq = NULL; 2327 uint32_t sli_config_off = 0; 2328 2329 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2330 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2331 /* Payload length must accommodate both request and response */ 2332 max(sizeof(sli4_req_common_destroy_mq_t), 2333 sizeof(sli4_res_hdr_t)), 2334 NULL); 2335 } 2336 mq = (sli4_req_common_destroy_mq_t *)((uint8_t *)buf + sli_config_off); 2337 2338 mq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_MQ; 2339 mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2340 mq->hdr.request_length = sizeof(sli4_req_common_destroy_mq_t) - 2341 sizeof(sli4_req_hdr_t); 2342 2343 mq->mq_id = mq_id; 2344 2345 return(sli_config_off + sizeof(sli4_req_common_destroy_mq_t)); 2346 } 2347 2348 /** 2349 * @ingroup sli 2350 * @brief Write a COMMON_NOP command 2351 * 2352 * @param sli4 SLI context. 2353 * @param buf Destination buffer for the command. 2354 * @param size Buffer size, in bytes. 2355 * @param context NOP context value (passed to response, except on FC/FCoE). 
2356 * 2357 * @return Returns the number of bytes written. 2358 */ 2359 int32_t 2360 sli_cmd_common_nop(sli4_t *sli4, void *buf, size_t size, uint64_t context) 2361 { 2362 sli4_req_common_nop_t *nop = NULL; 2363 uint32_t sli_config_off = 0; 2364 2365 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2366 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2367 /* Payload length must accommodate both request and response */ 2368 max(sizeof(sli4_req_common_nop_t), sizeof(sli4_res_common_nop_t)), 2369 NULL); 2370 } 2371 2372 nop = (sli4_req_common_nop_t *)((uint8_t *)buf + sli_config_off); 2373 2374 nop->hdr.opcode = SLI4_OPC_COMMON_NOP; 2375 nop->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2376 nop->hdr.request_length = 8; 2377 2378 ocs_memcpy(&nop->context, &context, sizeof(context)); 2379 2380 return(sli_config_off + sizeof(sli4_req_common_nop_t)); 2381 } 2382 2383 /** 2384 * @ingroup sli 2385 * @brief Write a COMMON_GET_RESOURCE_EXTENT_INFO command. 2386 * 2387 * @param sli4 SLI context. 2388 * @param buf Destination buffer for the command. 2389 * @param size Buffer size, in bytes. 2390 * @param rtype Resource type (for example, XRI, VFI, VPI, and RPI). 2391 * 2392 * @return Returns the number of bytes written. 2393 */ 2394 int32_t 2395 sli_cmd_common_get_resource_extent_info(sli4_t *sli4, void *buf, size_t size, uint16_t rtype) 2396 { 2397 sli4_req_common_get_resource_extent_info_t *extent = NULL; 2398 uint32_t sli_config_off = 0; 2399 2400 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2401 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2402 sizeof(sli4_req_common_get_resource_extent_info_t), 2403 NULL); 2404 } 2405 2406 extent = (sli4_req_common_get_resource_extent_info_t *)((uint8_t *)buf + sli_config_off); 2407 2408 extent->hdr.opcode = SLI4_OPC_COMMON_GET_RESOURCE_EXTENT_INFO; 2409 extent->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2410 extent->hdr.request_length = 4; 2411 2412 extent->resource_type = rtype; 2413 2414 return(sli_config_off + sizeof(sli4_req_common_get_resource_extent_info_t)); 2415 } 2416 2417 /** 2418 * @ingroup sli 2419 * @brief Write a COMMON_GET_SLI4_PARAMETERS command. 2420 * 2421 * @param sli4 SLI context. 2422 * @param buf Destination buffer for the command. 2423 * @param size Buffer size, in bytes. 2424 * 2425 * @return Returns the number of bytes written. 2426 */ 2427 int32_t 2428 sli_cmd_common_get_sli4_parameters(sli4_t *sli4, void *buf, size_t size) 2429 { 2430 sli4_req_hdr_t *hdr = NULL; 2431 uint32_t sli_config_off = 0; 2432 2433 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2434 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2435 sizeof(sli4_res_common_get_sli4_parameters_t), 2436 NULL); 2437 } 2438 2439 hdr = (sli4_req_hdr_t *)((uint8_t *)buf + sli_config_off); 2440 2441 hdr->opcode = SLI4_OPC_COMMON_GET_SLI4_PARAMETERS; 2442 hdr->subsystem = SLI4_SUBSYSTEM_COMMON; 2443 hdr->request_length = 0x50; 2444 2445 return(sli_config_off + sizeof(sli4_req_hdr_t)); 2446 } 2447 2448 /** 2449 * @brief Write a COMMON_QUERY_FW_CONFIG command to the provided buffer. 2450 * 2451 * @param sli4 SLI context pointer. 2452 * @param buf Virtual pointer to destination buffer. 2453 * @param size Buffer size in bytes. 
2454 * 2455 * @return Returns the number of bytes written 2456 */ 2457 static int32_t 2458 sli_cmd_common_query_fw_config(sli4_t *sli4, void *buf, size_t size) 2459 { 2460 sli4_req_common_query_fw_config_t *fw_config; 2461 uint32_t sli_config_off = 0; 2462 uint32_t payload_size; 2463 2464 /* Payload length must accommodate both request and response */ 2465 payload_size = max(sizeof(sli4_req_common_query_fw_config_t), 2466 sizeof(sli4_res_common_query_fw_config_t)); 2467 2468 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2469 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2470 payload_size, 2471 NULL); 2472 } 2473 2474 fw_config = (sli4_req_common_query_fw_config_t*)((uint8_t*)buf + sli_config_off); 2475 fw_config->hdr.opcode = SLI4_OPC_COMMON_QUERY_FW_CONFIG; 2476 fw_config->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2477 fw_config->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); 2478 return sli_config_off + sizeof(sli4_req_common_query_fw_config_t); 2479 } 2480 2481 /** 2482 * @brief Write a COMMON_GET_PORT_NAME command to the provided buffer. 2483 * 2484 * @param sli4 SLI context pointer. 2485 * @param buf Virtual pointer to destination buffer. 2486 * @param size Buffer size in bytes. 2487 * 2488 * @note Function supports both version 0 and 1 forms of this command via 2489 * the IF_TYPE. 2490 * 2491 * @return Returns the number of bytes written. 2492 */ 2493 static int32_t 2494 sli_cmd_common_get_port_name(sli4_t *sli4, void *buf, size_t size) 2495 { 2496 sli4_req_common_get_port_name_t *port_name; 2497 uint32_t sli_config_off = 0; 2498 uint32_t payload_size; 2499 uint8_t version = 0; 2500 uint8_t pt = 0; 2501 2502 /* Select command version according to IF_TYPE */ 2503 switch (sli4->if_type) { 2504 case SLI4_IF_TYPE_BE3_SKH_PF: 2505 case SLI4_IF_TYPE_BE3_SKH_VF: 2506 version = 0; 2507 break; 2508 case SLI4_IF_TYPE_LANCER_FC_ETH: 2509 case SLI4_IF_TYPE_LANCER_RDMA: 2510 case SLI4_IF_TYPE_LANCER_G7: 2511 version = 1; 2512 break; 2513 default: 2514 ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", sli4->if_type); 2515 return 0; 2516 } 2517 2518 /* Payload length must accommodate both request and response */ 2519 payload_size = max(sizeof(sli4_req_common_get_port_name_t), 2520 sizeof(sli4_res_common_get_port_name_t)); 2521 2522 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2523 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2524 payload_size, 2525 NULL); 2526 2527 pt = 1; 2528 } 2529 2530 port_name = (sli4_req_common_get_port_name_t *)((uint8_t *)buf + sli_config_off); 2531 2532 port_name->hdr.opcode = SLI4_OPC_COMMON_GET_PORT_NAME; 2533 port_name->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2534 port_name->hdr.request_length = sizeof(sli4_req_hdr_t) + (version * sizeof(uint32_t)); 2535 port_name->hdr.version = version; 2536 2537 /* Set the port type value (ethernet=0, FC=1) for V1 commands */ 2538 if (version == 1) { 2539 port_name->pt = pt; 2540 } 2541 2542 return sli_config_off + port_name->hdr.request_length; 2543 } 2544 2545 /** 2546 * @ingroup sli 2547 * @brief Write a COMMON_WRITE_OBJECT command. 2548 * 2549 * @param sli4 SLI context. 2550 * @param buf Destination buffer for the command. 2551 * @param size Buffer size, in bytes. 2552 * @param noc True if the object should be written but not committed to flash. 2553 * @param eof True if this is the last write for this object. 2554 * @param desired_write_length Number of bytes of data to write to the object. 2555 * @param offset Offset, in bytes, from the start of the object. 
2556  * @param object_name Name of the object to write.
2557  * @param dma DMA structure from which the data will be copied.
2558  *
2559  * @return Returns the number of bytes written.
2560  */
2561 int32_t
2562 sli_cmd_common_write_object(sli4_t *sli4, void *buf, size_t size,
2563 		uint16_t noc, uint16_t eof, uint32_t desired_write_length,
2564 		uint32_t offset,
2565 		char *object_name,
2566 		ocs_dma_t *dma)
2567 {
2568 	sli4_req_common_write_object_t *wr_obj = NULL;
2569 	uint32_t sli_config_off = 0;
2570 	sli4_bde_t *host_buffer;
2571 
2572 	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2573 		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2574 				sizeof (sli4_req_common_write_object_t) + sizeof (sli4_bde_t),
2575 				NULL);
2576 	}
2577 
2578 	wr_obj = (sli4_req_common_write_object_t *)((uint8_t *)buf + sli_config_off);
2579 
2580 	wr_obj->hdr.opcode = SLI4_OPC_COMMON_WRITE_OBJECT;
2581 	wr_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2582 	wr_obj->hdr.request_length = sizeof(*wr_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t);
2583 	wr_obj->hdr.timeout = 0;
2584 	wr_obj->hdr.version = 0;
2585 
2586 	wr_obj->noc = noc;
2587 	wr_obj->eof = eof;
2588 	wr_obj->desired_write_length = desired_write_length;
2589 	wr_obj->write_offset = offset;
2590 	ocs_strncpy(wr_obj->object_name, object_name, sizeof(wr_obj->object_name));
2591 	wr_obj->host_buffer_descriptor_count = 1;
2592 
2593 	host_buffer = (sli4_bde_t *)wr_obj->host_buffer_descriptor;
2594 
2595 	/* Set up the BDE to transfer desired_write_length bytes to the device */
2596 	host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64;
2597 	host_buffer->buffer_length = desired_write_length;
2598 	host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
2599 	host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
2600 
2601 	return(sli_config_off + sizeof(sli4_req_common_write_object_t) + sizeof (sli4_bde_t));
2602 }
2603 
2604 /**
2605  * @ingroup sli
2606  * @brief Write a COMMON_DELETE_OBJECT command.
2607  *
2608  * @param sli4 SLI context.
2609  * @param buf Destination buffer for the command.
2610  * @param size Buffer size, in bytes.
2611  * @param object_name Name of the object to delete.
2612  *
2613  * @return Returns the number of bytes written.
2614  */
2615 int32_t
2616 sli_cmd_common_delete_object(sli4_t *sli4, void *buf, size_t size,
2617 		char *object_name)
2618 {
2619 	sli4_req_common_delete_object_t *del_obj = NULL;
2620 	uint32_t sli_config_off = 0;
2621 
2622 	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2623 		sli_config_off = sli_cmd_sli_config(sli4, buf, size,
2624 				sizeof (sli4_req_common_delete_object_t),
2625 				NULL);
2626 	}
2627 
2628 	del_obj = (sli4_req_common_delete_object_t *)((uint8_t *)buf + sli_config_off);
2629 
2630 	del_obj->hdr.opcode = SLI4_OPC_COMMON_DELETE_OBJECT;
2631 	del_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2632 	del_obj->hdr.request_length = sizeof(*del_obj);
2633 	del_obj->hdr.timeout = 0;
2634 	del_obj->hdr.version = 0;
2635 
2636 	ocs_strncpy(del_obj->object_name, object_name, sizeof(del_obj->object_name));
2637 	return(sli_config_off + sizeof(sli4_req_common_delete_object_t));
2638 }
2639 
2640 /**
2641  * @ingroup sli
2642  * @brief Write a COMMON_READ_OBJECT command.
2643  *
2644  * @param sli4 SLI context.
2645  * @param buf Destination buffer for the command.
2646  * @param size Buffer size, in bytes.
2647  * @param desired_read_length Number of bytes of data to read from the object.
2648  * @param offset Offset, in bytes, from the start of the object.
2649  * @param object_name Name of the object to read.
2650  * @param dma DMA structure into which the object data will be read.
2651 * 2652 * @return Returns the number of bytes written. 2653 */ 2654 int32_t 2655 sli_cmd_common_read_object(sli4_t *sli4, void *buf, size_t size, 2656 uint32_t desired_read_length, 2657 uint32_t offset, 2658 char *object_name, 2659 ocs_dma_t *dma) 2660 { 2661 sli4_req_common_read_object_t *rd_obj = NULL; 2662 uint32_t sli_config_off = 0; 2663 sli4_bde_t *host_buffer; 2664 2665 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2666 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2667 sizeof (sli4_req_common_read_object_t) + sizeof (sli4_bde_t), 2668 NULL); 2669 } 2670 2671 rd_obj = (sli4_req_common_read_object_t *)((uint8_t *)buf + sli_config_off); 2672 2673 rd_obj->hdr.opcode = SLI4_OPC_COMMON_READ_OBJECT; 2674 rd_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2675 rd_obj->hdr.request_length = sizeof(*rd_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t); 2676 rd_obj->hdr.timeout = 0; 2677 rd_obj->hdr.version = 0; 2678 2679 rd_obj->desired_read_length = desired_read_length; 2680 rd_obj->read_offset = offset; 2681 ocs_strncpy(rd_obj->object_name, object_name, sizeof(rd_obj->object_name)); 2682 rd_obj->host_buffer_descriptor_count = 1; 2683 2684 host_buffer = (sli4_bde_t *)rd_obj->host_buffer_descriptor; 2685 2686 /* Setup to transfer xfer_size bytes to device */ 2687 host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64; 2688 host_buffer->buffer_length = desired_read_length; 2689 if (dma != NULL) { 2690 host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys); 2691 host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys); 2692 } else { 2693 host_buffer->u.data.buffer_address_low = 0; 2694 host_buffer->u.data.buffer_address_high = 0; 2695 } 2696 2697 return(sli_config_off + sizeof(sli4_req_common_read_object_t) + sizeof (sli4_bde_t)); 2698 } 2699 2700 /** 2701 * @ingroup sli 2702 * @brief Write a DMTF_EXEC_CLP_CMD command. 2703 * 2704 * @param sli4 SLI context. 2705 * @param buf Destination buffer for the command. 2706 * @param size Buffer size, in bytes. 2707 * @param cmd DMA structure that describes the buffer for the command. 2708 * @param resp DMA structure that describes the buffer for the response. 2709 * 2710 * @return Returns the number of bytes written. 2711 */ 2712 int32_t 2713 sli_cmd_dmtf_exec_clp_cmd(sli4_t *sli4, void *buf, size_t size, 2714 ocs_dma_t *cmd, 2715 ocs_dma_t *resp) 2716 { 2717 sli4_req_dmtf_exec_clp_cmd_t *clp_cmd = NULL; 2718 uint32_t sli_config_off = 0; 2719 2720 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2721 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2722 sizeof (sli4_req_dmtf_exec_clp_cmd_t), 2723 NULL); 2724 } 2725 2726 clp_cmd = (sli4_req_dmtf_exec_clp_cmd_t*)((uint8_t *)buf + sli_config_off); 2727 2728 clp_cmd->hdr.opcode = SLI4_OPC_DMTF_EXEC_CLP_CMD; 2729 clp_cmd->hdr.subsystem = SLI4_SUBSYSTEM_DMTF; 2730 clp_cmd->hdr.request_length = sizeof(sli4_req_dmtf_exec_clp_cmd_t) - 2731 sizeof(sli4_req_hdr_t); 2732 clp_cmd->hdr.timeout = 0; 2733 clp_cmd->hdr.version = 0; 2734 clp_cmd->cmd_buf_length = cmd->size; 2735 clp_cmd->cmd_buf_addr_low = ocs_addr32_lo(cmd->phys); 2736 clp_cmd->cmd_buf_addr_high = ocs_addr32_hi(cmd->phys); 2737 clp_cmd->resp_buf_length = resp->size; 2738 clp_cmd->resp_buf_addr_low = ocs_addr32_lo(resp->phys); 2739 clp_cmd->resp_buf_addr_high = ocs_addr32_hi(resp->phys); 2740 2741 return(sli_config_off + sizeof(sli4_req_dmtf_exec_clp_cmd_t)); 2742 } 2743 2744 /** 2745 * @ingroup sli 2746 * @brief Write a COMMON_SET_DUMP_LOCATION command. 2747 * 2748 * @param sli4 SLI context. 
2749 * @param buf Destination buffer for the command. 2750 * @param size Buffer size, in bytes. 2751 * @param query Zero to set dump location, non-zero to query dump size 2752 * @param is_buffer_list Set to one if the buffer is a set of buffer descriptors or 2753 * set to 0 if the buffer is a contiguous dump area. 2754 * @param buffer DMA structure to which the dump will be copied. 2755 * 2756 * @return Returns the number of bytes written. 2757 */ 2758 int32_t 2759 sli_cmd_common_set_dump_location(sli4_t *sli4, void *buf, size_t size, 2760 uint8_t query, uint8_t is_buffer_list, 2761 ocs_dma_t *buffer, uint8_t fdb) 2762 { 2763 sli4_req_common_set_dump_location_t *set_dump_loc = NULL; 2764 uint32_t sli_config_off = 0; 2765 2766 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2767 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2768 sizeof (sli4_req_common_set_dump_location_t), 2769 NULL); 2770 } 2771 2772 set_dump_loc = (sli4_req_common_set_dump_location_t *)((uint8_t *)buf + sli_config_off); 2773 2774 set_dump_loc->hdr.opcode = SLI4_OPC_COMMON_SET_DUMP_LOCATION; 2775 set_dump_loc->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2776 set_dump_loc->hdr.request_length = sizeof(sli4_req_common_set_dump_location_t) - sizeof(sli4_req_hdr_t); 2777 set_dump_loc->hdr.timeout = 0; 2778 set_dump_loc->hdr.version = 0; 2779 2780 set_dump_loc->blp = is_buffer_list; 2781 set_dump_loc->qry = query; 2782 set_dump_loc->fdb = fdb; 2783 2784 if (buffer) { 2785 set_dump_loc->buf_addr_low = ocs_addr32_lo(buffer->phys); 2786 set_dump_loc->buf_addr_high = ocs_addr32_hi(buffer->phys); 2787 set_dump_loc->buffer_length = buffer->len; 2788 } else { 2789 set_dump_loc->buf_addr_low = 0; 2790 set_dump_loc->buf_addr_high = 0; 2791 set_dump_loc->buffer_length = 0; 2792 } 2793 2794 return(sli_config_off + sizeof(sli4_req_common_set_dump_location_t)); 2795 } 2796 2797 /** 2798 * @ingroup sli 2799 * @brief Write a COMMON_SET_FEATURES command. 2800 * 2801 * @param sli4 SLI context. 2802 * @param buf Destination buffer for the command. 2803 * @param size Buffer size, in bytes. 2804 * @param feature Feature to set. 2805 * @param param_len Length of the parameter (must be a multiple of 4 bytes). 2806 * @param parameter Pointer to the parameter value. 2807 * 2808 * @return Returns the number of bytes written. 2809 */ 2810 int32_t 2811 sli_cmd_common_set_features(sli4_t *sli4, void *buf, size_t size, 2812 uint32_t feature, 2813 uint32_t param_len, 2814 void* parameter) 2815 { 2816 sli4_req_common_set_features_t *cmd = NULL; 2817 uint32_t sli_config_off = 0; 2818 2819 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2820 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2821 sizeof (sli4_req_common_set_features_t), 2822 NULL); 2823 } 2824 2825 cmd = (sli4_req_common_set_features_t *)((uint8_t *)buf + sli_config_off); 2826 2827 cmd->hdr.opcode = SLI4_OPC_COMMON_SET_FEATURES; 2828 cmd->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2829 cmd->hdr.request_length = sizeof(sli4_req_common_set_features_t) - sizeof(sli4_req_hdr_t); 2830 cmd->hdr.timeout = 0; 2831 cmd->hdr.version = 0; 2832 2833 cmd->feature = feature; 2834 cmd->param_len = param_len; 2835 ocs_memcpy(cmd->params, parameter, param_len); 2836 2837 return(sli_config_off + sizeof(sli4_req_common_set_features_t)); 2838 } 2839 2840 /** 2841 * @ingroup sli 2842 * @brief Write a COMMON_COMMON_GET_PROFILE_CONFIG command. 2843 * 2844 * @param sli4 SLI context. 2845 * @param buf Destination buffer for the command. 2846 * @param size Buffer size in bytes. 
2847 * @param dma DMA capable memory used to retrieve profile. 2848 * 2849 * @return Returns the number of bytes written. 2850 */ 2851 int32_t 2852 sli_cmd_common_get_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma) 2853 { 2854 sli4_req_common_get_profile_config_t *req = NULL; 2855 uint32_t sli_config_off = 0; 2856 uint32_t payload_size; 2857 2858 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2859 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 2860 sizeof (sli4_req_common_get_profile_config_t), 2861 dma); 2862 } 2863 2864 if (dma != NULL) { 2865 req = dma->virt; 2866 ocs_memset(req, 0, dma->size); 2867 payload_size = dma->size; 2868 } else { 2869 req = (sli4_req_common_get_profile_config_t *)((uint8_t *)buf + sli_config_off); 2870 payload_size = sizeof(sli4_req_common_get_profile_config_t); 2871 } 2872 2873 req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_CONFIG; 2874 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2875 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); 2876 req->hdr.version = 1; 2877 2878 return(sli_config_off + sizeof(sli4_req_common_get_profile_config_t)); 2879 } 2880 2881 /** 2882 * @ingroup sli 2883 * @brief Write a COMMON_COMMON_SET_PROFILE_CONFIG command. 2884 * 2885 * @param sli4 SLI context. 2886 * @param buf Destination buffer for the command. 2887 * @param size Buffer size, in bytes. 2888 * @param dma DMA capable memory containing profile. 2889 * @param profile_id Profile ID to configure. 2890 * @param descriptor_count Number of descriptors in DMA buffer. 2891 * @param isap Implicit Set Active Profile value to use. 2892 * 2893 * @return Returns the number of bytes written. 2894 */ 2895 int32_t 2896 sli_cmd_common_set_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, 2897 uint8_t profile_id, uint32_t descriptor_count, uint8_t isap) 2898 { 2899 sli4_req_common_set_profile_config_t *req = NULL; 2900 uint32_t cmd_off = 0; 2901 uint32_t payload_size; 2902 2903 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 2904 cmd_off = sli_cmd_sli_config(sli4, buf, size, 2905 sizeof (sli4_req_common_set_profile_config_t), 2906 dma); 2907 } 2908 2909 if (dma != NULL) { 2910 req = dma->virt; 2911 ocs_memset(req, 0, dma->size); 2912 payload_size = dma->size; 2913 } else { 2914 req = (sli4_req_common_set_profile_config_t *)((uint8_t *)buf + cmd_off); 2915 payload_size = sizeof(sli4_req_common_set_profile_config_t); 2916 } 2917 2918 req->hdr.opcode = SLI4_OPC_COMMON_SET_PROFILE_CONFIG; 2919 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 2920 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); 2921 req->hdr.version = 1; 2922 req->profile_id = profile_id; 2923 req->desc_count = descriptor_count; 2924 req->isap = isap; 2925 2926 return(cmd_off + sizeof(sli4_req_common_set_profile_config_t)); 2927 } 2928 2929 /** 2930 * @ingroup sli 2931 * @brief Write a COMMON_COMMON_GET_PROFILE_LIST command. 2932 * 2933 * @param sli4 SLI context. 2934 * @param buf Destination buffer for the command. 2935 * @param size Buffer size in bytes. 2936 * @param start_profile_index First profile index to return. 2937 * @param dma Buffer into which the list will be written. 2938 * 2939 * @return Returns the number of bytes written. 
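 *
 * @par Example
 * An illustrative sketch of fetching the profile list into a caller-supplied
 * DMA buffer; "prof_dma" is assumed to be an ocs_dma_t allocated elsewhere:
 * @code
 * if (sli_cmd_common_get_profile_list(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                                     0, &prof_dma)) {
 *         if (!sli_bmbx_command(sli4)) {
 *                 // non-embedded payload: the response lands in the DMA buffer
 *                 sli4_res_hdr_t *res_hdr = prof_dma.virt;
 *
 *                 if (res_hdr->status) {
 *                         ocs_log_err(sli4->os, "GET_PROFILE_LIST bad status %#x\n",
 *                                     res_hdr->status);
 *                 }
 *         }
 * }
 * @endcode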
2940  */
2941 int32_t
2942 sli_cmd_common_get_profile_list(sli4_t *sli4, void *buf, size_t size,
2943 		uint32_t start_profile_index, ocs_dma_t *dma)
2944 {
2945 	sli4_req_common_get_profile_list_t *req = NULL;
2946 	uint32_t cmd_off = 0;
2947 	uint32_t payload_size;
2948 
2949 	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2950 		cmd_off = sli_cmd_sli_config(sli4, buf, size,
2951 				sizeof (sli4_req_common_get_profile_list_t),
2952 				dma);
2953 	}
2954 
2955 	if (dma != NULL) {
2956 		req = dma->virt;
2957 		ocs_memset(req, 0, dma->size);
2958 		payload_size = dma->size;
2959 	} else {
2960 		req = (sli4_req_common_get_profile_list_t *)((uint8_t *)buf + cmd_off);
2961 		payload_size = sizeof(sli4_req_common_get_profile_list_t);
2962 	}
2963 
2964 	req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_LIST;
2965 	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
2966 	req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
2967 	req->hdr.version = 0;
2968 
2969 	req->start_profile_index = start_profile_index;
2970 
2971 	return(cmd_off + sizeof(sli4_req_common_get_profile_list_t));
2972 }
2973 
2974 /**
2975  * @ingroup sli
2976  * @brief Write a COMMON_GET_ACTIVE_PROFILE command.
2977  *
2978  * @param sli4 SLI context.
2979  * @param buf Destination buffer for the command.
2980  * @param size Buffer size in bytes.
2981  *
2982  * @return Returns the number of bytes written.
2983  */
2984 int32_t
2985 sli_cmd_common_get_active_profile(sli4_t *sli4, void *buf, size_t size)
2986 {
2987 	sli4_req_common_get_active_profile_t *req = NULL;
2988 	uint32_t cmd_off = 0;
2989 	uint32_t payload_size;
2990 
2991 	/* Payload length must accommodate both request and response */
2992 	payload_size = max(sizeof(sli4_req_common_get_active_profile_t),
2993 			sizeof(sli4_res_common_get_active_profile_t));
2994 
2995 	if (SLI4_PORT_TYPE_FC == sli4->port_type) {
2996 		cmd_off = sli_cmd_sli_config(sli4, buf, size,
2997 				payload_size,
2998 				NULL);
2999 	}
3000 
3001 	req = (sli4_req_common_get_active_profile_t *)
3002 	      ((uint8_t*)buf + cmd_off);
3003 
3004 	req->hdr.opcode = SLI4_OPC_COMMON_GET_ACTIVE_PROFILE;
3005 	req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
3006 	req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
3007 	req->hdr.version = 0;
3008 
3009 	return(cmd_off + sizeof(sli4_req_common_get_active_profile_t));
3010 }
3011 
3012 /**
3013  * @ingroup sli
3014  * @brief Write a COMMON_SET_ACTIVE_PROFILE command.
3015  *
3016  * @param sli4 SLI context.
3017  * @param buf Destination buffer for the command.
3018  * @param size Buffer size in bytes.
3019  * @param fd If non-zero, set profile to factory default.
3020  * @param active_profile_id ID of the new active profile.
3021  *
3022  * @return Returns the number of bytes written.
3023 */ 3024 int32_t 3025 sli_cmd_common_set_active_profile(sli4_t *sli4, void *buf, size_t size, 3026 uint32_t fd, uint32_t active_profile_id) 3027 { 3028 sli4_req_common_set_active_profile_t *req = NULL; 3029 uint32_t cmd_off = 0; 3030 uint32_t payload_size; 3031 3032 /* Payload length must accommodate both request and response */ 3033 payload_size = max(sizeof(sli4_req_common_set_active_profile_t), 3034 sizeof(sli4_res_common_set_active_profile_t)); 3035 3036 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 3037 cmd_off = sli_cmd_sli_config(sli4, buf, size, 3038 payload_size, 3039 NULL); 3040 } 3041 3042 req = (sli4_req_common_set_active_profile_t *) 3043 ((uint8_t*)buf + cmd_off); 3044 3045 req->hdr.opcode = SLI4_OPC_COMMON_SET_ACTIVE_PROFILE; 3046 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 3047 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); 3048 req->hdr.version = 0; 3049 req->fd = fd; 3050 req->active_profile_id = active_profile_id; 3051 3052 return(cmd_off + sizeof(sli4_req_common_set_active_profile_t)); 3053 } 3054 3055 /** 3056 * @ingroup sli 3057 * @brief Write a COMMON_GET_RECONFIG_LINK_INFO command. 3058 * 3059 * @param sli4 SLI context. 3060 * @param buf Destination buffer for the command. 3061 * @param size Buffer size in bytes. 3062 * @param dma Buffer to store the supported link configuration modes from the physical device. 3063 * 3064 * @return Returns the number of bytes written. 3065 */ 3066 int32_t 3067 sli_cmd_common_get_reconfig_link_info(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma) 3068 { 3069 sli4_req_common_get_reconfig_link_info_t *req = NULL; 3070 uint32_t cmd_off = 0; 3071 uint32_t payload_size; 3072 3073 /* Payload length must accommodate both request and response */ 3074 payload_size = max(sizeof(sli4_req_common_get_reconfig_link_info_t), 3075 sizeof(sli4_res_common_get_reconfig_link_info_t)); 3076 3077 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 3078 cmd_off = sli_cmd_sli_config(sli4, buf, size, 3079 payload_size, 3080 dma); 3081 } 3082 3083 if (dma != NULL) { 3084 req = dma->virt; 3085 ocs_memset(req, 0, dma->size); 3086 payload_size = dma->size; 3087 } else { 3088 req = (sli4_req_common_get_reconfig_link_info_t *)((uint8_t *)buf + cmd_off); 3089 payload_size = sizeof(sli4_req_common_get_reconfig_link_info_t); 3090 } 3091 3092 req->hdr.opcode = SLI4_OPC_COMMON_GET_RECONFIG_LINK_INFO; 3093 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 3094 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); 3095 req->hdr.version = 0; 3096 3097 return(cmd_off + sizeof(sli4_req_common_get_reconfig_link_info_t)); 3098 } 3099 3100 /** 3101 * @ingroup sli 3102 * @brief Write a COMMON_SET_RECONFIG_LINK_ID command. 3103 * 3104 * @param sli4 SLI context. 3105 * @param buf destination buffer for the command. 3106 * @param size buffer size in bytes. 3107 * @param fd If non-zero, set link config to factory default. 3108 * @param active_link_config_id ID of new active profile. 3109 * @param dma Buffer to assign the link configuration mode that is to become active from the physical device. 3110 * 3111 * @return Returns the number of bytes written. 
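 *
 * @par Example
 * A minimal sketch (parameter values are illustrative only) using the embedded
 * form of the command, with no external DMA buffer, to request that link
 * configuration 1 become the next active configuration:
 * @code
 * if (sli_cmd_common_set_reconfig_link_id(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
 *                                         NULL, 0, 1)) {
 *         if (sli_bmbx_command(sli4) || sli_res_sli_config(sli4->bmbx.virt)) {
 *                 ocs_log_err(sli4->os, "SET_RECONFIG_LINK_ID failed\n");
 *         }
 * }
 * @endcode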
3112 */ 3113 int32_t 3114 sli_cmd_common_set_reconfig_link_id(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, 3115 uint32_t fd, uint32_t active_link_config_id) 3116 { 3117 sli4_req_common_set_reconfig_link_id_t *req = NULL; 3118 uint32_t cmd_off = 0; 3119 uint32_t payload_size; 3120 3121 /* Payload length must accommodate both request and response */ 3122 payload_size = max(sizeof(sli4_req_common_set_reconfig_link_id_t), 3123 sizeof(sli4_res_common_set_reconfig_link_id_t)); 3124 3125 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 3126 cmd_off = sli_cmd_sli_config(sli4, buf, size, 3127 payload_size, 3128 NULL); 3129 } 3130 3131 if (dma != NULL) { 3132 req = dma->virt; 3133 ocs_memset(req, 0, dma->size); 3134 payload_size = dma->size; 3135 } else { 3136 req = (sli4_req_common_set_reconfig_link_id_t *)((uint8_t *)buf + cmd_off); 3137 payload_size = sizeof(sli4_req_common_set_reconfig_link_id_t); 3138 } 3139 3140 req->hdr.opcode = SLI4_OPC_COMMON_SET_RECONFIG_LINK_ID; 3141 req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON; 3142 req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t); 3143 req->hdr.version = 0; 3144 req->fd = fd; 3145 req->next_link_config_id = active_link_config_id; 3146 3147 return(cmd_off + sizeof(sli4_req_common_set_reconfig_link_id_t)); 3148 } 3149 3150 /** 3151 * @ingroup sli 3152 * @brief Check the mailbox/queue completion entry. 3153 * 3154 * @param buf Pointer to the MCQE. 3155 * 3156 * @return Returns 0 on success, or a non-zero value on failure. 3157 */ 3158 int32_t 3159 sli_cqe_mq(void *buf) 3160 { 3161 sli4_mcqe_t *mcqe = buf; 3162 3163 /* 3164 * Firmware can split mbx completions into two MCQEs: first with only 3165 * the "consumed" bit set and a second with the "complete" bit set. 3166 * Thus, ignore MCQE unless "complete" is set. 3167 */ 3168 if (!mcqe->cmp) { 3169 return -2; 3170 } 3171 3172 if (mcqe->completion_status) { 3173 ocs_log_debug(NULL, "bad status (cmpl=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n", 3174 mcqe->completion_status, 3175 mcqe->extended_status, 3176 mcqe->con, 3177 mcqe->cmp, 3178 mcqe->ae, 3179 mcqe->val); 3180 } 3181 3182 return mcqe->completion_status; 3183 } 3184 3185 /** 3186 * @ingroup sli 3187 * @brief Check the asynchronous event completion entry. 3188 * 3189 * @param sli4 SLI context. 3190 * @param buf Pointer to the ACQE. 3191 * 3192 * @return Returns 0 on success, or a non-zero value on failure. 
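 *
 * @par Example
 * A schematic sketch of how a completion-processing loop might route entries;
 * the predicate below is only a placeholder, since the actual CQE type is
 * decoded elsewhere in the driver:
 * @code
 * if (cqe_is_async) {                      // placeholder predicate
 *         sli_cqe_async(sli4, cqe);        // ACQE: link state, FIP, port events, ...
 * } else {
 *         int32_t status = sli_cqe_mq(cqe);
 *
 *         if (status == -2) {
 *                 // MCQE with only "consumed" set; wait for the "complete" MCQE
 *         } else if (status) {
 *                 // mailbox command completed with a bad status
 *         }
 * }
 * @endcode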
3193 */ 3194 int32_t 3195 sli_cqe_async(sli4_t *sli4, void *buf) 3196 { 3197 sli4_acqe_t *acqe = buf; 3198 int32_t rc = -1; 3199 3200 if (!sli4 || !buf) { 3201 ocs_log_err(NULL, "bad parameter sli4=%p buf=%p\n", sli4, buf); 3202 return -1; 3203 } 3204 3205 switch (acqe->event_code) { 3206 case SLI4_ACQE_EVENT_CODE_LINK_STATE: 3207 rc = sli_fc_process_link_state(sli4, buf); 3208 break; 3209 case SLI4_ACQE_EVENT_CODE_FCOE_FIP: 3210 rc = sli_fc_process_fcoe(sli4, buf); 3211 break; 3212 case SLI4_ACQE_EVENT_CODE_GRP_5: 3213 /*TODO*/ocs_log_debug(sli4->os, "ACQE GRP5\n"); 3214 break; 3215 case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT: 3216 ocs_log_debug(sli4->os,"ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n", 3217 acqe->event_type, acqe->event_data[0], acqe->event_data[1]); 3218 #if defined(OCS_INCLUDE_DEBUG) 3219 ocs_dump32(OCS_DEBUG_ALWAYS, sli4->os, "acq", acqe, sizeof(*acqe)); 3220 #endif 3221 break; 3222 case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT: 3223 rc = sli_fc_process_link_attention(sli4, buf); 3224 break; 3225 default: 3226 /*TODO*/ocs_log_test(sli4->os, "ACQE unknown=%#x\n", acqe->event_code); 3227 } 3228 3229 return rc; 3230 } 3231 3232 /** 3233 * @brief Check the SLI_CONFIG response. 3234 * 3235 * @par Description 3236 * Function checks the SLI_CONFIG response and the payload status. 3237 * 3238 * @param buf Pointer to SLI_CONFIG response. 3239 * 3240 * @return Returns 0 on success, or a non-zero value on failure. 3241 */ 3242 static int32_t 3243 sli_res_sli_config(void *buf) 3244 { 3245 sli4_cmd_sli_config_t *sli_config = buf; 3246 3247 if (!buf || (SLI4_MBOX_COMMAND_SLI_CONFIG != sli_config->hdr.command)) { 3248 ocs_log_err(NULL, "bad parameter buf=%p cmd=%#x\n", buf, 3249 buf ? sli_config->hdr.command : -1); 3250 return -1; 3251 } 3252 3253 if (sli_config->hdr.status) { 3254 return sli_config->hdr.status; 3255 } 3256 3257 if (sli_config->emb) { 3258 return sli_config->payload.embed[4]; 3259 } else { 3260 ocs_log_test(NULL, "external buffers not supported\n"); 3261 return -1; 3262 } 3263 } 3264 3265 /** 3266 * @brief Issue a COMMON_FUNCTION_RESET command. 3267 * 3268 * @param sli4 SLI context. 3269 * 3270 * @return Returns 0 on success, or a non-zero value on failure. 3271 */ 3272 static int32_t 3273 sli_common_function_reset(sli4_t *sli4) 3274 { 3275 3276 if (sli_cmd_common_function_reset(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { 3277 if (sli_bmbx_command(sli4)) { 3278 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COM_FUNC_RESET)\n"); 3279 return -1; 3280 } 3281 if (sli_res_sli_config(sli4->bmbx.virt)) { 3282 ocs_log_err(sli4->os, "bad status COM_FUNC_RESET\n"); 3283 return -1; 3284 } 3285 } else { 3286 ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n"); 3287 return -1; 3288 } 3289 3290 return 0; 3291 } 3292 3293 /** 3294 * @brief check to see if the FW is ready. 3295 * 3296 * @par Description 3297 * Based on <i>SLI-4 Architecture Specification, Revision 4.x0-13 (2012).</i>. 3298 * 3299 * @param sli4 SLI context. 3300 * @param timeout_ms Time, in milliseconds, to wait for the port to be ready 3301 * before failing. 3302 * 3303 * @return Returns TRUE for ready, or FALSE otherwise. 
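 *
 * @par Example
 * For instance, with SLI4_INIT_PORT_DELAY_US at 10000 (10 ms), a timeout_ms of
 * 30000 allows up to 3000 polls of sli_fw_ready() before giving up.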
3304 */ 3305 static int32_t 3306 sli_wait_for_fw_ready(sli4_t *sli4, uint32_t timeout_ms) 3307 { 3308 uint32_t iter = timeout_ms / (SLI4_INIT_PORT_DELAY_US / 1000); 3309 uint32_t ready = FALSE; 3310 3311 do { 3312 iter--; 3313 ocs_udelay(SLI4_INIT_PORT_DELAY_US); 3314 if (sli_fw_ready(sli4) == 1) { 3315 ready = TRUE; 3316 } 3317 } while (!ready && (iter > 0)); 3318 3319 return ready; 3320 } 3321 3322 /** 3323 * @brief Initialize the firmware. 3324 * 3325 * @par Description 3326 * Based on <i>SLI-4 Architecture Specification, Revision 4.x0-13 (2012).</i>. 3327 * 3328 * @param sli4 SLI context. 3329 * 3330 * @return Returns 0 on success, or a non-zero value on failure. 3331 */ 3332 static int32_t 3333 sli_fw_init(sli4_t *sli4) 3334 { 3335 uint32_t ready; 3336 uint32_t endian; 3337 3338 /* 3339 * Is firmware ready for operation? 3340 */ 3341 ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); 3342 if (!ready) { 3343 ocs_log_crit(sli4->os, "FW status is NOT ready\n"); 3344 return -1; 3345 } 3346 3347 /* 3348 * Reset port to a known state 3349 */ 3350 switch (sli4->if_type) { 3351 case SLI4_IF_TYPE_BE3_SKH_PF: 3352 case SLI4_IF_TYPE_BE3_SKH_VF: 3353 /* No SLIPORT_CONTROL register so use command sequence instead */ 3354 if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) { 3355 ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n"); 3356 return -1; 3357 } 3358 3359 if (sli_cmd_fw_initialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { 3360 if (sli_bmbx_command(sli4)) { 3361 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_INIT)\n"); 3362 return -1; 3363 } 3364 } else { 3365 ocs_log_crit(sli4->os, "bad FW_INIT write\n"); 3366 return -1; 3367 } 3368 3369 if (sli_common_function_reset(sli4)) { 3370 ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n"); 3371 return -1; 3372 } 3373 break; 3374 case SLI4_IF_TYPE_LANCER_FC_ETH: 3375 case SLI4_IF_TYPE_LANCER_G7: 3376 #if BYTE_ORDER == LITTLE_ENDIAN 3377 endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN; 3378 #else 3379 endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN; 3380 #endif 3381 3382 if (sli_sliport_control(sli4, endian)) 3383 return -1; 3384 break; 3385 default: 3386 ocs_log_test(sli4->os, "if_type %d not supported\n", sli4->if_type); 3387 return -1; 3388 } 3389 3390 return 0; 3391 } 3392 3393 /** 3394 * @brief Terminate the firmware. 3395 * 3396 * @param sli4 SLI context. 3397 * 3398 * @return Returns 0 on success, or a non-zero value on failure. 3399 */ 3400 static int32_t 3401 sli_fw_term(sli4_t *sli4) 3402 { 3403 uint32_t endian; 3404 3405 if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF || 3406 sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF) { 3407 /* No SLIPORT_CONTROL register so use command sequence instead */ 3408 if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) { 3409 ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n"); 3410 return -1; 3411 } 3412 3413 if (sli_common_function_reset(sli4)) { 3414 ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n"); 3415 return -1; 3416 } 3417 3418 if (sli_cmd_fw_deinitialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { 3419 if (sli_bmbx_command(sli4)) { 3420 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_DEINIT)\n"); 3421 return -1; 3422 } 3423 } else { 3424 ocs_log_test(sli4->os, "bad FW_DEINIT write\n"); 3425 return -1; 3426 } 3427 } else { 3428 #if BYTE_ORDER == LITTLE_ENDIAN 3429 endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN; 3430 #else 3431 endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN; 3432 #endif 3433 /* type 2 etc. 
use SLIPORT_CONTROL to initialize port */ 3434 sli_sliport_control(sli4, endian); 3435 } 3436 return 0; 3437 } 3438 3439 /** 3440 * @brief Write the doorbell register associated with the queue object. 3441 * 3442 * @param sli4 SLI context. 3443 * @param q Queue object. 3444 * 3445 * @return Returns 0 on success, or a non-zero value on failure. 3446 */ 3447 static int32_t 3448 sli_queue_doorbell(sli4_t *sli4, sli4_queue_t *q) 3449 { 3450 uint32_t val = 0; 3451 3452 switch (q->type) { 3453 case SLI_QTYPE_EQ: 3454 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) 3455 val = sli_iftype6_eq_doorbell(q->n_posted, q->id, FALSE); 3456 else 3457 val = sli_eq_doorbell(q->n_posted, q->id, FALSE); 3458 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); 3459 break; 3460 case SLI_QTYPE_CQ: 3461 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) 3462 val = sli_iftype6_cq_doorbell(q->n_posted, q->id, FALSE); 3463 else 3464 val = sli_cq_doorbell(q->n_posted, q->id, FALSE); 3465 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); 3466 break; 3467 case SLI_QTYPE_MQ: 3468 val = SLI4_MQ_DOORBELL(q->n_posted, q->id); 3469 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); 3470 break; 3471 case SLI_QTYPE_RQ: 3472 { 3473 uint32_t n_posted = q->n_posted; 3474 /* 3475 * FC/FCoE has different rules for Receive Queues. The host 3476 * should only update the doorbell of the RQ-pair containing 3477 * the headers since the header / payload RQs are treated 3478 * as a matched unit. 3479 */ 3480 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 3481 /* 3482 * In RQ-pair, an RQ either contains the FC header 3483 * (i.e. is_hdr == TRUE) or the payload. 3484 * 3485 * Don't ring doorbell for payload RQ 3486 */ 3487 if (!q->u.flag.is_hdr) { 3488 break; 3489 } 3490 /* 3491 * Some RQ cannot be incremented one entry at a time. Instead, 3492 * the driver collects a number of entries and updates the 3493 * RQ in batches. 
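 * For example, if SLI4_QUEUE_RQ_BATCH were 8, the doorbell would be written
 * only when (index + n_posted) is a multiple of 8, and it would then credit
 * 8 entries at once.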
3494 			 */
3495 			if (q->u.flag.rq_batch) {
3496 				if (((q->index + q->n_posted) % SLI4_QUEUE_RQ_BATCH)) {
3497 					break;
3498 				}
3499 				n_posted = SLI4_QUEUE_RQ_BATCH;
3500 			}
3501 		}
3502 
3503 		val = SLI4_RQ_DOORBELL(n_posted, q->id);
3504 		ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3505 		break;
3506 	}
3507 	case SLI_QTYPE_WQ:
3508 		if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) {
3509 			val = SLI4_WQ_DOORBELL(q->n_posted, 0, q->id);
3510 		} else {
3511 			/* For iftype = 2 and 3, q->index value is ignored */
3512 			val = SLI4_WQ_DOORBELL(q->n_posted, q->index, q->id);
3513 		}
3514 
3515 		ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
3516 		break;
3517 	default:
3518 		ocs_log_test(sli4->os, "bad queue type %d\n", q->type);
3519 		return -1;
3520 	}
3521 
3522 	return 0;
3523 }
3524 
3525 static int32_t
3526 sli_request_features(sli4_t *sli4, sli4_features_t *features, uint8_t query)
3527 {
3528 
3529 	if (sli_cmd_request_features(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
3530 			*features, query)) {
3531 		sli4_cmd_request_features_t *req_features = sli4->bmbx.virt;
3532 
3533 		if (sli_bmbx_command(sli4)) {
3534 			ocs_log_crit(sli4->os, "bootstrap mailbox write fail (REQUEST_FEATURES)\n");
3535 			return -1;
3536 		}
3537 		if (req_features->hdr.status) {
3538 			ocs_log_err(sli4->os, "REQUEST_FEATURES bad status %#x\n",
3539 					req_features->hdr.status);
3540 			return -1;
3541 		}
3542 		features->dword = req_features->response.dword;
3543 	} else {
3544 		ocs_log_err(sli4->os, "bad REQUEST_FEATURES write\n");
3545 		return -1;
3546 	}
3547 
3548 	return 0;
3549 }
3550 
3551 /**
3552  * @brief Calculate max queue entries.
3553  *
3554  * @param sli4 SLI context.
3555  *
3556  * @return None.
3557  */
3558 void
3559 sli_calc_max_qentries(sli4_t *sli4)
3560 {
3561 	sli4_qtype_e q;
3562 	uint32_t alloc_size, qentries, qentry_size;
3563 
3564 	for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
3565 		sli4->config.max_qentries[q] = sli_convert_mask_to_count(sli4->config.count_method[q],
3566 				sli4->config.count_mask[q]);
3567 	}
3568 
3569 	/* A single, contiguous DMA allocation will be made for each queue,
3570 	 * of size (max_qentries * queue entry size); since these can be large,
3571 	 * check against the OS max DMA allocation size
3572 	 */
3573 	for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
3574 		qentries = sli4->config.max_qentries[q];
3575 		qentry_size = sli_get_queue_entry_size(sli4, q);
3576 		alloc_size = qentries * qentry_size;
3577 		if (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) {
3578 			while (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) {
3579 				/* cut the qentries in half until alloc_size <= max DMA alloc size */
3580 				qentries >>= 1;
3581 				alloc_size = qentries * qentry_size;
3582 			}
3583 			ocs_log_debug(sli4->os, "[%s]: max_qentries from %d to %d (max dma %d)\n",
3584 					SLI_QNAME[q], sli4->config.max_qentries[q],
3585 					qentries, ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE));
3586 			sli4->config.max_qentries[q] = qentries;
3587 		}
3588 	}
3589 }
3590 
3591 /**
3592  * @brief Issue a COMMON_QUERY_FW_CONFIG mailbox command and store the results.
3593  *
3594  * @param sli4 SLI context.
3595  *
3596  * @return Returns 0 on success, or a non-zero value on failure.
3597 */ 3598 static int32_t 3599 sli_query_fw_config(sli4_t *sli4) 3600 { 3601 /* 3602 * Read the device configuration 3603 * 3604 * Note: Only ulp0 fields contain values 3605 */ 3606 if (sli_cmd_common_query_fw_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { 3607 sli4_res_common_query_fw_config_t *fw_config = 3608 (sli4_res_common_query_fw_config_t *) 3609 (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed)); 3610 3611 if (sli_bmbx_command(sli4)) { 3612 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (QUERY_FW_CONFIG)\n"); 3613 return -1; 3614 } 3615 if (fw_config->hdr.status) { 3616 ocs_log_err(sli4->os, "COMMON_QUERY_FW_CONFIG bad status %#x\n", 3617 fw_config->hdr.status); 3618 return -1; 3619 } 3620 3621 sli4->physical_port = fw_config->physical_port; 3622 sli4->config.dual_ulp_capable = ((fw_config->function_mode & SLI4_FUNCTION_MODE_DUA_MODE) == 0 ? 0 : 1); 3623 sli4->config.is_ulp_fc[0] = ((fw_config->ulp0_mode & 3624 (SLI4_ULP_MODE_FCOE_INI | 3625 SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1); 3626 sli4->config.is_ulp_fc[1] = ((fw_config->ulp1_mode & 3627 (SLI4_ULP_MODE_FCOE_INI | 3628 SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1); 3629 3630 if (sli4->config.dual_ulp_capable) { 3631 /* 3632 * Lancer will not support this, so we use the values 3633 * from the READ_CONFIG. 3634 */ 3635 if (sli4->config.is_ulp_fc[0] && 3636 sli4->config.is_ulp_fc[1]) { 3637 sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total + fw_config->ulp1_toe_wq_total; 3638 sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total + fw_config->ulp1_toe_defrq_total; 3639 } else if (sli4->config.is_ulp_fc[0]) { 3640 sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total; 3641 sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total; 3642 } else { 3643 sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp1_toe_wq_total; 3644 sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp1_toe_defrq_total; 3645 } 3646 } 3647 } else { 3648 ocs_log_err(sli4->os, "bad QUERY_FW_CONFIG write\n"); 3649 return -1; 3650 } 3651 return 0; 3652 } 3653 3654 static int32_t 3655 sli_get_config(sli4_t *sli4) 3656 { 3657 ocs_dma_t get_cntl_addl_data; 3658 3659 /* 3660 * Read the device configuration 3661 */ 3662 if (sli_cmd_read_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { 3663 sli4_res_read_config_t *read_config = sli4->bmbx.virt; 3664 uint32_t i; 3665 uint32_t total; 3666 3667 if (sli_bmbx_command(sli4)) { 3668 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_CONFIG)\n"); 3669 return -1; 3670 } 3671 if (read_config->hdr.status) { 3672 ocs_log_err(sli4->os, "READ_CONFIG bad status %#x\n", 3673 read_config->hdr.status); 3674 return -1; 3675 } 3676 3677 sli4->config.has_extents = read_config->ext; 3678 if (FALSE == sli4->config.has_extents) { 3679 uint32_t i = 0; 3680 uint32_t *base = sli4->config.extent[0].base; 3681 3682 if (!base) { 3683 if (NULL == (base = ocs_malloc(sli4->os, SLI_RSRC_MAX * sizeof(uint32_t), 3684 OCS_M_ZERO | OCS_M_NOWAIT))) { 3685 ocs_log_err(sli4->os, "memory allocation failed for sli4_resource_t\n"); 3686 return -1; 3687 } 3688 } 3689 3690 for (i = 0; i < SLI_RSRC_MAX; i++) { 3691 sli4->config.extent[i].number = 1; 3692 sli4->config.extent[i].n_alloc = 0; 3693 sli4->config.extent[i].base = &base[i]; 3694 } 3695 3696 sli4->config.extent[SLI_RSRC_FCOE_VFI].base[0] = read_config->vfi_base; 3697 sli4->config.extent[SLI_RSRC_FCOE_VFI].size = read_config->vfi_count; 3698 3699 sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] = 
read_config->vpi_base; 3700 sli4->config.extent[SLI_RSRC_FCOE_VPI].size = read_config->vpi_count; 3701 3702 sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0] = read_config->rpi_base; 3703 sli4->config.extent[SLI_RSRC_FCOE_RPI].size = read_config->rpi_count; 3704 3705 sli4->config.extent[SLI_RSRC_FCOE_XRI].base[0] = read_config->xri_base; 3706 sli4->config.extent[SLI_RSRC_FCOE_XRI].size = OCS_MIN(255,read_config->xri_count); 3707 3708 sli4->config.extent[SLI_RSRC_FCOE_FCFI].base[0] = 0; 3709 sli4->config.extent[SLI_RSRC_FCOE_FCFI].size = read_config->fcfi_count; 3710 } else { 3711 /* TODO extents*/ 3712 ; 3713 } 3714 3715 for (i = 0; i < SLI_RSRC_MAX; i++) { 3716 total = sli4->config.extent[i].number * sli4->config.extent[i].size; 3717 sli4->config.extent[i].use_map = ocs_bitmap_alloc(total); 3718 if (NULL == sli4->config.extent[i].use_map) { 3719 ocs_log_err(sli4->os, "bitmap memory allocation failed " 3720 "resource %d\n", i); 3721 return -1; 3722 } 3723 sli4->config.extent[i].map_size = total; 3724 } 3725 3726 sli4->config.topology = read_config->topology; 3727 switch (sli4->config.topology) { 3728 case SLI4_READ_CFG_TOPO_FCOE: 3729 ocs_log_debug(sli4->os, "FCoE\n"); 3730 break; 3731 case SLI4_READ_CFG_TOPO_FC: 3732 ocs_log_debug(sli4->os, "FC (unknown)\n"); 3733 break; 3734 case SLI4_READ_CFG_TOPO_FC_DA: 3735 ocs_log_debug(sli4->os, "FC (direct attach)\n"); 3736 break; 3737 case SLI4_READ_CFG_TOPO_FC_AL: 3738 ocs_log_debug(sli4->os, "FC (arbitrated loop)\n"); 3739 break; 3740 default: 3741 ocs_log_test(sli4->os, "bad topology %#x\n", sli4->config.topology); 3742 } 3743 3744 sli4->config.e_d_tov = read_config->e_d_tov; 3745 sli4->config.r_a_tov = read_config->r_a_tov; 3746 3747 sli4->config.link_module_type = read_config->lmt; 3748 3749 sli4->config.max_qcount[SLI_QTYPE_EQ] = read_config->eq_count; 3750 sli4->config.max_qcount[SLI_QTYPE_CQ] = read_config->cq_count; 3751 sli4->config.max_qcount[SLI_QTYPE_WQ] = read_config->wq_count; 3752 sli4->config.max_qcount[SLI_QTYPE_RQ] = read_config->rq_count; 3753 3754 /* 3755 * READ_CONFIG doesn't give the max number of MQ. Applications 3756 * will typically want 1, but we may need another at some future 3757 * date. Dummy up a "max" MQ count here. 
3758 */ 3759 sli4->config.max_qcount[SLI_QTYPE_MQ] = SLI_USER_MQ_COUNT; 3760 } else { 3761 ocs_log_err(sli4->os, "bad READ_CONFIG write\n"); 3762 return -1; 3763 } 3764 3765 if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { 3766 sli4_res_common_get_sli4_parameters_t *parms = (sli4_res_common_get_sli4_parameters_t *) 3767 (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed)); 3768 3769 if (sli_bmbx_command(sli4)) { 3770 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_SLI4_PARAMETERS)\n"); 3771 return -1; 3772 } else if (parms->hdr.status) { 3773 ocs_log_err(sli4->os, "COMMON_GET_SLI4_PARAMETERS bad status %#x att'l %#x\n", 3774 parms->hdr.status, parms->hdr.additional_status); 3775 return -1; 3776 } 3777 3778 sli4->config.auto_reg = parms->areg; 3779 sli4->config.auto_xfer_rdy = parms->agxf; 3780 sli4->config.hdr_template_req = parms->hdrr; 3781 sli4->config.t10_dif_inline_capable = parms->timm; 3782 sli4->config.t10_dif_separate_capable = parms->tsmm; 3783 3784 sli4->config.mq_create_version = parms->mqv; 3785 sli4->config.cq_create_version = parms->cqv; 3786 sli4->config.rq_min_buf_size = parms->min_rq_buffer_size; 3787 sli4->config.rq_max_buf_size = parms->max_rq_buffer_size; 3788 3789 sli4->config.qpage_count[SLI_QTYPE_EQ] = parms->eq_page_cnt; 3790 sli4->config.qpage_count[SLI_QTYPE_CQ] = parms->cq_page_cnt; 3791 sli4->config.qpage_count[SLI_QTYPE_MQ] = parms->mq_page_cnt; 3792 sli4->config.qpage_count[SLI_QTYPE_WQ] = parms->wq_page_cnt; 3793 sli4->config.qpage_count[SLI_QTYPE_RQ] = parms->rq_page_cnt; 3794 3795 /* save count methods and masks for each queue type */ 3796 sli4->config.count_mask[SLI_QTYPE_EQ] = parms->eqe_count_mask; 3797 sli4->config.count_method[SLI_QTYPE_EQ] = parms->eqe_count_method; 3798 sli4->config.count_mask[SLI_QTYPE_CQ] = parms->cqe_count_mask; 3799 sli4->config.count_method[SLI_QTYPE_CQ] = parms->cqe_count_method; 3800 sli4->config.count_mask[SLI_QTYPE_MQ] = parms->mqe_count_mask; 3801 sli4->config.count_method[SLI_QTYPE_MQ] = parms->mqe_count_method; 3802 sli4->config.count_mask[SLI_QTYPE_WQ] = parms->wqe_count_mask; 3803 sli4->config.count_method[SLI_QTYPE_WQ] = parms->wqe_count_method; 3804 sli4->config.count_mask[SLI_QTYPE_RQ] = parms->rqe_count_mask; 3805 sli4->config.count_method[SLI_QTYPE_RQ] = parms->rqe_count_method; 3806 3807 /* now calculate max queue entries */ 3808 sli_calc_max_qentries(sli4); 3809 3810 sli4->config.max_sgl_pages = parms->sgl_page_cnt; /* max # of pages */ 3811 sli4->config.sgl_page_sizes = parms->sgl_page_sizes; /* bit map of available sizes */ 3812 /* ignore HLM here. 
Use value from REQUEST_FEATURES */ 3813 3814 sli4->config.sge_supported_length = parms->sge_supported_length; 3815 if (sli4->config.sge_supported_length > OCS_MAX_SGE_SIZE) 3816 sli4->config.sge_supported_length = OCS_MAX_SGE_SIZE; 3817 3818 sli4->config.sgl_pre_registration_required = parms->sglr; 3819 /* default to using pre-registered SGL's */ 3820 sli4->config.sgl_pre_registered = TRUE; 3821 3822 sli4->config.perf_hint = parms->phon; 3823 sli4->config.perf_wq_id_association = parms->phwq; 3824 3825 sli4->config.rq_batch = parms->rq_db_window; 3826 3827 /* save the fields for skyhawk SGL chaining */ 3828 sli4->config.sgl_chaining_params.chaining_capable = 3829 (parms->sglc == 1); 3830 sli4->config.sgl_chaining_params.frag_num_field_offset = 3831 parms->frag_num_field_offset; 3832 sli4->config.sgl_chaining_params.frag_num_field_mask = 3833 (1ull << parms->frag_num_field_size) - 1; 3834 sli4->config.sgl_chaining_params.sgl_index_field_offset = 3835 parms->sgl_index_field_offset; 3836 sli4->config.sgl_chaining_params.sgl_index_field_mask = 3837 (1ull << parms->sgl_index_field_size) - 1; 3838 sli4->config.sgl_chaining_params.chain_sge_initial_value_lo = 3839 parms->chain_sge_initial_value_lo; 3840 sli4->config.sgl_chaining_params.chain_sge_initial_value_hi = 3841 parms->chain_sge_initial_value_hi; 3842 3843 /* Use the highest available WQE size. */ 3844 if (parms->wqe_sizes & SLI4_128BYTE_WQE_SUPPORT) { 3845 sli4->config.wqe_size = SLI4_WQE_EXT_BYTES; 3846 } else { 3847 sli4->config.wqe_size = SLI4_WQE_BYTES; 3848 } 3849 } 3850 3851 if (sli_query_fw_config(sli4)) { 3852 ocs_log_err(sli4->os, "Error sending QUERY_FW_CONFIG\n"); 3853 return -1; 3854 } 3855 3856 sli4->config.port_number = 0; 3857 3858 /* 3859 * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily 3860 * uses VPD DMA buffer as the response won't fit in the embedded 3861 * buffer. 
3862 */ 3863 if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) { 3864 sli4_res_common_get_cntl_attributes_t *attr = sli4->vpd.data.virt; 3865 3866 if (sli_bmbx_command(sli4)) { 3867 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_CNTL_ATTRIBUTES)\n"); 3868 return -1; 3869 } else if (attr->hdr.status) { 3870 ocs_log_err(sli4->os, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x att'l %#x\n", 3871 attr->hdr.status, attr->hdr.additional_status); 3872 return -1; 3873 } 3874 3875 sli4->config.port_number = attr->port_number; 3876 3877 ocs_memcpy(sli4->config.bios_version_string, attr->bios_version_string, 3878 sizeof(sli4->config.bios_version_string)); 3879 } else { 3880 ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ATTRIBUTES write\n"); 3881 return -1; 3882 } 3883 3884 if (ocs_dma_alloc(sli4->os, &get_cntl_addl_data, sizeof(sli4_res_common_get_cntl_addl_attributes_t), 3885 OCS_MIN_DMA_ALIGNMENT)) { 3886 ocs_log_err(sli4->os, "Failed to allocate memory for GET_CNTL_ADDL_ATTR data\n"); 3887 } else { 3888 if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, 3889 &get_cntl_addl_data)) { 3890 sli4_res_common_get_cntl_addl_attributes_t *attr = get_cntl_addl_data.virt; 3891 3892 if (sli_bmbx_command(sli4)) { 3893 ocs_log_crit(sli4->os, 3894 "bootstrap mailbox write fail (COMMON_GET_CNTL_ADDL_ATTRIBUTES)\n"); 3895 ocs_dma_free(sli4->os, &get_cntl_addl_data); 3896 return -1; 3897 } 3898 if (attr->hdr.status) { 3899 ocs_log_err(sli4->os, "COMMON_GET_CNTL_ADDL_ATTRIBUTES bad status %#x\n", 3900 attr->hdr.status); 3901 ocs_dma_free(sli4->os, &get_cntl_addl_data); 3902 return -1; 3903 } 3904 3905 ocs_memcpy(sli4->config.ipl_name, attr->ipl_file_name, sizeof(sli4->config.ipl_name)); 3906 3907 ocs_log_debug(sli4->os, "IPL:%s \n", (char*)sli4->config.ipl_name); 3908 } else { 3909 ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ADDL_ATTRIBUTES write\n"); 3910 ocs_dma_free(sli4->os, &get_cntl_addl_data); 3911 return -1; 3912 } 3913 3914 ocs_dma_free(sli4->os, &get_cntl_addl_data); 3915 } 3916 3917 if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { 3918 sli4_res_common_get_port_name_t *port_name = (sli4_res_common_get_port_name_t *)(((uint8_t *)sli4->bmbx.virt) + 3919 offsetof(sli4_cmd_sli_config_t, payload.embed)); 3920 3921 if (sli_bmbx_command(sli4)) { 3922 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_PORT_NAME)\n"); 3923 return -1; 3924 } 3925 3926 sli4->config.port_name[0] = port_name->port_name[sli4->config.port_number]; 3927 } 3928 sli4->config.port_name[1] = '\0'; 3929 3930 if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) { 3931 sli4_cmd_read_rev_t *read_rev = sli4->bmbx.virt; 3932 3933 if (sli_bmbx_command(sli4)) { 3934 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_REV)\n"); 3935 return -1; 3936 } 3937 if (read_rev->hdr.status) { 3938 ocs_log_err(sli4->os, "READ_REV bad status %#x\n", 3939 read_rev->hdr.status); 3940 return -1; 3941 } 3942 3943 sli4->config.fw_rev[0] = read_rev->first_fw_id; 3944 ocs_memcpy(sli4->config.fw_name[0],read_rev->first_fw_name, sizeof(sli4->config.fw_name[0])); 3945 3946 sli4->config.fw_rev[1] = read_rev->second_fw_id; 3947 ocs_memcpy(sli4->config.fw_name[1],read_rev->second_fw_name, sizeof(sli4->config.fw_name[1])); 3948 3949 sli4->config.hw_rev[0] = read_rev->first_hw_revision; 3950 sli4->config.hw_rev[1] = read_rev->second_hw_revision; 3951 sli4->config.hw_rev[2] = read_rev->third_hw_revision; 3952 3953 
ocs_log_debug(sli4->os, "FW1:%s (%08x) / FW2:%s (%08x)\n", 3954 read_rev->first_fw_name, read_rev->first_fw_id, 3955 read_rev->second_fw_name, read_rev->second_fw_id); 3956 3957 ocs_log_debug(sli4->os, "HW1: %08x / HW2: %08x\n", read_rev->first_hw_revision, 3958 read_rev->second_hw_revision); 3959 3960 /* Check that all VPD data was returned */ 3961 if (read_rev->returned_vpd_length != read_rev->actual_vpd_length) { 3962 ocs_log_test(sli4->os, "VPD length: available=%d returned=%d actual=%d\n", 3963 read_rev->available_length, 3964 read_rev->returned_vpd_length, 3965 read_rev->actual_vpd_length); 3966 } 3967 sli4->vpd.length = read_rev->returned_vpd_length; 3968 } else { 3969 ocs_log_err(sli4->os, "bad READ_REV write\n"); 3970 return -1; 3971 } 3972 3973 if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) { 3974 sli4_cmd_read_nvparms_t *read_nvparms = sli4->bmbx.virt; 3975 3976 if (sli_bmbx_command(sli4)) { 3977 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_NVPARMS)\n"); 3978 return -1; 3979 } 3980 if (read_nvparms->hdr.status) { 3981 ocs_log_err(sli4->os, "READ_NVPARMS bad status %#x\n", 3982 read_nvparms->hdr.status); 3983 return -1; 3984 } 3985 3986 ocs_memcpy(sli4->config.wwpn, read_nvparms->wwpn, sizeof(sli4->config.wwpn)); 3987 ocs_memcpy(sli4->config.wwnn, read_nvparms->wwnn, sizeof(sli4->config.wwnn)); 3988 3989 ocs_log_debug(sli4->os, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 3990 sli4->config.wwpn[0], 3991 sli4->config.wwpn[1], 3992 sli4->config.wwpn[2], 3993 sli4->config.wwpn[3], 3994 sli4->config.wwpn[4], 3995 sli4->config.wwpn[5], 3996 sli4->config.wwpn[6], 3997 sli4->config.wwpn[7]); 3998 ocs_log_debug(sli4->os, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 3999 sli4->config.wwnn[0], 4000 sli4->config.wwnn[1], 4001 sli4->config.wwnn[2], 4002 sli4->config.wwnn[3], 4003 sli4->config.wwnn[4], 4004 sli4->config.wwnn[5], 4005 sli4->config.wwnn[6], 4006 sli4->config.wwnn[7]); 4007 } else { 4008 ocs_log_err(sli4->os, "bad READ_NVPARMS write\n"); 4009 return -1; 4010 } 4011 4012 return 0; 4013 } 4014 4015 /**************************************************************************** 4016 * Public functions 4017 */ 4018 4019 /** 4020 * @ingroup sli 4021 * @brief Set up the SLI context. 4022 * 4023 * @param sli4 SLI context. 4024 * @param os Device abstraction. 4025 * @param port_type Protocol type of port (for example, FC and NIC). 4026 * 4027 * @return Returns 0 on success, or a non-zero value on failure. 
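 *
 * @par Example
 * A minimal calling sketch (an editorial illustration, not code taken from this
 * driver); "os" is assumed to be the caller's device abstraction handle:
 * @code
 * sli4_t sli;
 *
 * if (sli_setup(&sli, os, SLI4_PORT_TYPE_FC)) {
 *         ocs_log_err(os, "SLI-4 setup failed\n");
 *         return -1;
 * }
 * // queues can now be allocated; sli_init() completes initialization
 * @endcode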
4028 */ 4029 int32_t 4030 sli_setup(sli4_t *sli4, ocs_os_handle_t os, sli4_port_type_e port_type) 4031 { 4032 uint32_t sli_intf = UINT32_MAX; 4033 uint32_t pci_class_rev = 0; 4034 uint32_t rev_id = 0; 4035 uint32_t family = 0; 4036 uint32_t i; 4037 sli4_asic_entry_t *asic; 4038 4039 ocs_memset(sli4, 0, sizeof(sli4_t)); 4040 4041 sli4->os = os; 4042 sli4->port_type = port_type; 4043 4044 /* 4045 * Read the SLI_INTF register to discover the register layout 4046 * and other capability information 4047 */ 4048 sli_intf = ocs_config_read32(os, SLI4_INTF_REG); 4049 4050 if (sli_intf_valid_check(sli_intf)) { 4051 ocs_log_err(os, "SLI_INTF is not valid\n"); 4052 return -1; 4053 } 4054 4055 /* driver only support SLI-4 */ 4056 sli4->sli_rev = sli_intf_sli_revision(sli_intf); 4057 if (4 != sli4->sli_rev) { 4058 ocs_log_err(os, "Unsupported SLI revision (intf=%#x)\n", 4059 sli_intf); 4060 return -1; 4061 } 4062 4063 sli4->sli_family = sli_intf_sli_family(sli_intf); 4064 4065 sli4->if_type = sli_intf_if_type(sli_intf); 4066 4067 if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type) || 4068 (SLI4_IF_TYPE_LANCER_G7 == sli4->if_type)) { 4069 ocs_log_debug(os, "status=%#x error1=%#x error2=%#x\n", 4070 sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS), 4071 sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1), 4072 sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2)); 4073 } 4074 4075 /* 4076 * set the ASIC type and revision 4077 */ 4078 pci_class_rev = ocs_config_read32(os, SLI4_PCI_CLASS_REVISION); 4079 rev_id = sli_pci_rev_id(pci_class_rev); 4080 family = sli4->sli_family; 4081 if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) { 4082 uint32_t asic_id = ocs_config_read32(os, SLI4_ASIC_ID_REG); 4083 family = sli_asic_gen(asic_id); 4084 } 4085 4086 for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table); i++, asic++) { 4087 if ((rev_id == asic->rev_id) && (family == asic->family)) { 4088 sli4->asic_type = asic->type; 4089 sli4->asic_rev = asic->rev; 4090 break; 4091 } 4092 } 4093 /* Fail if no matching asic type/rev was found */ 4094 if( (sli4->asic_type == 0) || (sli4->asic_rev == 0)) { 4095 ocs_log_err(os, "no matching asic family/rev found: %02x/%02x\n", family, rev_id); 4096 return -1; 4097 } 4098 4099 /* 4100 * The bootstrap mailbox is equivalent to a MQ with a single 256 byte 4101 * entry, a CQ with a single 16 byte entry, and no event queue. 4102 * Alignment must be 16 bytes as the low order address bits in the 4103 * address register are also control / status. 4104 */ 4105 if (ocs_dma_alloc(sli4->os, &sli4->bmbx, SLI4_BMBX_SIZE + 4106 sizeof(sli4_mcqe_t), 16)) { 4107 ocs_log_err(os, "bootstrap mailbox allocation failed\n"); 4108 return -1; 4109 } 4110 4111 if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) { 4112 ocs_log_err(os, "bad alignment for bootstrap mailbox\n"); 4113 return -1; 4114 } 4115 4116 ocs_log_debug(os, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt, 4117 ocs_addr32_hi(sli4->bmbx.phys), 4118 ocs_addr32_lo(sli4->bmbx.phys), 4119 sli4->bmbx.size); 4120 4121 /* TODO 4096 is arbitrary. What should this value actually be? 
*/ 4122 if (ocs_dma_alloc(sli4->os, &sli4->vpd.data, 4096/*TODO*/, 4096)) { 4123 /* Note that failure isn't fatal in this specific case */ 4124 sli4->vpd.data.size = 0; 4125 ocs_log_test(os, "VPD buffer allocation failed\n"); 4126 } 4127 4128 if (sli_fw_init(sli4)) { 4129 ocs_log_err(sli4->os, "FW initialization failed\n"); 4130 return -1; 4131 } 4132 4133 /* 4134 * Set one of fcpi(initiator), fcpt(target), fcpc(combined) to true 4135 * in addition to any other desired features 4136 */ 4137 sli4->config.features.flag.iaab = TRUE; 4138 sli4->config.features.flag.npiv = TRUE; 4139 sli4->config.features.flag.dif = TRUE; 4140 sli4->config.features.flag.vf = TRUE; 4141 sli4->config.features.flag.fcpc = TRUE; 4142 sli4->config.features.flag.iaar = TRUE; 4143 sli4->config.features.flag.hlm = TRUE; 4144 sli4->config.features.flag.perfh = TRUE; 4145 sli4->config.features.flag.rxseq = TRUE; 4146 sli4->config.features.flag.rxri = TRUE; 4147 sli4->config.features.flag.mrqp = TRUE; 4148 4149 /* use performance hints if available */ 4150 if (sli4->config.perf_hint) { 4151 sli4->config.features.flag.perfh = TRUE; 4152 } 4153 4154 if (sli_request_features(sli4, &sli4->config.features, TRUE)) { 4155 return -1; 4156 } 4157 4158 if (sli_get_config(sli4)) { 4159 return -1; 4160 } 4161 4162 return 0; 4163 } 4164 4165 int32_t 4166 sli_init(sli4_t *sli4) 4167 { 4168 4169 if (sli4->config.has_extents) { 4170 /* TODO COMMON_ALLOC_RESOURCE_EXTENTS */; 4171 ocs_log_test(sli4->os, "XXX need to implement extent allocation\n"); 4172 return -1; 4173 } 4174 4175 sli4->config.features.flag.hlm = sli4->config.high_login_mode; 4176 sli4->config.features.flag.rxseq = FALSE; 4177 sli4->config.features.flag.rxri = FALSE; 4178 4179 if (sli_request_features(sli4, &sli4->config.features, FALSE)) { 4180 return -1; 4181 } 4182 4183 return 0; 4184 } 4185 4186 int32_t 4187 sli_reset(sli4_t *sli4) 4188 { 4189 uint32_t i; 4190 4191 if (sli_fw_init(sli4)) { 4192 ocs_log_crit(sli4->os, "FW initialization failed\n"); 4193 return -1; 4194 } 4195 4196 if (sli4->config.extent[0].base) { 4197 ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t)); 4198 sli4->config.extent[0].base = NULL; 4199 } 4200 4201 for (i = 0; i < SLI_RSRC_MAX; i++) { 4202 if (sli4->config.extent[i].use_map) { 4203 ocs_bitmap_free(sli4->config.extent[i].use_map); 4204 sli4->config.extent[i].use_map = NULL; 4205 } 4206 sli4->config.extent[i].base = NULL; 4207 } 4208 4209 if (sli_get_config(sli4)) { 4210 return -1; 4211 } 4212 4213 return 0; 4214 } 4215 4216 /** 4217 * @ingroup sli 4218 * @brief Issue a Firmware Reset. 4219 * 4220 * @par Description 4221 * Issues a Firmware Reset to the chip. This reset affects the entire chip, 4222 * so all PCI function on the same PCI bus and device are affected. 4223 * @n @n This type of reset can be used to activate newly downloaded firmware. 4224 * @n @n The driver should be considered to be in an unknown state after this 4225 * reset and should be reloaded. 4226 * 4227 * @param sli4 SLI context. 4228 * 4229 * @return Returns 0 on success, or -1 otherwise. 4230 */ 4231 4232 int32_t 4233 sli_fw_reset(sli4_t *sli4) 4234 { 4235 uint32_t val; 4236 uint32_t ready; 4237 4238 /* 4239 * Firmware must be ready before issuing the reset. 
4240 */ 4241 ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); 4242 if (!ready) { 4243 ocs_log_crit(sli4->os, "FW status is NOT ready\n"); 4244 return -1; 4245 } 4246 switch(sli4->if_type) { 4247 case SLI4_IF_TYPE_BE3_SKH_PF: 4248 /* BE3 / Skyhawk use PCICFG_SOFT_RESET_CSR */ 4249 val = ocs_config_read32(sli4->os, SLI4_PCI_SOFT_RESET_CSR); 4250 val |= SLI4_PCI_SOFT_RESET_MASK; 4251 ocs_config_write32(sli4->os, SLI4_PCI_SOFT_RESET_CSR, val); 4252 break; 4253 case SLI4_IF_TYPE_LANCER_FC_ETH: 4254 /* Lancer uses PHYDEV_CONTROL */ 4255 4256 val = SLI4_PHYDEV_CONTROL_FRST; 4257 sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, val); 4258 break; 4259 default: 4260 ocs_log_test(sli4->os, "Unexpected iftype %d\n", sli4->if_type); 4261 return -1; 4262 break; 4263 } 4264 4265 /* wait for the FW to become ready after the reset */ 4266 ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); 4267 if (!ready) { 4268 ocs_log_crit(sli4->os, "Failed to become ready after firmware reset\n"); 4269 return -1; 4270 } 4271 return 0; 4272 } 4273 4274 /** 4275 * @ingroup sli 4276 * @brief Tear down a SLI context. 4277 * 4278 * @param sli4 SLI context. 4279 * 4280 * @return Returns 0 on success, or non-zero otherwise. 4281 */ 4282 int32_t 4283 sli_teardown(sli4_t *sli4) 4284 { 4285 uint32_t i; 4286 4287 if (sli4->config.extent[0].base) { 4288 ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t)); 4289 sli4->config.extent[0].base = NULL; 4290 } 4291 4292 for (i = 0; i < SLI_RSRC_MAX; i++) { 4293 if (sli4->config.has_extents) { 4294 /* TODO COMMON_DEALLOC_RESOURCE_EXTENTS */; 4295 } 4296 4297 sli4->config.extent[i].base = NULL; 4298 4299 ocs_bitmap_free(sli4->config.extent[i].use_map); 4300 sli4->config.extent[i].use_map = NULL; 4301 } 4302 4303 if (sli_fw_term(sli4)) { 4304 ocs_log_err(sli4->os, "FW deinitialization failed\n"); 4305 } 4306 4307 ocs_dma_free(sli4->os, &sli4->vpd.data); 4308 ocs_dma_free(sli4->os, &sli4->bmbx); 4309 4310 return 0; 4311 } 4312 4313 /** 4314 * @ingroup sli 4315 * @brief Register a callback for the given event. 4316 * 4317 * @param sli4 SLI context. 4318 * @param which Event of interest. 4319 * @param func Function to call when the event occurs. 4320 * @param arg Argument passed to the callback function. 4321 * 4322 * @return Returns 0 on success, or non-zero otherwise. 4323 */ 4324 int32_t 4325 sli_callback(sli4_t *sli4, sli4_callback_e which, void *func, void *arg) 4326 { 4327 4328 if (!sli4 || !func || (which >= SLI4_CB_MAX)) { 4329 ocs_log_err(NULL, "bad parameter sli4=%p which=%#x func=%p\n", 4330 sli4, which, func); 4331 return -1; 4332 } 4333 4334 switch (which) { 4335 case SLI4_CB_LINK: 4336 sli4->link = func; 4337 sli4->link_arg = arg; 4338 break; 4339 case SLI4_CB_FIP: 4340 sli4->fip = func; 4341 sli4->fip_arg = arg; 4342 break; 4343 default: 4344 ocs_log_test(sli4->os, "unknown callback %#x\n", which); 4345 return -1; 4346 } 4347 4348 return 0; 4349 } 4350 4351 /** 4352 * @ingroup sli 4353 * @brief Initialize a queue object. 4354 * 4355 * @par Description 4356 * This initializes the sli4_queue_t object members, including the underlying 4357 * DMA memory. 4358 * 4359 * @param sli4 SLI context. 4360 * @param q Pointer to queue object. 4361 * @param qtype Type of queue to create. 4362 * @param size Size of each entry. 4363 * @param n_entries Number of entries to allocate. 4364 * @param align Starting memory address alignment. 4365 * 4366 * @note Checks if using the existing DMA memory (if any) is possible. 
If not, 4367 * it frees the existing memory and re-allocates. 4368 * 4369 * @return Returns 0 on success, or non-zero otherwise. 4370 */ 4371 int32_t 4372 __sli_queue_init(sli4_t *sli4, sli4_queue_t *q, uint32_t qtype, 4373 size_t size, uint32_t n_entries, uint32_t align) 4374 { 4375 4376 if ((q->dma.virt == NULL) || (size != q->size) || (n_entries != q->length)) { 4377 if (q->dma.size) { 4378 ocs_dma_free(sli4->os, &q->dma); 4379 } 4380 4381 ocs_memset(q, 0, sizeof(sli4_queue_t)); 4382 4383 if (ocs_dma_alloc(sli4->os, &q->dma, size * n_entries, align)) { 4384 ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]); 4385 return -1; 4386 } 4387 4388 ocs_memset(q->dma.virt, 0, size * n_entries); 4389 4390 ocs_lock_init(sli4->os, &q->lock, "%s lock[%d:%p]", 4391 SLI_QNAME[qtype], ocs_instance(sli4->os), &q->lock); 4392 4393 q->type = qtype; 4394 q->size = size; 4395 q->length = n_entries; 4396 4397 /* Limit to half the queue size per interrupt */ 4398 q->proc_limit = n_entries / 2; 4399 4400 if ( (q->type == SLI_QTYPE_EQ) || (q->type == SLI_QTYPE_CQ) ) { 4401 /* For prism, phase will be flipped after a sweep through eq and cq */ 4402 q->phase = 1; 4403 } 4404 4405 switch(q->type) { 4406 case SLI_QTYPE_EQ: 4407 q->posted_limit = q->length / 2; 4408 break; 4409 default: 4410 if ((sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) || 4411 (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF)) { 4412 /* For Skyhawk, ring the doorbell more often */ 4413 q->posted_limit = 8; 4414 } else { 4415 q->posted_limit = 64; 4416 } 4417 break; 4418 } 4419 } 4420 4421 return 0; 4422 } 4423 4424 /** 4425 * @ingroup sli 4426 * @brief Issue the command to create a queue. 4427 * 4428 * @param sli4 SLI context. 4429 * @param q Pointer to queue object. 4430 * 4431 * @return Returns 0 on success, or non-zero otherwise. 
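 *
 * @par Example
 * A sketch of the expected calling pattern, mirroring sli_queue_alloc()
 * (illustrative only); "create" is one of the sli_cmd_*_create functions and
 * the command is staged in the bootstrap mailbox before this call:
 * @code
 * if (create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma,
 *            assoc ? assoc->id : 0, ulp)) {
 *         if (__sli_create_queue(sli4, q) == 0) {
 *                 // q->id and the doorbell register/offset are now valid
 *         }
 * }
 * @endcode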
4432 */ 4433 int32_t 4434 __sli_create_queue(sli4_t *sli4, sli4_queue_t *q) 4435 { 4436 sli4_res_common_create_queue_t *res_q = NULL; 4437 4438 if (sli_bmbx_command(sli4)){ 4439 ocs_log_crit(sli4->os, "bootstrap mailbox write fail %s\n", 4440 SLI_QNAME[q->type]); 4441 ocs_dma_free(sli4->os, &q->dma); 4442 return -1; 4443 } 4444 if (sli_res_sli_config(sli4->bmbx.virt)) { 4445 ocs_log_err(sli4->os, "bad status create %s\n", SLI_QNAME[q->type]); 4446 ocs_dma_free(sli4->os, &q->dma); 4447 return -1; 4448 } 4449 res_q = (void *)((uint8_t *)sli4->bmbx.virt + 4450 offsetof(sli4_cmd_sli_config_t, payload)); 4451 4452 if (res_q->hdr.status) { 4453 ocs_log_err(sli4->os, "bad create %s status=%#x addl=%#x\n", 4454 SLI_QNAME[q->type], 4455 res_q->hdr.status, res_q->hdr.additional_status); 4456 ocs_dma_free(sli4->os, &q->dma); 4457 return -1; 4458 } else { 4459 q->id = res_q->q_id; 4460 q->doorbell_offset = res_q->db_offset; 4461 q->doorbell_rset = res_q->db_rs; 4462 4463 switch (q->type) { 4464 case SLI_QTYPE_EQ: 4465 /* No doorbell information in response for EQs */ 4466 q->doorbell_offset = regmap[SLI4_REG_EQ_DOORBELL][sli4->if_type].off; 4467 q->doorbell_rset = regmap[SLI4_REG_EQ_DOORBELL][sli4->if_type].rset; 4468 break; 4469 case SLI_QTYPE_CQ: 4470 /* No doorbell information in response for CQs */ 4471 q->doorbell_offset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].off; 4472 q->doorbell_rset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].rset; 4473 break; 4474 case SLI_QTYPE_MQ: 4475 /* No doorbell information in response for MQs */ 4476 q->doorbell_offset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].off; 4477 q->doorbell_rset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].rset; 4478 break; 4479 case SLI_QTYPE_RQ: 4480 /* set the doorbell for non-skyhawks */ 4481 if (!sli4->config.dual_ulp_capable) { 4482 q->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off; 4483 q->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset; 4484 } 4485 break; 4486 case SLI_QTYPE_WQ: 4487 /* set the doorbell for non-skyhawks */ 4488 if (!sli4->config.dual_ulp_capable) { 4489 q->doorbell_offset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].off; 4490 q->doorbell_rset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].rset; 4491 } 4492 break; 4493 default: 4494 break; 4495 } 4496 } 4497 4498 return 0; 4499 } 4500 4501 /** 4502 * @ingroup sli 4503 * @brief Get queue entry size. 4504 * 4505 * Get queue entry size given queue type. 4506 * 4507 * @param sli4 SLI context 4508 * @param qtype Type for which the entry size is returned. 4509 * 4510 * @return Returns > 0 on success (queue entry size), or a negative value on failure. 
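 *
 * @par Example
 * A short illustration of how the returned size is typically used (the same
 * pattern appears in sli_queue_alloc()); "n_entries" is assumed to be chosen
 * by the caller:
 * @code
 * int32_t entry_size = sli_get_queue_entry_size(sli4, SLI_QTYPE_CQ);
 *
 * if (entry_size < 0) {
 *         return -1;      // unknown or unsupported queue type
 * }
 * // total DMA bytes needed for the queue
 * uint32_t alloc_bytes = (uint32_t)entry_size * n_entries;
 * @endcode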
4511 */ 4512 int32_t 4513 sli_get_queue_entry_size(sli4_t *sli4, uint32_t qtype) 4514 { 4515 uint32_t size = 0; 4516 4517 if (!sli4) { 4518 ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4); 4519 return -1; 4520 } 4521 4522 switch (qtype) { 4523 case SLI_QTYPE_EQ: 4524 size = sizeof(uint32_t); 4525 break; 4526 case SLI_QTYPE_CQ: 4527 size = 16; 4528 break; 4529 case SLI_QTYPE_MQ: 4530 size = 256; 4531 break; 4532 case SLI_QTYPE_WQ: 4533 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 4534 size = sli4->config.wqe_size; 4535 } else { 4536 /* TODO */ 4537 ocs_log_test(sli4->os, "unsupported queue entry size\n"); 4538 return -1; 4539 } 4540 break; 4541 case SLI_QTYPE_RQ: 4542 size = SLI4_FCOE_RQE_SIZE; 4543 break; 4544 default: 4545 ocs_log_test(sli4->os, "unknown queue type %d\n", qtype); 4546 return -1; 4547 } 4548 return size; 4549 } 4550 4551 /** 4552 * @ingroup sli 4553 * @brief Modify the delay timer for all the EQs 4554 * 4555 * @param sli4 SLI context. 4556 * @param eq Array of EQs. 4557 * @param num_eq Count of EQs. 4558 * @param shift Phase shift for staggering interrupts. 4559 * @param delay_mult Delay multiplier for limiting interrupt frequency. 4560 * 4561 * @return Returns 0 on success, or -1 otherwise. 4562 */ 4563 int32_t 4564 sli_eq_modify_delay(sli4_t *sli4, sli4_queue_t *eq, uint32_t num_eq, uint32_t shift, uint32_t delay_mult) 4565 { 4566 4567 sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, eq, num_eq, shift, delay_mult); 4568 4569 if (sli_bmbx_command(sli4)) { 4570 ocs_log_crit(sli4->os, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n"); 4571 return -1; 4572 } 4573 if (sli_res_sli_config(sli4->bmbx.virt)) { 4574 ocs_log_err(sli4->os, "bad status MODIFY EQ DELAY\n"); 4575 return -1; 4576 } 4577 4578 return 0; 4579 } 4580 4581 /** 4582 * @ingroup sli 4583 * @brief Allocate a queue. 4584 * 4585 * @par Description 4586 * Allocates DMA memory and configures the requested queue type. 4587 * 4588 * @param sli4 SLI context. 4589 * @param qtype Type of queue to create. 4590 * @param q Pointer to the queue object. 4591 * @param n_entries Number of entries to allocate. 4592 * @param assoc Associated queue (that is, the EQ for a CQ, the CQ for a MQ, and so on). 4593 * @param ulp The ULP to bind, which is only used for WQ and RQs 4594 * 4595 * @return Returns 0 on success, or -1 otherwise. 
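 *
 * @par Example
 * A minimal sketch allocating an EQ and a CQ bound to it (illustrative only;
 * the queue objects must start out zeroed and error handling is abbreviated):
 * @code
 * sli4_queue_t eq = { 0 };
 * sli4_queue_t cq = { 0 };
 *
 * if (sli_queue_alloc(sli4, SLI_QTYPE_EQ, &eq, 1024, NULL, 0) ||
 *     sli_queue_alloc(sli4, SLI_QTYPE_CQ, &cq, 1024, &eq, 0)) {
 *         return -1;
 * }
 * @endcode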
4596 */ 4597 int32_t 4598 sli_queue_alloc(sli4_t *sli4, uint32_t qtype, sli4_queue_t *q, uint32_t n_entries, 4599 sli4_queue_t *assoc, uint16_t ulp) 4600 { 4601 int32_t size; 4602 uint32_t align = 0; 4603 sli4_create_q_fn_t create = NULL; 4604 4605 if (!sli4 || !q) { 4606 ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q); 4607 return -1; 4608 } 4609 4610 /* get queue size */ 4611 size = sli_get_queue_entry_size(sli4, qtype); 4612 if (size < 0) 4613 return -1; 4614 align = SLI_PAGE_SIZE; 4615 4616 switch (qtype) { 4617 case SLI_QTYPE_EQ: 4618 create = sli_cmd_common_create_eq; 4619 break; 4620 case SLI_QTYPE_CQ: 4621 create = sli_cmd_common_create_cq; 4622 break; 4623 case SLI_QTYPE_MQ: 4624 /* Validate the number of entries */ 4625 switch (n_entries) { 4626 case 16: 4627 case 32: 4628 case 64: 4629 case 128: 4630 break; 4631 default: 4632 ocs_log_test(sli4->os, "illegal n_entries value %d for MQ\n", n_entries); 4633 return -1; 4634 } 4635 assoc->u.flag.is_mq = TRUE; 4636 create = sli_cmd_common_create_mq_ext; 4637 break; 4638 case SLI_QTYPE_WQ: 4639 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 4640 if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) { 4641 create = sli_cmd_fcoe_wq_create; 4642 } else { 4643 create = sli_cmd_fcoe_wq_create_v1; 4644 } 4645 } else { 4646 /* TODO */ 4647 ocs_log_test(sli4->os, "unsupported WQ create\n"); 4648 return -1; 4649 } 4650 break; 4651 default: 4652 ocs_log_test(sli4->os, "unknown queue type %d\n", qtype); 4653 return -1; 4654 } 4655 4656 if (__sli_queue_init(sli4, q, qtype, size, n_entries, align)) { 4657 ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]); 4658 return -1; 4659 } 4660 4661 if (create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma, assoc ? assoc->id : 0, ulp)) { 4662 if (__sli_create_queue(sli4, q)) { 4663 ocs_log_err(sli4->os, "create %s failed\n", SLI_QNAME[qtype]); 4664 return -1; 4665 } 4666 q->ulp = ulp; 4667 } else { 4668 ocs_log_err(sli4->os, "cannot create %s\n", SLI_QNAME[qtype]); 4669 return -1; 4670 } 4671 4672 return 0; 4673 } 4674 4675 /** 4676 * @ingroup sli 4677 * @brief Allocate a set of completion queues (CQ set). 4678 * 4679 * @param sli4 SLI context. 4680 * @param qs Pointers to the CQ objects. 4681 * @param num_cqs Number of CQs to create. 4682 * @param n_entries Number of entries to allocate per CQ. 4683 * @param eqs Associated event queues, one per CQ. 4684 * 4685 * @return Returns 0 on success, or -1 otherwise. 
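 *
 * @par Example
 * A sketch of creating a CQ set, one CQ per existing EQ (illustrative only;
 * the EQs are assumed to have been allocated already and the CQ objects to
 * start out zeroed):
 * @code
 * sli4_queue_t cq[4] = { { 0 } };
 * sli4_queue_t *cqs[4], *eqs[4];
 * uint32_t i;
 *
 * for (i = 0; i < 4; i++) {
 *         cqs[i] = &cq[i];
 *         eqs[i] = &eq[i];   // eq[] allocated earlier with sli_queue_alloc()
 * }
 * if (sli_cq_alloc_set(sli4, cqs, 4, 1024, eqs)) {
 *         return -1;
 * }
 * @endcode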
4686 */ 4687 int32_t 4688 sli_cq_alloc_set(sli4_t *sli4, sli4_queue_t *qs[], uint32_t num_cqs, 4689 uint32_t n_entries, sli4_queue_t *eqs[]) 4690 { 4691 uint32_t i, offset = 0, page_bytes = 0, payload_size, cmd_size = 0; 4692 uint32_t p = 0, page_size = 0, n_cqe = 0, num_pages_cq; 4693 uintptr_t addr; 4694 ocs_dma_t dma; 4695 sli4_req_common_create_cq_set_v0_t *req = NULL; 4696 sli4_res_common_create_queue_set_t *res = NULL; 4697 4698 if (!sli4) { 4699 ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4); 4700 return -1; 4701 } 4702 4703 memset(&dma, 0, sizeof(dma)); 4704 4705 /* Align the queue DMA memory */ 4706 for (i = 0; i < num_cqs; i++) { 4707 if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_CQ, SLI4_CQE_BYTES, 4708 n_entries, SLI_PAGE_SIZE)) { 4709 ocs_log_err(sli4->os, "Queue init failed.\n"); 4710 goto error; 4711 } 4712 } 4713 4714 n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES; 4715 switch (n_cqe) { 4716 case 256: 4717 case 512: 4718 case 1024: 4719 case 2048: 4720 page_size = 1; 4721 break; 4722 case 4096: 4723 page_size = 2; 4724 break; 4725 default: 4726 return -1; 4727 } 4728 4729 page_bytes = page_size * SLI_PAGE_SIZE; 4730 num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes); 4731 cmd_size = sizeof(sli4_req_common_create_cq_set_v0_t) + (8 * num_pages_cq * num_cqs); 4732 payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_set_t)); 4733 4734 if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) { 4735 ocs_log_err(sli4->os, "DMA allocation failed\n"); 4736 goto error; 4737 } 4738 ocs_memset(dma.virt, 0, payload_size); 4739 4740 if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, 4741 payload_size, &dma) == -1) { 4742 goto error; 4743 } 4744 4745 /* Fill the request structure */ 4746 4747 req = (sli4_req_common_create_cq_set_v0_t *)((uint8_t *)dma.virt); 4748 req->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ_SET; 4749 req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 4750 req->hdr.version = 0; 4751 req->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t); 4752 req->page_size = page_size; 4753 4754 req->num_pages = num_pages_cq; 4755 switch (req->num_pages) { 4756 case 1: 4757 req->cqecnt = SLI4_CQ_CNT_256; 4758 break; 4759 case 2: 4760 req->cqecnt = SLI4_CQ_CNT_512; 4761 break; 4762 case 4: 4763 req->cqecnt = SLI4_CQ_CNT_1024; 4764 break; 4765 case 8: 4766 req->cqecnt = SLI4_CQ_CNT_LARGE; 4767 req->cqe_count = n_cqe; 4768 break; 4769 default: 4770 ocs_log_test(sli4->os, "num_pages %d not valid\n", req->num_pages); 4771 goto error; 4772 } 4773 4774 req->evt = TRUE; 4775 req->valid = TRUE; 4776 req->arm = FALSE; 4777 req->num_cq_req = num_cqs; 4778 4779 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) 4780 req->autovalid = TRUE; 4781 4782 /* Fill page addresses of all the CQs. */ 4783 for (i = 0; i < num_cqs; i++) { 4784 req->eq_id[i] = eqs[i]->id; 4785 for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages; p++, addr += page_bytes) { 4786 req->page_physical_address[offset].low = ocs_addr32_lo(addr); 4787 req->page_physical_address[offset].high = ocs_addr32_hi(addr); 4788 offset++; 4789 } 4790 } 4791 4792 if (sli_bmbx_command(sli4)) { 4793 ocs_log_crit(sli4->os, "bootstrap mailbox write fail CQSet\n"); 4794 goto error; 4795 } 4796 4797 res = (void *)((uint8_t *)dma.virt); 4798 if (res->hdr.status) { 4799 ocs_log_err(sli4->os, "bad create CQSet status=%#x addl=%#x\n", 4800 res->hdr.status, res->hdr.additional_status); 4801 goto error; 4802 } else { 4803 /* Check if we got all requested CQs. 
*/ 4804 if (res->num_q_allocated != num_cqs) { 4805 ocs_log_crit(sli4->os, "Requested count CQs doesnt match.\n"); 4806 goto error; 4807 } 4808 4809 /* Fill the resp cq ids. */ 4810 for (i = 0; i < num_cqs; i++) { 4811 qs[i]->id = res->q_id + i; 4812 qs[i]->doorbell_offset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].off; 4813 qs[i]->doorbell_rset = regmap[SLI4_REG_CQ_DOORBELL][sli4->if_type].rset; 4814 } 4815 } 4816 4817 ocs_dma_free(sli4->os, &dma); 4818 4819 return 0; 4820 4821 error: 4822 for (i = 0; i < num_cqs; i++) { 4823 if (qs[i]->dma.size) { 4824 ocs_dma_free(sli4->os, &qs[i]->dma); 4825 } 4826 } 4827 4828 if (dma.size) { 4829 ocs_dma_free(sli4->os, &dma); 4830 } 4831 4832 return -1; 4833 } 4834 4835 /** 4836 * @ingroup sli 4837 * @brief Free a queue. 4838 * 4839 * @par Description 4840 * Frees DMA memory and de-registers the requested queue. 4841 * 4842 * @param sli4 SLI context. 4843 * @param q Pointer to the queue object. 4844 * @param destroy_queues Non-zero if the mailbox commands should be sent to destroy the queues. 4845 * @param free_memory Non-zero if the DMA memory associated with the queue should be freed. 4846 * 4847 * @return Returns 0 on success, or -1 otherwise. 4848 */ 4849 int32_t 4850 sli_queue_free(sli4_t *sli4, sli4_queue_t *q, uint32_t destroy_queues, uint32_t free_memory) 4851 { 4852 sli4_destroy_q_fn_t destroy = NULL; 4853 int32_t rc = -1; 4854 4855 if (!sli4 || !q) { 4856 ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q); 4857 return -1; 4858 } 4859 4860 if (destroy_queues) { 4861 switch (q->type) { 4862 case SLI_QTYPE_EQ: 4863 destroy = sli_cmd_common_destroy_eq; 4864 break; 4865 case SLI_QTYPE_CQ: 4866 destroy = sli_cmd_common_destroy_cq; 4867 break; 4868 case SLI_QTYPE_MQ: 4869 destroy = sli_cmd_common_destroy_mq; 4870 break; 4871 case SLI_QTYPE_WQ: 4872 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 4873 destroy = sli_cmd_fcoe_wq_destroy; 4874 } else { 4875 /* TODO */ 4876 ocs_log_test(sli4->os, "unsupported WQ destroy\n"); 4877 return -1; 4878 } 4879 break; 4880 case SLI_QTYPE_RQ: 4881 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 4882 destroy = sli_cmd_fcoe_rq_destroy; 4883 } else { 4884 /* TODO */ 4885 ocs_log_test(sli4->os, "unsupported RQ destroy\n"); 4886 return -1; 4887 } 4888 break; 4889 default: 4890 ocs_log_test(sli4->os, "bad queue type %d\n", 4891 q->type); 4892 return -1; 4893 } 4894 4895 /* 4896 * Destroying queues makes BE3 sad (version 0 interface type). Rely 4897 * on COMMON_FUNCTION_RESET to free host allocated queue resources 4898 * inside the SLI Port. 
4899 */ 4900 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) { 4901 destroy = NULL; 4902 } 4903 4904 /* Destroy the queue if the operation is defined */ 4905 if (destroy && destroy(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, q->id)) { 4906 sli4_res_hdr_t *res = NULL; 4907 4908 if (sli_bmbx_command(sli4)){ 4909 ocs_log_crit(sli4->os, "bootstrap mailbox write fail destroy %s\n", 4910 SLI_QNAME[q->type]); 4911 } else if (sli_res_sli_config(sli4->bmbx.virt)) { 4912 ocs_log_err(sli4->os, "bad status destroy %s\n", SLI_QNAME[q->type]); 4913 } else { 4914 res = (void *)((uint8_t *)sli4->bmbx.virt + 4915 offsetof(sli4_cmd_sli_config_t, payload)); 4916 4917 if (res->status) { 4918 ocs_log_err(sli4->os, "bad destroy %s status=%#x addl=%#x\n", 4919 SLI_QNAME[q->type], 4920 res->status, res->additional_status); 4921 } else { 4922 rc = 0; 4923 } 4924 } 4925 } 4926 } 4927 4928 if (free_memory) { 4929 ocs_lock_free(&q->lock); 4930 4931 if (ocs_dma_free(sli4->os, &q->dma)) { 4932 ocs_log_err(sli4->os, "%s queue ID %d free failed\n", 4933 SLI_QNAME[q->type], q->id); 4934 rc = -1; 4935 } 4936 } 4937 4938 return rc; 4939 } 4940 4941 int32_t 4942 sli_queue_reset(sli4_t *sli4, sli4_queue_t *q) 4943 { 4944 4945 ocs_lock(&q->lock); 4946 4947 q->index = 0; 4948 q->n_posted = 0; 4949 4950 if (SLI_QTYPE_MQ == q->type) { 4951 q->u.r_idx = 0; 4952 } 4953 4954 if (q->dma.virt != NULL) { 4955 ocs_memset(q->dma.virt, 0, (q->size * (uint64_t)q->length)); 4956 } 4957 4958 ocs_unlock(&q->lock); 4959 4960 return 0; 4961 } 4962 4963 /** 4964 * @ingroup sli 4965 * @brief Check if the given queue is empty. 4966 * 4967 * @par Description 4968 * If the valid bit of the current entry is unset, the queue is empty. 4969 * 4970 * @param sli4 SLI context. 4971 * @param q Pointer to the queue object. 4972 * 4973 * @return Returns TRUE if empty, or FALSE otherwise. 4974 */ 4975 int32_t 4976 sli_queue_is_empty(sli4_t *sli4, sli4_queue_t *q) 4977 { 4978 int32_t rc = TRUE; 4979 uint8_t *qe = q->dma.virt; 4980 4981 ocs_lock(&q->lock); 4982 4983 ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD); 4984 4985 qe += q->index * q->size; 4986 4987 rc = !sli_queue_entry_is_valid(q, qe, FALSE); 4988 4989 ocs_unlock(&q->lock); 4990 4991 return rc; 4992 } 4993 4994 /** 4995 * @ingroup sli 4996 * @brief Arm an EQ. 4997 * 4998 * @param sli4 SLI context. 4999 * @param q Pointer to queue object. 5000 * @param arm If TRUE, arm the EQ. 5001 * 5002 * @return Returns 0 on success, or non-zero otherwise. 5003 */ 5004 int32_t 5005 sli_queue_eq_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm) 5006 { 5007 uint32_t val = 0; 5008 5009 ocs_lock(&q->lock); 5010 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) 5011 val = sli_iftype6_eq_doorbell(q->n_posted, q->id, arm); 5012 else 5013 val = sli_eq_doorbell(q->n_posted, q->id, arm); 5014 5015 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); 5016 q->n_posted = 0; 5017 ocs_unlock(&q->lock); 5018 5019 return 0; 5020 } 5021 5022 /** 5023 * @ingroup sli 5024 * @brief Arm a queue. 5025 * 5026 * @param sli4 SLI context. 5027 * @param q Pointer to queue object. 5028 * @param arm If TRUE, arm the queue. 5029 * 5030 * @return Returns 0 on success, or non-zero otherwise. 
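 *
 * @par Example
 * A typical completion-processing sketch (illustrative only): drain the CQ
 * with sli_queue_read(), then re-arm it so the port generates another
 * interrupt; "cq" is assumed to point to a previously created CQ:
 * @code
 * uint8_t cqe[SLI4_CQE_BYTES];
 *
 * while (sli_queue_read(sli4, cq, cqe) == 0) {
 *         // parse and dispatch the completion entry ...
 * }
 * sli_queue_arm(sli4, cq, TRUE);
 * @endcode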
5031 */ 5032 int32_t 5033 sli_queue_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm) 5034 { 5035 uint32_t val = 0; 5036 5037 ocs_lock(&q->lock); 5038 5039 switch (q->type) { 5040 case SLI_QTYPE_EQ: 5041 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) 5042 val = sli_iftype6_eq_doorbell(q->n_posted, q->id, arm); 5043 else 5044 val = sli_eq_doorbell(q->n_posted, q->id, arm); 5045 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); 5046 q->n_posted = 0; 5047 break; 5048 case SLI_QTYPE_CQ: 5049 if (sli4->if_type == SLI4_IF_TYPE_LANCER_G7) 5050 val = sli_iftype6_cq_doorbell(q->n_posted, q->id, arm); 5051 else 5052 val = sli_cq_doorbell(q->n_posted, q->id, arm); 5053 ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val); 5054 q->n_posted = 0; 5055 break; 5056 default: 5057 ocs_log_test(sli4->os, "should only be used for EQ/CQ, not %s\n", 5058 SLI_QNAME[q->type]); 5059 } 5060 5061 ocs_unlock(&q->lock); 5062 5063 return 0; 5064 } 5065 5066 /** 5067 * @ingroup sli 5068 * @brief Write an entry to the queue object. 5069 * 5070 * Note: Assumes the q->lock will be locked and released by the caller. 5071 * 5072 * @param sli4 SLI context. 5073 * @param q Pointer to the queue object. 5074 * @param entry Pointer to the entry contents. 5075 * 5076 * @return Returns queue index on success, or negative error value otherwise. 5077 */ 5078 int32_t 5079 _sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry) 5080 { 5081 int32_t rc = 0; 5082 uint8_t *qe = q->dma.virt; 5083 uint32_t qindex; 5084 5085 qindex = q->index; 5086 qe += q->index * q->size; 5087 5088 if (entry) { 5089 if ((SLI_QTYPE_WQ == q->type) && sli4->config.perf_wq_id_association) { 5090 sli_set_wq_id_association(entry, q->id); 5091 } 5092 #if defined(OCS_INCLUDE_DEBUG) 5093 switch (q->type) { 5094 case SLI_QTYPE_WQ: { 5095 ocs_dump32(OCS_DEBUG_ENABLE_WQ_DUMP, sli4->os, "wqe", entry, q->size); 5096 break; 5097 } 5098 case SLI_QTYPE_MQ: 5099 /* Note: we don't really need to dump the whole 5100 * 256 bytes, just do 64 */ 5101 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mqe outbound", entry, 64); 5102 break; 5103 5104 default: 5105 break; 5106 } 5107 #endif 5108 ocs_memcpy(qe, entry, q->size); 5109 q->n_posted = 1; 5110 } 5111 5112 ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE); 5113 5114 rc = sli_queue_doorbell(sli4, q); 5115 5116 q->index = (q->index + q->n_posted) & (q->length - 1); 5117 q->n_posted = 0; 5118 5119 if (rc < 0) { 5120 /* failure */ 5121 return rc; 5122 } else if (rc > 0) { 5123 /* failure, but we need to return a negative value on failure */ 5124 return -rc; 5125 } else { 5126 return qindex; 5127 } 5128 } 5129 5130 /** 5131 * @ingroup sli 5132 * @brief Write an entry to the queue object. 5133 * 5134 * Note: Assumes the q->lock will be locked and released by the caller. 5135 * 5136 * @param sli4 SLI context. 5137 * @param q Pointer to the queue object. 5138 * @param entry Pointer to the entry contents. 5139 * 5140 * @return Returns queue index on success, or negative error value otherwise. 5141 */ 5142 int32_t 5143 sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry) 5144 { 5145 int32_t rc; 5146 5147 ocs_lock(&q->lock); 5148 rc = _sli_queue_write(sli4, q, entry); 5149 ocs_unlock(&q->lock); 5150 5151 return rc; 5152 } 5153 5154 /** 5155 * @brief Check if the current queue entry is valid. 5156 * 5157 * @param q Pointer to the queue object. 5158 * @param qe Pointer to the queue entry. 5159 * @param clear Boolean to clear valid bit. 
5160 * 5161 * @return Returns TRUE if the entry is valid, or FALSE otherwise. 5162 */ 5163 static uint8_t 5164 sli_queue_entry_is_valid(sli4_queue_t *q, uint8_t *qe, uint8_t clear) 5165 { 5166 uint8_t valid = FALSE; 5167 uint8_t valid_bit_set = 0; 5168 5169 switch (q->type) { 5170 case SLI_QTYPE_EQ: 5171 valid = (((sli4_eqe_t *)qe)->vld == q->phase) ? 1 : 0; 5172 if (valid && clear) { 5173 ((sli4_eqe_t *)qe)->vld = 0; 5174 } 5175 break; 5176 case SLI_QTYPE_CQ: 5177 /* 5178 * For both MCQE and WCQE/RCQE, the valid bit 5179 * is bit 31 of dword 3 (0 based) 5180 */ 5181 valid_bit_set = (qe[15] & 0x80) != 0; 5182 if (valid_bit_set == q->phase) 5183 valid = 1; 5184 5185 if (valid & clear) { 5186 qe[15] &= ~0x80; 5187 } 5188 break; 5189 case SLI_QTYPE_MQ: 5190 valid = q->index != q->u.r_idx; 5191 break; 5192 case SLI_QTYPE_RQ: 5193 valid = TRUE; 5194 clear = FALSE; 5195 break; 5196 default: 5197 ocs_log_test(NULL, "doesn't handle type=%#x\n", q->type); 5198 } 5199 5200 if (clear) { 5201 5202 ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE); 5203 } 5204 5205 return valid; 5206 } 5207 5208 /** 5209 * @ingroup sli 5210 * @brief Read an entry from the queue object. 5211 * 5212 * @param sli4 SLI context. 5213 * @param q Pointer to the queue object. 5214 * @param entry Destination pointer for the queue entry contents. 5215 * 5216 * @return Returns 0 on success, or non-zero otherwise. 5217 */ 5218 int32_t 5219 sli_queue_read(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry) 5220 { 5221 int32_t rc = 0; 5222 uint8_t *qe = q->dma.virt; 5223 uint32_t *qindex = NULL; 5224 5225 uint8_t clear = (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4)) ? FALSE : TRUE; 5226 if (SLI_QTYPE_MQ == q->type) { 5227 qindex = &q->u.r_idx; 5228 } else { 5229 qindex = &q->index; 5230 } 5231 5232 ocs_lock(&q->lock); 5233 5234 ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD); 5235 5236 qe += *qindex * q->size; 5237 5238 if (!sli_queue_entry_is_valid(q, qe, clear)) { 5239 ocs_unlock(&q->lock); 5240 return -1; 5241 } 5242 5243 if (entry) { 5244 ocs_memcpy(entry, qe, q->size); 5245 #if defined(OCS_INCLUDE_DEBUG) 5246 switch(q->type) { 5247 case SLI_QTYPE_CQ: 5248 ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "cq", entry, q->size); 5249 break; 5250 case SLI_QTYPE_MQ: 5251 ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mq Compl", entry, 64); 5252 break; 5253 case SLI_QTYPE_EQ: 5254 ocs_dump32(OCS_DEBUG_ENABLE_EQ_DUMP, sli4->os, "eq Compl", entry, q->size); 5255 break; 5256 default: 5257 break; 5258 } 5259 #endif 5260 } 5261 5262 switch (q->type) { 5263 case SLI_QTYPE_EQ: 5264 case SLI_QTYPE_CQ: 5265 case SLI_QTYPE_MQ: 5266 *qindex = (*qindex + 1) & (q->length - 1); 5267 if (SLI_QTYPE_MQ != q->type) { 5268 q->n_posted++; 5269 /* 5270 * For prism, the phase value will be used to check the validity of eq/cq entries. 5271 * The value toggles after a complete sweep through the queue. 
5272 */ 5273 if ((SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4)) && (*qindex == 0)) { 5274 q->phase ^= (uint16_t) 0x1; 5275 } 5276 } 5277 break; 5278 default: 5279 /* reads don't update the index */ 5280 break; 5281 } 5282 5283 ocs_unlock(&q->lock); 5284 5285 return rc; 5286 } 5287 5288 int32_t 5289 sli_queue_index(sli4_t *sli4, sli4_queue_t *q) 5290 { 5291 5292 if (q) { 5293 return q->index; 5294 } else { 5295 return -1; 5296 } 5297 } 5298 5299 int32_t 5300 sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry) 5301 { 5302 int32_t rc; 5303 5304 ocs_lock(&q->lock); 5305 rc = _sli_queue_poke(sli4, q, index, entry); 5306 ocs_unlock(&q->lock); 5307 5308 return rc; 5309 } 5310 5311 int32_t 5312 _sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry) 5313 { 5314 int32_t rc = 0; 5315 uint8_t *qe = q->dma.virt; 5316 5317 if (index >= q->length) { 5318 return -1; 5319 } 5320 5321 qe += index * q->size; 5322 5323 if (entry) { 5324 ocs_memcpy(qe, entry, q->size); 5325 } 5326 5327 ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE); 5328 5329 return rc; 5330 } 5331 5332 /** 5333 * @ingroup sli 5334 * @brief Allocate SLI Port resources. 5335 * 5336 * @par Description 5337 * Allocate port-related resources, such as VFI, RPI, XRI, and so on. 5338 * Resources are modeled using extents, regardless of whether the underlying 5339 * device implements resource extents. If the device does not implement 5340 * extents, the SLI layer models this as a single (albeit large) extent. 5341 * 5342 * @param sli4 SLI context. 5343 * @param rtype Resource type (for example, RPI or XRI) 5344 * @param rid Allocated resource ID. 5345 * @param index Index into the bitmap. 5346 * 5347 * @return Returns 0 on success, or a non-zero value on failure. 5348 */ 5349 int32_t 5350 sli_resource_alloc(sli4_t *sli4, sli4_resource_e rtype, uint32_t *rid, uint32_t *index) 5351 { 5352 int32_t rc = 0; 5353 uint32_t size; 5354 uint32_t extent_idx; 5355 uint32_t item_idx; 5356 int status; 5357 5358 *rid = UINT32_MAX; 5359 *index = UINT32_MAX; 5360 5361 switch (rtype) { 5362 case SLI_RSRC_FCOE_VFI: 5363 case SLI_RSRC_FCOE_VPI: 5364 case SLI_RSRC_FCOE_RPI: 5365 case SLI_RSRC_FCOE_XRI: 5366 status = ocs_bitmap_find(sli4->config.extent[rtype].use_map, 5367 sli4->config.extent[rtype].map_size); 5368 if (status < 0) { 5369 ocs_log_err(sli4->os, "out of resource %d (alloc=%d)\n", 5370 rtype, sli4->config.extent[rtype].n_alloc); 5371 rc = -1; 5372 break; 5373 } else { 5374 *index = status; 5375 } 5376 5377 size = sli4->config.extent[rtype].size; 5378 5379 extent_idx = *index / size; 5380 item_idx = *index % size; 5381 5382 *rid = sli4->config.extent[rtype].base[extent_idx] + item_idx; 5383 5384 sli4->config.extent[rtype].n_alloc++; 5385 break; 5386 default: 5387 rc = -1; 5388 } 5389 5390 return rc; 5391 } 5392 5393 /** 5394 * @ingroup sli 5395 * @brief Free the SLI Port resources. 5396 * 5397 * @par Description 5398 * Free port-related resources, such as VFI, RPI, XRI, and so. See discussion of 5399 * "extent" usage in sli_resource_alloc. 5400 * 5401 * @param sli4 SLI context. 5402 * @param rtype Resource type (for example, RPI or XRI). 5403 * @param rid Allocated resource ID. 5404 * 5405 * @return Returns 0 on success, or a non-zero value on failure. 
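 *
 * @par Example
 * A sketch of the alloc/free pairing (illustrative only):
 * @code
 * uint32_t rpi = UINT32_MAX;
 * uint32_t index = UINT32_MAX;
 *
 * if (sli_resource_alloc(sli4, SLI_RSRC_FCOE_RPI, &rpi, &index) == 0) {
 *         // ... use the resource ID "rpi" ...
 *         sli_resource_free(sli4, SLI_RSRC_FCOE_RPI, rpi);
 * }
 * @endcode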
5406 */ 5407 int32_t 5408 sli_resource_free(sli4_t *sli4, sli4_resource_e rtype, uint32_t rid) 5409 { 5410 int32_t rc = -1; 5411 uint32_t x; 5412 uint32_t size, *base; 5413 5414 switch (rtype) { 5415 case SLI_RSRC_FCOE_VFI: 5416 case SLI_RSRC_FCOE_VPI: 5417 case SLI_RSRC_FCOE_RPI: 5418 case SLI_RSRC_FCOE_XRI: 5419 /* 5420 * Figure out which extent contains the resource ID. I.e. find 5421 * the extent such that 5422 * extent->base <= resource ID < extent->base + extent->size 5423 */ 5424 base = sli4->config.extent[rtype].base; 5425 size = sli4->config.extent[rtype].size; 5426 5427 /* 5428 * In the case of FW reset, this may be cleared but the force_free path will 5429 * still attempt to free the resource. Prevent a NULL pointer access. 5430 */ 5431 if (base != NULL) { 5432 for (x = 0; x < sli4->config.extent[rtype].number; x++) { 5433 if ((rid >= base[x]) && (rid < (base[x] + size))) { 5434 rid -= base[x]; 5435 ocs_bitmap_clear(sli4->config.extent[rtype].use_map, 5436 (x * size) + rid); 5437 rc = 0; 5438 break; 5439 } 5440 } 5441 } 5442 break; 5443 default: 5444 ; 5445 } 5446 5447 return rc; 5448 } 5449 5450 int32_t 5451 sli_resource_reset(sli4_t *sli4, sli4_resource_e rtype) 5452 { 5453 int32_t rc = -1; 5454 uint32_t i; 5455 5456 switch (rtype) { 5457 case SLI_RSRC_FCOE_VFI: 5458 case SLI_RSRC_FCOE_VPI: 5459 case SLI_RSRC_FCOE_RPI: 5460 case SLI_RSRC_FCOE_XRI: 5461 for (i = 0; i < sli4->config.extent[rtype].map_size; i++) { 5462 ocs_bitmap_clear(sli4->config.extent[rtype].use_map, i); 5463 } 5464 rc = 0; 5465 break; 5466 default: 5467 ; 5468 } 5469 5470 return rc; 5471 } 5472 5473 /** 5474 * @ingroup sli 5475 * @brief Parse an EQ entry to retrieve the CQ_ID for this event. 5476 * 5477 * @param sli4 SLI context. 5478 * @param buf Pointer to the EQ entry. 5479 * @param cq_id CQ_ID for this entry (only valid on success). 5480 * 5481 * @return 5482 * - 0 if success. 5483 * - < 0 if error. 5484 * - > 0 if firmware detects EQ overflow. 5485 */ 5486 int32_t 5487 sli_eq_parse(sli4_t *sli4, uint8_t *buf, uint16_t *cq_id) 5488 { 5489 sli4_eqe_t *eqe = (void *)buf; 5490 int32_t rc = 0; 5491 5492 if (!sli4 || !buf || !cq_id) { 5493 ocs_log_err(NULL, "bad parameters sli4=%p buf=%p cq_id=%p\n", 5494 sli4, buf, cq_id); 5495 return -1; 5496 } 5497 5498 switch (eqe->major_code) { 5499 case SLI4_MAJOR_CODE_STANDARD: 5500 *cq_id = eqe->resource_id; 5501 break; 5502 case SLI4_MAJOR_CODE_SENTINEL: 5503 ocs_log_debug(sli4->os, "sentinel EQE\n"); 5504 rc = 1; 5505 break; 5506 default: 5507 ocs_log_test(sli4->os, "Unsupported EQE: major %x minor %x\n", 5508 eqe->major_code, eqe->minor_code); 5509 rc = -1; 5510 } 5511 5512 return rc; 5513 } 5514 5515 /** 5516 * @ingroup sli 5517 * @brief Parse a CQ entry to retrieve the event type and the associated queue. 5518 * 5519 * @param sli4 SLI context. 5520 * @param cq CQ to process. 5521 * @param cqe Pointer to the CQ entry. 5522 * @param etype CQ event type. 5523 * @param q_id Queue ID associated with this completion message 5524 * (that is, MQ_ID, RQ_ID, and so on). 5525 * 5526 * @return 5527 * - 0 if call completed correctly and CQE status is SUCCESS. 5528 * - -1 if call failed (no CQE status). 5529 * - Other value if call completed correctly and return value is a CQE status value. 
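 *
 * @par Example
 * A sketch of how sli_eq_parse() and sli_cq_parse() fit together when
 * servicing an interrupt (illustrative only; looking up the CQ object for a
 * given cq_id is left to the caller):
 * @code
 * uint8_t eqe[4], cqe[SLI4_CQE_BYTES];
 * uint16_t cq_id, q_id;
 * sli4_qentry_e etype;
 *
 * while (sli_queue_read(sli4, eq, eqe) == 0) {
 *         if (sli_eq_parse(sli4, eqe, &cq_id) == 0) {
 *                 // find "cq" for cq_id, then drain it
 *                 while (sli_queue_read(sli4, cq, cqe) == 0) {
 *                         sli_cq_parse(sli4, cq, cqe, &etype, &q_id);
 *                         // dispatch based on etype ...
 *                 }
 *                 sli_queue_arm(sli4, cq, TRUE);
 *         }
 * }
 * sli_queue_arm(sli4, eq, TRUE);
 * @endcode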
5530 */ 5531 int32_t 5532 sli_cq_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype, 5533 uint16_t *q_id) 5534 { 5535 int32_t rc = 0; 5536 5537 if (!sli4 || !cq || !cqe || !etype) { 5538 ocs_log_err(NULL, "bad parameters sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n", 5539 sli4, cq, cqe, etype, q_id); 5540 return -1; 5541 } 5542 5543 if (cq->u.flag.is_mq) { 5544 sli4_mcqe_t *mcqe = (void *)cqe; 5545 5546 if (mcqe->ae) { 5547 *etype = SLI_QENTRY_ASYNC; 5548 } else { 5549 *etype = SLI_QENTRY_MQ; 5550 rc = sli_cqe_mq(mcqe); 5551 } 5552 *q_id = -1; 5553 } else if (SLI4_PORT_TYPE_FC == sli4->port_type) { 5554 rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id); 5555 } else { 5556 ocs_log_test(sli4->os, "implement CQE parsing type = %#x\n", 5557 sli4->port_type); 5558 rc = -1; 5559 } 5560 5561 return rc; 5562 } 5563 5564 /** 5565 * @ingroup sli 5566 * @brief Cause the chip to enter an unrecoverable error state. 5567 * 5568 * @par Description 5569 * Cause the chip to enter an unrecoverable error state. This is 5570 * used when unexpected FW behavior is detected, so that the FW can be 5571 * halted from the driver as soon as the error is detected. 5572 * 5573 * @param sli4 SLI context. 5574 * @param dump Generate dump as part of reset. 5575 * 5576 * @return Returns 0 if call completed correctly, or -1 if call failed (unsupported chip). 5577 */ 5578 int32_t sli_raise_ue(sli4_t *sli4, uint8_t dump) 5579 { 5580 #define FDD 2 5581 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) { 5582 switch(sli_get_asic_type(sli4)) { 5583 case SLI4_ASIC_TYPE_BE3: { 5584 sli_reg_write(sli4, SLI4_REG_SW_UE_CSR1, 0xffffffff); 5585 sli_reg_write(sli4, SLI4_REG_SW_UE_CSR2, 0); 5586 break; 5587 } 5588 case SLI4_ASIC_TYPE_SKYHAWK: { 5589 uint32_t value; 5590 value = ocs_config_read32(sli4->os, SLI4_SW_UE_REG); 5591 ocs_config_write32(sli4->os, SLI4_SW_UE_REG, (value | (1U << 24))); 5592 break; 5593 } 5594 default: 5595 ocs_log_test(sli4->os, "invalid asic type %d\n", sli_get_asic_type(sli4)); 5596 return -1; 5597 } 5598 } else if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) || 5599 (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4))) { 5600 if (FDD == dump) { 5601 sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL, SLI4_SLIPORT_CONTROL_FDD | SLI4_SLIPORT_CONTROL_IP); 5602 } else { 5603 uint32_t value = SLI4_PHYDEV_CONTROL_FRST; 5604 if (dump == 1) { 5605 value |= SLI4_PHYDEV_CONTROL_DD; 5606 } 5607 sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, value); 5608 } 5609 } else { 5610 ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4)); 5611 return -1; 5612 } 5613 return 0; 5614 } 5615 5616 /** 5617 * @ingroup sli 5618 * @brief Read the SLIPORT_STATUS register to check if a dump is present. 5619 * 5620 * @param sli4 SLI context. 5621 * 5622 * @return Returns 1 if a dump is present, 2 if a functional (FDP) dump is present, 0 if no dump is ready, or -1 on failure (unsupported interface type). 
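 *
 * @par Example
 * A small sketch of acting on the return value (illustrative only):
 * @code
 * switch (sli_dump_is_ready(sli4)) {
 * case 1:
 *         // full dump is ready to be retrieved
 *         break;
 * case 2:
 *         // functional (FDP) dump is ready
 *         break;
 * default:
 *         // nothing to collect, or unsupported interface type
 *         break;
 * }
 * @endcode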
5623 */ 5624 int32_t sli_dump_is_ready(sli4_t *sli4) 5625 { 5626 int32_t rc = 0; 5627 uint32_t port_val; 5628 uint32_t bmbx_val; 5629 uint32_t uerr_lo; 5630 uint32_t uerr_hi; 5631 uint32_t uerr_mask_lo; 5632 uint32_t uerr_mask_hi; 5633 5634 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) { 5635 /* for iftype=0, dump ready when UE is encountered */ 5636 uerr_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO); 5637 uerr_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI); 5638 uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO); 5639 uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI); 5640 if ((uerr_lo & ~uerr_mask_lo) || (uerr_hi & ~uerr_mask_hi)) { 5641 rc = 1; 5642 } 5643 5644 } else if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) || 5645 (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(sli4))) { 5646 /* 5647 * Ensure that the port is ready AND the mailbox is 5648 * ready before signaling that the dump is ready to go. 5649 */ 5650 port_val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS); 5651 bmbx_val = sli_reg_read(sli4, SLI4_REG_BMBX); 5652 5653 if ((bmbx_val & SLI4_BMBX_RDY) && 5654 SLI4_PORT_STATUS_READY(port_val)) { 5655 if(SLI4_PORT_STATUS_DUMP_PRESENT(port_val)) { 5656 rc = 1; 5657 }else if( SLI4_PORT_STATUS_FDP_PRESENT(port_val)) { 5658 rc = 2; 5659 } 5660 } 5661 } else { 5662 ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4)); 5663 return -1; 5664 } 5665 return rc; 5666 } 5667 5668 /** 5669 * @ingroup sli 5670 * @brief Read the SLIPORT_STATUS register to check if a dump is present. 5671 * 5672 * @param sli4 SLI context. 5673 * 5674 * @return 5675 * - 0 if call completed correctly and no dump is present. 5676 * - 1 if call completed and dump is present. 5677 * - -1 if call failed (unsupported chip). 5678 */ 5679 int32_t sli_dump_is_present(sli4_t *sli4) 5680 { 5681 uint32_t val; 5682 uint32_t ready; 5683 5684 if ((SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(sli4)) && 5685 (SLI4_IF_TYPE_LANCER_G7 != sli_get_if_type(sli4))) { 5686 ocs_log_test(sli4->os, "Function only supported for I/F type 2"); 5687 return -1; 5688 } 5689 5690 /* If the chip is not ready, then there cannot be a dump */ 5691 ready = sli_wait_for_fw_ready(sli4, SLI4_INIT_PORT_DELAY_US); 5692 if (!ready) { 5693 return 0; 5694 } 5695 5696 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS); 5697 if (UINT32_MAX == val) { 5698 ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n"); 5699 return -1; 5700 } else { 5701 return ((val & SLI4_PORT_STATUS_DIP) ? 1 : 0); 5702 } 5703 } 5704 5705 /** 5706 * @ingroup sli 5707 * @brief Read the SLIPORT_STATUS register to check if the reset required is set. 5708 * 5709 * @param sli4 SLI context. 5710 * 5711 * @return 5712 * - 0 if call completed correctly and reset is not required. 5713 * - 1 if call completed and reset is required. 5714 * - -1 if call failed. 5715 */ 5716 int32_t sli_reset_required(sli4_t *sli4) 5717 { 5718 uint32_t val; 5719 5720 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) { 5721 ocs_log_test(sli4->os, "reset required N/A for iftype 0\n"); 5722 return 0; 5723 } 5724 5725 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS); 5726 if (UINT32_MAX == val) { 5727 ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n"); 5728 return -1; 5729 } else { 5730 return ((val & SLI4_PORT_STATUS_RN) ? 1 : 0); 5731 } 5732 } 5733 5734 /** 5735 * @ingroup sli 5736 * @brief Read the SLIPORT_SEMAPHORE and SLIPORT_STATUS registers to check if 5737 * the port status indicates that a FW error has occurred. 5738 * 5739 * @param sli4 SLI context. 
5740 * 5741 * @return 5742 * - 0 if call completed correctly and no FW error occurred. 5743 * - > 0 which indicates that a FW error has occurred. 5744 * - -1 if call failed. 5745 */ 5746 int32_t sli_fw_error_status(sli4_t *sli4) 5747 { 5748 uint32_t sliport_semaphore; 5749 int32_t rc = 0; 5750 5751 sliport_semaphore = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE); 5752 if (UINT32_MAX == sliport_semaphore) { 5753 ocs_log_err(sli4->os, "error reading SLIPORT_SEMAPHORE register\n"); 5754 return 1; 5755 } 5756 rc = (SLI4_PORT_SEMAPHORE_IN_ERR(sliport_semaphore) ? 1 : 0); 5757 5758 if (rc == 0) { 5759 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type || 5760 (SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type)) { 5761 uint32_t uerr_mask_lo, uerr_mask_hi; 5762 uint32_t uerr_status_lo, uerr_status_hi; 5763 5764 uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO); 5765 uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI); 5766 uerr_status_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO); 5767 uerr_status_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI); 5768 if ((uerr_mask_lo & uerr_status_lo) != 0 || 5769 (uerr_mask_hi & uerr_status_hi) != 0) { 5770 rc = 1; 5771 } 5772 } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type || 5773 SLI4_IF_TYPE_LANCER_G7 == sli4->if_type) { 5774 uint32_t sliport_status; 5775 5776 sliport_status = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS); 5777 rc = (SLI4_PORT_STATUS_ERROR(sliport_status) ? 1 : 0); 5778 } 5779 } 5780 return rc; 5781 } 5782 5783 /** 5784 * @ingroup sli 5785 * @brief Determine if the chip FW is in a ready state 5786 * 5787 * @param sli4 SLI context. 5788 * 5789 * @return 5790 * - 0 if call completed correctly and FW is not ready. 5791 * - 1 if call completed correctly and FW is ready. 5792 * - -1 if call failed. 5793 */ 5794 int32_t 5795 sli_fw_ready(sli4_t *sli4) 5796 { 5797 uint32_t val; 5798 int32_t rc = -1; 5799 5800 /* 5801 * Is firmware ready for operation? Check needed depends on IF_TYPE 5802 */ 5803 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type || 5804 SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type) { 5805 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE); 5806 rc = ((SLI4_PORT_SEMAPHORE_STATUS_POST_READY == 5807 SLI4_PORT_SEMAPHORE_PORT(val)) && 5808 (!SLI4_PORT_SEMAPHORE_IN_ERR(val)) ? 1 : 0); 5809 } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type || 5810 SLI4_IF_TYPE_LANCER_G7 == sli4->if_type) { 5811 val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS); 5812 rc = (SLI4_PORT_STATUS_READY(val) ? 1 : 0); 5813 } 5814 return rc; 5815 } 5816 5817 /** 5818 * @ingroup sli 5819 * @brief Determine if the link can be configured 5820 * 5821 * @param sli4 SLI context. 5822 * 5823 * @return 5824 * - 0 if link is not configurable. 5825 * - 1 if link is configurable. 5826 */ 5827 int32_t sli_link_is_configurable(sli4_t *sli) 5828 { 5829 int32_t rc = 0; 5830 /* 5831 * Link config works on: Skyhawk and Lancer 5832 * Link config does not work on: LancerG6 5833 */ 5834 5835 switch (sli_get_asic_type(sli)) { 5836 case SLI4_ASIC_TYPE_SKYHAWK: 5837 case SLI4_ASIC_TYPE_LANCER: 5838 case SLI4_ASIC_TYPE_CORSAIR: 5839 rc = 1; 5840 break; 5841 case SLI4_ASIC_TYPE_LANCERG6: 5842 case SLI4_ASIC_TYPE_LANCERG7: 5843 case SLI4_ASIC_TYPE_BE3: 5844 default: 5845 rc = 0; 5846 break; 5847 } 5848 5849 return rc; 5850 5851 } 5852 5853 /* vim: set noexpandtab textwidth=120: */ 5854 5855 /** 5856 * @ingroup sli_fc 5857 * @brief Write an FCOE_WQ_CREATE command. 5858 * 5859 * @param sli4 SLI context. 5860 * @param buf Destination buffer for the command. 
5861 * @param size Buffer size, in bytes. 5862 * @param qmem DMA memory for the queue. 5863 * @param cq_id Associated CQ_ID. 5864 * @param ulp The ULP to bind 5865 * 5866 * @note This creates a Version 0 message. 5867 * 5868 * @return Returns the number of bytes written. 5869 */ 5870 int32_t 5871 sli_cmd_fcoe_wq_create(sli4_t *sli4, void *buf, size_t size, 5872 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp) 5873 { 5874 sli4_req_fcoe_wq_create_t *wq = NULL; 5875 uint32_t sli_config_off = 0; 5876 uint32_t p; 5877 uintptr_t addr; 5878 5879 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 5880 uint32_t payload_size; 5881 5882 /* Payload length must accommodate both request and response */ 5883 payload_size = max(sizeof(sli4_req_fcoe_wq_create_t), 5884 sizeof(sli4_res_common_create_queue_t)); 5885 5886 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, 5887 NULL); 5888 } 5889 wq = (sli4_req_fcoe_wq_create_t *)((uint8_t *)buf + sli_config_off); 5890 5891 wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE; 5892 wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 5893 wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_t) - 5894 sizeof(sli4_req_hdr_t); 5895 /* valid values for number of pages: 1-4 (sec 4.5.1) */ 5896 wq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE); 5897 if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V0_MAX_PAGES)) { 5898 return 0; 5899 } 5900 5901 wq->cq_id = cq_id; 5902 5903 if (sli4->config.dual_ulp_capable) { 5904 wq->dua = 1; 5905 wq->bqu = 1; 5906 wq->ulp = ulp; 5907 } 5908 5909 for (p = 0, addr = qmem->phys; 5910 p < wq->num_pages; 5911 p++, addr += SLI_PAGE_SIZE) { 5912 wq->page_physical_address[p].low = ocs_addr32_lo(addr); 5913 wq->page_physical_address[p].high = ocs_addr32_hi(addr); 5914 } 5915 5916 return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_t)); 5917 } 5918 5919 /** 5920 * @ingroup sli_fc 5921 * @brief Write an FCOE_WQ_CREATE_V1 command. 5922 * 5923 * @param sli4 SLI context. 5924 * @param buf Destination buffer for the command. 5925 * @param size Buffer size, in bytes. 5926 * @param qmem DMA memory for the queue. 5927 * @param cq_id Associated CQ_ID. 5928 * @param ignored This parameter carries the ULP for WQ (ignored for V1) 5929 5930 * 5931 * @return Returns the number of bytes written. 
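 *
 * @par Usage sketch
 * An illustrative call, assuming @c cmd is a 256-byte bootstrap mailbox
 * buffer (the size is shown for illustration only) and @c qmem is an
 * ocs_dma_t that already holds the DMA memory backing the work queue.
 * The last argument is ignored for the V1 command, so 0 is passed:
 * @code
 * uint8_t cmd[256];
 * int32_t bytes;
 *
 * bytes = sli_cmd_fcoe_wq_create_v1(sli4, cmd, sizeof(cmd), &qmem, cq_id, 0);
 * if (bytes == 0) {
 *         // qmem->size did not map to a supported page size / page count
 * }
 * @endcode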
5932 */ 5933 int32_t 5934 sli_cmd_fcoe_wq_create_v1(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem, 5935 uint16_t cq_id, uint16_t ignored) 5936 { 5937 sli4_req_fcoe_wq_create_v1_t *wq = NULL; 5938 uint32_t sli_config_off = 0; 5939 uint32_t p; 5940 uintptr_t addr; 5941 uint32_t page_size = 0; 5942 uint32_t page_bytes = 0; 5943 uint32_t n_wqe = 0; 5944 5945 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 5946 uint32_t payload_size; 5947 5948 /* Payload length must accommodate both request and response */ 5949 payload_size = max(sizeof(sli4_req_fcoe_wq_create_v1_t), 5950 sizeof(sli4_res_common_create_queue_t)); 5951 5952 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, 5953 NULL); 5954 } 5955 wq = (sli4_req_fcoe_wq_create_v1_t *)((uint8_t *)buf + sli_config_off); 5956 5957 wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE; 5958 wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 5959 wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_v1_t) - 5960 sizeof(sli4_req_hdr_t); 5961 wq->hdr.version = 1; 5962 5963 n_wqe = qmem->size / sli4->config.wqe_size; 5964 5965 /* This heuristic to determine the page size is simplistic 5966 * but could be made more sophisticated 5967 */ 5968 switch (qmem->size) { 5969 case 4096: 5970 case 8192: 5971 case 16384: 5972 case 32768: 5973 page_size = 1; 5974 break; 5975 case 65536: 5976 page_size = 2; 5977 break; 5978 case 131072: 5979 page_size = 4; 5980 break; 5981 case 262144: 5982 page_size = 8; 5983 break; 5984 case 524288: 5985 page_size = 10; 5986 break; 5987 default: 5988 return 0; 5989 } 5990 page_bytes = page_size * SLI_PAGE_SIZE; 5991 5992 /* valid values for number of pages: 1-8 */ 5993 wq->num_pages = sli_page_count(qmem->size, page_bytes); 5994 if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V1_MAX_PAGES)) { 5995 return 0; 5996 } 5997 5998 wq->cq_id = cq_id; 5999 6000 wq->page_size = page_size; 6001 6002 if (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) { 6003 wq->wqe_size = SLI4_WQE_EXT_SIZE; 6004 } else { 6005 wq->wqe_size = SLI4_WQE_SIZE; 6006 } 6007 6008 wq->wqe_count = n_wqe; 6009 6010 for (p = 0, addr = qmem->phys; 6011 p < wq->num_pages; 6012 p++, addr += page_bytes) { 6013 wq->page_physical_address[p].low = ocs_addr32_lo(addr); 6014 wq->page_physical_address[p].high = ocs_addr32_hi(addr); 6015 } 6016 6017 return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_v1_t)); 6018 } 6019 6020 /** 6021 * @ingroup sli_fc 6022 * @brief Write an FCOE_WQ_DESTROY command. 6023 * 6024 * @param sli4 SLI context. 6025 * @param buf Destination buffer for the command. 6026 * @param size Buffer size, in bytes. 6027 * @param wq_id WQ_ID. 6028 * 6029 * @return Returns the number of bytes written. 
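 *
 * @par Usage sketch
 * Illustrative only; @c wq_id is assumed to be the queue ID returned in the
 * earlier FCOE_WQ_CREATE completion:
 * @code
 * uint8_t cmd[256];      // illustrative mailbox buffer size
 *
 * if (sli_cmd_fcoe_wq_destroy(sli4, cmd, sizeof(cmd), wq_id) > 0) {
 *         // command written; post it through the mailbox to tear down the WQ
 * }
 * @endcode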
6030 */ 6031 int32_t 6032 sli_cmd_fcoe_wq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t wq_id) 6033 { 6034 sli4_req_fcoe_wq_destroy_t *wq = NULL; 6035 uint32_t sli_config_off = 0; 6036 6037 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 6038 uint32_t payload_size; 6039 6040 /* Payload length must accommodate both request and response */ 6041 payload_size = max(sizeof(sli4_req_fcoe_wq_destroy_t), 6042 sizeof(sli4_res_hdr_t)); 6043 6044 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, 6045 NULL); 6046 } 6047 wq = (sli4_req_fcoe_wq_destroy_t *)((uint8_t *)buf + sli_config_off); 6048 6049 wq->hdr.opcode = SLI4_OPC_FCOE_WQ_DESTROY; 6050 wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 6051 wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_destroy_t) - 6052 sizeof(sli4_req_hdr_t); 6053 6054 wq->wq_id = wq_id; 6055 6056 return(sli_config_off + sizeof(sli4_req_fcoe_wq_destroy_t)); 6057 } 6058 6059 /** 6060 * @ingroup sli_fc 6061 * @brief Write an FCOE_POST_SGL_PAGES command. 6062 * 6063 * @param sli4 SLI context. 6064 * @param buf Destination buffer for the command. 6065 * @param size Buffer size, in bytes. 6066 * @param xri starting XRI 6067 * @param xri_count XRI 6068 * @param page0 First SGL memory page. 6069 * @param page1 Second SGL memory page (optional). 6070 * @param dma DMA buffer for non-embedded mailbox command (options) 6071 * 6072 * if non-embedded mbx command is used, dma buffer must be at least (32 + xri_count*16) in length 6073 * 6074 * @return Returns the number of bytes written. 6075 */ 6076 int32_t 6077 sli_cmd_fcoe_post_sgl_pages(sli4_t *sli4, void *buf, size_t size, 6078 uint16_t xri, uint32_t xri_count, ocs_dma_t *page0[], ocs_dma_t *page1[], ocs_dma_t *dma) 6079 { 6080 sli4_req_fcoe_post_sgl_pages_t *post = NULL; 6081 uint32_t sli_config_off = 0; 6082 uint32_t i; 6083 6084 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 6085 uint32_t payload_size; 6086 6087 /* Payload length must accommodate both request and response */ 6088 payload_size = max(sizeof(sli4_req_fcoe_post_sgl_pages_t), 6089 sizeof(sli4_res_hdr_t)); 6090 6091 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, 6092 dma); 6093 } 6094 if (dma) { 6095 post = dma->virt; 6096 ocs_memset(post, 0, dma->size); 6097 } else { 6098 post = (sli4_req_fcoe_post_sgl_pages_t *)((uint8_t *)buf + sli_config_off); 6099 } 6100 6101 post->hdr.opcode = SLI4_OPC_FCOE_POST_SGL_PAGES; 6102 post->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 6103 /* payload size calculation 6104 * 4 = xri_start + xri_count 6105 * xri_count = # of XRI's registered 6106 * sizeof(uint64_t) = physical address size 6107 * 2 = # of physical addresses per page set 6108 */ 6109 post->hdr.request_length = 4 + (xri_count * (sizeof(uint64_t) * 2)); 6110 6111 post->xri_start = xri; 6112 post->xri_count = xri_count; 6113 6114 for (i = 0; i < xri_count; i++) { 6115 post->page_set[i].page0_low = ocs_addr32_lo(page0[i]->phys); 6116 post->page_set[i].page0_high = ocs_addr32_hi(page0[i]->phys); 6117 } 6118 6119 if (page1) { 6120 for (i = 0; i < xri_count; i++) { 6121 post->page_set[i].page1_low = ocs_addr32_lo(page1[i]->phys); 6122 post->page_set[i].page1_high = ocs_addr32_hi(page1[i]->phys); 6123 } 6124 } 6125 6126 return dma ? sli_config_off : (sli_config_off + sizeof(sli4_req_fcoe_post_sgl_pages_t)); 6127 } 6128 6129 /** 6130 * @ingroup sli_fc 6131 * @brief Write an FCOE_RQ_CREATE command. 6132 * 6133 * @param sli4 SLI context. 6134 * @param buf Destination buffer for the command. 6135 * @param size Buffer size, in bytes. 
6136 * @param qmem DMA memory for the queue. 6137 * @param cq_id Associated CQ_ID. 6138 * @param ulp This parameter carries the ULP for the RQ. 6139 * @param buffer_size Buffer size pointed to by each RQE. 6140 * 6141 * @note This creates a Version 0 message. 6142 * 6143 * @return Returns the number of bytes written. 6144 */ 6145 int32_t 6146 sli_cmd_fcoe_rq_create(sli4_t *sli4, void *buf, size_t size, 6147 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp, uint16_t buffer_size) 6148 { 6149 sli4_req_fcoe_rq_create_t *rq = NULL; 6150 uint32_t sli_config_off = 0; 6151 uint32_t p; 6152 uintptr_t addr; 6153 6154 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 6155 uint32_t payload_size; 6156 6157 /* Payload length must accommodate both request and response */ 6158 payload_size = max(sizeof(sli4_req_fcoe_rq_create_t), 6159 sizeof(sli4_res_common_create_queue_t)); 6160 6161 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, 6162 NULL); 6163 } 6164 rq = (sli4_req_fcoe_rq_create_t *)((uint8_t *)buf + sli_config_off); 6165 6166 rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE; 6167 rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 6168 rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_t) - 6169 sizeof(sli4_req_hdr_t); 6170 /* valid values for number of pages: 1-8 (sec 4.5.6) */ 6171 rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE); 6172 if (!rq->num_pages || (rq->num_pages > SLI4_FCOE_RQ_CREATE_V0_MAX_PAGES)) { 6173 ocs_log_test(sli4->os, "num_pages %d not valid\n", rq->num_pages); 6174 return 0; 6175 } 6176 6177 /* 6178 * RQE count is the log base 2 of the total number of entries 6179 */ 6180 rq->rqe_count = ocs_lg2(qmem->size / SLI4_FCOE_RQE_SIZE); 6181 6182 if ((buffer_size < SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE) || 6183 (buffer_size > SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE)) { 6184 ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n", 6185 buffer_size, 6186 SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE, 6187 SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE); 6188 return -1; 6189 } 6190 rq->buffer_size = buffer_size; 6191 6192 rq->cq_id = cq_id; 6193 6194 if (sli4->config.dual_ulp_capable) { 6195 rq->dua = 1; 6196 rq->bqu = 1; 6197 rq->ulp = ulp; 6198 } 6199 6200 for (p = 0, addr = qmem->phys; 6201 p < rq->num_pages; 6202 p++, addr += SLI_PAGE_SIZE) { 6203 rq->page_physical_address[p].low = ocs_addr32_lo(addr); 6204 rq->page_physical_address[p].high = ocs_addr32_hi(addr); 6205 } 6206 6207 return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_t)); 6208 } 6209 6210 /** 6211 * @ingroup sli_fc 6212 * @brief Write an FCOE_RQ_CREATE_V1 command. 6213 * 6214 * @param sli4 SLI context. 6215 * @param buf Destination buffer for the command. 6216 * @param size Buffer size, in bytes. 6217 * @param qmem DMA memory for the queue. 6218 * @param cq_id Associated CQ_ID. 6219 * @param ulp This parameter carries the ULP for the RQ (ignored for V1). 6220 * @param buffer_size Buffer size pointed to by each RQE. 6221 * 6222 * @note This creates a Version 1 message. 6223 * 6224 * @return Returns the number of bytes written.
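 *
 * @par RQE count encoding (illustrative)
 * Unlike the Version 0 command, which encodes the number of entries as a
 * power of two, the Version 1 command carries the absolute entry count:
 * @code
 * // 16 KB of queue memory with the 8-byte FCOE RQE used here gives
 * // 2048 entries:
 * //   V0: rq->rqe_count = ocs_lg2(2048) = 11
 * //   V1: rq->rqe_count = 2048
 * @endcode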
6225 */ 6226 int32_t 6227 sli_cmd_fcoe_rq_create_v1(sli4_t *sli4, void *buf, size_t size, 6228 ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp, 6229 uint16_t buffer_size) 6230 { 6231 sli4_req_fcoe_rq_create_v1_t *rq = NULL; 6232 uint32_t sli_config_off = 0; 6233 uint32_t p; 6234 uintptr_t addr; 6235 6236 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 6237 uint32_t payload_size; 6238 6239 /* Payload length must accommodate both request and response */ 6240 payload_size = max(sizeof(sli4_req_fcoe_rq_create_v1_t), 6241 sizeof(sli4_res_common_create_queue_t)); 6242 6243 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, 6244 NULL); 6245 } 6246 rq = (sli4_req_fcoe_rq_create_v1_t *)((uint8_t *)buf + sli_config_off); 6247 6248 rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE; 6249 rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 6250 rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v1_t) - 6251 sizeof(sli4_req_hdr_t); 6252 rq->hdr.version = 1; 6253 6254 /* Disable "no buffer warnings" to avoid Lancer bug */ 6255 rq->dnb = TRUE; 6256 6257 /* valid values for number of pages: 1-8 (sec 4.5.6) */ 6258 rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE); 6259 if (!rq->num_pages || (rq->num_pages > SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES)) { 6260 ocs_log_test(sli4->os, "num_pages %d not valid, max %d\n", 6261 rq->num_pages, SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES); 6262 return 0; 6263 } 6264 6265 /* 6266 * RQE count is the total number of entries (note not lg2(# entries)) 6267 */ 6268 rq->rqe_count = qmem->size / SLI4_FCOE_RQE_SIZE; 6269 6270 rq->rqe_size = SLI4_FCOE_RQE_SIZE_8; 6271 6272 rq->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096; 6273 6274 if ((buffer_size < sli4->config.rq_min_buf_size) || 6275 (buffer_size > sli4->config.rq_max_buf_size)) { 6276 ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n", 6277 buffer_size, 6278 sli4->config.rq_min_buf_size, 6279 sli4->config.rq_max_buf_size); 6280 return -1; 6281 } 6282 rq->buffer_size = buffer_size; 6283 6284 rq->cq_id = cq_id; 6285 6286 for (p = 0, addr = qmem->phys; 6287 p < rq->num_pages; 6288 p++, addr += SLI_PAGE_SIZE) { 6289 rq->page_physical_address[p].low = ocs_addr32_lo(addr); 6290 rq->page_physical_address[p].high = ocs_addr32_hi(addr); 6291 } 6292 6293 return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_v1_t)); 6294 } 6295 6296 /** 6297 * @ingroup sli_fc 6298 * @brief Write an FCOE_RQ_DESTROY command. 6299 * 6300 * @param sli4 SLI context. 6301 * @param buf Destination buffer for the command. 6302 * @param size Buffer size, in bytes. 6303 * @param rq_id RQ_ID. 6304 * 6305 * @return Returns the number of bytes written. 
6306 */ 6307 int32_t 6308 sli_cmd_fcoe_rq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t rq_id) 6309 { 6310 sli4_req_fcoe_rq_destroy_t *rq = NULL; 6311 uint32_t sli_config_off = 0; 6312 6313 if (SLI4_PORT_TYPE_FC == sli4->port_type) { 6314 uint32_t payload_size; 6315 6316 /* Payload length must accommodate both request and response */ 6317 payload_size = max(sizeof(sli4_req_fcoe_rq_destroy_t), 6318 sizeof(sli4_res_hdr_t)); 6319 6320 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, 6321 NULL); 6322 } 6323 rq = (sli4_req_fcoe_rq_destroy_t *)((uint8_t *)buf + sli_config_off); 6324 6325 rq->hdr.opcode = SLI4_OPC_FCOE_RQ_DESTROY; 6326 rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 6327 rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_destroy_t) - 6328 sizeof(sli4_req_hdr_t); 6329 6330 rq->rq_id = rq_id; 6331 6332 return(sli_config_off + sizeof(sli4_req_fcoe_rq_destroy_t)); 6333 } 6334 6335 /** 6336 * @ingroup sli_fc 6337 * @brief Write an FCOE_READ_FCF_TABLE command. 6338 * 6339 * @note 6340 * The response of this command exceeds the size of an embedded 6341 * command and requires an external buffer with DMA capability to hold the results. 6342 * The caller should allocate the ocs_dma_t structure / memory. 6343 * 6344 * @param sli4 SLI context. 6345 * @param buf Destination buffer for the command. 6346 * @param size Buffer size, in bytes. 6347 * @param dma Pointer to DMA memory structure. This is allocated by the caller. 6348 * @param index FCF table index to retrieve. 6349 * 6350 * @return Returns the number of bytes written. 6351 */ 6352 int32_t 6353 sli_cmd_fcoe_read_fcf_table(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint16_t index) 6354 { 6355 sli4_req_fcoe_read_fcf_table_t *read_fcf = NULL; 6356 6357 if (SLI4_PORT_TYPE_FC != sli4->port_type) { 6358 ocs_log_test(sli4->os, "FCOE_READ_FCF_TABLE only supported on FC\n"); 6359 return -1; 6360 } 6361 6362 read_fcf = dma->virt; 6363 6364 ocs_memset(read_fcf, 0, sizeof(sli4_req_fcoe_read_fcf_table_t)); 6365 6366 read_fcf->hdr.opcode = SLI4_OPC_FCOE_READ_FCF_TABLE; 6367 read_fcf->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 6368 read_fcf->hdr.request_length = dma->size - 6369 sizeof(sli4_req_fcoe_read_fcf_table_t); 6370 read_fcf->fcf_index = index; 6371 6372 return sli_cmd_sli_config(sli4, buf, size, 0, dma); 6373 } 6374 6375 /** 6376 * @ingroup sli_fc 6377 * @brief Write an FCOE_POST_HDR_TEMPLATES command. 6378 * 6379 * @param sli4 SLI context. 6380 * @param buf Destination buffer for the command. 6381 * @param size Buffer size, in bytes. 6382 * @param dma Pointer to DMA memory structure. This is allocated by the caller. 6383 * @param rpi Starting RPI index for the header templates. 6384 * @param payload_dma Pointer to DMA memory used to hold larger descriptor counts. 6385 * 6386 * @return Returns the number of bytes written. 
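 *
 * @par Payload sizing (illustrative)
 * The command stays embedded only while the page list fits in the mailbox;
 * with more than 16 page descriptors the routine switches to a non-embedded
 * payload that it allocates into @c payload_dma:
 * @code
 * uint32_t page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);
 * uint32_t payload_size = sizeof(sli4_req_fcoe_post_hdr_templates_t) +
 *                 page_count * sizeof(sli4_physical_page_descriptor_t);
 * // page_count <= 16 : embedded SLI_CONFIG command
 * // page_count >  16 : non-embedded; payload_dma is allocated by this routine
 * @endcode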
6387 */ 6388 int32_t 6389 sli_cmd_fcoe_post_hdr_templates(sli4_t *sli4, void *buf, size_t size, 6390 ocs_dma_t *dma, uint16_t rpi, ocs_dma_t *payload_dma) 6391 { 6392 sli4_req_fcoe_post_hdr_templates_t *template = NULL; 6393 uint32_t sli_config_off = 0; 6394 uintptr_t phys = 0; 6395 uint32_t i = 0; 6396 uint32_t page_count; 6397 uint32_t payload_size; 6398 6399 page_count = sli_page_count(dma->size, SLI_PAGE_SIZE); 6400 6401 payload_size = sizeof(sli4_req_fcoe_post_hdr_templates_t) + 6402 page_count * sizeof(sli4_physical_page_descriptor_t); 6403 6404 if (page_count > 16) { 6405 /* We can't fit more than 16 descriptors into an embedded mailbox 6406 command, it has to be non-embedded */ 6407 if (ocs_dma_alloc(sli4->os, payload_dma, payload_size, 4)) { 6408 ocs_log_err(sli4->os, "mailbox payload memory allocation fail\n"); 6409 return 0; 6410 } 6411 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, payload_dma); 6412 template = (sli4_req_fcoe_post_hdr_templates_t *)payload_dma->virt; 6413 } else { 6414 sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL); 6415 template = (sli4_req_fcoe_post_hdr_templates_t *)((uint8_t *)buf + sli_config_off); 6416 } 6417 6418 if (UINT16_MAX == rpi) { 6419 rpi = sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0]; 6420 } 6421 6422 template->hdr.opcode = SLI4_OPC_FCOE_POST_HDR_TEMPLATES; 6423 template->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 6424 template->hdr.request_length = sizeof(sli4_req_fcoe_post_hdr_templates_t) - 6425 sizeof(sli4_req_hdr_t); 6426 6427 template->rpi_offset = rpi; 6428 template->page_count = page_count; 6429 phys = dma->phys; 6430 for (i = 0; i < template->page_count; i++) { 6431 template->page_descriptor[i].low = ocs_addr32_lo(phys); 6432 template->page_descriptor[i].high = ocs_addr32_hi(phys); 6433 6434 phys += SLI_PAGE_SIZE; 6435 } 6436 6437 return(sli_config_off + payload_size); 6438 } 6439 6440 int32_t 6441 sli_cmd_fcoe_rediscover_fcf(sli4_t *sli4, void *buf, size_t size, uint16_t index) 6442 { 6443 sli4_req_fcoe_rediscover_fcf_t *redisc = NULL; 6444 uint32_t sli_config_off = 0; 6445 6446 sli_config_off = sli_cmd_sli_config(sli4, buf, size, 6447 sizeof(sli4_req_fcoe_rediscover_fcf_t), 6448 NULL); 6449 6450 redisc = (sli4_req_fcoe_rediscover_fcf_t *)((uint8_t *)buf + sli_config_off); 6451 6452 redisc->hdr.opcode = SLI4_OPC_FCOE_REDISCOVER_FCF; 6453 redisc->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 6454 redisc->hdr.request_length = sizeof(sli4_req_fcoe_rediscover_fcf_t) - 6455 sizeof(sli4_req_hdr_t); 6456 6457 if (index == UINT16_MAX) { 6458 redisc->fcf_count = 0; 6459 } else { 6460 redisc->fcf_count = 1; 6461 redisc->fcf_index[0] = index; 6462 } 6463 6464 return(sli_config_off + sizeof(sli4_req_fcoe_rediscover_fcf_t)); 6465 } 6466 6467 /** 6468 * @ingroup sli_fc 6469 * @brief Write an ABORT_WQE work queue entry. 6470 * 6471 * @param sli4 SLI context. 6472 * @param buf Destination buffer for the WQE. 6473 * @param size Buffer size, in bytes. 6474 * @param type Abort type, such as XRI, abort tag, and request tag. 6475 * @param send_abts Boolean to cause the hardware to automatically generate an ABTS. 6476 * @param ids ID of IOs to abort. 6477 * @param mask Mask applied to the ID values to abort. 6478 * @param tag Tag value associated with this abort. 6479 * @param cq_id The id of the completion queue where the WQE response is sent. 6480 * @param dnrx When set to 1, this field indicates that the SLI Port must not return the associated XRI to the SLI 6481 * Port's optimized write XRI pool. 
6482 * 6483 * @return Returns 0 on success, or a non-zero value on failure. 6484 */ 6485 int32_t 6486 sli_abort_wqe(sli4_t *sli4, void *buf, size_t size, sli4_abort_type_e type, uint32_t send_abts, 6487 uint32_t ids, uint32_t mask, uint16_t tag, uint16_t cq_id) 6488 { 6489 sli4_abort_wqe_t *abort = buf; 6490 6491 ocs_memset(buf, 0, size); 6492 6493 switch (type) { 6494 case SLI_ABORT_XRI: 6495 abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG; 6496 if (mask) { 6497 ocs_log_warn(sli4->os, "warning non-zero mask %#x when aborting XRI %#x\n", mask, ids); 6498 mask = 0; 6499 } 6500 break; 6501 case SLI_ABORT_ABORT_ID: 6502 abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG; 6503 break; 6504 case SLI_ABORT_REQUEST_ID: 6505 abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG; 6506 break; 6507 default: 6508 ocs_log_test(sli4->os, "unsupported type %#x\n", type); 6509 return -1; 6510 } 6511 6512 abort->ia = send_abts ? 0 : 1; 6513 6514 /* Suppress ABTS retries */ 6515 abort->ir = 1; 6516 6517 abort->t_mask = mask; 6518 abort->t_tag = ids; 6519 abort->command = SLI4_WQE_ABORT; 6520 abort->request_tag = tag; 6521 abort->qosd = TRUE; 6522 abort->cq_id = cq_id; 6523 abort->cmd_type = SLI4_CMD_ABORT_WQE; 6524 6525 return 0; 6526 } 6527 6528 /** 6529 * @ingroup sli_fc 6530 * @brief Write an ELS_REQUEST64_WQE work queue entry. 6531 * 6532 * @param sli4 SLI context. 6533 * @param buf Destination buffer for the WQE. 6534 * @param size Buffer size, in bytes. 6535 * @param sgl DMA memory for the ELS request. 6536 * @param req_type ELS request type. 6537 * @param req_len Length of ELS request in bytes. 6538 * @param max_rsp_len Max length of ELS response in bytes. 6539 * @param timeout Time, in seconds, before an IO times out. Zero means 2 * R_A_TOV. 6540 * @param xri XRI for this exchange. 6541 * @param tag IO tag value. 6542 * @param cq_id The id of the completion queue where the WQE response is sent. 6543 * @param rnode Destination of ELS request (that is, the remote node). 6544 * 6545 * @return Returns 0 on success, or a non-zero value on failure. 
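 *
 * @par SGL layout (illustrative)
 * The WQE is built from a two-entry SGL; the assumed layout below follows
 * from the request payload being taken from sge[0] and from the two-entry
 * BLP that is written when SGLs are not pre-registered:
 * @code
 * sli4_sge_t *sge = sgl->virt;
 * // sge[0] : ELS request payload (req_len bytes)
 * // sge[1] : buffer for the ELS response (up to max_rsp_len bytes)
 * @endcode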
6546 */ 6547 int32_t 6548 sli_els_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint8_t req_type, 6549 uint32_t req_len, uint32_t max_rsp_len, uint8_t timeout, 6550 uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode) 6551 { 6552 sli4_els_request64_wqe_t *els = buf; 6553 sli4_sge_t *sge = sgl->virt; 6554 uint8_t is_fabric = FALSE; 6555 6556 ocs_memset(buf, 0, size); 6557 6558 if (sli4->config.sgl_pre_registered) { 6559 els->xbl = FALSE; 6560 6561 els->dbde = TRUE; 6562 els->els_request_payload.bde_type = SLI4_BDE_TYPE_BDE_64; 6563 6564 els->els_request_payload.buffer_length = req_len; 6565 els->els_request_payload.u.data.buffer_address_low = sge[0].buffer_address_low; 6566 els->els_request_payload.u.data.buffer_address_high = sge[0].buffer_address_high; 6567 } else { 6568 els->xbl = TRUE; 6569 6570 els->els_request_payload.bde_type = SLI4_BDE_TYPE_BLP; 6571 6572 els->els_request_payload.buffer_length = 2 * sizeof(sli4_sge_t); 6573 els->els_request_payload.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); 6574 els->els_request_payload.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); 6575 } 6576 6577 els->els_request_payload_length = req_len; 6578 els->max_response_payload_length = max_rsp_len; 6579 6580 els->xri_tag = xri; 6581 els->timer = timeout; 6582 els->class = SLI4_ELS_REQUEST64_CLASS_3; 6583 6584 els->command = SLI4_WQE_ELS_REQUEST64; 6585 6586 els->request_tag = tag; 6587 6588 if (rnode->node_group) { 6589 els->hlm = TRUE; 6590 els->remote_id = rnode->fc_id & 0x00ffffff; 6591 } 6592 6593 els->iod = SLI4_ELS_REQUEST64_DIR_READ; 6594 6595 els->qosd = TRUE; 6596 6597 /* figure out the ELS_ID value from the request buffer */ 6598 6599 switch (req_type) { 6600 case FC_ELS_CMD_LOGO: 6601 els->els_id = SLI4_ELS_REQUEST64_LOGO; 6602 if (rnode->attached) { 6603 els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 6604 els->context_tag = rnode->indicator; 6605 } else { 6606 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; 6607 els->context_tag = rnode->sport->indicator; 6608 } 6609 if (FC_ADDR_FABRIC == rnode->fc_id) { 6610 is_fabric = TRUE; 6611 } 6612 break; 6613 case FC_ELS_CMD_FDISC: 6614 if (FC_ADDR_FABRIC == rnode->fc_id) { 6615 is_fabric = TRUE; 6616 } 6617 if (0 == rnode->sport->fc_id) { 6618 els->els_id = SLI4_ELS_REQUEST64_FDISC; 6619 is_fabric = TRUE; 6620 } else { 6621 els->els_id = SLI4_ELS_REQUEST64_OTHER; 6622 } 6623 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; 6624 els->context_tag = rnode->sport->indicator; 6625 els->sp = TRUE; 6626 break; 6627 case FC_ELS_CMD_FLOGI: 6628 els->els_id = SLI4_ELS_REQUEST64_FLOGIN; 6629 is_fabric = TRUE; 6630 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) { 6631 if (!rnode->sport->domain) { 6632 ocs_log_test(sli4->os, "invalid domain handle\n"); 6633 return -1; 6634 } 6635 /* 6636 * IF_TYPE 0 skips INIT_VFI/INIT_VPI and therefore must use the 6637 * FCFI here 6638 */ 6639 els->ct = SLI4_ELS_REQUEST64_CONTEXT_FCFI; 6640 els->context_tag = rnode->sport->domain->fcf_indicator; 6641 els->sp = TRUE; 6642 } else { 6643 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; 6644 els->context_tag = rnode->sport->indicator; 6645 6646 /* 6647 * Set SP here ... we haven't done a REG_VPI yet 6648 * TODO: need to maybe not set this when we have 6649 * completed VFI/VPI registrations ... 6650 * 6651 * Use the FC_ID of the SPORT if it has been allocated, otherwise 6652 * use an S_ID of zero. 
6653 */ 6654 els->sp = TRUE; 6655 if (rnode->sport->fc_id != UINT32_MAX) { 6656 els->sid = rnode->sport->fc_id; 6657 } 6658 } 6659 break; 6660 case FC_ELS_CMD_PLOGI: 6661 els->els_id = SLI4_ELS_REQUEST64_PLOGI; 6662 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; 6663 els->context_tag = rnode->sport->indicator; 6664 break; 6665 case FC_ELS_CMD_SCR: 6666 els->els_id = SLI4_ELS_REQUEST64_OTHER; 6667 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; 6668 els->context_tag = rnode->sport->indicator; 6669 break; 6670 default: 6671 els->els_id = SLI4_ELS_REQUEST64_OTHER; 6672 if (rnode->attached) { 6673 els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 6674 els->context_tag = rnode->indicator; 6675 } else { 6676 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; 6677 els->context_tag = rnode->sport->indicator; 6678 } 6679 break; 6680 } 6681 6682 if (is_fabric) { 6683 els->cmd_type = SLI4_ELS_REQUEST64_CMD_FABRIC; 6684 } else { 6685 els->cmd_type = SLI4_ELS_REQUEST64_CMD_NON_FABRIC; 6686 } 6687 6688 els->cq_id = cq_id; 6689 6690 if (SLI4_ELS_REQUEST64_CONTEXT_RPI != els->ct) { 6691 els->remote_id = rnode->fc_id; 6692 } 6693 if (SLI4_ELS_REQUEST64_CONTEXT_VPI == els->ct) { 6694 els->temporary_rpi = rnode->indicator; 6695 } 6696 6697 return 0; 6698 } 6699 6700 /** 6701 * @ingroup sli_fc 6702 * @brief Write an FCP_ICMND64_WQE work queue entry. 6703 * 6704 * @param sli4 SLI context. 6705 * @param buf Destination buffer for the WQE. 6706 * @param size Buffer size, in bytes. 6707 * @param sgl DMA memory for the scatter gather list. 6708 * @param xri XRI for this exchange. 6709 * @param tag IO tag value. 6710 * @param cq_id The id of the completion queue where the WQE response is sent. 6711 * @param rpi remote node indicator (RPI) 6712 * @param rnode Destination request (that is, the remote node). 6713 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout. 6714 * 6715 * @return Returns 0 on success, or a non-zero value on failure. 6716 */ 6717 int32_t 6718 sli_fcp_icmnd64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, 6719 uint16_t xri, uint16_t tag, uint16_t cq_id, 6720 uint32_t rpi, ocs_remote_node_t *rnode, uint8_t timeout) 6721 { 6722 sli4_fcp_icmnd64_wqe_t *icmnd = buf; 6723 sli4_sge_t *sge = NULL; 6724 6725 ocs_memset(buf, 0, size); 6726 6727 if (!sgl || !sgl->virt) { 6728 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", 6729 sgl, sgl ? 
sgl->virt : NULL); 6730 return -1; 6731 } 6732 sge = sgl->virt; 6733 6734 if (sli4->config.sgl_pre_registered) { 6735 icmnd->xbl = FALSE; 6736 6737 icmnd->dbde = TRUE; 6738 icmnd->bde.bde_type = SLI4_BDE_TYPE_BDE_64; 6739 6740 icmnd->bde.buffer_length = sge[0].buffer_length; 6741 icmnd->bde.u.data.buffer_address_low = sge[0].buffer_address_low; 6742 icmnd->bde.u.data.buffer_address_high = sge[0].buffer_address_high; 6743 } else { 6744 icmnd->xbl = TRUE; 6745 6746 icmnd->bde.bde_type = SLI4_BDE_TYPE_BLP; 6747 6748 icmnd->bde.buffer_length = sgl->size; 6749 icmnd->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); 6750 icmnd->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); 6751 } 6752 6753 icmnd->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length; 6754 icmnd->xri_tag = xri; 6755 icmnd->context_tag = rpi; 6756 icmnd->timer = timeout; 6757 6758 icmnd->pu = 2; /* WQE word 4 contains read transfer length */ 6759 icmnd->class = SLI4_ELS_REQUEST64_CLASS_3; 6760 icmnd->command = SLI4_WQE_FCP_ICMND64; 6761 icmnd->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 6762 6763 icmnd->abort_tag = xri; 6764 6765 icmnd->request_tag = tag; 6766 icmnd->len_loc = 3; 6767 if (rnode->node_group) { 6768 icmnd->hlm = TRUE; 6769 icmnd->remote_n_port_id = rnode->fc_id & 0x00ffffff; 6770 } 6771 if (((ocs_node_t *)rnode->node)->fcp2device) { 6772 icmnd->erp = TRUE; 6773 } 6774 icmnd->cmd_type = SLI4_CMD_FCP_ICMND64_WQE; 6775 icmnd->cq_id = cq_id; 6776 6777 return 0; 6778 } 6779 6780 /** 6781 * @ingroup sli_fc 6782 * @brief Write an FCP_IREAD64_WQE work queue entry. 6783 * 6784 * @param sli4 SLI context. 6785 * @param buf Destination buffer for the WQE. 6786 * @param size Buffer size, in bytes. 6787 * @param sgl DMA memory for the scatter gather list. 6788 * @param first_data_sge Index of first data sge (used if perf hints are enabled) 6789 * @param xfer_len Data transfer length. 6790 * @param xri XRI for this exchange. 6791 * @param tag IO tag value. 6792 * @param cq_id The id of the completion queue where the WQE response is sent. 6793 * @param rpi remote node indicator (RPI) 6794 * @param rnode Destination request (i.e. remote node). 6795 * @param dif T10 DIF operation, or 0 to disable. 6796 * @param bs T10 DIF block size, or 0 if DIF is disabled. 6797 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout. 6798 * 6799 * @return Returns 0 on success, or a non-zero value on failure. 6800 */ 6801 int32_t 6802 sli_fcp_iread64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, 6803 uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id, 6804 uint32_t rpi, ocs_remote_node_t *rnode, 6805 uint8_t dif, uint8_t bs, uint8_t timeout) 6806 { 6807 sli4_fcp_iread64_wqe_t *iread = buf; 6808 sli4_sge_t *sge = NULL; 6809 6810 ocs_memset(buf, 0, size); 6811 6812 if (!sgl || !sgl->virt) { 6813 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", 6814 sgl, sgl ? 
sgl->virt : NULL); 6815 return -1; 6816 } 6817 sge = sgl->virt; 6818 6819 if (sli4->config.sgl_pre_registered) { 6820 iread->xbl = FALSE; 6821 6822 iread->dbde = TRUE; 6823 iread->bde.bde_type = SLI4_BDE_TYPE_BDE_64; 6824 6825 iread->bde.buffer_length = sge[0].buffer_length; 6826 iread->bde.u.data.buffer_address_low = sge[0].buffer_address_low; 6827 iread->bde.u.data.buffer_address_high = sge[0].buffer_address_high; 6828 } else { 6829 iread->xbl = TRUE; 6830 6831 iread->bde.bde_type = SLI4_BDE_TYPE_BLP; 6832 6833 iread->bde.buffer_length = sgl->size; 6834 iread->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); 6835 iread->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); 6836 6837 /* fill out fcp_cmnd buffer len and change resp buffer to be of type 6838 * "skip" (note: response will still be written to sge[1] if necessary) */ 6839 iread->fcp_cmd_buffer_length = sge[0].buffer_length; 6840 sge[1].sge_type = SLI4_SGE_TYPE_SKIP; 6841 } 6842 6843 iread->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length; 6844 iread->total_transfer_length = xfer_len; 6845 6846 iread->xri_tag = xri; 6847 iread->context_tag = rpi; 6848 6849 iread->timer = timeout; 6850 6851 iread->pu = 2; /* WQE word 4 contains read transfer length */ 6852 iread->class = SLI4_ELS_REQUEST64_CLASS_3; 6853 iread->command = SLI4_WQE_FCP_IREAD64; 6854 iread->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 6855 iread->dif = dif; 6856 iread->bs = bs; 6857 6858 iread->abort_tag = xri; 6859 6860 iread->request_tag = tag; 6861 iread->len_loc = 3; 6862 if (rnode->node_group) { 6863 iread->hlm = TRUE; 6864 iread->remote_n_port_id = rnode->fc_id & 0x00ffffff; 6865 } 6866 if (((ocs_node_t *)rnode->node)->fcp2device) { 6867 iread->erp = TRUE; 6868 } 6869 iread->iod = 1; 6870 iread->cmd_type = SLI4_CMD_FCP_IREAD64_WQE; 6871 iread->cq_id = cq_id; 6872 6873 if (sli4->config.perf_hint) { 6874 iread->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64; 6875 iread->first_data_bde.buffer_length = sge[first_data_sge].buffer_length; 6876 iread->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low; 6877 iread->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high; 6878 } 6879 6880 return 0; 6881 } 6882 6883 /** 6884 * @ingroup sli_fc 6885 * @brief Write an FCP_IWRITE64_WQE work queue entry. 6886 * 6887 * @param sli4 SLI context. 6888 * @param buf Destination buffer for the WQE. 6889 * @param size Buffer size, in bytes. 6890 * @param sgl DMA memory for the scatter gather list. 6891 * @param first_data_sge Index of first data sge (used if perf hints are enabled) 6892 * @param xfer_len Data transfer length. 6893 * @param first_burst The number of first burst bytes 6894 * @param xri XRI for this exchange. 6895 * @param tag IO tag value. 6896 * @param cq_id The id of the completion queue where the WQE response is sent. 6897 * @param rpi remote node indicator (RPI) 6898 * @param rnode Destination request (i.e. remote node) 6899 * @param dif T10 DIF operation, or 0 to disable 6900 * @param bs T10 DIF block size, or 0 if DIF is disabled 6901 * @param timeout Time, in seconds, before an IO times out. Zero means no timeout. 6902 * 6903 * @return Returns 0 on success, or a non-zero value on failure. 
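 *
 * @par First burst (illustrative)
 * The initial transfer length placed in the WQE is the first-burst value
 * clamped to the total transfer length:
 * @code
 * // e.g. xfer_len = 8192, first_burst = 65536
 * // initial_transfer_length = MIN(8192, 65536) = 8192
 * @endcode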
6904 */ 6905 int32_t 6906 sli_fcp_iwrite64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, 6907 uint32_t xfer_len, uint32_t first_burst, uint16_t xri, uint16_t tag, uint16_t cq_id, 6908 uint32_t rpi, ocs_remote_node_t *rnode, 6909 uint8_t dif, uint8_t bs, uint8_t timeout) 6910 { 6911 sli4_fcp_iwrite64_wqe_t *iwrite = buf; 6912 sli4_sge_t *sge = NULL; 6913 6914 ocs_memset(buf, 0, size); 6915 6916 if (!sgl || !sgl->virt) { 6917 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", 6918 sgl, sgl ? sgl->virt : NULL); 6919 return -1; 6920 } 6921 sge = sgl->virt; 6922 6923 if (sli4->config.sgl_pre_registered) { 6924 iwrite->xbl = FALSE; 6925 6926 iwrite->dbde = TRUE; 6927 iwrite->bde.bde_type = SLI4_BDE_TYPE_BDE_64; 6928 6929 iwrite->bde.buffer_length = sge[0].buffer_length; 6930 iwrite->bde.u.data.buffer_address_low = sge[0].buffer_address_low; 6931 iwrite->bde.u.data.buffer_address_high = sge[0].buffer_address_high; 6932 } else { 6933 iwrite->xbl = TRUE; 6934 6935 iwrite->bde.bde_type = SLI4_BDE_TYPE_BLP; 6936 6937 iwrite->bde.buffer_length = sgl->size; 6938 iwrite->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); 6939 iwrite->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); 6940 6941 /* fill out fcp_cmnd buffer len and change resp buffer to be of type 6942 * "skip" (note: response will still be written to sge[1] if necessary) */ 6943 iwrite->fcp_cmd_buffer_length = sge[0].buffer_length; 6944 sge[1].sge_type = SLI4_SGE_TYPE_SKIP; 6945 } 6946 6947 iwrite->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length; 6948 iwrite->total_transfer_length = xfer_len; 6949 iwrite->initial_transfer_length = MIN(xfer_len, first_burst); 6950 6951 iwrite->xri_tag = xri; 6952 iwrite->context_tag = rpi; 6953 6954 iwrite->timer = timeout; 6955 6956 iwrite->pu = 2; /* WQE word 4 contains read transfer length */ 6957 iwrite->class = SLI4_ELS_REQUEST64_CLASS_3; 6958 iwrite->command = SLI4_WQE_FCP_IWRITE64; 6959 iwrite->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 6960 iwrite->dif = dif; 6961 iwrite->bs = bs; 6962 6963 iwrite->abort_tag = xri; 6964 6965 iwrite->request_tag = tag; 6966 iwrite->len_loc = 3; 6967 if (rnode->node_group) { 6968 iwrite->hlm = TRUE; 6969 iwrite->remote_n_port_id = rnode->fc_id & 0x00ffffff; 6970 } 6971 if (((ocs_node_t *)rnode->node)->fcp2device) { 6972 iwrite->erp = TRUE; 6973 } 6974 iwrite->cmd_type = SLI4_CMD_FCP_IWRITE64_WQE; 6975 iwrite->cq_id = cq_id; 6976 6977 if (sli4->config.perf_hint) { 6978 iwrite->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64; 6979 iwrite->first_data_bde.buffer_length = sge[first_data_sge].buffer_length; 6980 iwrite->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low; 6981 iwrite->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high; 6982 } 6983 6984 return 0; 6985 } 6986 6987 /** 6988 * @ingroup sli_fc 6989 * @brief Write an FCP_TRECEIVE64_WQE work queue entry. 6990 * 6991 * @param sli4 SLI context. 6992 * @param buf Destination buffer for the WQE. 6993 * @param size Buffer size, in bytes. 6994 * @param sgl DMA memory for the Scatter-Gather List. 6995 * @param first_data_sge Index of first data sge (used if perf hints are enabled) 6996 * @param relative_off Relative offset of the IO (if any). 6997 * @param xfer_len Data transfer length. 6998 * @param xri XRI for this exchange. 6999 * @param tag IO tag value. 7000 * @param xid OX_ID for the exchange. 7001 * @param cq_id The id of the completion queue where the WQE response is sent. 
7002 * @param rpi remote node indicator (RPI) 7003 * @param rnode Destination request (i.e. remote node). 7004 * @param flags Optional attributes, including: 7005 * - ACTIVE - IO is already active. 7006 * - AUTO RSP - Automatically generate a good FCP_RSP. 7007 * @param dif T10 DIF operation, or 0 to disable. 7008 * @param bs T10 DIF block size, or 0 if DIF is disabled. 7009 * @param csctl value of csctl field. 7010 * @param app_id value for VM application header. 7011 * 7012 * @return Returns 0 on success, or a non-zero value on failure. 7013 */ 7014 int32_t 7015 sli_fcp_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, 7016 uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id, 7017 uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, uint8_t dif, uint8_t bs, 7018 uint8_t csctl, uint32_t app_id) 7019 { 7020 sli4_fcp_treceive64_wqe_t *trecv = buf; 7021 sli4_fcp_128byte_wqe_t *trecv_128 = buf; 7022 sli4_sge_t *sge = NULL; 7023 7024 ocs_memset(buf, 0, size); 7025 7026 if (!sgl || !sgl->virt) { 7027 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", 7028 sgl, sgl ? sgl->virt : NULL); 7029 return -1; 7030 } 7031 sge = sgl->virt; 7032 7033 if (sli4->config.sgl_pre_registered) { 7034 trecv->xbl = FALSE; 7035 7036 trecv->dbde = TRUE; 7037 trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64; 7038 7039 trecv->bde.buffer_length = sge[0].buffer_length; 7040 trecv->bde.u.data.buffer_address_low = sge[0].buffer_address_low; 7041 trecv->bde.u.data.buffer_address_high = sge[0].buffer_address_high; 7042 7043 trecv->payload_offset_length = sge[0].buffer_length; 7044 } else { 7045 trecv->xbl = TRUE; 7046 7047 /* if data is a single physical address, use a BDE */ 7048 if (!dif && (xfer_len <= sge[2].buffer_length)) { 7049 trecv->dbde = TRUE; 7050 trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64; 7051 7052 trecv->bde.buffer_length = sge[2].buffer_length; 7053 trecv->bde.u.data.buffer_address_low = sge[2].buffer_address_low; 7054 trecv->bde.u.data.buffer_address_high = sge[2].buffer_address_high; 7055 } else { 7056 trecv->bde.bde_type = SLI4_BDE_TYPE_BLP; 7057 trecv->bde.buffer_length = sgl->size; 7058 trecv->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); 7059 trecv->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); 7060 } 7061 } 7062 7063 trecv->relative_offset = relative_off; 7064 7065 if (flags & SLI4_IO_CONTINUATION) { 7066 trecv->xc = TRUE; 7067 } 7068 trecv->xri_tag = xri; 7069 7070 trecv->context_tag = rpi; 7071 7072 trecv->pu = TRUE; /* WQE uses relative offset */ 7073 7074 if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) { 7075 trecv->ar = TRUE; 7076 } 7077 7078 trecv->command = SLI4_WQE_FCP_TRECEIVE64; 7079 trecv->class = SLI4_ELS_REQUEST64_CLASS_3; 7080 trecv->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 7081 trecv->dif = dif; 7082 trecv->bs = bs; 7083 7084 trecv->remote_xid = xid; 7085 7086 trecv->request_tag = tag; 7087 7088 trecv->iod = 1; 7089 7090 trecv->len_loc = 0x2; 7091 7092 if (rnode->node_group) { 7093 trecv->hlm = TRUE; 7094 trecv->dword5.dword = rnode->fc_id & 0x00ffffff; 7095 } 7096 7097 trecv->cmd_type = SLI4_CMD_FCP_TRECEIVE64_WQE; 7098 7099 trecv->cq_id = cq_id; 7100 7101 trecv->fcp_data_receive_length = xfer_len; 7102 7103 if (sli4->config.perf_hint) { 7104 trecv->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64; 7105 trecv->first_data_bde.buffer_length = sge[first_data_sge].buffer_length; 7106 trecv->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low; 7107 
trecv->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high; 7108 } 7109 7110 /* The upper 7 bits of csctl is the priority */ 7111 if (csctl & SLI4_MASK_CCP) { 7112 trecv->ccpe = 1; 7113 trecv->ccp = (csctl & SLI4_MASK_CCP); 7114 } 7115 7116 if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trecv->eat) { 7117 trecv->app_id_valid = 1; 7118 trecv->wqes = 1; 7119 trecv_128->dw[31] = app_id; 7120 } 7121 return 0; 7122 } 7123 7124 /** 7125 * @ingroup sli_fc 7126 * @brief Write an FCP_CONT_TRECEIVE64_WQE work queue entry. 7127 * 7128 * @param sli4 SLI context. 7129 * @param buf Destination buffer for the WQE. 7130 * @param size Buffer size, in bytes. 7131 * @param sgl DMA memory for the Scatter-Gather List. 7132 * @param first_data_sge Index of first data sge (used if perf hints are enabled) 7133 * @param relative_off Relative offset of the IO (if any). 7134 * @param xfer_len Data transfer length. 7135 * @param xri XRI for this exchange. 7136 * @param sec_xri Secondary XRI for this exchange. (BZ 161832 workaround) 7137 * @param tag IO tag value. 7138 * @param xid OX_ID for the exchange. 7139 * @param cq_id The id of the completion queue where the WQE response is sent. 7140 * @param rpi remote node indicator (RPI) 7141 * @param rnode Destination request (i.e. remote node). 7142 * @param flags Optional attributes, including: 7143 * - ACTIVE - IO is already active. 7144 * - AUTO RSP - Automatically generate a good FCP_RSP. 7145 * @param dif T10 DIF operation, or 0 to disable. 7146 * @param bs T10 DIF block size, or 0 if DIF is disabled. 7147 * @param csctl value of csctl field. 7148 * @param app_id value for VM application header. 7149 * 7150 * @return Returns 0 on success, or a non-zero value on failure. 7151 */ 7152 int32_t 7153 sli_fcp_cont_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, 7154 uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t sec_xri, uint16_t tag, 7155 uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, 7156 uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id) 7157 { 7158 int32_t rc; 7159 7160 rc = sli_fcp_treceive64_wqe(sli4, buf, size, sgl, first_data_sge, relative_off, xfer_len, xri, tag, 7161 cq_id, xid, rpi, rnode, flags, dif, bs, csctl, app_id); 7162 if (rc == 0) { 7163 sli4_fcp_treceive64_wqe_t *trecv = buf; 7164 7165 trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64; 7166 trecv->dword5.sec_xri_tag = sec_xri; 7167 } 7168 return rc; 7169 } 7170 7171 /** 7172 * @ingroup sli_fc 7173 * @brief Write an FCP_TRSP64_WQE work queue entry. 7174 * 7175 * @param sli4 SLI context. 7176 * @param buf Destination buffer for the WQE. 7177 * @param size Buffer size, in bytes. 7178 * @param sgl DMA memory for the Scatter-Gather List. 7179 * @param rsp_len Response data length. 7180 * @param xri XRI for this exchange. 7181 * @param tag IO tag value. 7182 * @param cq_id The id of the completion queue where the WQE response is sent. 7183 * @param xid OX_ID for the exchange. 7184 * @param rpi remote node indicator (RPI) 7185 * @param rnode Destination request (i.e. remote node). 7186 * @param flags Optional attributes, including: 7187 * - ACTIVE - IO is already active 7188 * - AUTO RSP - Automatically generate a good FCP_RSP. 7189 * @param csctl value of csctl field. 7190 * @param port_owned 0/1 to indicate if the XRI is port owned (used to set XBL=0) 7191 * @param app_id value for VM application header. 
7192 * 7193 * @return Returns 0 on success, or a non-zero value on failure. 7194 */ 7195 int32_t 7196 sli_fcp_trsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t rsp_len, 7197 uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, 7198 uint32_t flags, uint8_t csctl, uint8_t port_owned, uint32_t app_id) 7199 { 7200 sli4_fcp_trsp64_wqe_t *trsp = buf; 7201 sli4_fcp_128byte_wqe_t *trsp_128 = buf; 7202 7203 ocs_memset(buf, 0, size); 7204 7205 if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) { 7206 trsp->ag = TRUE; 7207 /* 7208 * The SLI-4 documentation states that the BDE is ignored when 7209 * using auto-good response, but, at least for IF_TYPE 0 devices, 7210 * this does not appear to be true. 7211 */ 7212 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) { 7213 trsp->bde.buffer_length = 12; /* byte size of RSP */ 7214 } 7215 } else { 7216 sli4_sge_t *sge = sgl->virt; 7217 7218 if (sli4->config.sgl_pre_registered || port_owned) { 7219 trsp->dbde = TRUE; 7220 } else { 7221 trsp->xbl = TRUE; 7222 } 7223 7224 trsp->bde.bde_type = SLI4_BDE_TYPE_BDE_64; 7225 trsp->bde.buffer_length = sge[0].buffer_length; 7226 trsp->bde.u.data.buffer_address_low = sge[0].buffer_address_low; 7227 trsp->bde.u.data.buffer_address_high = sge[0].buffer_address_high; 7228 7229 trsp->fcp_response_length = rsp_len; 7230 } 7231 7232 if (flags & SLI4_IO_CONTINUATION) { 7233 trsp->xc = TRUE; 7234 } 7235 7236 if (rnode->node_group) { 7237 trsp->hlm = TRUE; 7238 trsp->dword5 = rnode->fc_id & 0x00ffffff; 7239 } 7240 7241 trsp->xri_tag = xri; 7242 trsp->rpi = rpi; 7243 7244 trsp->command = SLI4_WQE_FCP_TRSP64; 7245 trsp->class = SLI4_ELS_REQUEST64_CLASS_3; 7246 7247 trsp->remote_xid = xid; 7248 trsp->request_tag = tag; 7249 trsp->dnrx = ((flags & SLI4_IO_DNRX) == 0 ? 0 : 1); 7250 trsp->len_loc = 0x1; 7251 trsp->cq_id = cq_id; 7252 trsp->cmd_type = SLI4_CMD_FCP_TRSP64_WQE; 7253 7254 /* The upper 7 bits of csctl is the priority */ 7255 if (csctl & SLI4_MASK_CCP) { 7256 trsp->ccpe = 1; 7257 trsp->ccp = (csctl & SLI4_MASK_CCP); 7258 } 7259 7260 if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trsp->eat) { 7261 trsp->app_id_valid = 1; 7262 trsp->wqes = 1; 7263 trsp_128->dw[31] = app_id; 7264 } 7265 return 0; 7266 } 7267 7268 /** 7269 * @ingroup sli_fc 7270 * @brief Write an FCP_TSEND64_WQE work queue entry. 7271 * 7272 * @param sli4 SLI context. 7273 * @param buf Destination buffer for the WQE. 7274 * @param size Buffer size, in bytes. 7275 * @param sgl DMA memory for the scatter gather list. 7276 * @param first_data_sge Index of first data sge (used if perf hints are enabled) 7277 * @param relative_off Relative offset of the IO (if any). 7278 * @param xfer_len Data transfer length. 7279 * @param xri XRI for this exchange. 7280 * @param tag IO tag value. 7281 * @param cq_id The id of the completion queue where the WQE response is sent. 7282 * @param xid OX_ID for the exchange. 7283 * @param rpi remote node indicator (RPI) 7284 * @param rnode Destination request (i.e. remote node). 7285 * @param flags Optional attributes, including: 7286 * - ACTIVE - IO is already active. 7287 * - AUTO RSP - Automatically generate a good FCP_RSP. 7288 * @param dif T10 DIF operation, or 0 to disable. 7289 * @param bs T10 DIF block size, or 0 if DIF is disabled. 7290 * @param csctl value of csctl field. 7291 * @param app_id value for VM application header. 7292 * 7293 * @return Returns 0 on success, or a non-zero value on failure. 
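 *
 * @par SGL layout (illustrative)
 * For the target-send WQE the first two SGL entries are skipped and the
 * payload starts at the third entry, which is also the entry promoted into
 * the BDE when the data fits in a single physical buffer:
 * @code
 * sli4_sge_t *sge = sgl->virt;
 * // sge[0], sge[1] : skipped for TSEND64
 * // sge[2]         : first data buffer (used directly as a BDE when !dif and
 * //                  xfer_len <= sge[2].buffer_length)
 * @endcode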
7294 */ 7295 int32_t 7296 sli_fcp_tsend64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge, 7297 uint32_t relative_off, uint32_t xfer_len, 7298 uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, 7299 uint32_t flags, uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id) 7300 { 7301 sli4_fcp_tsend64_wqe_t *tsend = buf; 7302 sli4_fcp_128byte_wqe_t *tsend_128 = buf; 7303 sli4_sge_t *sge = NULL; 7304 7305 ocs_memset(buf, 0, size); 7306 7307 if (!sgl || !sgl->virt) { 7308 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", 7309 sgl, sgl ? sgl->virt : NULL); 7310 return -1; 7311 } 7312 sge = sgl->virt; 7313 7314 if (sli4->config.sgl_pre_registered) { 7315 tsend->xbl = FALSE; 7316 7317 tsend->dbde = TRUE; 7318 tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64; 7319 7320 /* TSEND64_WQE specifies first two SGE are skipped 7321 * (i.e. 3rd is valid) */ 7322 tsend->bde.buffer_length = sge[2].buffer_length; 7323 tsend->bde.u.data.buffer_address_low = sge[2].buffer_address_low; 7324 tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high; 7325 } else { 7326 tsend->xbl = TRUE; 7327 7328 /* if data is a single physical address, use a BDE */ 7329 if (!dif && (xfer_len <= sge[2].buffer_length)) { 7330 tsend->dbde = TRUE; 7331 tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64; 7332 /* TSEND64_WQE specifies first two SGE are skipped 7333 * (i.e. 3rd is valid) */ 7334 tsend->bde.buffer_length = sge[2].buffer_length; 7335 tsend->bde.u.data.buffer_address_low = sge[2].buffer_address_low; 7336 tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high; 7337 } else { 7338 tsend->bde.bde_type = SLI4_BDE_TYPE_BLP; 7339 tsend->bde.buffer_length = sgl->size; 7340 tsend->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); 7341 tsend->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); 7342 } 7343 } 7344 7345 tsend->relative_offset = relative_off; 7346 7347 if (flags & SLI4_IO_CONTINUATION) { 7348 tsend->xc = TRUE; 7349 } 7350 tsend->xri_tag = xri; 7351 7352 tsend->rpi = rpi; 7353 7354 tsend->pu = TRUE; /* WQE uses relative offset */ 7355 7356 if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) { 7357 tsend->ar = TRUE; 7358 } 7359 7360 tsend->command = SLI4_WQE_FCP_TSEND64; 7361 tsend->class = SLI4_ELS_REQUEST64_CLASS_3; 7362 tsend->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 7363 tsend->dif = dif; 7364 tsend->bs = bs; 7365 7366 tsend->remote_xid = xid; 7367 7368 tsend->request_tag = tag; 7369 7370 tsend->len_loc = 0x2; 7371 7372 if (rnode->node_group) { 7373 tsend->hlm = TRUE; 7374 tsend->dword5 = rnode->fc_id & 0x00ffffff; 7375 } 7376 7377 tsend->cq_id = cq_id; 7378 7379 tsend->cmd_type = SLI4_CMD_FCP_TSEND64_WQE; 7380 7381 tsend->fcp_data_transmit_length = xfer_len; 7382 7383 if (sli4->config.perf_hint) { 7384 tsend->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64; 7385 tsend->first_data_bde.buffer_length = sge[first_data_sge].buffer_length; 7386 tsend->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low; 7387 tsend->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high; 7388 } 7389 7390 /* The upper 7 bits of csctl is the priority */ 7391 if (csctl & SLI4_MASK_CCP) { 7392 tsend->ccpe = 1; 7393 tsend->ccp = (csctl & SLI4_MASK_CCP); 7394 } 7395 7396 if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !tsend->eat) { 7397 tsend->app_id_valid = 1; 7398 tsend->wqes = 1; 7399 tsend_128->dw[31] = app_id; 7400 } 7401 return 0; 7402 } 7403 7404 /** 7405 * @ingroup 
sli_fc 7406 * @brief Write a GEN_REQUEST64 work queue entry. 7407 * 7408 * @note This WQE is only used to send FC-CT commands. 7409 * 7410 * @param sli4 SLI context. 7411 * @param buf Destination buffer for the WQE. 7412 * @param size Buffer size, in bytes. 7413 * @param sgl DMA memory for the request. 7414 * @param req_len Length of request. 7415 * @param max_rsp_len Max length of response. 7416 * @param timeout Time, in seconds, before an IO times out. Zero means infinite. 7417 * @param xri XRI for this exchange. 7418 * @param tag IO tag value. 7419 * @param cq_id The id of the completion queue where the WQE response is sent. 7420 * @param rnode Destination of request (that is, the remote node). 7421 * @param r_ctl R_CTL value for sequence. 7422 * @param type TYPE value for sequence. 7423 * @param df_ctl DF_CTL value for sequence. 7424 * 7425 * @return Returns 0 on success, or a non-zero value on failure. 7426 */ 7427 int32_t 7428 sli_gen_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, 7429 uint32_t req_len, uint32_t max_rsp_len, uint8_t timeout, 7430 uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode, 7431 uint8_t r_ctl, uint8_t type, uint8_t df_ctl) 7432 { 7433 sli4_gen_request64_wqe_t *gen = buf; 7434 sli4_sge_t *sge = NULL; 7435 7436 ocs_memset(buf, 0, size); 7437 7438 if (!sgl || !sgl->virt) { 7439 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", 7440 sgl, sgl ? sgl->virt : NULL); 7441 return -1; 7442 } 7443 sge = sgl->virt; 7444 7445 if (sli4->config.sgl_pre_registered) { 7446 gen->xbl = FALSE; 7447 7448 gen->dbde = TRUE; 7449 gen->bde.bde_type = SLI4_BDE_TYPE_BDE_64; 7450 7451 gen->bde.buffer_length = req_len; 7452 gen->bde.u.data.buffer_address_low = sge[0].buffer_address_low; 7453 gen->bde.u.data.buffer_address_high = sge[0].buffer_address_high; 7454 } else { 7455 gen->xbl = TRUE; 7456 7457 gen->bde.bde_type = SLI4_BDE_TYPE_BLP; 7458 7459 gen->bde.buffer_length = 2 * sizeof(sli4_sge_t); 7460 gen->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys); 7461 gen->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys); 7462 } 7463 7464 gen->request_payload_length = req_len; 7465 gen->max_response_payload_length = max_rsp_len; 7466 7467 gen->df_ctl = df_ctl; 7468 gen->type = type; 7469 gen->r_ctl = r_ctl; 7470 7471 gen->xri_tag = xri; 7472 7473 gen->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 7474 gen->context_tag = rnode->indicator; 7475 7476 gen->class = SLI4_ELS_REQUEST64_CLASS_3; 7477 7478 gen->command = SLI4_WQE_GEN_REQUEST64; 7479 7480 gen->timer = timeout; 7481 7482 gen->request_tag = tag; 7483 7484 gen->iod = SLI4_ELS_REQUEST64_DIR_READ; 7485 7486 gen->qosd = TRUE; 7487 7488 if (rnode->node_group) { 7489 gen->hlm = TRUE; 7490 gen->remote_n_port_id = rnode->fc_id & 0x00ffffff; 7491 } 7492 7493 gen->cmd_type = SLI4_CMD_GEN_REQUEST64_WQE; 7494 7495 gen->cq_id = cq_id; 7496 7497 return 0; 7498 } 7499 7500 /** 7501 * @ingroup sli_fc 7502 * @brief Write a SEND_FRAME work queue entry 7503 * 7504 * @param sli4 SLI context. 7505 * @param buf Destination buffer for the WQE. 7506 * @param size Buffer size, in bytes. 7507 * @param sof Start of frame value 7508 * @param eof End of frame value 7509 * @param hdr Pointer to FC header data 7510 * @param payload DMA memory for the payload. 7511 * @param req_len Length of payload. 7512 * @param timeout Time, in seconds, before an IO times out. Zero means infinite. 7513 * @param xri XRI for this exchange. 7514 * @param req_tag IO tag value. 
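 *
 * @par Example
 * A hypothetical caller sketch, assuming "fc_hdr" points at the 24-byte FC frame
 * header built by the caller, "payload" is an ocs_dma_t that already holds the
 * frame payload, and the SOFi3 (0x2e) / EOFt (0x41) delimiters plus the
 * wqe_buf/xri/req_tag identifiers are illustrative, not taken from this file:
 * @code
 * uint32_t hdr[6];
 *
 * ocs_memcpy(hdr, fc_hdr, sizeof(hdr));   // ocs_memcpy assumed from the OS layer
 *
 * if (sli_send_frame_wqe(sli4, wqe_buf, sli4->config.wqe_size,
 *                        0x2e, 0x41, hdr, &payload, payload_len,
 *                        timeout, xri, req_tag)) {
 *         ocs_log_err(sli4->os, "SEND_FRAME WQE error\n");
 * }
 * @endcode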
7515 * 7516 * @return Returns 0 on success, or a non-zero value on failure. 7517 */ 7518 int32_t 7519 sli_send_frame_wqe(sli4_t *sli4, void *buf, size_t size, uint8_t sof, uint8_t eof, uint32_t *hdr, 7520 ocs_dma_t *payload, uint32_t req_len, uint8_t timeout, 7521 uint16_t xri, uint16_t req_tag) 7522 { 7523 sli4_send_frame_wqe_t *sf = buf; 7524 7525 ocs_memset(buf, 0, size); 7526 7527 sf->dbde = TRUE; 7528 sf->bde.buffer_length = req_len; 7529 sf->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys); 7530 sf->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys); 7531 7532 /* Copy FC header */ 7533 sf->fc_header_0_1[0] = hdr[0]; 7534 sf->fc_header_0_1[1] = hdr[1]; 7535 sf->fc_header_2_5[0] = hdr[2]; 7536 sf->fc_header_2_5[1] = hdr[3]; 7537 sf->fc_header_2_5[2] = hdr[4]; 7538 sf->fc_header_2_5[3] = hdr[5]; 7539 7540 sf->frame_length = req_len; 7541 7542 sf->xri_tag = xri; 7543 sf->pu = 0; 7544 sf->context_tag = 0; 7545 7546 sf->ct = 0; 7547 sf->command = SLI4_WQE_SEND_FRAME; 7548 sf->class = SLI4_ELS_REQUEST64_CLASS_3; 7549 sf->timer = timeout; 7550 7551 sf->request_tag = req_tag; 7552 sf->eof = eof; 7553 sf->sof = sof; 7554 7555 sf->qosd = 0; 7556 sf->lenloc = 1; 7557 sf->xc = 0; 7558 7559 sf->xbl = 1; 7560 7561 sf->cmd_type = SLI4_CMD_SEND_FRAME_WQE; 7562 sf->cq_id = 0xffff; 7563 7564 return 0; 7565 } 7566 7567 /** 7568 * @ingroup sli_fc 7569 * @brief Write a XMIT_SEQUENCE64 work queue entry. 7570 * 7571 * This WQE is used to send FC-CT response frames. 7572 * 7573 * @note This routine implements a restricted use of this WQE; a future enhancement (TODO) would 7574 * be to pass in the sequence initiative and a full SGL. 7575 * 7576 * @param sli4 SLI context. 7577 * @param buf Destination buffer for the WQE. 7578 * @param size Buffer size, in bytes. 7579 * @param payload DMA memory for the request. 7580 * @param payload_len Length of request. 7581 * @param timeout Time, in seconds, before an IO times out. Zero means infinite. 7582 * @param ox_id Originator exchange ID. 7583 * @param xri XRI for this exchange. 7584 * @param tag IO tag value. 7585 * @param rnode Destination of request (that is, the remote node). 7586 * @param r_ctl R_CTL value for sequence. 7587 * @param type TYPE value for sequence. 7588 * @param df_ctl DF_CTL value for sequence. 7589 * 7590 * @return Returns 0 on success, or a non-zero value on failure. 7591 */ 7592 int32_t 7593 sli_xmit_sequence64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload, 7594 uint32_t payload_len, uint8_t timeout, uint16_t ox_id, 7595 uint16_t xri, uint16_t tag, ocs_remote_node_t *rnode, 7596 uint8_t r_ctl, uint8_t type, uint8_t df_ctl) 7597 { 7598 sli4_xmit_sequence64_wqe_t *xmit = buf; 7599 7600 ocs_memset(buf, 0, size); 7601 7602 if ((payload == NULL) || (payload->virt == NULL)) { 7603 ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n", 7604 payload, payload ?
payload->virt : NULL); 7605 return -1; 7606 } 7607 7608 if (sli4->config.sgl_pre_registered) { 7609 xmit->dbde = TRUE; 7610 } else { 7611 xmit->xbl = TRUE; 7612 } 7613 7614 xmit->bde.bde_type = SLI4_BDE_TYPE_BDE_64; 7615 xmit->bde.buffer_length = payload_len; 7616 xmit->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys); 7617 xmit->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys); 7618 xmit->sequence_payload_len = payload_len; 7619 7620 xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff; 7621 7622 xmit->relative_offset = 0; 7623 7624 xmit->si = 0; /* sequence initiative - this matches what is seen from 7625 * FC switches in response to FCGS commands */ 7626 xmit->ft = 0; /* force transmit */ 7627 xmit->xo = 0; /* exchange responder */ 7628 xmit->ls = 1; /* last in sequence */ 7629 xmit->df_ctl = df_ctl; 7630 xmit->type = type; 7631 xmit->r_ctl = r_ctl; 7632 7633 xmit->xri_tag = xri; 7634 xmit->context_tag = rnode->indicator; 7635 7636 xmit->dif = 0; 7637 xmit->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 7638 xmit->bs = 0; 7639 7640 xmit->command = SLI4_WQE_XMIT_SEQUENCE64; 7641 xmit->class = SLI4_ELS_REQUEST64_CLASS_3; 7642 xmit->pu = 0; 7643 xmit->timer = timeout; 7644 7645 xmit->abort_tag = 0; 7646 xmit->request_tag = tag; 7647 xmit->remote_xid = ox_id; 7648 7649 xmit->iod = SLI4_ELS_REQUEST64_DIR_READ; 7650 7651 if (rnode->node_group) { 7652 xmit->hlm = TRUE; 7653 xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff; 7654 } 7655 7656 xmit->cmd_type = SLI4_CMD_XMIT_SEQUENCE64_WQE; 7657 7658 xmit->len_loc = 2; 7659 7660 xmit->cq_id = 0xFFFF; 7661 7662 return 0; 7663 } 7664 7665 /** 7666 * @ingroup sli_fc 7667 * @brief Write a REQUEUE_XRI_WQE work queue entry. 7668 * 7669 * @param sli4 SLI context. 7670 * @param buf Destination buffer for the WQE. 7671 * @param size Buffer size, in bytes. 7672 * @param xri XRI for this exchange. 7673 * @param tag IO tag value. 7674 * @param cq_id The id of the completion queue where the WQE response is sent. 7675 * 7676 * @return Returns 0 on success, or a non-zero value on failure. 7677 */ 7678 int32_t 7679 sli_requeue_xri_wqe(sli4_t *sli4, void *buf, size_t size, uint16_t xri, uint16_t tag, uint16_t cq_id) 7680 { 7681 sli4_requeue_xri_wqe_t *requeue = buf; 7682 7683 ocs_memset(buf, 0, size); 7684 7685 requeue->command = SLI4_WQE_REQUEUE_XRI; 7686 requeue->xri_tag = xri; 7687 requeue->request_tag = tag; 7688 requeue->xc = 1; 7689 requeue->qosd = 1; 7690 requeue->cq_id = cq_id; 7691 requeue->cmd_type = SLI4_CMD_REQUEUE_XRI_WQE; 7692 return 0; 7693 } 7694 7695 int32_t 7696 sli_xmit_bcast64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload, 7697 uint32_t payload_len, uint8_t timeout, uint16_t xri, uint16_t tag, 7698 uint16_t cq_id, ocs_remote_node_t *rnode, 7699 uint8_t r_ctl, uint8_t type, uint8_t df_ctl) 7700 { 7701 sli4_xmit_bcast64_wqe_t *bcast = buf; 7702 7703 /* Command requires a temporary RPI (i.e.
unused remote node) */ 7704 if (rnode->attached) { 7705 ocs_log_test(sli4->os, "remote node %d in use\n", rnode->indicator); 7706 return -1; 7707 } 7708 7709 ocs_memset(buf, 0, size); 7710 7711 bcast->dbde = TRUE; 7712 bcast->sequence_payload.bde_type = SLI4_BDE_TYPE_BDE_64; 7713 bcast->sequence_payload.buffer_length = payload_len; 7714 bcast->sequence_payload.u.data.buffer_address_low = ocs_addr32_lo(payload->phys); 7715 bcast->sequence_payload.u.data.buffer_address_high = ocs_addr32_hi(payload->phys); 7716 7717 bcast->sequence_payload_length = payload_len; 7718 7719 bcast->df_ctl = df_ctl; 7720 bcast->type = type; 7721 bcast->r_ctl = r_ctl; 7722 7723 bcast->xri_tag = xri; 7724 7725 bcast->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; 7726 bcast->context_tag = rnode->sport->indicator; 7727 7728 bcast->class = SLI4_ELS_REQUEST64_CLASS_3; 7729 7730 bcast->command = SLI4_WQE_XMIT_BCAST64; 7731 7732 bcast->timer = timeout; 7733 7734 bcast->request_tag = tag; 7735 7736 bcast->temporary_rpi = rnode->indicator; 7737 7738 bcast->len_loc = 0x1; 7739 7740 bcast->iod = SLI4_ELS_REQUEST64_DIR_WRITE; 7741 7742 bcast->cmd_type = SLI4_CMD_XMIT_BCAST64_WQE; 7743 7744 bcast->cq_id = cq_id; 7745 7746 return 0; 7747 } 7748 7749 /** 7750 * @ingroup sli_fc 7751 * @brief Write an XMIT_BLS_RSP64_WQE work queue entry. 7752 * 7753 * @param sli4 SLI context. 7754 * @param buf Destination buffer for the WQE. 7755 * @param size Buffer size, in bytes. 7756 * @param payload Contents of the BLS payload to be sent. 7757 * @param xri XRI for this exchange. 7758 * @param tag IO tag value. 7759 * @param cq_id The id of the completion queue where the WQE response is sent. 7760 * @param rnode Destination of request (that is, the remote node). 7761 * @param s_id Source ID to use in the response. If UINT32_MAX, use SLI Port's ID. 7762 * 7763 * @return Returns 0 on success, or a non-zero value on failure. 
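 *
 * @par Example
 * A hypothetical sketch of accepting an ABTS on an attached remote node; the
 * wqe_buf/ox_id/rx_id/queue identifiers and the ACC field values are
 * illustrative assumptions, not taken from this file:
 * @code
 * sli_bls_payload_t bls;
 *
 * ocs_memset(&bls, 0, sizeof(bls));
 * bls.type = SLI_BLS_ACC;
 * bls.ox_id = ox_id;
 * bls.rx_id = rx_id;
 * bls.u.acc.seq_id_validity = 0;
 * bls.u.acc.high_seq_cnt = UINT16_MAX;
 *
 * if (sli_xmit_bls_rsp64_wqe(sli4, wqe_buf, sli4->config.wqe_size, &bls,
 *                            xri, tag, cq_id, rnode, UINT32_MAX)) {
 *         ocs_log_err(sli4->os, "XMIT_BLS_RSP64 WQE error\n");
 * }
 * @endcode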
7764 */ 7765 int32_t 7766 sli_xmit_bls_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, sli_bls_payload_t *payload, 7767 uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode, uint32_t s_id) 7768 { 7769 sli4_xmit_bls_rsp_wqe_t *bls = buf; 7770 7771 /* 7772 * Callers can either specify RPI or S_ID, but not both 7773 */ 7774 if (rnode->attached && (s_id != UINT32_MAX)) { 7775 ocs_log_test(sli4->os, "S_ID specified for attached remote node %d\n", 7776 rnode->indicator); 7777 return -1; 7778 } 7779 7780 ocs_memset(buf, 0, size); 7781 7782 if (SLI_BLS_ACC == payload->type) { 7783 bls->payload_word0 = (payload->u.acc.seq_id_last << 16) | 7784 (payload->u.acc.seq_id_validity << 24); 7785 bls->high_seq_cnt = payload->u.acc.high_seq_cnt; 7786 bls->low_seq_cnt = payload->u.acc.low_seq_cnt; 7787 } else if (SLI_BLS_RJT == payload->type) { 7788 bls->payload_word0 = *((uint32_t *)&payload->u.rjt); 7789 bls->ar = TRUE; 7790 } else { 7791 ocs_log_test(sli4->os, "bad BLS type %#x\n", 7792 payload->type); 7793 return -1; 7794 } 7795 7796 bls->ox_id = payload->ox_id; 7797 bls->rx_id = payload->rx_id; 7798 7799 if (rnode->attached) { 7800 bls->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 7801 bls->context_tag = rnode->indicator; 7802 } else { 7803 bls->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; 7804 bls->context_tag = rnode->sport->indicator; 7805 7806 if (UINT32_MAX != s_id) { 7807 bls->local_n_port_id = s_id & 0x00ffffff; 7808 } else { 7809 bls->local_n_port_id = rnode->sport->fc_id & 0x00ffffff; 7810 } 7811 bls->remote_id = rnode->fc_id & 0x00ffffff; 7812 7813 bls->temporary_rpi = rnode->indicator; 7814 } 7815 7816 bls->xri_tag = xri; 7817 7818 bls->class = SLI4_ELS_REQUEST64_CLASS_3; 7819 7820 bls->command = SLI4_WQE_XMIT_BLS_RSP; 7821 7822 bls->request_tag = tag; 7823 7824 bls->qosd = TRUE; 7825 7826 if (rnode->node_group) { 7827 bls->hlm = TRUE; 7828 bls->remote_id = rnode->fc_id & 0x00ffffff; 7829 } 7830 7831 bls->cq_id = cq_id; 7832 7833 bls->cmd_type = SLI4_CMD_XMIT_BLS_RSP64_WQE; 7834 7835 return 0; 7836 } 7837 7838 /** 7839 * @ingroup sli_fc 7840 * @brief Write a XMIT_ELS_RSP64_WQE work queue entry. 7841 * 7842 * @param sli4 SLI context. 7843 * @param buf Destination buffer for the WQE. 7844 * @param size Buffer size, in bytes. 7845 * @param rsp DMA memory for the ELS response. 7846 * @param rsp_len Length of ELS response, in bytes. 7847 * @param xri XRI for this exchange. 7848 * @param tag IO tag value. 7849 * @param cq_id The id of the completion queue where the WQE response is sent. 7850 * @param ox_id OX_ID of the exchange containing the request. 7851 * @param rnode Destination of the ELS response (that is, the remote node). 7852 * @param flags Optional attributes, including: 7853 * - SLI4_IO_CONTINUATION - IO is already active. 7854 * @param s_id S_ID used for special responses. 7855 * 7856 * @return Returns 0 on success, or a non-zero value on failure. 
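 *
 * @par Example
 * A hypothetical sketch of queuing an LS_ACC whose payload has already been
 * written into the DMA buffer "els_rsp"; the buffer, length, and tag/queue
 * identifiers are illustrative assumptions, not taken from this file:
 * @code
 * if (sli_xmit_els_rsp64_wqe(sli4, wqe_buf, sli4->config.wqe_size,
 *                            &els_rsp, rsp_len, xri, tag, cq_id,
 *                            ox_id, rnode, 0, UINT32_MAX)) {
 *         ocs_log_err(sli4->os, "XMIT_ELS_RSP64 WQE error\n");
 * }
 * @endcode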
7857 */ 7858 int32_t 7859 sli_xmit_els_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *rsp, 7860 uint32_t rsp_len, uint16_t xri, uint16_t tag, uint16_t cq_id, 7861 uint16_t ox_id, ocs_remote_node_t *rnode, uint32_t flags, uint32_t s_id) 7862 { 7863 sli4_xmit_els_rsp64_wqe_t *els = buf; 7864 7865 ocs_memset(buf, 0, size); 7866 7867 if (sli4->config.sgl_pre_registered) { 7868 els->dbde = TRUE; 7869 } else { 7870 els->xbl = TRUE; 7871 } 7872 7873 els->els_response_payload.bde_type = SLI4_BDE_TYPE_BDE_64; 7874 els->els_response_payload.buffer_length = rsp_len; 7875 els->els_response_payload.u.data.buffer_address_low = ocs_addr32_lo(rsp->phys); 7876 els->els_response_payload.u.data.buffer_address_high = ocs_addr32_hi(rsp->phys); 7877 7878 els->els_response_payload_length = rsp_len; 7879 7880 els->xri_tag = xri; 7881 7882 els->class = SLI4_ELS_REQUEST64_CLASS_3; 7883 7884 els->command = SLI4_WQE_ELS_RSP64; 7885 7886 els->request_tag = tag; 7887 7888 els->ox_id = ox_id; 7889 7890 els->iod = SLI4_ELS_REQUEST64_DIR_WRITE; 7891 7892 els->qosd = TRUE; 7893 7894 if (flags & SLI4_IO_CONTINUATION) { 7895 els->xc = TRUE; 7896 } 7897 7898 if (rnode->attached) { 7899 els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI; 7900 els->context_tag = rnode->indicator; 7901 } else { 7902 els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI; 7903 els->context_tag = rnode->sport->indicator; 7904 els->remote_id = rnode->fc_id & 0x00ffffff; 7905 els->temporary_rpi = rnode->indicator; 7906 if (UINT32_MAX != s_id) { 7907 els->sp = TRUE; 7908 els->s_id = s_id & 0x00ffffff; 7909 } 7910 } 7911 7912 if (rnode->node_group) { 7913 els->hlm = TRUE; 7914 els->remote_id = rnode->fc_id & 0x00ffffff; 7915 } 7916 7917 els->cmd_type = SLI4_ELS_REQUEST64_CMD_GEN; 7918 7919 els->cq_id = cq_id; 7920 7921 return 0; 7922 } 7923 7924 /** 7925 * @ingroup sli_fc 7926 * @brief Process an asynchronous Link State event entry. 7927 * 7928 * @par Description 7929 * Parses Asynchronous Completion Queue Entry (ACQE), 7930 * creates an abstracted event, and calls registered callback functions. 7931 * 7932 * @param sli4 SLI context. 7933 * @param acqe Pointer to the ACQE. 7934 * 7935 * @return Returns 0 on success, or a non-zero value on failure. 7936 */ 7937 int32_t 7938 sli_fc_process_link_state(sli4_t *sli4, void *acqe) 7939 { 7940 sli4_link_state_t *link_state = acqe; 7941 sli4_link_event_t event = { 0 }; 7942 int32_t rc = 0; 7943 7944 if (!sli4->link) { 7945 /* bail if there is no callback */ 7946 return 0; 7947 } 7948 7949 if (SLI4_LINK_TYPE_ETHERNET == link_state->link_type) { 7950 event.topology = SLI_LINK_TOPO_NPORT; 7951 event.medium = SLI_LINK_MEDIUM_ETHERNET; 7952 } else { 7953 /* TODO is this supported for anything other than FCoE? 
*/ 7954 ocs_log_test(sli4->os, "unsupported link type %#x\n", 7955 link_state->link_type); 7956 event.topology = SLI_LINK_TOPO_MAX; 7957 event.medium = SLI_LINK_MEDIUM_MAX; 7958 rc = -1; 7959 } 7960 7961 switch (link_state->port_link_status) { 7962 case SLI4_PORT_LINK_STATUS_PHYSICAL_DOWN: 7963 case SLI4_PORT_LINK_STATUS_LOGICAL_DOWN: 7964 event.status = SLI_LINK_STATUS_DOWN; 7965 break; 7966 case SLI4_PORT_LINK_STATUS_PHYSICAL_UP: 7967 case SLI4_PORT_LINK_STATUS_LOGICAL_UP: 7968 event.status = SLI_LINK_STATUS_UP; 7969 break; 7970 default: 7971 ocs_log_test(sli4->os, "unsupported link status %#x\n", 7972 link_state->port_link_status); 7973 event.status = SLI_LINK_STATUS_MAX; 7974 rc = -1; 7975 } 7976 7977 switch (link_state->port_speed) { 7978 case 0: 7979 event.speed = 0; 7980 break; 7981 case 1: 7982 event.speed = 10; 7983 break; 7984 case 2: 7985 event.speed = 100; 7986 break; 7987 case 3: 7988 event.speed = 1000; 7989 break; 7990 case 4: 7991 event.speed = 10000; 7992 break; 7993 case 5: 7994 event.speed = 20000; 7995 break; 7996 case 6: 7997 event.speed = 25000; 7998 break; 7999 case 7: 8000 event.speed = 40000; 8001 break; 8002 case 8: 8003 event.speed = 100000; 8004 break; 8005 default: 8006 ocs_log_test(sli4->os, "unsupported port_speed %#x\n", 8007 link_state->port_speed); 8008 rc = -1; 8009 } 8010 8011 sli4->link(sli4->link_arg, (void *)&event); 8012 8013 return rc; 8014 } 8015 8016 /** 8017 * @ingroup sli_fc 8018 * @brief Process an asynchronous Link Attention event entry. 8019 * 8020 * @par Description 8021 * Parses Asynchronous Completion Queue Entry (ACQE), 8022 * creates an abstracted event, and calls the registered callback functions. 8023 * 8024 * @param sli4 SLI context. 8025 * @param acqe Pointer to the ACQE. 8026 * 8027 * @todo XXX all events return LINK_UP. 8028 * 8029 * @return Returns 0 on success, or a non-zero value on failure. 
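 *
 * @par Example
 * A hypothetical consumer of the abstracted event. This function invokes the
 * registered callback as sli4->link(sli4->link_arg, &event); the handler name
 * below and its int32_t (void *, void *) signature are assumptions for
 * illustration only:
 * @code
 * static int32_t
 * my_link_callback(void *arg, void *ptr)
 * {
 *         sli4_link_event_t *event = ptr;
 *
 *         if (SLI_LINK_STATUS_UP == event->status) {
 *                 // event->speed is reported in Mb/s (port_speed * 1000)
 *         }
 *         return 0;
 * }
 * @endcode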
8030 */ 8031 int32_t 8032 sli_fc_process_link_attention(sli4_t *sli4, void *acqe) 8033 { 8034 sli4_link_attention_t *link_attn = acqe; 8035 sli4_link_event_t event = { 0 }; 8036 8037 ocs_log_debug(sli4->os, "link_number=%d attn_type=%#x topology=%#x port_speed=%#x " 8038 "port_fault=%#x shared_link_status=%#x logical_link_speed=%#x " 8039 "event_tag=%#x\n", link_attn->link_number, link_attn->attn_type, 8040 link_attn->topology, link_attn->port_speed, link_attn->port_fault, 8041 link_attn->shared_link_status, link_attn->logical_link_speed, 8042 link_attn->event_tag); 8043 8044 if (!sli4->link) { 8045 return 0; 8046 } 8047 8048 event.medium = SLI_LINK_MEDIUM_FC; 8049 8050 switch (link_attn->attn_type) { 8051 case SLI4_LINK_ATTN_TYPE_LINK_UP: 8052 event.status = SLI_LINK_STATUS_UP; 8053 break; 8054 case SLI4_LINK_ATTN_TYPE_LINK_DOWN: 8055 event.status = SLI_LINK_STATUS_DOWN; 8056 break; 8057 case SLI4_LINK_ATTN_TYPE_NO_HARD_ALPA: 8058 ocs_log_debug(sli4->os, "attn_type: no hard alpa\n"); 8059 event.status = SLI_LINK_STATUS_NO_ALPA; 8060 break; 8061 default: 8062 ocs_log_test(sli4->os, "attn_type: unknown\n"); 8063 break; 8064 } 8065 8066 switch (link_attn->event_type) { 8067 case SLI4_FC_EVENT_LINK_ATTENTION: 8068 break; 8069 case SLI4_FC_EVENT_SHARED_LINK_ATTENTION: 8070 ocs_log_debug(sli4->os, "event_type: FC shared link event \n"); 8071 break; 8072 default: 8073 ocs_log_test(sli4->os, "event_type: unknown\n"); 8074 break; 8075 } 8076 8077 switch (link_attn->topology) { 8078 case SLI4_LINK_ATTN_P2P: 8079 event.topology = SLI_LINK_TOPO_NPORT; 8080 break; 8081 case SLI4_LINK_ATTN_FC_AL: 8082 event.topology = SLI_LINK_TOPO_LOOP; 8083 break; 8084 case SLI4_LINK_ATTN_INTERNAL_LOOPBACK: 8085 ocs_log_debug(sli4->os, "topology Internal loopback\n"); 8086 event.topology = SLI_LINK_TOPO_LOOPBACK_INTERNAL; 8087 break; 8088 case SLI4_LINK_ATTN_SERDES_LOOPBACK: 8089 ocs_log_debug(sli4->os, "topology serdes loopback\n"); 8090 event.topology = SLI_LINK_TOPO_LOOPBACK_EXTERNAL; 8091 break; 8092 default: 8093 ocs_log_test(sli4->os, "topology: unknown\n"); 8094 break; 8095 } 8096 8097 event.speed = link_attn->port_speed * 1000; 8098 8099 sli4->link(sli4->link_arg, (void *)&event); 8100 8101 return 0; 8102 } 8103 8104 /** 8105 * @ingroup sli_fc 8106 * @brief Parse an FC/FCoE work queue CQ entry. 8107 * 8108 * @param sli4 SLI context. 8109 * @param cq CQ to process. 8110 * @param cqe Pointer to the CQ entry. 8111 * @param etype CQ event type. 8112 * @param r_id Resource ID associated with this completion message (such as the IO tag). 8113 * 8114 * @return Returns 0 on success, or a non-zero value on failure. 
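 *
 * @par Example
 * A hypothetical dispatch sketch, assuming the caller has already dequeued a raw
 * CQE from "cq" into "cqe"; the dequeue path and the handle_* helpers are
 * assumptions, not functions from this file:
 * @code
 * sli4_qentry_e etype;
 * uint16_t rid;
 * int32_t status;
 *
 * status = sli_fc_cqe_parse(sli4, cq, cqe, &etype, &rid);
 *
 * switch (etype) {
 * case SLI_QENTRY_WQ:
 *         handle_wq_completion(rid, status);
 *         break;
 * case SLI_QENTRY_RQ:
 *         handle_rq_completion(rid, status);
 *         break;
 * case SLI_QENTRY_XABT:
 *         handle_xri_aborted(rid);
 *         break;
 * default:
 *         break;
 * }
 * @endcode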
8115 */ 8116 int32_t 8117 sli_fc_cqe_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype, 8118 uint16_t *r_id) 8119 { 8120 uint8_t code = cqe[SLI4_CQE_CODE_OFFSET]; 8121 int32_t rc = -1; 8122 8123 switch (code) { 8124 case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION: 8125 { 8126 sli4_fc_wcqe_t *wcqe = (void *)cqe; 8127 8128 *etype = SLI_QENTRY_WQ; 8129 *r_id = wcqe->request_tag; 8130 rc = wcqe->status; 8131 8132 /* Flag errors except for FCP_RSP_FAILURE */ 8133 if (rc && (rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE)) { 8134 ocs_log_test(sli4->os, "WCQE: status=%#x hw_status=%#x tag=%#x w1=%#x w2=%#x xb=%d\n", 8135 wcqe->status, wcqe->hw_status, 8136 wcqe->request_tag, wcqe->wqe_specific_1, 8137 wcqe->wqe_specific_2, wcqe->xb); 8138 ocs_log_test(sli4->os, " %08X %08X %08X %08X\n", ((uint32_t*) cqe)[0], ((uint32_t*) cqe)[1], 8139 ((uint32_t*) cqe)[2], ((uint32_t*) cqe)[3]); 8140 } 8141 8142 /* TODO: need to pass additional status back out of here as well 8143 * as status (could overload rc as status/addlstatus are only 8 bits each) 8144 */ 8145 8146 break; 8147 } 8148 case SLI4_CQE_CODE_RQ_ASYNC: 8149 { 8150 sli4_fc_async_rcqe_t *rcqe = (void *)cqe; 8151 8152 *etype = SLI_QENTRY_RQ; 8153 *r_id = rcqe->rq_id; 8154 rc = rcqe->status; 8155 break; 8156 } 8157 case SLI4_CQE_CODE_RQ_ASYNC_V1: 8158 { 8159 sli4_fc_async_rcqe_v1_t *rcqe = (void *)cqe; 8160 8161 *etype = SLI_QENTRY_RQ; 8162 *r_id = rcqe->rq_id; 8163 rc = rcqe->status; 8164 break; 8165 } 8166 case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD: 8167 { 8168 sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe; 8169 8170 *etype = SLI_QENTRY_OPT_WRITE_CMD; 8171 *r_id = optcqe->rq_id; 8172 rc = optcqe->status; 8173 break; 8174 } 8175 case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA: 8176 { 8177 sli4_fc_optimized_write_data_cqe_t *dcqe = (void *)cqe; 8178 8179 *etype = SLI_QENTRY_OPT_WRITE_DATA; 8180 *r_id = dcqe->xri; 8181 rc = dcqe->status; 8182 8183 /* Flag errors */ 8184 if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) { 8185 ocs_log_test(sli4->os, "Optimized DATA CQE: status=%#x hw_status=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n", 8186 dcqe->status, dcqe->hw_status, 8187 dcqe->xri, dcqe->total_data_placed, 8188 ((uint32_t*) cqe)[3], dcqe->xb); 8189 } 8190 break; 8191 } 8192 case SLI4_CQE_CODE_RQ_COALESCING: 8193 { 8194 sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe; 8195 8196 *etype = SLI_QENTRY_RQ; 8197 *r_id = rcqe->rq_id; 8198 rc = rcqe->status; 8199 break; 8200 } 8201 case SLI4_CQE_CODE_XRI_ABORTED: 8202 { 8203 sli4_fc_xri_aborted_cqe_t *xa = (void *)cqe; 8204 8205 *etype = SLI_QENTRY_XABT; 8206 *r_id = xa->xri; 8207 rc = 0; 8208 break; 8209 } 8210 case SLI4_CQE_CODE_RELEASE_WQE: { 8211 sli4_fc_wqec_t *wqec = (void*) cqe; 8212 8213 *etype = SLI_QENTRY_WQ_RELEASE; 8214 *r_id = wqec->wq_id; 8215 rc = 0; 8216 break; 8217 } 8218 default: 8219 ocs_log_test(sli4->os, "CQE completion code %d not handled\n", code); 8220 *etype = SLI_QENTRY_MAX; 8221 *r_id = UINT16_MAX; 8222 } 8223 8224 return rc; 8225 } 8226 8227 /** 8228 * @ingroup sli_fc 8229 * @brief Return the ELS/CT response length. 8230 * 8231 * @param sli4 SLI context. 8232 * @param cqe Pointer to the CQ entry. 8233 * 8234 * @return Returns the length, in bytes. 8235 */ 8236 uint32_t 8237 sli_fc_response_length(sli4_t *sli4, uint8_t *cqe) 8238 { 8239 sli4_fc_wcqe_t *wcqe = (void *)cqe; 8240 8241 return wcqe->wqe_specific_1; 8242 } 8243 8244 /** 8245 * @ingroup sli_fc 8246 * @brief Return the FCP IO length. 8247 * 8248 * @param sli4 SLI context. 8249 * @param cqe Pointer to the CQ entry. 
8250 * 8251 * @return Returns the length, in bytes. 8252 */ 8253 uint32_t 8254 sli_fc_io_length(sli4_t *sli4, uint8_t *cqe) 8255 { 8256 sli4_fc_wcqe_t *wcqe = (void *)cqe; 8257 8258 return wcqe->wqe_specific_1; 8259 } 8260 8261 /** 8262 * @ingroup sli_fc 8263 * @brief Retrieve the D_ID from the completion. 8264 * 8265 * @param sli4 SLI context. 8266 * @param cqe Pointer to the CQ entry. 8267 * @param d_id Pointer where the D_ID is written. 8268 * 8269 * @return Returns 0 on success, or a non-zero value on failure. 8270 */ 8271 int32_t 8272 sli_fc_els_did(sli4_t *sli4, uint8_t *cqe, uint32_t *d_id) 8273 { 8274 sli4_fc_wcqe_t *wcqe = (void *)cqe; 8275 8276 *d_id = 0; 8277 8278 if (wcqe->status) { 8279 return -1; 8280 } else { 8281 *d_id = wcqe->wqe_specific_2 & 0x00ffffff; 8282 return 0; 8283 } 8284 } 8285 8286 uint32_t 8287 sli_fc_ext_status(sli4_t *sli4, uint8_t *cqe) 8288 { 8289 sli4_fc_wcqe_t *wcqe = (void *)cqe; 8290 uint32_t mask; 8291 8292 switch (wcqe->status) { 8293 case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE: 8294 mask = UINT32_MAX; 8295 break; 8296 case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: 8297 case SLI4_FC_WCQE_STATUS_CMD_REJECT: 8298 mask = 0xff; 8299 break; 8300 case SLI4_FC_WCQE_STATUS_NPORT_RJT: 8301 case SLI4_FC_WCQE_STATUS_FABRIC_RJT: 8302 case SLI4_FC_WCQE_STATUS_NPORT_BSY: 8303 case SLI4_FC_WCQE_STATUS_FABRIC_BSY: 8304 case SLI4_FC_WCQE_STATUS_LS_RJT: 8305 mask = UINT32_MAX; 8306 break; 8307 case SLI4_FC_WCQE_STATUS_DI_ERROR: 8308 mask = UINT32_MAX; 8309 break; 8310 default: 8311 mask = 0; 8312 } 8313 8314 return wcqe->wqe_specific_2 & mask; 8315 } 8316 8317 /** 8318 * @ingroup sli_fc 8319 * @brief Retrieve the RQ index from the completion. 8320 * 8321 * @param sli4 SLI context. 8322 * @param cqe Pointer to the CQ entry. 8323 * @param rq_id Pointer where the rq_id is written. 8324 * @param index Pointer where the index is written. 8325 * 8326 * @return Returns 0 on success, or a non-zero value on failure. 
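 *
 * @par Example
 * A hypothetical sketch of locating the posted buffer for an unsolicited frame;
 * the my_rq_buffer_lookup() helper and its bookkeeping are assumptions, not part
 * of this file:
 * @code
 * uint16_t rq_id;
 * uint32_t index;
 *
 * if (sli_fc_rqe_rqid_and_index(sli4, cqe, &rq_id, &index) == 0) {
 *         // rq_id names the RQ and index the element within it
 *         seq_buffer = my_rq_buffer_lookup(rq_id, index);
 * } else {
 *         // a non-zero return carries the RCQE status; drop or replenish
 * }
 * @endcode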
8327 */ 8328 int32_t 8329 sli_fc_rqe_rqid_and_index(sli4_t *sli4, uint8_t *cqe, uint16_t *rq_id, uint32_t *index) 8330 { 8331 sli4_fc_async_rcqe_t *rcqe = (void *)cqe; 8332 sli4_fc_async_rcqe_v1_t *rcqe_v1 = (void *)cqe; 8333 int32_t rc = -1; 8334 uint8_t code = 0; 8335 8336 *rq_id = 0; 8337 *index = UINT32_MAX; 8338 8339 code = cqe[SLI4_CQE_CODE_OFFSET]; 8340 8341 if (code == SLI4_CQE_CODE_RQ_ASYNC) { 8342 *rq_id = rcqe->rq_id; 8343 if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe->status) { 8344 *index = rcqe->rq_element_index; 8345 rc = 0; 8346 } else { 8347 *index = rcqe->rq_element_index; 8348 rc = rcqe->status; 8349 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n", 8350 rcqe->status, sli_fc_get_status_string(rcqe->status), rcqe->rq_id, 8351 rcqe->rq_element_index, rcqe->payload_data_placement_length, rcqe->sof_byte, 8352 rcqe->eof_byte, rcqe->header_data_placement_length); 8353 } 8354 } else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) { 8355 *rq_id = rcqe_v1->rq_id; 8356 if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe_v1->status) { 8357 *index = rcqe_v1->rq_element_index; 8358 rc = 0; 8359 } else { 8360 *index = rcqe_v1->rq_element_index; 8361 rc = rcqe_v1->status; 8362 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n", 8363 rcqe_v1->status, sli_fc_get_status_string(rcqe_v1->status), 8364 rcqe_v1->rq_id, rcqe_v1->rq_element_index, 8365 rcqe_v1->payload_data_placement_length, rcqe_v1->sof_byte, 8366 rcqe_v1->eof_byte, rcqe_v1->header_data_placement_length); 8367 } 8368 } else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) { 8369 sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe; 8370 8371 *rq_id = optcqe->rq_id; 8372 if (SLI4_FC_ASYNC_RQ_SUCCESS == optcqe->status) { 8373 *index = optcqe->rq_element_index; 8374 rc = 0; 8375 } else { 8376 *index = optcqe->rq_element_index; 8377 rc = optcqe->status; 8378 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x hdpl=%x oox=%d agxr=%d xri=0x%x rpi=0x%x\n", 8379 optcqe->status, sli_fc_get_status_string(optcqe->status), optcqe->rq_id, 8380 optcqe->rq_element_index, optcqe->payload_data_placement_length, 8381 optcqe->header_data_placement_length, optcqe->oox, optcqe->agxr, optcqe->xri, 8382 optcqe->rpi); 8383 } 8384 } else if (code == SLI4_CQE_CODE_RQ_COALESCING) { 8385 sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe; 8386 8387 *rq_id = rcqe->rq_id; 8388 if (SLI4_FC_COALESCE_RQ_SUCCESS == rcqe->status) { 8389 *index = rcqe->rq_element_index; 8390 rc = 0; 8391 } else { 8392 *index = UINT32_MAX; 8393 rc = rcqe->status; 8394 8395 ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x rq_id=%#x sdpl=%x\n", 8396 rcqe->status, sli_fc_get_status_string(rcqe->status), rcqe->rq_id, 8397 rcqe->rq_element_index, rcqe->rq_id, rcqe->sequence_reporting_placement_length); 8398 } 8399 } else { 8400 *index = UINT32_MAX; 8401 8402 rc = rcqe->status; 8403 8404 ocs_log_debug(sli4->os, "status=%02x rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n", 8405 rcqe->status, rcqe->rq_id, rcqe->rq_element_index, rcqe->payload_data_placement_length, 8406 rcqe->sof_byte, rcqe->eof_byte, rcqe->header_data_placement_length); 8407 } 8408 8409 return rc; 8410 } 8411 8412 /** 8413 * @ingroup sli_fc 8414 * @brief Process an asynchronous FCoE event entry. 8415 * 8416 * @par Description 8417 * Parses Asynchronous Completion Queue Entry (ACQE), 8418 * creates an abstracted event, and calls the registered callback functions. 8419 * 8420 * @param sli4 SLI context. 
8421 * @param acqe Pointer to the ACQE. 8422 * 8423 * @return Returns 0 on success, or a non-zero value on failure. 8424 */ 8425 int32_t 8426 sli_fc_process_fcoe(sli4_t *sli4, void *acqe) 8427 { 8428 sli4_fcoe_fip_t *fcoe = acqe; 8429 sli4_fip_event_t event = { 0 }; 8430 uint32_t mask = UINT32_MAX; 8431 8432 ocs_log_debug(sli4->os, "ACQE FCoE FIP type=%02x count=%d tag=%#x\n", 8433 fcoe->event_type, 8434 fcoe->fcf_count, 8435 fcoe->event_tag); 8436 8437 if (!sli4->fip) { 8438 return 0; 8439 } 8440 8441 event.type = fcoe->event_type; 8442 event.index = UINT32_MAX; 8443 8444 switch (fcoe->event_type) { 8445 case SLI4_FCOE_FIP_FCF_DISCOVERED: 8446 ocs_log_debug(sli4->os, "FCF Discovered index=%d\n", fcoe->event_information); 8447 break; 8448 case SLI4_FCOE_FIP_FCF_TABLE_FULL: 8449 ocs_log_debug(sli4->os, "FCF Table Full\n"); 8450 mask = 0; 8451 break; 8452 case SLI4_FCOE_FIP_FCF_DEAD: 8453 ocs_log_debug(sli4->os, "FCF Dead/Gone index=%d\n", fcoe->event_information); 8454 break; 8455 case SLI4_FCOE_FIP_FCF_CLEAR_VLINK: 8456 mask = UINT16_MAX; 8457 ocs_log_debug(sli4->os, "Clear VLINK Received VPI=%#x\n", fcoe->event_information & mask); 8458 break; 8459 case SLI4_FCOE_FIP_FCF_MODIFIED: 8460 ocs_log_debug(sli4->os, "FCF Modified\n"); 8461 break; 8462 default: 8463 ocs_log_test(sli4->os, "bad FCoE type %#x", fcoe->event_type); 8464 mask = 0; 8465 } 8466 8467 if (mask != 0) { 8468 event.index = fcoe->event_information & mask; 8469 } 8470 8471 sli4->fip(sli4->fip_arg, &event); 8472 8473 return 0; 8474 } 8475 8476 /** 8477 * @ingroup sli_fc 8478 * @brief Allocate a receive queue. 8479 * 8480 * @par Description 8481 * Allocates DMA memory and configures the requested queue type. 8482 * 8483 * @param sli4 SLI context. 8484 * @param q Pointer to the queue object for the header. 8485 * @param n_entries Number of entries to allocate. 8486 * @param buffer_size buffer size for the queue. 8487 * @param cq Associated CQ. 8488 * @param ulp The ULP to bind 8489 * @param is_hdr Used to validate the rq_id and set the type of queue 8490 * 8491 * @return Returns 0 on success, or -1 on failure. 8492 */ 8493 int32_t 8494 sli_fc_rq_alloc(sli4_t *sli4, sli4_queue_t *q, 8495 uint32_t n_entries, uint32_t buffer_size, 8496 sli4_queue_t *cq, uint16_t ulp, uint8_t is_hdr) 8497 { 8498 int32_t (*rq_create)(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t); 8499 8500 if ((sli4 == NULL) || (q == NULL)) { 8501 void *os = sli4 != NULL ? 
sli4->os : NULL; 8502 8503 ocs_log_err(os, "bad parameter sli4=%p q=%p\n", sli4, q); 8504 return -1; 8505 } 8506 8507 if (__sli_queue_init(sli4, q, SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE, 8508 n_entries, SLI_PAGE_SIZE)) { 8509 return -1; 8510 } 8511 8512 if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) { 8513 rq_create = sli_cmd_fcoe_rq_create; 8514 } else { 8515 rq_create = sli_cmd_fcoe_rq_create_v1; 8516 } 8517 8518 if (rq_create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma, 8519 cq->id, ulp, buffer_size)) { 8520 if (__sli_create_queue(sli4, q)) { 8521 ocs_dma_free(sli4->os, &q->dma); 8522 return -1; 8523 } 8524 if (is_hdr && q->id & 1) { 8525 ocs_log_test(sli4->os, "bad header RQ_ID %d\n", q->id); 8526 ocs_dma_free(sli4->os, &q->dma); 8527 return -1; 8528 } else if (!is_hdr && (q->id & 1) == 0) { 8529 ocs_log_test(sli4->os, "bad data RQ_ID %d\n", q->id); 8530 ocs_dma_free(sli4->os, &q->dma); 8531 return -1; 8532 } 8533 } else { 8534 return -1; 8535 } 8536 q->u.flag.is_hdr = is_hdr; 8537 if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) { 8538 q->u.flag.rq_batch = TRUE; 8539 } 8540 return 0; 8541 } 8542 8543 /** 8544 * @ingroup sli_fc 8545 * @brief Allocate a receive queue set. 8546 * 8547 * @param sli4 SLI context. 8548 * @param num_rq_pairs Number of header/data RQ pairs to create. 8549 * @param qs Pointers to the queue objects for both header and data. 8550 * The length of this array should be 2 * num_rq_pairs. 8551 * @param base_cq_id First CQ id; assumes CQs base_cq_id through (base_cq_id + num_rq_pairs) have already been allotted. 8552 * @param n_entries Number of entries in each RQ. 8553 * @param header_buffer_size Buffer size for the header RQs. 8554 * @param payload_buffer_size Buffer size for the payload (data) RQs. 8555 * @param ulp The ULP to bind. 8556 * 8557 * @return Returns 0 on success, or -1 on failure. 8558 */ 8559 int32_t 8560 sli_fc_rq_set_alloc(sli4_t *sli4, uint32_t num_rq_pairs, 8561 sli4_queue_t *qs[], uint32_t base_cq_id, 8562 uint32_t n_entries, uint32_t header_buffer_size, 8563 uint32_t payload_buffer_size, uint16_t ulp) 8564 { 8565 uint32_t i, p, offset = 0; 8566 uint32_t payload_size, total_page_count = 0; 8567 uintptr_t addr; 8568 ocs_dma_t dma; 8569 sli4_res_common_create_queue_set_t *rsp = NULL; 8570 sli4_req_fcoe_rq_create_v2_t *req = NULL; 8571 8572 ocs_memset(&dma, 0, sizeof(dma)); 8573 8574 for (i = 0; i < (num_rq_pairs * 2); i++) { 8575 if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE, 8576 n_entries, SLI_PAGE_SIZE)) { 8577 goto error; 8578 } 8579 } 8580 8581 total_page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rq_pairs * 2; 8582 8583 /* Payload length must accommodate both request and response */ 8584 payload_size = max((sizeof(sli4_req_fcoe_rq_create_v1_t) + (8 * total_page_count)), 8585 sizeof(sli4_res_common_create_queue_set_t)); 8586 8587 if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) { 8588 ocs_log_err(sli4->os, "DMA allocation failed\n"); 8589 goto error; 8590 } 8591 ocs_memset(dma.virt, 0, payload_size); 8592 8593 if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, 8594 payload_size, &dma) == -1) { 8595 goto error; 8596 } 8597 req = (sli4_req_fcoe_rq_create_v2_t *)((uint8_t *)dma.virt); 8598 8599 /* Fill Header fields */ 8600 req->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE; 8601 req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE; 8602 req->hdr.version = 2; 8603 req->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v2_t) - sizeof(sli4_req_hdr_t) 8604 + (8 * total_page_count); 8605 8606 /* Fill Payload fields */ 8607 req->dnb = TRUE; 8608 req->num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE); 8609 req->rqe_count =
qs[0]->dma.size / SLI4_FCOE_RQE_SIZE; 8610 req->rqe_size = SLI4_FCOE_RQE_SIZE_8; 8611 req->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096; 8612 req->rq_count = num_rq_pairs * 2; 8613 req->base_cq_id = base_cq_id; 8614 req->hdr_buffer_size = header_buffer_size; 8615 req->payload_buffer_size = payload_buffer_size; 8616 8617 for (i = 0; i < (num_rq_pairs * 2); i++) { 8618 for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages; p++, addr += SLI_PAGE_SIZE) { 8619 req->page_physical_address[offset].low = ocs_addr32_lo(addr); 8620 req->page_physical_address[offset].high = ocs_addr32_hi(addr); 8621 offset++; 8622 } 8623 } 8624 8625 if (sli_bmbx_command(sli4)) { 8626 ocs_log_crit(sli4->os, "bootstrap mailbox write failed RQSet\n"); 8627 goto error; 8628 } 8629 8630 rsp = (void *)((uint8_t *)dma.virt); 8631 if (rsp->hdr.status) { 8632 ocs_log_err(sli4->os, "bad create RQSet status=%#x addl=%#x\n", 8633 rsp->hdr.status, rsp->hdr.additional_status); 8634 goto error; 8635 } else { 8636 for (i = 0; i < (num_rq_pairs * 2); i++) { 8637 qs[i]->id = i + rsp->q_id; 8638 if ((qs[i]->id & 1) == 0) { 8639 qs[i]->u.flag.is_hdr = TRUE; 8640 } else { 8641 qs[i]->u.flag.is_hdr = FALSE; 8642 } 8643 qs[i]->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off; 8644 qs[i]->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset; 8645 } 8646 } 8647 8648 ocs_dma_free(sli4->os, &dma); 8649 8650 return 0; 8651 8652 error: 8653 for (i = 0; i < (num_rq_pairs * 2); i++) { 8654 if (qs[i]->dma.size) { 8655 ocs_dma_free(sli4->os, &qs[i]->dma); 8656 } 8657 } 8658 8659 if (dma.size) { 8660 ocs_dma_free(sli4->os, &dma); 8661 } 8662 8663 return -1; 8664 } 8665 8666 /** 8667 * @ingroup sli_fc 8668 * @brief Get the RPI resource requirements. 8669 * 8670 * @param sli4 SLI context. 8671 * @param n_rpi Number of RPIs desired. 8672 * 8673 * @return Returns the number of bytes needed. This value may be zero.
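 *
 * @par Example
 * A hypothetical sizing sketch run before RPI registration; the "hdr_templates"
 * DMA object and the n_rpi count are illustrative assumptions:
 * @code
 * uint32_t bytes = sli_fc_get_rpi_requirements(sli4, n_rpi);
 *
 * if (bytes &&
 *     ocs_dma_alloc(sli4->os, &hdr_templates, bytes, SLI_PAGE_SIZE)) {
 *         ocs_log_err(sli4->os, "header template allocation failed\n");
 *         return -1;
 * }
 * @endcode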
8674 */ 8675 uint32_t 8676 sli_fc_get_rpi_requirements(sli4_t *sli4, uint32_t n_rpi) 8677 { 8678 uint32_t bytes = 0; 8679 8680 /* Check if header templates needed */ 8681 if (sli4->config.hdr_template_req) { 8682 /* round up to a page */ 8683 bytes = SLI_ROUND_PAGE(n_rpi * SLI4_FCOE_HDR_TEMPLATE_SIZE); 8684 } 8685 8686 return bytes; 8687 } 8688 8689 /** 8690 * @ingroup sli_fc 8691 * @brief Return a text string corresponding to a CQE status value 8692 * 8693 * @param status Status value 8694 * 8695 * @return Returns corresponding string, otherwise "unknown" 8696 */ 8697 const char * 8698 sli_fc_get_status_string(uint32_t status) 8699 { 8700 static struct { 8701 uint32_t code; 8702 const char *label; 8703 } lookup[] = { 8704 {SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"}, 8705 {SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"}, 8706 {SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"}, 8707 {SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"}, 8708 {SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"}, 8709 {SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"}, 8710 {SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"}, 8711 {SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"}, 8712 {SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"}, 8713 {SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"}, 8714 {SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"}, 8715 {SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"}, 8716 {SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED, "RQ_INSUFF_BUF_NEEDED"}, 8717 {SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"}, 8718 {SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"}, 8719 {SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"}, 8720 {SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"}, 8721 {SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"}, 8722 {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED, "RQ_INSUFF_XRI_NEEDED"}, 8723 {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"}, 8724 {SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"}, 8725 {SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"}, 8726 }; 8727 uint32_t i; 8728 8729 for (i = 0; i < ARRAY_SIZE(lookup); i++) { 8730 if (status == lookup[i].code) { 8731 return lookup[i].label; 8732 } 8733 } 8734 return "unknown"; 8735 } 8736
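
/*
 * Example (illustrative only): decoding a failed work queue completion with the
 * CQE helpers above. Where the raw "cqe" bytes come from is left to the caller;
 * only the helper calls and the sli4_fc_wcqe_t overlay are from this file.
 *
 *	sli4_fc_wcqe_t *wcqe = (void *)cqe;
 *
 *	if (wcqe->status != SLI4_FC_WCQE_STATUS_SUCCESS) {
 *		uint32_t ext = sli_fc_ext_status(sli4, cqe);
 *
 *		ocs_log_err(sli4->os, "WQE failed: %s (ext=%#x)\n",
 *			    sli_fc_get_status_string(wcqe->status), ext);
 *	}
 */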