1 /* 2 * This file is part of the Chelsio FCoE driver for Linux. 3 * 4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include <linux/pci.h> 36 #include <linux/pci_regs.h> 37 #include <linux/firmware.h> 38 #include <linux/stddef.h> 39 #include <linux/delay.h> 40 #include <linux/string.h> 41 #include <linux/compiler.h> 42 #include <linux/jiffies.h> 43 #include <linux/kernel.h> 44 #include <linux/log2.h> 45 46 #include "csio_hw.h" 47 #include "csio_lnode.h" 48 #include "csio_rnode.h" 49 50 int csio_force_master; 51 int csio_dbg_level = 0xFEFF; 52 unsigned int csio_port_mask = 0xf; 53 54 /* Default FW event queue entries. */ 55 static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE; 56 57 /* Default MSI param level */ 58 int csio_msi = 2; 59 60 /* FCoE function instances */ 61 static int dev_num; 62 63 /* FCoE Adapter types & its description */ 64 static const struct csio_adap_desc csio_t4_fcoe_adapters[] = { 65 {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"}, 66 {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"}, 67 {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"}, 68 {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"}, 69 {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"}, 70 {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"}, 71 {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"}, 72 {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"}, 73 {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"}, 74 {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"}, 75 {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"}, 76 {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"}, 77 {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"}, 78 {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"}, 79 {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"}, 80 {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, 81 {"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"}, 82 {"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"}, 83 {"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"}, 84 {"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"}, 85 {"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"}, 86 {"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"}, 87 {"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"}, 88 {"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"} 89 }; 90 91 static const struct csio_adap_desc csio_t5_fcoe_adapters[] = { 92 {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"}, 93 {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"}, 94 {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"}, 95 {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"}, 96 {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"}, 97 {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"}, 98 {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"}, 99 {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"}, 100 {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"}, 101 {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"}, 102 {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"}, 103 {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"}, 104 {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"}, 105 {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"}, 106 {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"}, 107 {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, 108 {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"}, 109 {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"}, 110 {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"}, 111 {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"} 112 }; 113 114 static void csio_mgmtm_cleanup(struct csio_mgmtm *); 115 static void csio_hw_mbm_cleanup(struct csio_hw *); 116 117 /* State machine forward declarations */ 118 static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev); 119 static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev); 120 static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev); 121 static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev); 122 static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev); 123 static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev); 124 static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev); 125 static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev); 126 static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev); 127 128 static void csio_hw_initialize(struct csio_hw *hw); 129 static void csio_evtq_stop(struct csio_hw *hw); 130 static void csio_evtq_start(struct csio_hw *hw); 131 132 int csio_is_hw_ready(struct csio_hw *hw) 133 { 134 return csio_match_state(hw, csio_hws_ready); 135 } 136 137 int csio_is_hw_removing(struct csio_hw *hw) 138 { 139 return csio_match_state(hw, csio_hws_removing); 140 } 141 142 143 /* 144 * csio_hw_wait_op_done_val - wait until an operation is completed 145 * @hw: the HW module 146 * @reg: the register to check for completion 147 * @mask: a single-bit field within @reg that indicates completion 148 * @polarity: the value of the field when the operation is completed 149 * @attempts: number of check iterations 150 * @delay: delay in usecs between iterations 151 * @valp: where to store the value of the register at completion time 152 * 153 * Wait until an operation is completed by checking a bit in a register 154 * up to @attempts times. If @valp is not NULL the value of the register 155 * at the time it indicated completion is stored there. Returns 0 if the 156 * operation completes and -EAGAIN otherwise. 157 */ 158 int 159 csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, 160 int polarity, int attempts, int delay, uint32_t *valp) 161 { 162 uint32_t val; 163 while (1) { 164 val = csio_rd_reg32(hw, reg); 165 166 if (!!(val & mask) == polarity) { 167 if (valp) 168 *valp = val; 169 return 0; 170 } 171 172 if (--attempts == 0) 173 return -EAGAIN; 174 if (delay) 175 udelay(delay); 176 } 177 } 178 179 /* 180 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register 181 * @hw: the adapter 182 * @addr: the indirect TP register address 183 * @mask: specifies the field within the register to modify 184 * @val: new value for the field 185 * 186 * Sets a field of an indirect TP register to the given value. 187 */ 188 void 189 csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr, 190 unsigned int mask, unsigned int val) 191 { 192 csio_wr_reg32(hw, addr, TP_PIO_ADDR); 193 val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask; 194 csio_wr_reg32(hw, val, TP_PIO_DATA); 195 } 196 197 void 198 csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, 199 uint32_t value) 200 { 201 uint32_t val = csio_rd_reg32(hw, reg) & ~mask; 202 203 csio_wr_reg32(hw, val | value, reg); 204 /* Flush */ 205 csio_rd_reg32(hw, reg); 206 207 } 208 209 static int 210 csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf) 211 { 212 return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype, 213 addr, len, buf, 0); 214 } 215 216 /* 217 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 218 */ 219 #define EEPROM_MAX_RD_POLL 40 220 #define EEPROM_MAX_WR_POLL 6 221 #define EEPROM_STAT_ADDR 0x7bfc 222 #define VPD_BASE 0x400 223 #define VPD_BASE_OLD 0 224 #define VPD_LEN 1024 225 #define VPD_INFO_FLD_HDR_SIZE 3 226 227 /* 228 * csio_hw_seeprom_read - read a serial EEPROM location 229 * @hw: hw to read 230 * @addr: EEPROM virtual address 231 * @data: where to store the read data 232 * 233 * Read a 32-bit word from a location in serial EEPROM using the card's PCI 234 * VPD capability. Note that this function must be called with a virtual 235 * address. 236 */ 237 static int 238 csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data) 239 { 240 uint16_t val = 0; 241 int attempts = EEPROM_MAX_RD_POLL; 242 uint32_t base = hw->params.pci.vpd_cap_addr; 243 244 if (addr >= EEPROMVSIZE || (addr & 3)) 245 return -EINVAL; 246 247 pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr); 248 249 do { 250 udelay(10); 251 pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val); 252 } while (!(val & PCI_VPD_ADDR_F) && --attempts); 253 254 if (!(val & PCI_VPD_ADDR_F)) { 255 csio_err(hw, "reading EEPROM address 0x%x failed\n", addr); 256 return -EINVAL; 257 } 258 259 pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data); 260 *data = le32_to_cpu(*data); 261 262 return 0; 263 } 264 265 /* 266 * Partial EEPROM Vital Product Data structure. Includes only the ID and 267 * VPD-R sections. 268 */ 269 struct t4_vpd_hdr { 270 u8 id_tag; 271 u8 id_len[2]; 272 u8 id_data[ID_LEN]; 273 u8 vpdr_tag; 274 u8 vpdr_len[2]; 275 }; 276 277 /* 278 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in 279 * the VPD 280 * @v: Pointer to buffered vpd data structure 281 * @kw: The keyword to search for 282 * 283 * Returns the value of the information field keyword or 284 * -EINVAL otherwise. 285 */ 286 static int 287 csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw) 288 { 289 int32_t i; 290 int32_t offset , len; 291 const uint8_t *buf = &v->id_tag; 292 const uint8_t *vpdr_len = &v->vpdr_tag; 293 offset = sizeof(struct t4_vpd_hdr); 294 len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8); 295 296 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) 297 return -EINVAL; 298 299 for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) { 300 if (memcmp(buf + i , kw, 2) == 0) { 301 i += VPD_INFO_FLD_HDR_SIZE; 302 return i; 303 } 304 305 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2]; 306 } 307 308 return -EINVAL; 309 } 310 311 static int 312 csio_pci_capability(struct pci_dev *pdev, int cap, int *pos) 313 { 314 *pos = pci_find_capability(pdev, cap); 315 if (*pos) 316 return 0; 317 318 return -1; 319 } 320 321 /* 322 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM 323 * @hw: HW module 324 * @p: where to store the parameters 325 * 326 * Reads card parameters stored in VPD EEPROM. 327 */ 328 static int 329 csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p) 330 { 331 int i, ret, ec, sn, addr; 332 uint8_t *vpd, csum; 333 const struct t4_vpd_hdr *v; 334 /* To get around compilation warning from strstrip */ 335 char *s; 336 337 if (csio_is_valid_vpd(hw)) 338 return 0; 339 340 ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD, 341 &hw->params.pci.vpd_cap_addr); 342 if (ret) 343 return -EINVAL; 344 345 vpd = kzalloc(VPD_LEN, GFP_ATOMIC); 346 if (vpd == NULL) 347 return -ENOMEM; 348 349 /* 350 * Card information normally starts at VPD_BASE but early cards had 351 * it at 0. 352 */ 353 ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd)); 354 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; 355 356 for (i = 0; i < VPD_LEN; i += 4) { 357 ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i)); 358 if (ret) { 359 kfree(vpd); 360 return ret; 361 } 362 } 363 364 /* Reset the VPD flag! */ 365 hw->flags &= (~CSIO_HWF_VPD_VALID); 366 367 v = (const struct t4_vpd_hdr *)vpd; 368 369 #define FIND_VPD_KW(var, name) do { \ 370 var = csio_hw_get_vpd_keyword_val(v, name); \ 371 if (var < 0) { \ 372 csio_err(hw, "missing VPD keyword " name "\n"); \ 373 kfree(vpd); \ 374 return -EINVAL; \ 375 } \ 376 } while (0) 377 378 FIND_VPD_KW(i, "RV"); 379 for (csum = 0; i >= 0; i--) 380 csum += vpd[i]; 381 382 if (csum) { 383 csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum); 384 kfree(vpd); 385 return -EINVAL; 386 } 387 FIND_VPD_KW(ec, "EC"); 388 FIND_VPD_KW(sn, "SN"); 389 #undef FIND_VPD_KW 390 391 memcpy(p->id, v->id_data, ID_LEN); 392 s = strstrip(p->id); 393 memcpy(p->ec, vpd + ec, EC_LEN); 394 s = strstrip(p->ec); 395 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; 396 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 397 s = strstrip(p->sn); 398 399 csio_valid_vpd_copied(hw); 400 401 kfree(vpd); 402 return 0; 403 } 404 405 /* 406 * csio_hw_sf1_read - read data from the serial flash 407 * @hw: the HW module 408 * @byte_cnt: number of bytes to read 409 * @cont: whether another operation will be chained 410 * @lock: whether to lock SF for PL access only 411 * @valp: where to store the read data 412 * 413 * Reads up to 4 bytes of data from the serial flash. The location of 414 * the read needs to be specified prior to calling this by issuing the 415 * appropriate commands to the serial flash. 416 */ 417 static int 418 csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont, 419 int32_t lock, uint32_t *valp) 420 { 421 int ret; 422 423 if (!byte_cnt || byte_cnt > 4) 424 return -EINVAL; 425 if (csio_rd_reg32(hw, SF_OP) & SF_BUSY) 426 return -EBUSY; 427 428 cont = cont ? SF_CONT : 0; 429 lock = lock ? SF_LOCK : 0; 430 431 csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP); 432 ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 433 10, NULL); 434 if (!ret) 435 *valp = csio_rd_reg32(hw, SF_DATA); 436 return ret; 437 } 438 439 /* 440 * csio_hw_sf1_write - write data to the serial flash 441 * @hw: the HW module 442 * @byte_cnt: number of bytes to write 443 * @cont: whether another operation will be chained 444 * @lock: whether to lock SF for PL access only 445 * @val: value to write 446 * 447 * Writes up to 4 bytes of data to the serial flash. The location of 448 * the write needs to be specified prior to calling this by issuing the 449 * appropriate commands to the serial flash. 450 */ 451 static int 452 csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont, 453 int32_t lock, uint32_t val) 454 { 455 if (!byte_cnt || byte_cnt > 4) 456 return -EINVAL; 457 if (csio_rd_reg32(hw, SF_OP) & SF_BUSY) 458 return -EBUSY; 459 460 cont = cont ? SF_CONT : 0; 461 lock = lock ? SF_LOCK : 0; 462 463 csio_wr_reg32(hw, val, SF_DATA); 464 csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP); 465 466 return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 467 10, NULL); 468 } 469 470 /* 471 * csio_hw_flash_wait_op - wait for a flash operation to complete 472 * @hw: the HW module 473 * @attempts: max number of polls of the status register 474 * @delay: delay between polls in ms 475 * 476 * Wait for a flash operation to complete by polling the status register. 477 */ 478 static int 479 csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay) 480 { 481 int ret; 482 uint32_t status; 483 484 while (1) { 485 ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS); 486 if (ret != 0) 487 return ret; 488 489 ret = csio_hw_sf1_read(hw, 1, 0, 1, &status); 490 if (ret != 0) 491 return ret; 492 493 if (!(status & 1)) 494 return 0; 495 if (--attempts == 0) 496 return -EAGAIN; 497 if (delay) 498 msleep(delay); 499 } 500 } 501 502 /* 503 * csio_hw_read_flash - read words from serial flash 504 * @hw: the HW module 505 * @addr: the start address for the read 506 * @nwords: how many 32-bit words to read 507 * @data: where to store the read data 508 * @byte_oriented: whether to store data as bytes or as words 509 * 510 * Read the specified number of 32-bit words from the serial flash. 511 * If @byte_oriented is set the read data is stored as a byte array 512 * (i.e., big-endian), otherwise as 32-bit words in the platform's 513 * natural endianess. 514 */ 515 static int 516 csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords, 517 uint32_t *data, int32_t byte_oriented) 518 { 519 int ret; 520 521 if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3)) 522 return -EINVAL; 523 524 addr = swab32(addr) | SF_RD_DATA_FAST; 525 526 ret = csio_hw_sf1_write(hw, 4, 1, 0, addr); 527 if (ret != 0) 528 return ret; 529 530 ret = csio_hw_sf1_read(hw, 1, 1, 0, data); 531 if (ret != 0) 532 return ret; 533 534 for ( ; nwords; nwords--, data++) { 535 ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data); 536 if (nwords == 1) 537 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 538 if (ret) 539 return ret; 540 if (byte_oriented) 541 *data = htonl(*data); 542 } 543 return 0; 544 } 545 546 /* 547 * csio_hw_write_flash - write up to a page of data to the serial flash 548 * @hw: the hw 549 * @addr: the start address to write 550 * @n: length of data to write in bytes 551 * @data: the data to write 552 * 553 * Writes up to a page of data (256 bytes) to the serial flash starting 554 * at the given address. All the data must be written to the same page. 555 */ 556 static int 557 csio_hw_write_flash(struct csio_hw *hw, uint32_t addr, 558 uint32_t n, const uint8_t *data) 559 { 560 int ret = -EINVAL; 561 uint32_t buf[64]; 562 uint32_t i, c, left, val, offset = addr & 0xff; 563 564 if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE) 565 return -EINVAL; 566 567 val = swab32(addr) | SF_PROG_PAGE; 568 569 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); 570 if (ret != 0) 571 goto unlock; 572 573 ret = csio_hw_sf1_write(hw, 4, 1, 1, val); 574 if (ret != 0) 575 goto unlock; 576 577 for (left = n; left; left -= c) { 578 c = min(left, 4U); 579 for (val = 0, i = 0; i < c; ++i) 580 val = (val << 8) + *data++; 581 582 ret = csio_hw_sf1_write(hw, c, c != left, 1, val); 583 if (ret) 584 goto unlock; 585 } 586 ret = csio_hw_flash_wait_op(hw, 8, 1); 587 if (ret) 588 goto unlock; 589 590 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 591 592 /* Read the page to verify the write succeeded */ 593 ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); 594 if (ret) 595 return ret; 596 597 if (memcmp(data - n, (uint8_t *)buf + offset, n)) { 598 csio_err(hw, 599 "failed to correctly write the flash page at %#x\n", 600 addr); 601 return -EINVAL; 602 } 603 604 return 0; 605 606 unlock: 607 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 608 return ret; 609 } 610 611 /* 612 * csio_hw_flash_erase_sectors - erase a range of flash sectors 613 * @hw: the HW module 614 * @start: the first sector to erase 615 * @end: the last sector to erase 616 * 617 * Erases the sectors in the given inclusive range. 618 */ 619 static int 620 csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end) 621 { 622 int ret = 0; 623 624 while (start <= end) { 625 626 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); 627 if (ret != 0) 628 goto out; 629 630 ret = csio_hw_sf1_write(hw, 4, 0, 1, 631 SF_ERASE_SECTOR | (start << 8)); 632 if (ret != 0) 633 goto out; 634 635 ret = csio_hw_flash_wait_op(hw, 14, 500); 636 if (ret != 0) 637 goto out; 638 639 start++; 640 } 641 out: 642 if (ret) 643 csio_err(hw, "erase of flash sector %d failed, error %d\n", 644 start, ret); 645 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 646 return 0; 647 } 648 649 static void 650 csio_hw_print_fw_version(struct csio_hw *hw, char *str) 651 { 652 csio_info(hw, "%s: %u.%u.%u.%u\n", str, 653 FW_HDR_FW_VER_MAJOR_GET(hw->fwrev), 654 FW_HDR_FW_VER_MINOR_GET(hw->fwrev), 655 FW_HDR_FW_VER_MICRO_GET(hw->fwrev), 656 FW_HDR_FW_VER_BUILD_GET(hw->fwrev)); 657 } 658 659 /* 660 * csio_hw_get_fw_version - read the firmware version 661 * @hw: HW module 662 * @vers: where to place the version 663 * 664 * Reads the FW version from flash. 665 */ 666 static int 667 csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers) 668 { 669 return csio_hw_read_flash(hw, FW_IMG_START + 670 offsetof(struct fw_hdr, fw_ver), 1, 671 vers, 0); 672 } 673 674 /* 675 * csio_hw_get_tp_version - read the TP microcode version 676 * @hw: HW module 677 * @vers: where to place the version 678 * 679 * Reads the TP microcode version from flash. 680 */ 681 static int 682 csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers) 683 { 684 return csio_hw_read_flash(hw, FLASH_FW_START + 685 offsetof(struct fw_hdr, tp_microcode_ver), 1, 686 vers, 0); 687 } 688 689 /* 690 * csio_hw_check_fw_version - check if the FW is compatible with 691 * this driver 692 * @hw: HW module 693 * 694 * Checks if an adapter's FW is compatible with the driver. Returns 0 695 * if there's exact match, a negative error if the version could not be 696 * read or there's a major/minor version mismatch/minor. 697 */ 698 static int 699 csio_hw_check_fw_version(struct csio_hw *hw) 700 { 701 int ret, major, minor, micro; 702 703 ret = csio_hw_get_fw_version(hw, &hw->fwrev); 704 if (!ret) 705 ret = csio_hw_get_tp_version(hw, &hw->tp_vers); 706 if (ret) 707 return ret; 708 709 major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev); 710 minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev); 711 micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev); 712 713 if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */ 714 csio_err(hw, "card FW has major version %u, driver wants %u\n", 715 major, FW_VERSION_MAJOR(hw)); 716 return -EINVAL; 717 } 718 719 if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw)) 720 return 0; /* perfect match */ 721 722 /* Minor/micro version mismatch */ 723 return -EINVAL; 724 } 725 726 /* 727 * csio_hw_fw_dload - download firmware. 728 * @hw: HW module 729 * @fw_data: firmware image to write. 730 * @size: image size 731 * 732 * Write the supplied firmware image to the card's serial flash. 733 */ 734 static int 735 csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) 736 { 737 uint32_t csum; 738 int32_t addr; 739 int ret; 740 uint32_t i; 741 uint8_t first_page[SF_PAGE_SIZE]; 742 const __be32 *p = (const __be32 *)fw_data; 743 struct fw_hdr *hdr = (struct fw_hdr *)fw_data; 744 uint32_t sf_sec_size; 745 746 if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) { 747 csio_err(hw, "Serial Flash data invalid\n"); 748 return -EINVAL; 749 } 750 751 if (!size) { 752 csio_err(hw, "FW image has no data\n"); 753 return -EINVAL; 754 } 755 756 if (size & 511) { 757 csio_err(hw, "FW image size not multiple of 512 bytes\n"); 758 return -EINVAL; 759 } 760 761 if (ntohs(hdr->len512) * 512 != size) { 762 csio_err(hw, "FW image size differs from size in FW header\n"); 763 return -EINVAL; 764 } 765 766 if (size > FW_MAX_SIZE) { 767 csio_err(hw, "FW image too large, max is %u bytes\n", 768 FW_MAX_SIZE); 769 return -EINVAL; 770 } 771 772 for (csum = 0, i = 0; i < size / sizeof(csum); i++) 773 csum += ntohl(p[i]); 774 775 if (csum != 0xffffffff) { 776 csio_err(hw, "corrupted firmware image, checksum %#x\n", csum); 777 return -EINVAL; 778 } 779 780 sf_sec_size = hw->params.sf_size / hw->params.sf_nsec; 781 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ 782 783 csio_dbg(hw, "Erasing sectors... start:%d end:%d\n", 784 FW_START_SEC, FW_START_SEC + i - 1); 785 786 ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC, 787 FW_START_SEC + i - 1); 788 if (ret) { 789 csio_err(hw, "Flash Erase failed\n"); 790 goto out; 791 } 792 793 /* 794 * We write the correct version at the end so the driver can see a bad 795 * version if the FW write fails. Start by writing a copy of the 796 * first page with a bad version. 797 */ 798 memcpy(first_page, fw_data, SF_PAGE_SIZE); 799 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); 800 ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page); 801 if (ret) 802 goto out; 803 804 csio_dbg(hw, "Writing Flash .. start:%d end:%d\n", 805 FW_IMG_START, FW_IMG_START + size); 806 807 addr = FW_IMG_START; 808 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 809 addr += SF_PAGE_SIZE; 810 fw_data += SF_PAGE_SIZE; 811 ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data); 812 if (ret) 813 goto out; 814 } 815 816 ret = csio_hw_write_flash(hw, 817 FW_IMG_START + 818 offsetof(struct fw_hdr, fw_ver), 819 sizeof(hdr->fw_ver), 820 (const uint8_t *)&hdr->fw_ver); 821 822 out: 823 if (ret) 824 csio_err(hw, "firmware download failed, error %d\n", ret); 825 return ret; 826 } 827 828 static int 829 csio_hw_get_flash_params(struct csio_hw *hw) 830 { 831 int ret; 832 uint32_t info = 0; 833 834 ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); 835 if (!ret) 836 ret = csio_hw_sf1_read(hw, 3, 0, 1, &info); 837 csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */ 838 if (ret != 0) 839 return ret; 840 841 if ((info & 0xff) != 0x20) /* not a Numonix flash */ 842 return -EINVAL; 843 info >>= 16; /* log2 of size */ 844 if (info >= 0x14 && info < 0x18) 845 hw->params.sf_nsec = 1 << (info - 16); 846 else if (info == 0x18) 847 hw->params.sf_nsec = 64; 848 else 849 return -EINVAL; 850 hw->params.sf_size = 1 << info; 851 852 return 0; 853 } 854 855 static void 856 csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) 857 { 858 uint16_t val; 859 int pcie_cap; 860 861 if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { 862 pci_read_config_word(hw->pdev, 863 pcie_cap + PCI_EXP_DEVCTL2, &val); 864 val &= 0xfff0; 865 val |= range ; 866 pci_write_config_word(hw->pdev, 867 pcie_cap + PCI_EXP_DEVCTL2, val); 868 } 869 } 870 871 /*****************************************************************************/ 872 /* HW State machine assists */ 873 /*****************************************************************************/ 874 875 static int 876 csio_hw_dev_ready(struct csio_hw *hw) 877 { 878 uint32_t reg; 879 int cnt = 6; 880 881 while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) && 882 (--cnt != 0)) 883 mdelay(100); 884 885 if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) || 886 (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) { 887 csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt); 888 return -EIO; 889 } 890 891 hw->pfn = SOURCEPF_GET(reg); 892 893 return 0; 894 } 895 896 /* 897 * csio_do_hello - Perform the HELLO FW Mailbox command and process response. 898 * @hw: HW module 899 * @state: Device state 900 * 901 * FW_HELLO_CMD has to be polled for completion. 902 */ 903 static int 904 csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state) 905 { 906 struct csio_mb *mbp; 907 int rv = 0; 908 enum csio_dev_master master; 909 enum fw_retval retval; 910 uint8_t mpfn; 911 char state_str[16]; 912 int retries = FW_CMD_HELLO_RETRIES; 913 914 memset(state_str, 0, sizeof(state_str)); 915 916 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 917 if (!mbp) { 918 rv = -ENOMEM; 919 CSIO_INC_STATS(hw, n_err_nomem); 920 goto out; 921 } 922 923 master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY; 924 925 retry: 926 csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 927 hw->pfn, master, NULL); 928 929 rv = csio_mb_issue(hw, mbp); 930 if (rv) { 931 csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv); 932 goto out_free_mb; 933 } 934 935 csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn); 936 if (retval != FW_SUCCESS) { 937 csio_err(hw, "HELLO cmd failed with ret: %d\n", retval); 938 rv = -EINVAL; 939 goto out_free_mb; 940 } 941 942 /* Firmware has designated us to be master */ 943 if (hw->pfn == mpfn) { 944 hw->flags |= CSIO_HWF_MASTER; 945 } else if (*state == CSIO_DEV_STATE_UNINIT) { 946 /* 947 * If we're not the Master PF then we need to wait around for 948 * the Master PF Driver to finish setting up the adapter. 949 * 950 * Note that we also do this wait if we're a non-Master-capable 951 * PF and there is no current Master PF; a Master PF may show up 952 * momentarily and we wouldn't want to fail pointlessly. (This 953 * can happen when an OS loads lots of different drivers rapidly 954 * at the same time). In this case, the Master PF returned by 955 * the firmware will be PCIE_FW_MASTER_MASK so the test below 956 * will work ... 957 */ 958 959 int waiting = FW_CMD_HELLO_TIMEOUT; 960 961 /* 962 * Wait for the firmware to either indicate an error or 963 * initialized state. If we see either of these we bail out 964 * and report the issue to the caller. If we exhaust the 965 * "hello timeout" and we haven't exhausted our retries, try 966 * again. Otherwise bail with a timeout error. 967 */ 968 for (;;) { 969 uint32_t pcie_fw; 970 971 spin_unlock_irq(&hw->lock); 972 msleep(50); 973 spin_lock_irq(&hw->lock); 974 waiting -= 50; 975 976 /* 977 * If neither Error nor Initialialized are indicated 978 * by the firmware keep waiting till we exaust our 979 * timeout ... and then retry if we haven't exhausted 980 * our retries ... 981 */ 982 pcie_fw = csio_rd_reg32(hw, PCIE_FW); 983 if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) { 984 if (waiting <= 0) { 985 if (retries-- > 0) 986 goto retry; 987 988 rv = -ETIMEDOUT; 989 break; 990 } 991 continue; 992 } 993 994 /* 995 * We either have an Error or Initialized condition 996 * report errors preferentially. 997 */ 998 if (state) { 999 if (pcie_fw & PCIE_FW_ERR) { 1000 *state = CSIO_DEV_STATE_ERR; 1001 rv = -ETIMEDOUT; 1002 } else if (pcie_fw & PCIE_FW_INIT) 1003 *state = CSIO_DEV_STATE_INIT; 1004 } 1005 1006 /* 1007 * If we arrived before a Master PF was selected and 1008 * there's not a valid Master PF, grab its identity 1009 * for our caller. 1010 */ 1011 if (mpfn == PCIE_FW_MASTER_MASK && 1012 (pcie_fw & PCIE_FW_MASTER_VLD)) 1013 mpfn = PCIE_FW_MASTER_GET(pcie_fw); 1014 break; 1015 } 1016 hw->flags &= ~CSIO_HWF_MASTER; 1017 } 1018 1019 switch (*state) { 1020 case CSIO_DEV_STATE_UNINIT: 1021 strcpy(state_str, "Initializing"); 1022 break; 1023 case CSIO_DEV_STATE_INIT: 1024 strcpy(state_str, "Initialized"); 1025 break; 1026 case CSIO_DEV_STATE_ERR: 1027 strcpy(state_str, "Error"); 1028 break; 1029 default: 1030 strcpy(state_str, "Unknown"); 1031 break; 1032 } 1033 1034 if (hw->pfn == mpfn) 1035 csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n", 1036 hw->pfn, state_str); 1037 else 1038 csio_info(hw, 1039 "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n", 1040 hw->pfn, mpfn, state_str); 1041 1042 out_free_mb: 1043 mempool_free(mbp, hw->mb_mempool); 1044 out: 1045 return rv; 1046 } 1047 1048 /* 1049 * csio_do_bye - Perform the BYE FW Mailbox command and process response. 1050 * @hw: HW module 1051 * 1052 */ 1053 static int 1054 csio_do_bye(struct csio_hw *hw) 1055 { 1056 struct csio_mb *mbp; 1057 enum fw_retval retval; 1058 1059 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1060 if (!mbp) { 1061 CSIO_INC_STATS(hw, n_err_nomem); 1062 return -ENOMEM; 1063 } 1064 1065 csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 1066 1067 if (csio_mb_issue(hw, mbp)) { 1068 csio_err(hw, "Issue of BYE command failed\n"); 1069 mempool_free(mbp, hw->mb_mempool); 1070 return -EINVAL; 1071 } 1072 1073 retval = csio_mb_fw_retval(mbp); 1074 if (retval != FW_SUCCESS) { 1075 mempool_free(mbp, hw->mb_mempool); 1076 return -EINVAL; 1077 } 1078 1079 mempool_free(mbp, hw->mb_mempool); 1080 1081 return 0; 1082 } 1083 1084 /* 1085 * csio_do_reset- Perform the device reset. 1086 * @hw: HW module 1087 * @fw_rst: FW reset 1088 * 1089 * If fw_rst is set, issues FW reset mbox cmd otherwise 1090 * does PIO reset. 1091 * Performs reset of the function. 1092 */ 1093 static int 1094 csio_do_reset(struct csio_hw *hw, bool fw_rst) 1095 { 1096 struct csio_mb *mbp; 1097 enum fw_retval retval; 1098 1099 if (!fw_rst) { 1100 /* PIO reset */ 1101 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 1102 mdelay(2000); 1103 return 0; 1104 } 1105 1106 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1107 if (!mbp) { 1108 CSIO_INC_STATS(hw, n_err_nomem); 1109 return -ENOMEM; 1110 } 1111 1112 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, 1113 PIORSTMODE | PIORST, 0, NULL); 1114 1115 if (csio_mb_issue(hw, mbp)) { 1116 csio_err(hw, "Issue of RESET command failed.n"); 1117 mempool_free(mbp, hw->mb_mempool); 1118 return -EINVAL; 1119 } 1120 1121 retval = csio_mb_fw_retval(mbp); 1122 if (retval != FW_SUCCESS) { 1123 csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval); 1124 mempool_free(mbp, hw->mb_mempool); 1125 return -EINVAL; 1126 } 1127 1128 mempool_free(mbp, hw->mb_mempool); 1129 1130 return 0; 1131 } 1132 1133 static int 1134 csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp) 1135 { 1136 struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb; 1137 uint16_t caps; 1138 1139 caps = ntohs(rsp->fcoecaps); 1140 1141 if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) { 1142 csio_err(hw, "No FCoE Initiator capability in the firmware.\n"); 1143 return -EINVAL; 1144 } 1145 1146 if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) { 1147 csio_err(hw, "No FCoE Control Offload capability\n"); 1148 return -EINVAL; 1149 } 1150 1151 return 0; 1152 } 1153 1154 /* 1155 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET 1156 * @hw: the HW module 1157 * @mbox: mailbox to use for the FW RESET command (if desired) 1158 * @force: force uP into RESET even if FW RESET command fails 1159 * 1160 * Issues a RESET command to firmware (if desired) with a HALT indication 1161 * and then puts the microprocessor into RESET state. The RESET command 1162 * will only be issued if a legitimate mailbox is provided (mbox <= 1163 * PCIE_FW_MASTER_MASK). 1164 * 1165 * This is generally used in order for the host to safely manipulate the 1166 * adapter without fear of conflicting with whatever the firmware might 1167 * be doing. The only way out of this state is to RESTART the firmware 1168 * ... 1169 */ 1170 static int 1171 csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) 1172 { 1173 enum fw_retval retval = 0; 1174 1175 /* 1176 * If a legitimate mailbox is provided, issue a RESET command 1177 * with a HALT indication. 1178 */ 1179 if (mbox <= PCIE_FW_MASTER_MASK) { 1180 struct csio_mb *mbp; 1181 1182 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1183 if (!mbp) { 1184 CSIO_INC_STATS(hw, n_err_nomem); 1185 return -ENOMEM; 1186 } 1187 1188 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, 1189 PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1), 1190 NULL); 1191 1192 if (csio_mb_issue(hw, mbp)) { 1193 csio_err(hw, "Issue of RESET command failed!\n"); 1194 mempool_free(mbp, hw->mb_mempool); 1195 return -EINVAL; 1196 } 1197 1198 retval = csio_mb_fw_retval(mbp); 1199 mempool_free(mbp, hw->mb_mempool); 1200 } 1201 1202 /* 1203 * Normally we won't complete the operation if the firmware RESET 1204 * command fails but if our caller insists we'll go ahead and put the 1205 * uP into RESET. This can be useful if the firmware is hung or even 1206 * missing ... We'll have to take the risk of putting the uP into 1207 * RESET without the cooperation of firmware in that case. 1208 * 1209 * We also force the firmware's HALT flag to be on in case we bypassed 1210 * the firmware RESET command above or we're dealing with old firmware 1211 * which doesn't have the HALT capability. This will serve as a flag 1212 * for the incoming firmware to know that it's coming out of a HALT 1213 * rather than a RESET ... if it's new enough to understand that ... 1214 */ 1215 if (retval == 0 || force) { 1216 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST); 1217 csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT); 1218 } 1219 1220 /* 1221 * And we always return the result of the firmware RESET command 1222 * even when we force the uP into RESET ... 1223 */ 1224 return retval ? -EINVAL : 0; 1225 } 1226 1227 /* 1228 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET 1229 * @hw: the HW module 1230 * @reset: if we want to do a RESET to restart things 1231 * 1232 * Restart firmware previously halted by csio_hw_fw_halt(). On successful 1233 * return the previous PF Master remains as the new PF Master and there 1234 * is no need to issue a new HELLO command, etc. 1235 * 1236 * We do this in two ways: 1237 * 1238 * 1. If we're dealing with newer firmware we'll simply want to take 1239 * the chip's microprocessor out of RESET. This will cause the 1240 * firmware to start up from its start vector. And then we'll loop 1241 * until the firmware indicates it's started again (PCIE_FW.HALT 1242 * reset to 0) or we timeout. 1243 * 1244 * 2. If we're dealing with older firmware then we'll need to RESET 1245 * the chip since older firmware won't recognize the PCIE_FW.HALT 1246 * flag and automatically RESET itself on startup. 1247 */ 1248 static int 1249 csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset) 1250 { 1251 if (reset) { 1252 /* 1253 * Since we're directing the RESET instead of the firmware 1254 * doing it automatically, we need to clear the PCIE_FW.HALT 1255 * bit. 1256 */ 1257 csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0); 1258 1259 /* 1260 * If we've been given a valid mailbox, first try to get the 1261 * firmware to do the RESET. If that works, great and we can 1262 * return success. Otherwise, if we haven't been given a 1263 * valid mailbox or the RESET command failed, fall back to 1264 * hitting the chip with a hammer. 1265 */ 1266 if (mbox <= PCIE_FW_MASTER_MASK) { 1267 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); 1268 msleep(100); 1269 if (csio_do_reset(hw, true) == 0) 1270 return 0; 1271 } 1272 1273 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 1274 msleep(2000); 1275 } else { 1276 int ms; 1277 1278 csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0); 1279 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 1280 if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT)) 1281 return 0; 1282 msleep(100); 1283 ms += 100; 1284 } 1285 return -ETIMEDOUT; 1286 } 1287 return 0; 1288 } 1289 1290 /* 1291 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW 1292 * @hw: the HW module 1293 * @mbox: mailbox to use for the FW RESET command (if desired) 1294 * @fw_data: the firmware image to write 1295 * @size: image size 1296 * @force: force upgrade even if firmware doesn't cooperate 1297 * 1298 * Perform all of the steps necessary for upgrading an adapter's 1299 * firmware image. Normally this requires the cooperation of the 1300 * existing firmware in order to halt all existing activities 1301 * but if an invalid mailbox token is passed in we skip that step 1302 * (though we'll still put the adapter microprocessor into RESET in 1303 * that case). 1304 * 1305 * On successful return the new firmware will have been loaded and 1306 * the adapter will have been fully RESET losing all previous setup 1307 * state. On unsuccessful return the adapter may be completely hosed ... 1308 * positive errno indicates that the adapter is ~probably~ intact, a 1309 * negative errno indicates that things are looking bad ... 1310 */ 1311 static int 1312 csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox, 1313 const u8 *fw_data, uint32_t size, int32_t force) 1314 { 1315 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 1316 int reset, ret; 1317 1318 ret = csio_hw_fw_halt(hw, mbox, force); 1319 if (ret != 0 && !force) 1320 return ret; 1321 1322 ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size); 1323 if (ret != 0) 1324 return ret; 1325 1326 /* 1327 * Older versions of the firmware don't understand the new 1328 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 1329 * restart. So for newly loaded older firmware we'll have to do the 1330 * RESET for it so it starts up on a clean slate. We can tell if 1331 * the newly loaded firmware will handle this right by checking 1332 * its header flags to see if it advertises the capability. 1333 */ 1334 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 1335 return csio_hw_fw_restart(hw, mbox, reset); 1336 } 1337 1338 1339 /* 1340 * csio_hw_fw_config_file - setup an adapter via a Configuration File 1341 * @hw: the HW module 1342 * @mbox: mailbox to use for the FW command 1343 * @mtype: the memory type where the Configuration File is located 1344 * @maddr: the memory address where the Configuration File is located 1345 * @finiver: return value for CF [fini] version 1346 * @finicsum: return value for CF [fini] checksum 1347 * @cfcsum: return value for CF computed checksum 1348 * 1349 * Issue a command to get the firmware to process the Configuration 1350 * File located at the specified mtype/maddress. If the Configuration 1351 * File is processed successfully and return value pointers are 1352 * provided, the Configuration File "[fini] section version and 1353 * checksum values will be returned along with the computed checksum. 1354 * It's up to the caller to decide how it wants to respond to the 1355 * checksums not matching but it recommended that a prominant warning 1356 * be emitted in order to help people rapidly identify changed or 1357 * corrupted Configuration Files. 1358 * 1359 * Also note that it's possible to modify things like "niccaps", 1360 * "toecaps",etc. between processing the Configuration File and telling 1361 * the firmware to use the new configuration. Callers which want to 1362 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for 1363 * Configuration Files if they want to do this. 1364 */ 1365 static int 1366 csio_hw_fw_config_file(struct csio_hw *hw, 1367 unsigned int mtype, unsigned int maddr, 1368 uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum) 1369 { 1370 struct csio_mb *mbp; 1371 struct fw_caps_config_cmd *caps_cmd; 1372 int rv = -EINVAL; 1373 enum fw_retval ret; 1374 1375 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1376 if (!mbp) { 1377 CSIO_INC_STATS(hw, n_err_nomem); 1378 return -ENOMEM; 1379 } 1380 /* 1381 * Tell the firmware to process the indicated Configuration File. 1382 * If there are no errors and the caller has provided return value 1383 * pointers for the [fini] section version, checksum and computed 1384 * checksum, pass those back to the caller. 1385 */ 1386 caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); 1387 CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); 1388 caps_cmd->op_to_write = 1389 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1390 FW_CMD_REQUEST | 1391 FW_CMD_READ); 1392 caps_cmd->cfvalid_to_len16 = 1393 htonl(FW_CAPS_CONFIG_CMD_CFVALID | 1394 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 1395 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | 1396 FW_LEN16(*caps_cmd)); 1397 1398 if (csio_mb_issue(hw, mbp)) { 1399 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); 1400 goto out; 1401 } 1402 1403 ret = csio_mb_fw_retval(mbp); 1404 if (ret != FW_SUCCESS) { 1405 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); 1406 goto out; 1407 } 1408 1409 if (finiver) 1410 *finiver = ntohl(caps_cmd->finiver); 1411 if (finicsum) 1412 *finicsum = ntohl(caps_cmd->finicsum); 1413 if (cfcsum) 1414 *cfcsum = ntohl(caps_cmd->cfcsum); 1415 1416 /* Validate device capabilities */ 1417 if (csio_hw_validate_caps(hw, mbp)) { 1418 rv = -ENOENT; 1419 goto out; 1420 } 1421 1422 /* 1423 * And now tell the firmware to use the configuration we just loaded. 1424 */ 1425 caps_cmd->op_to_write = 1426 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1427 FW_CMD_REQUEST | 1428 FW_CMD_WRITE); 1429 caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); 1430 1431 if (csio_mb_issue(hw, mbp)) { 1432 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n"); 1433 goto out; 1434 } 1435 1436 ret = csio_mb_fw_retval(mbp); 1437 if (ret != FW_SUCCESS) { 1438 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); 1439 goto out; 1440 } 1441 1442 rv = 0; 1443 out: 1444 mempool_free(mbp, hw->mb_mempool); 1445 return rv; 1446 } 1447 1448 /* 1449 * csio_get_device_params - Get device parameters. 1450 * @hw: HW module 1451 * 1452 */ 1453 static int 1454 csio_get_device_params(struct csio_hw *hw) 1455 { 1456 struct csio_wrm *wrm = csio_hw_to_wrm(hw); 1457 struct csio_mb *mbp; 1458 enum fw_retval retval; 1459 u32 param[6]; 1460 int i, j = 0; 1461 1462 /* Initialize portids to -1 */ 1463 for (i = 0; i < CSIO_MAX_PPORTS; i++) 1464 hw->pport[i].portid = -1; 1465 1466 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1467 if (!mbp) { 1468 CSIO_INC_STATS(hw, n_err_nomem); 1469 return -ENOMEM; 1470 } 1471 1472 /* Get port vec information. */ 1473 param[0] = FW_PARAM_DEV(PORTVEC); 1474 1475 /* Get Core clock. */ 1476 param[1] = FW_PARAM_DEV(CCLK); 1477 1478 /* Get EQ id start and end. */ 1479 param[2] = FW_PARAM_PFVF(EQ_START); 1480 param[3] = FW_PARAM_PFVF(EQ_END); 1481 1482 /* Get IQ id start and end. */ 1483 param[4] = FW_PARAM_PFVF(IQFLINT_START); 1484 param[5] = FW_PARAM_PFVF(IQFLINT_END); 1485 1486 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, 1487 ARRAY_SIZE(param), param, NULL, false, NULL); 1488 if (csio_mb_issue(hw, mbp)) { 1489 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); 1490 mempool_free(mbp, hw->mb_mempool); 1491 return -EINVAL; 1492 } 1493 1494 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1495 ARRAY_SIZE(param), param); 1496 if (retval != FW_SUCCESS) { 1497 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", 1498 retval); 1499 mempool_free(mbp, hw->mb_mempool); 1500 return -EINVAL; 1501 } 1502 1503 /* cache the information. */ 1504 hw->port_vec = param[0]; 1505 hw->vpd.cclk = param[1]; 1506 wrm->fw_eq_start = param[2]; 1507 wrm->fw_iq_start = param[4]; 1508 1509 /* Using FW configured max iqs & eqs */ 1510 if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) || 1511 !csio_is_hw_master(hw)) { 1512 hw->cfg_niq = param[5] - param[4] + 1; 1513 hw->cfg_neq = param[3] - param[2] + 1; 1514 csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n", 1515 hw->cfg_niq, hw->cfg_neq); 1516 } 1517 1518 hw->port_vec &= csio_port_mask; 1519 1520 hw->num_pports = hweight32(hw->port_vec); 1521 1522 csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n", 1523 hw->port_vec, hw->num_pports); 1524 1525 for (i = 0; i < hw->num_pports; i++) { 1526 while ((hw->port_vec & (1 << j)) == 0) 1527 j++; 1528 hw->pport[i].portid = j++; 1529 csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid); 1530 } 1531 mempool_free(mbp, hw->mb_mempool); 1532 1533 return 0; 1534 } 1535 1536 1537 /* 1538 * csio_config_device_caps - Get and set device capabilities. 1539 * @hw: HW module 1540 * 1541 */ 1542 static int 1543 csio_config_device_caps(struct csio_hw *hw) 1544 { 1545 struct csio_mb *mbp; 1546 enum fw_retval retval; 1547 int rv = -EINVAL; 1548 1549 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1550 if (!mbp) { 1551 CSIO_INC_STATS(hw, n_err_nomem); 1552 return -ENOMEM; 1553 } 1554 1555 /* Get device capabilities */ 1556 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL); 1557 1558 if (csio_mb_issue(hw, mbp)) { 1559 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n"); 1560 goto out; 1561 } 1562 1563 retval = csio_mb_fw_retval(mbp); 1564 if (retval != FW_SUCCESS) { 1565 csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval); 1566 goto out; 1567 } 1568 1569 /* Validate device capabilities */ 1570 if (csio_hw_validate_caps(hw, mbp)) 1571 goto out; 1572 1573 /* Don't config device capabilities if already configured */ 1574 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 1575 rv = 0; 1576 goto out; 1577 } 1578 1579 /* Write back desired device capabilities */ 1580 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true, 1581 false, true, NULL); 1582 1583 if (csio_mb_issue(hw, mbp)) { 1584 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n"); 1585 goto out; 1586 } 1587 1588 retval = csio_mb_fw_retval(mbp); 1589 if (retval != FW_SUCCESS) { 1590 csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval); 1591 goto out; 1592 } 1593 1594 rv = 0; 1595 out: 1596 mempool_free(mbp, hw->mb_mempool); 1597 return rv; 1598 } 1599 1600 /* 1601 * csio_enable_ports - Bring up all available ports. 1602 * @hw: HW module. 1603 * 1604 */ 1605 static int 1606 csio_enable_ports(struct csio_hw *hw) 1607 { 1608 struct csio_mb *mbp; 1609 enum fw_retval retval; 1610 uint8_t portid; 1611 int i; 1612 1613 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1614 if (!mbp) { 1615 CSIO_INC_STATS(hw, n_err_nomem); 1616 return -ENOMEM; 1617 } 1618 1619 for (i = 0; i < hw->num_pports; i++) { 1620 portid = hw->pport[i].portid; 1621 1622 /* Read PORT information */ 1623 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, 1624 false, 0, 0, NULL); 1625 1626 if (csio_mb_issue(hw, mbp)) { 1627 csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n", 1628 portid); 1629 mempool_free(mbp, hw->mb_mempool); 1630 return -EINVAL; 1631 } 1632 1633 csio_mb_process_read_port_rsp(hw, mbp, &retval, 1634 &hw->pport[i].pcap); 1635 if (retval != FW_SUCCESS) { 1636 csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n", 1637 portid, retval); 1638 mempool_free(mbp, hw->mb_mempool); 1639 return -EINVAL; 1640 } 1641 1642 /* Write back PORT information */ 1643 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true, 1644 (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL); 1645 1646 if (csio_mb_issue(hw, mbp)) { 1647 csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n", 1648 portid); 1649 mempool_free(mbp, hw->mb_mempool); 1650 return -EINVAL; 1651 } 1652 1653 retval = csio_mb_fw_retval(mbp); 1654 if (retval != FW_SUCCESS) { 1655 csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n", 1656 portid, retval); 1657 mempool_free(mbp, hw->mb_mempool); 1658 return -EINVAL; 1659 } 1660 1661 } /* For all ports */ 1662 1663 mempool_free(mbp, hw->mb_mempool); 1664 1665 return 0; 1666 } 1667 1668 /* 1669 * csio_get_fcoe_resinfo - Read fcoe fw resource info. 1670 * @hw: HW module 1671 * Issued with lock held. 1672 */ 1673 static int 1674 csio_get_fcoe_resinfo(struct csio_hw *hw) 1675 { 1676 struct csio_fcoe_res_info *res_info = &hw->fres_info; 1677 struct fw_fcoe_res_info_cmd *rsp; 1678 struct csio_mb *mbp; 1679 enum fw_retval retval; 1680 1681 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1682 if (!mbp) { 1683 CSIO_INC_STATS(hw, n_err_nomem); 1684 return -ENOMEM; 1685 } 1686 1687 /* Get FCoE FW resource information */ 1688 csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 1689 1690 if (csio_mb_issue(hw, mbp)) { 1691 csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n"); 1692 mempool_free(mbp, hw->mb_mempool); 1693 return -EINVAL; 1694 } 1695 1696 rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb); 1697 retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); 1698 if (retval != FW_SUCCESS) { 1699 csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n", 1700 retval); 1701 mempool_free(mbp, hw->mb_mempool); 1702 return -EINVAL; 1703 } 1704 1705 res_info->e_d_tov = ntohs(rsp->e_d_tov); 1706 res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq); 1707 res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els); 1708 res_info->r_r_tov = ntohs(rsp->r_r_tov); 1709 res_info->max_xchgs = ntohl(rsp->max_xchgs); 1710 res_info->max_ssns = ntohl(rsp->max_ssns); 1711 res_info->used_xchgs = ntohl(rsp->used_xchgs); 1712 res_info->used_ssns = ntohl(rsp->used_ssns); 1713 res_info->max_fcfs = ntohl(rsp->max_fcfs); 1714 res_info->max_vnps = ntohl(rsp->max_vnps); 1715 res_info->used_fcfs = ntohl(rsp->used_fcfs); 1716 res_info->used_vnps = ntohl(rsp->used_vnps); 1717 1718 csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns, 1719 res_info->max_xchgs); 1720 mempool_free(mbp, hw->mb_mempool); 1721 1722 return 0; 1723 } 1724 1725 static int 1726 csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param) 1727 { 1728 struct csio_mb *mbp; 1729 enum fw_retval retval; 1730 u32 _param[1]; 1731 1732 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 1733 if (!mbp) { 1734 CSIO_INC_STATS(hw, n_err_nomem); 1735 return -ENOMEM; 1736 } 1737 1738 /* 1739 * Find out whether we're dealing with a version of 1740 * the firmware which has configuration file support. 1741 */ 1742 _param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 1743 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); 1744 1745 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, 1746 ARRAY_SIZE(_param), _param, NULL, false, NULL); 1747 if (csio_mb_issue(hw, mbp)) { 1748 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); 1749 mempool_free(mbp, hw->mb_mempool); 1750 return -EINVAL; 1751 } 1752 1753 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1754 ARRAY_SIZE(_param), _param); 1755 if (retval != FW_SUCCESS) { 1756 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", 1757 retval); 1758 mempool_free(mbp, hw->mb_mempool); 1759 return -EINVAL; 1760 } 1761 1762 mempool_free(mbp, hw->mb_mempool); 1763 *param = _param[0]; 1764 1765 return 0; 1766 } 1767 1768 static int 1769 csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) 1770 { 1771 int ret = 0; 1772 const struct firmware *cf; 1773 struct pci_dev *pci_dev = hw->pdev; 1774 struct device *dev = &pci_dev->dev; 1775 unsigned int mtype = 0, maddr = 0; 1776 uint32_t *cfg_data; 1777 int value_to_add = 0; 1778 1779 if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) { 1780 csio_err(hw, "could not find config file %s, err: %d\n", 1781 CSIO_CF_FNAME(hw), ret); 1782 return -ENOENT; 1783 } 1784 1785 if (cf->size%4 != 0) 1786 value_to_add = 4 - (cf->size % 4); 1787 1788 cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL); 1789 if (cfg_data == NULL) { 1790 ret = -ENOMEM; 1791 goto leave; 1792 } 1793 1794 memcpy((void *)cfg_data, (const void *)cf->data, cf->size); 1795 if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) { 1796 ret = -EINVAL; 1797 goto leave; 1798 } 1799 1800 mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param); 1801 maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16; 1802 1803 ret = csio_memory_write(hw, mtype, maddr, 1804 cf->size + value_to_add, cfg_data); 1805 1806 if ((ret == 0) && (value_to_add != 0)) { 1807 union { 1808 u32 word; 1809 char buf[4]; 1810 } last; 1811 size_t size = cf->size & ~0x3; 1812 int i; 1813 1814 last.word = cfg_data[size >> 2]; 1815 for (i = value_to_add; i < 4; i++) 1816 last.buf[i] = 0; 1817 ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word); 1818 } 1819 if (ret == 0) { 1820 csio_info(hw, "config file upgraded to %s\n", 1821 CSIO_CF_FNAME(hw)); 1822 snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw)); 1823 } 1824 1825 leave: 1826 kfree(cfg_data); 1827 release_firmware(cf); 1828 return ret; 1829 } 1830 1831 /* 1832 * HW initialization: contact FW, obtain config, perform basic init. 1833 * 1834 * If the firmware we're dealing with has Configuration File support, then 1835 * we use that to perform all configuration -- either using the configuration 1836 * file stored in flash on the adapter or using a filesystem-local file 1837 * if available. 1838 * 1839 * If we don't have configuration file support in the firmware, then we'll 1840 * have to set things up the old fashioned way with hard-coded register 1841 * writes and firmware commands ... 1842 */ 1843 1844 /* 1845 * Attempt to initialize the HW via a Firmware Configuration File. 1846 */ 1847 static int 1848 csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) 1849 { 1850 unsigned int mtype, maddr; 1851 int rv; 1852 uint32_t finiver = 0, finicsum = 0, cfcsum = 0; 1853 int using_flash; 1854 char path[64]; 1855 1856 /* 1857 * Reset device if necessary 1858 */ 1859 if (reset) { 1860 rv = csio_do_reset(hw, true); 1861 if (rv != 0) 1862 goto bye; 1863 } 1864 1865 /* 1866 * If we have a configuration file in host , 1867 * then use that. Otherwise, use the configuration file stored 1868 * in the HW flash ... 1869 */ 1870 spin_unlock_irq(&hw->lock); 1871 rv = csio_hw_flash_config(hw, fw_cfg_param, path); 1872 spin_lock_irq(&hw->lock); 1873 if (rv != 0) { 1874 if (rv == -ENOENT) { 1875 /* 1876 * config file was not found. Use default 1877 * config file from flash. 1878 */ 1879 mtype = FW_MEMTYPE_CF_FLASH; 1880 maddr = hw->chip_ops->chip_flash_cfg_addr(hw); 1881 using_flash = 1; 1882 } else { 1883 /* 1884 * we revert back to the hardwired config if 1885 * flashing failed. 1886 */ 1887 goto bye; 1888 } 1889 } else { 1890 mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param); 1891 maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16; 1892 using_flash = 0; 1893 } 1894 1895 hw->cfg_store = (uint8_t)mtype; 1896 1897 /* 1898 * Issue a Capability Configuration command to the firmware to get it 1899 * to parse the Configuration File. 1900 */ 1901 rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver, 1902 &finicsum, &cfcsum); 1903 if (rv != 0) 1904 goto bye; 1905 1906 hw->cfg_finiver = finiver; 1907 hw->cfg_finicsum = finicsum; 1908 hw->cfg_cfcsum = cfcsum; 1909 hw->cfg_csum_status = true; 1910 1911 if (finicsum != cfcsum) { 1912 csio_warn(hw, 1913 "Config File checksum mismatch: csum=%#x, computed=%#x\n", 1914 finicsum, cfcsum); 1915 1916 hw->cfg_csum_status = false; 1917 } 1918 1919 /* 1920 * Note that we're operating with parameters 1921 * not supplied by the driver, rather than from hard-wired 1922 * initialization constants buried in the driver. 1923 */ 1924 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 1925 1926 /* device parameters */ 1927 rv = csio_get_device_params(hw); 1928 if (rv != 0) 1929 goto bye; 1930 1931 /* Configure SGE */ 1932 csio_wr_sge_init(hw); 1933 1934 /* 1935 * And finally tell the firmware to initialize itself using the 1936 * parameters from the Configuration File. 1937 */ 1938 /* Post event to notify completion of configuration */ 1939 csio_post_event(&hw->sm, CSIO_HWE_INIT); 1940 1941 csio_info(hw, 1942 "Firmware Configuration File %s, version %#x, computed checksum %#x\n", 1943 (using_flash ? "in device FLASH" : path), finiver, cfcsum); 1944 1945 return 0; 1946 1947 /* 1948 * Something bad happened. Return the error ... 1949 */ 1950 bye: 1951 hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS; 1952 csio_dbg(hw, "Configuration file error %d\n", rv); 1953 return rv; 1954 } 1955 1956 /* 1957 * Attempt to initialize the adapter via hard-coded, driver supplied 1958 * parameters ... 1959 */ 1960 static int 1961 csio_hw_no_fwconfig(struct csio_hw *hw, int reset) 1962 { 1963 int rv; 1964 /* 1965 * Reset device if necessary 1966 */ 1967 if (reset) { 1968 rv = csio_do_reset(hw, true); 1969 if (rv != 0) 1970 goto out; 1971 } 1972 1973 /* Get and set device capabilities */ 1974 rv = csio_config_device_caps(hw); 1975 if (rv != 0) 1976 goto out; 1977 1978 /* device parameters */ 1979 rv = csio_get_device_params(hw); 1980 if (rv != 0) 1981 goto out; 1982 1983 /* Configure SGE */ 1984 csio_wr_sge_init(hw); 1985 1986 /* Post event to notify completion of configuration */ 1987 csio_post_event(&hw->sm, CSIO_HWE_INIT); 1988 1989 out: 1990 return rv; 1991 } 1992 1993 /* 1994 * Returns -EINVAL if attempts to flash the firmware failed 1995 * else returns 0, 1996 * if flashing was not attempted because the card had the 1997 * latest firmware ECANCELED is returned 1998 */ 1999 static int 2000 csio_hw_flash_fw(struct csio_hw *hw) 2001 { 2002 int ret = -ECANCELED; 2003 const struct firmware *fw; 2004 const struct fw_hdr *hdr; 2005 u32 fw_ver; 2006 struct pci_dev *pci_dev = hw->pdev; 2007 struct device *dev = &pci_dev->dev ; 2008 2009 if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) { 2010 csio_err(hw, "could not find firmware image %s, err: %d\n", 2011 CSIO_FW_FNAME(hw), ret); 2012 return -EINVAL; 2013 } 2014 2015 hdr = (const struct fw_hdr *)fw->data; 2016 fw_ver = ntohl(hdr->fw_ver); 2017 if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw)) 2018 return -EINVAL; /* wrong major version, won't do */ 2019 2020 /* 2021 * If the flash FW is unusable or we found something newer, load it. 2022 */ 2023 if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) || 2024 fw_ver > hw->fwrev) { 2025 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, 2026 /*force=*/false); 2027 if (!ret) 2028 csio_info(hw, 2029 "firmware upgraded to version %pI4 from %s\n", 2030 &hdr->fw_ver, CSIO_FW_FNAME(hw)); 2031 else 2032 csio_err(hw, "firmware upgrade failed! err=%d\n", ret); 2033 } else 2034 ret = -EINVAL; 2035 2036 release_firmware(fw); 2037 2038 return ret; 2039 } 2040 2041 2042 /* 2043 * csio_hw_configure - Configure HW 2044 * @hw - HW module 2045 * 2046 */ 2047 static void 2048 csio_hw_configure(struct csio_hw *hw) 2049 { 2050 int reset = 1; 2051 int rv; 2052 u32 param[1]; 2053 2054 rv = csio_hw_dev_ready(hw); 2055 if (rv != 0) { 2056 CSIO_INC_STATS(hw, n_err_fatal); 2057 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2058 goto out; 2059 } 2060 2061 /* HW version */ 2062 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV); 2063 2064 /* Needed for FW download */ 2065 rv = csio_hw_get_flash_params(hw); 2066 if (rv != 0) { 2067 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv); 2068 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2069 goto out; 2070 } 2071 2072 /* Set pci completion timeout value to 4 seconds. */ 2073 csio_set_pcie_completion_timeout(hw, 0xd); 2074 2075 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); 2076 2077 rv = csio_hw_get_fw_version(hw, &hw->fwrev); 2078 if (rv != 0) 2079 goto out; 2080 2081 csio_hw_print_fw_version(hw, "Firmware revision"); 2082 2083 rv = csio_do_hello(hw, &hw->fw_state); 2084 if (rv != 0) { 2085 CSIO_INC_STATS(hw, n_err_fatal); 2086 csio_post_event(&hw->sm, CSIO_HWE_FATAL); 2087 goto out; 2088 } 2089 2090 /* Read vpd */ 2091 rv = csio_hw_get_vpd_params(hw, &hw->vpd); 2092 if (rv != 0) 2093 goto out; 2094 2095 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2096 rv = csio_hw_check_fw_version(hw); 2097 if (rv == -EINVAL) { 2098 2099 /* Do firmware update */ 2100 spin_unlock_irq(&hw->lock); 2101 rv = csio_hw_flash_fw(hw); 2102 spin_lock_irq(&hw->lock); 2103 2104 if (rv == 0) { 2105 reset = 0; 2106 /* 2107 * Note that the chip was reset as part of the 2108 * firmware upgrade so we don't reset it again 2109 * below and grab the new firmware version. 2110 */ 2111 rv = csio_hw_check_fw_version(hw); 2112 } 2113 } 2114 /* 2115 * If the firmware doesn't support Configuration 2116 * Files, use the old Driver-based, hard-wired 2117 * initialization. Otherwise, try using the 2118 * Configuration File support and fall back to the 2119 * Driver-based initialization if there's no 2120 * Configuration File found. 2121 */ 2122 if (csio_hw_check_fwconfig(hw, param) == 0) { 2123 rv = csio_hw_use_fwconfig(hw, reset, param); 2124 if (rv == -ENOENT) 2125 goto out; 2126 if (rv != 0) { 2127 csio_info(hw, 2128 "No Configuration File present " 2129 "on adapter. Using hard-wired " 2130 "configuration parameters.\n"); 2131 rv = csio_hw_no_fwconfig(hw, reset); 2132 } 2133 } else { 2134 rv = csio_hw_no_fwconfig(hw, reset); 2135 } 2136 2137 if (rv != 0) 2138 goto out; 2139 2140 } else { 2141 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 2142 2143 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; 2144 2145 /* device parameters */ 2146 rv = csio_get_device_params(hw); 2147 if (rv != 0) 2148 goto out; 2149 2150 /* Get device capabilities */ 2151 rv = csio_config_device_caps(hw); 2152 if (rv != 0) 2153 goto out; 2154 2155 /* Configure SGE */ 2156 csio_wr_sge_init(hw); 2157 2158 /* Post event to notify completion of configuration */ 2159 csio_post_event(&hw->sm, CSIO_HWE_INIT); 2160 goto out; 2161 } 2162 } /* if not master */ 2163 2164 out: 2165 return; 2166 } 2167 2168 /* 2169 * csio_hw_initialize - Initialize HW 2170 * @hw - HW module 2171 * 2172 */ 2173 static void 2174 csio_hw_initialize(struct csio_hw *hw) 2175 { 2176 struct csio_mb *mbp; 2177 enum fw_retval retval; 2178 int rv; 2179 int i; 2180 2181 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2182 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); 2183 if (!mbp) 2184 goto out; 2185 2186 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); 2187 2188 if (csio_mb_issue(hw, mbp)) { 2189 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n"); 2190 goto free_and_out; 2191 } 2192 2193 retval = csio_mb_fw_retval(mbp); 2194 if (retval != FW_SUCCESS) { 2195 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n", 2196 retval); 2197 goto free_and_out; 2198 } 2199 2200 mempool_free(mbp, hw->mb_mempool); 2201 } 2202 2203 rv = csio_get_fcoe_resinfo(hw); 2204 if (rv != 0) { 2205 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv); 2206 goto out; 2207 } 2208 2209 spin_unlock_irq(&hw->lock); 2210 rv = csio_config_queues(hw); 2211 spin_lock_irq(&hw->lock); 2212 2213 if (rv != 0) { 2214 csio_err(hw, "Config of queues failed!: %d\n", rv); 2215 goto out; 2216 } 2217 2218 for (i = 0; i < hw->num_pports; i++) 2219 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA; 2220 2221 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { 2222 rv = csio_enable_ports(hw); 2223 if (rv != 0) { 2224 csio_err(hw, "Failed to enable ports: %d\n", rv); 2225 goto out; 2226 } 2227 } 2228 2229 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE); 2230 return; 2231 2232 free_and_out: 2233 mempool_free(mbp, hw->mb_mempool); 2234 out: 2235 return; 2236 } 2237 2238 #define PF_INTR_MASK (PFSW | PFCIM) 2239 2240 /* 2241 * csio_hw_intr_enable - Enable HW interrupts 2242 * @hw: Pointer to HW module. 2243 * 2244 * Enable interrupts in HW registers. 2245 */ 2246 static void 2247 csio_hw_intr_enable(struct csio_hw *hw) 2248 { 2249 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw)); 2250 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); 2251 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE); 2252 2253 /* 2254 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up 2255 * by FW, so do nothing for INTX. 2256 */ 2257 if (hw->intr_mode == CSIO_IM_MSIX) 2258 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), 2259 AIVEC(AIVEC_MASK), vec); 2260 else if (hw->intr_mode == CSIO_IM_MSI) 2261 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG), 2262 AIVEC(AIVEC_MASK), 0); 2263 2264 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE)); 2265 2266 /* Turn on MB interrupts - this will internally flush PIO as well */ 2267 csio_mb_intr_enable(hw); 2268 2269 /* These are common registers - only a master can modify them */ 2270 if (csio_is_hw_master(hw)) { 2271 /* 2272 * Disable the Serial FLASH interrupt, if enabled! 2273 */ 2274 pl &= (~SF); 2275 csio_wr_reg32(hw, pl, PL_INT_ENABLE); 2276 2277 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE | 2278 EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC | 2279 ERR_CPL_OPCODE_0 | ERR_DROPPED_DB | 2280 ERR_DATA_CPL_ON_HIGH_QID1 | 2281 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | 2282 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | 2283 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | 2284 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR, 2285 SGE_INT_ENABLE3); 2286 csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf); 2287 } 2288 2289 hw->flags |= CSIO_HWF_HW_INTR_ENABLED; 2290 2291 } 2292 2293 /* 2294 * csio_hw_intr_disable - Disable HW interrupts 2295 * @hw: Pointer to HW module. 2296 * 2297 * Turn off Mailbox and PCI_PF_CFG interrupts. 2298 */ 2299 void 2300 csio_hw_intr_disable(struct csio_hw *hw) 2301 { 2302 uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI)); 2303 2304 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) 2305 return; 2306 2307 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; 2308 2309 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE)); 2310 if (csio_is_hw_master(hw)) 2311 csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0); 2312 2313 /* Turn off MB interrupts */ 2314 csio_mb_intr_disable(hw); 2315 2316 } 2317 2318 void 2319 csio_hw_fatal_err(struct csio_hw *hw) 2320 { 2321 csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); 2322 csio_hw_intr_disable(hw); 2323 2324 /* Do not reset HW, we may need FW state for debugging */ 2325 csio_fatal(hw, "HW Fatal error encountered!\n"); 2326 } 2327 2328 /*****************************************************************************/ 2329 /* START: HW SM */ 2330 /*****************************************************************************/ 2331 /* 2332 * csio_hws_uninit - Uninit state 2333 * @hw - HW module 2334 * @evt - Event 2335 * 2336 */ 2337 static void 2338 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) 2339 { 2340 hw->prev_evt = hw->cur_evt; 2341 hw->cur_evt = evt; 2342 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2343 2344 switch (evt) { 2345 case CSIO_HWE_CFG: 2346 csio_set_state(&hw->sm, csio_hws_configuring); 2347 csio_hw_configure(hw); 2348 break; 2349 2350 default: 2351 CSIO_INC_STATS(hw, n_evt_unexp); 2352 break; 2353 } 2354 } 2355 2356 /* 2357 * csio_hws_configuring - Configuring state 2358 * @hw - HW module 2359 * @evt - Event 2360 * 2361 */ 2362 static void 2363 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) 2364 { 2365 hw->prev_evt = hw->cur_evt; 2366 hw->cur_evt = evt; 2367 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2368 2369 switch (evt) { 2370 case CSIO_HWE_INIT: 2371 csio_set_state(&hw->sm, csio_hws_initializing); 2372 csio_hw_initialize(hw); 2373 break; 2374 2375 case CSIO_HWE_INIT_DONE: 2376 csio_set_state(&hw->sm, csio_hws_ready); 2377 /* Fan out event to all lnode SMs */ 2378 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2379 break; 2380 2381 case CSIO_HWE_FATAL: 2382 csio_set_state(&hw->sm, csio_hws_uninit); 2383 break; 2384 2385 case CSIO_HWE_PCI_REMOVE: 2386 csio_do_bye(hw); 2387 break; 2388 default: 2389 CSIO_INC_STATS(hw, n_evt_unexp); 2390 break; 2391 } 2392 } 2393 2394 /* 2395 * csio_hws_initializing - Initialiazing state 2396 * @hw - HW module 2397 * @evt - Event 2398 * 2399 */ 2400 static void 2401 csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) 2402 { 2403 hw->prev_evt = hw->cur_evt; 2404 hw->cur_evt = evt; 2405 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2406 2407 switch (evt) { 2408 case CSIO_HWE_INIT_DONE: 2409 csio_set_state(&hw->sm, csio_hws_ready); 2410 2411 /* Fan out event to all lnode SMs */ 2412 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); 2413 2414 /* Enable interrupts */ 2415 csio_hw_intr_enable(hw); 2416 break; 2417 2418 case CSIO_HWE_FATAL: 2419 csio_set_state(&hw->sm, csio_hws_uninit); 2420 break; 2421 2422 case CSIO_HWE_PCI_REMOVE: 2423 csio_do_bye(hw); 2424 break; 2425 2426 default: 2427 CSIO_INC_STATS(hw, n_evt_unexp); 2428 break; 2429 } 2430 } 2431 2432 /* 2433 * csio_hws_ready - Ready state 2434 * @hw - HW module 2435 * @evt - Event 2436 * 2437 */ 2438 static void 2439 csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) 2440 { 2441 /* Remember the event */ 2442 hw->evtflag = evt; 2443 2444 hw->prev_evt = hw->cur_evt; 2445 hw->cur_evt = evt; 2446 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2447 2448 switch (evt) { 2449 case CSIO_HWE_HBA_RESET: 2450 case CSIO_HWE_FW_DLOAD: 2451 case CSIO_HWE_SUSPEND: 2452 case CSIO_HWE_PCI_REMOVE: 2453 case CSIO_HWE_PCIERR_DETECTED: 2454 csio_set_state(&hw->sm, csio_hws_quiescing); 2455 /* cleanup all outstanding cmds */ 2456 if (evt == CSIO_HWE_HBA_RESET || 2457 evt == CSIO_HWE_PCIERR_DETECTED) 2458 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false); 2459 else 2460 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true); 2461 2462 csio_hw_intr_disable(hw); 2463 csio_hw_mbm_cleanup(hw); 2464 csio_evtq_stop(hw); 2465 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP); 2466 csio_evtq_flush(hw); 2467 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw)); 2468 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED); 2469 break; 2470 2471 case CSIO_HWE_FATAL: 2472 csio_set_state(&hw->sm, csio_hws_uninit); 2473 break; 2474 2475 default: 2476 CSIO_INC_STATS(hw, n_evt_unexp); 2477 break; 2478 } 2479 } 2480 2481 /* 2482 * csio_hws_quiescing - Quiescing state 2483 * @hw - HW module 2484 * @evt - Event 2485 * 2486 */ 2487 static void 2488 csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) 2489 { 2490 hw->prev_evt = hw->cur_evt; 2491 hw->cur_evt = evt; 2492 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2493 2494 switch (evt) { 2495 case CSIO_HWE_QUIESCED: 2496 switch (hw->evtflag) { 2497 case CSIO_HWE_FW_DLOAD: 2498 csio_set_state(&hw->sm, csio_hws_resetting); 2499 /* Download firmware */ 2500 /* Fall through */ 2501 2502 case CSIO_HWE_HBA_RESET: 2503 csio_set_state(&hw->sm, csio_hws_resetting); 2504 /* Start reset of the HBA */ 2505 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET); 2506 csio_wr_destroy_queues(hw, false); 2507 csio_do_reset(hw, false); 2508 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE); 2509 break; 2510 2511 case CSIO_HWE_PCI_REMOVE: 2512 csio_set_state(&hw->sm, csio_hws_removing); 2513 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE); 2514 csio_wr_destroy_queues(hw, true); 2515 /* Now send the bye command */ 2516 csio_do_bye(hw); 2517 break; 2518 2519 case CSIO_HWE_SUSPEND: 2520 csio_set_state(&hw->sm, csio_hws_quiesced); 2521 break; 2522 2523 case CSIO_HWE_PCIERR_DETECTED: 2524 csio_set_state(&hw->sm, csio_hws_pcierr); 2525 csio_wr_destroy_queues(hw, false); 2526 break; 2527 2528 default: 2529 CSIO_INC_STATS(hw, n_evt_unexp); 2530 break; 2531 2532 } 2533 break; 2534 2535 default: 2536 CSIO_INC_STATS(hw, n_evt_unexp); 2537 break; 2538 } 2539 } 2540 2541 /* 2542 * csio_hws_quiesced - Quiesced state 2543 * @hw - HW module 2544 * @evt - Event 2545 * 2546 */ 2547 static void 2548 csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) 2549 { 2550 hw->prev_evt = hw->cur_evt; 2551 hw->cur_evt = evt; 2552 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2553 2554 switch (evt) { 2555 case CSIO_HWE_RESUME: 2556 csio_set_state(&hw->sm, csio_hws_configuring); 2557 csio_hw_configure(hw); 2558 break; 2559 2560 default: 2561 CSIO_INC_STATS(hw, n_evt_unexp); 2562 break; 2563 } 2564 } 2565 2566 /* 2567 * csio_hws_resetting - HW Resetting state 2568 * @hw - HW module 2569 * @evt - Event 2570 * 2571 */ 2572 static void 2573 csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) 2574 { 2575 hw->prev_evt = hw->cur_evt; 2576 hw->cur_evt = evt; 2577 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2578 2579 switch (evt) { 2580 case CSIO_HWE_HBA_RESET_DONE: 2581 csio_evtq_start(hw); 2582 csio_set_state(&hw->sm, csio_hws_configuring); 2583 csio_hw_configure(hw); 2584 break; 2585 2586 default: 2587 CSIO_INC_STATS(hw, n_evt_unexp); 2588 break; 2589 } 2590 } 2591 2592 /* 2593 * csio_hws_removing - PCI Hotplug removing state 2594 * @hw - HW module 2595 * @evt - Event 2596 * 2597 */ 2598 static void 2599 csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) 2600 { 2601 hw->prev_evt = hw->cur_evt; 2602 hw->cur_evt = evt; 2603 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2604 2605 switch (evt) { 2606 case CSIO_HWE_HBA_RESET: 2607 if (!csio_is_hw_master(hw)) 2608 break; 2609 /* 2610 * The BYE should have alerady been issued, so we cant 2611 * use the mailbox interface. Hence we use the PL_RST 2612 * register directly. 2613 */ 2614 csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); 2615 csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST); 2616 mdelay(2000); 2617 break; 2618 2619 /* Should never receive any new events */ 2620 default: 2621 CSIO_INC_STATS(hw, n_evt_unexp); 2622 break; 2623 2624 } 2625 } 2626 2627 /* 2628 * csio_hws_pcierr - PCI Error state 2629 * @hw - HW module 2630 * @evt - Event 2631 * 2632 */ 2633 static void 2634 csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) 2635 { 2636 hw->prev_evt = hw->cur_evt; 2637 hw->cur_evt = evt; 2638 CSIO_INC_STATS(hw, n_evt_sm[evt]); 2639 2640 switch (evt) { 2641 case CSIO_HWE_PCIERR_SLOT_RESET: 2642 csio_evtq_start(hw); 2643 csio_set_state(&hw->sm, csio_hws_configuring); 2644 csio_hw_configure(hw); 2645 break; 2646 2647 default: 2648 CSIO_INC_STATS(hw, n_evt_unexp); 2649 break; 2650 } 2651 } 2652 2653 /*****************************************************************************/ 2654 /* END: HW SM */ 2655 /*****************************************************************************/ 2656 2657 /* 2658 * csio_handle_intr_status - table driven interrupt handler 2659 * @hw: HW instance 2660 * @reg: the interrupt status register to process 2661 * @acts: table of interrupt actions 2662 * 2663 * A table driven interrupt handler that applies a set of masks to an 2664 * interrupt status word and performs the corresponding actions if the 2665 * interrupts described by the mask have occured. The actions include 2666 * optionally emitting a warning or alert message. The table is terminated 2667 * by an entry specifying mask 0. Returns the number of fatal interrupt 2668 * conditions. 2669 */ 2670 int 2671 csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, 2672 const struct intr_info *acts) 2673 { 2674 int fatal = 0; 2675 unsigned int mask = 0; 2676 unsigned int status = csio_rd_reg32(hw, reg); 2677 2678 for ( ; acts->mask; ++acts) { 2679 if (!(status & acts->mask)) 2680 continue; 2681 if (acts->fatal) { 2682 fatal++; 2683 csio_fatal(hw, "Fatal %s (0x%x)\n", 2684 acts->msg, status & acts->mask); 2685 } else if (acts->msg) 2686 csio_info(hw, "%s (0x%x)\n", 2687 acts->msg, status & acts->mask); 2688 mask |= acts->mask; 2689 } 2690 status &= mask; 2691 if (status) /* clear processed interrupts */ 2692 csio_wr_reg32(hw, status, reg); 2693 return fatal; 2694 } 2695 2696 /* 2697 * TP interrupt handler. 2698 */ 2699 static void csio_tp_intr_handler(struct csio_hw *hw) 2700 { 2701 static struct intr_info tp_intr_info[] = { 2702 { 0x3fffffff, "TP parity error", -1, 1 }, 2703 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 2704 { 0, NULL, 0, 0 } 2705 }; 2706 2707 if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info)) 2708 csio_hw_fatal_err(hw); 2709 } 2710 2711 /* 2712 * SGE interrupt handler. 2713 */ 2714 static void csio_sge_intr_handler(struct csio_hw *hw) 2715 { 2716 uint64_t v; 2717 2718 static struct intr_info sge_intr_info[] = { 2719 { ERR_CPL_EXCEED_IQE_SIZE, 2720 "SGE received CPL exceeding IQE size", -1, 1 }, 2721 { ERR_INVALID_CIDX_INC, 2722 "SGE GTS CIDX increment too large", -1, 0 }, 2723 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 2724 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, 2725 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, 2726 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 2727 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 2728 0 }, 2729 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 2730 0 }, 2731 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 2732 0 }, 2733 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 2734 0 }, 2735 { ERR_ING_CTXT_PRIO, 2736 "SGE too many priority ingress contexts", -1, 0 }, 2737 { ERR_EGR_CTXT_PRIO, 2738 "SGE too many priority egress contexts", -1, 0 }, 2739 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, 2740 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, 2741 { 0, NULL, 0, 0 } 2742 }; 2743 2744 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) | 2745 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32); 2746 if (v) { 2747 csio_fatal(hw, "SGE parity error (%#llx)\n", 2748 (unsigned long long)v); 2749 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF), 2750 SGE_INT_CAUSE1); 2751 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2); 2752 } 2753 2754 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info); 2755 2756 if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) || 2757 v != 0) 2758 csio_hw_fatal_err(hw); 2759 } 2760 2761 #define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\ 2762 OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR) 2763 #define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\ 2764 IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR) 2765 2766 /* 2767 * CIM interrupt handler. 2768 */ 2769 static void csio_cim_intr_handler(struct csio_hw *hw) 2770 { 2771 static struct intr_info cim_intr_info[] = { 2772 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 2773 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 2774 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 2775 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, 2776 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, 2777 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, 2778 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 2779 { 0, NULL, 0, 0 } 2780 }; 2781 static struct intr_info cim_upintr_info[] = { 2782 { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 2783 { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 2784 { ILLWRINT, "CIM illegal write", -1, 1 }, 2785 { ILLRDINT, "CIM illegal read", -1, 1 }, 2786 { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, 2787 { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, 2788 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, 2789 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, 2790 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, 2791 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, 2792 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, 2793 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, 2794 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, 2795 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, 2796 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, 2797 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, 2798 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, 2799 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, 2800 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, 2801 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, 2802 { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, 2803 { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, 2804 { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, 2805 { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, 2806 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, 2807 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, 2808 { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, 2809 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, 2810 { 0, NULL, 0, 0 } 2811 }; 2812 2813 int fat; 2814 2815 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE, 2816 cim_intr_info) + 2817 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE, 2818 cim_upintr_info); 2819 if (fat) 2820 csio_hw_fatal_err(hw); 2821 } 2822 2823 /* 2824 * ULP RX interrupt handler. 2825 */ 2826 static void csio_ulprx_intr_handler(struct csio_hw *hw) 2827 { 2828 static struct intr_info ulprx_intr_info[] = { 2829 { 0x1800000, "ULPRX context error", -1, 1 }, 2830 { 0x7fffff, "ULPRX parity error", -1, 1 }, 2831 { 0, NULL, 0, 0 } 2832 }; 2833 2834 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info)) 2835 csio_hw_fatal_err(hw); 2836 } 2837 2838 /* 2839 * ULP TX interrupt handler. 2840 */ 2841 static void csio_ulptx_intr_handler(struct csio_hw *hw) 2842 { 2843 static struct intr_info ulptx_intr_info[] = { 2844 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 2845 0 }, 2846 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 2847 0 }, 2848 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 2849 0 }, 2850 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 2851 0 }, 2852 { 0xfffffff, "ULPTX parity error", -1, 1 }, 2853 { 0, NULL, 0, 0 } 2854 }; 2855 2856 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info)) 2857 csio_hw_fatal_err(hw); 2858 } 2859 2860 /* 2861 * PM TX interrupt handler. 2862 */ 2863 static void csio_pmtx_intr_handler(struct csio_hw *hw) 2864 { 2865 static struct intr_info pmtx_intr_info[] = { 2866 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 2867 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 2868 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 2869 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 2870 { 0xffffff0, "PMTX framing error", -1, 1 }, 2871 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 2872 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 2873 1 }, 2874 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 2875 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 2876 { 0, NULL, 0, 0 } 2877 }; 2878 2879 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info)) 2880 csio_hw_fatal_err(hw); 2881 } 2882 2883 /* 2884 * PM RX interrupt handler. 2885 */ 2886 static void csio_pmrx_intr_handler(struct csio_hw *hw) 2887 { 2888 static struct intr_info pmrx_intr_info[] = { 2889 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 2890 { 0x3ffff0, "PMRX framing error", -1, 1 }, 2891 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 2892 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 2893 1 }, 2894 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 2895 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 2896 { 0, NULL, 0, 0 } 2897 }; 2898 2899 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info)) 2900 csio_hw_fatal_err(hw); 2901 } 2902 2903 /* 2904 * CPL switch interrupt handler. 2905 */ 2906 static void csio_cplsw_intr_handler(struct csio_hw *hw) 2907 { 2908 static struct intr_info cplsw_intr_info[] = { 2909 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 2910 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 2911 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 2912 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 2913 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 2914 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 2915 { 0, NULL, 0, 0 } 2916 }; 2917 2918 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info)) 2919 csio_hw_fatal_err(hw); 2920 } 2921 2922 /* 2923 * LE interrupt handler. 2924 */ 2925 static void csio_le_intr_handler(struct csio_hw *hw) 2926 { 2927 static struct intr_info le_intr_info[] = { 2928 { LIPMISS, "LE LIP miss", -1, 0 }, 2929 { LIP0, "LE 0 LIP error", -1, 0 }, 2930 { PARITYERR, "LE parity error", -1, 1 }, 2931 { UNKNOWNCMD, "LE unknown command", -1, 1 }, 2932 { REQQPARERR, "LE request queue parity error", -1, 1 }, 2933 { 0, NULL, 0, 0 } 2934 }; 2935 2936 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info)) 2937 csio_hw_fatal_err(hw); 2938 } 2939 2940 /* 2941 * MPS interrupt handler. 2942 */ 2943 static void csio_mps_intr_handler(struct csio_hw *hw) 2944 { 2945 static struct intr_info mps_rx_intr_info[] = { 2946 { 0xffffff, "MPS Rx parity error", -1, 1 }, 2947 { 0, NULL, 0, 0 } 2948 }; 2949 static struct intr_info mps_tx_intr_info[] = { 2950 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, 2951 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 2952 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, 2953 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, 2954 { BUBBLE, "MPS Tx underflow", -1, 1 }, 2955 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 2956 { FRMERR, "MPS Tx framing error", -1, 1 }, 2957 { 0, NULL, 0, 0 } 2958 }; 2959 static struct intr_info mps_trc_intr_info[] = { 2960 { FILTMEM, "MPS TRC filter parity error", -1, 1 }, 2961 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, 2962 { MISCPERR, "MPS TRC misc parity error", -1, 1 }, 2963 { 0, NULL, 0, 0 } 2964 }; 2965 static struct intr_info mps_stat_sram_intr_info[] = { 2966 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 2967 { 0, NULL, 0, 0 } 2968 }; 2969 static struct intr_info mps_stat_tx_intr_info[] = { 2970 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 2971 { 0, NULL, 0, 0 } 2972 }; 2973 static struct intr_info mps_stat_rx_intr_info[] = { 2974 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 2975 { 0, NULL, 0, 0 } 2976 }; 2977 static struct intr_info mps_cls_intr_info[] = { 2978 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 2979 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 2980 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 2981 { 0, NULL, 0, 0 } 2982 }; 2983 2984 int fat; 2985 2986 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE, 2987 mps_rx_intr_info) + 2988 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE, 2989 mps_tx_intr_info) + 2990 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE, 2991 mps_trc_intr_info) + 2992 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM, 2993 mps_stat_sram_intr_info) + 2994 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 2995 mps_stat_tx_intr_info) + 2996 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 2997 mps_stat_rx_intr_info) + 2998 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE, 2999 mps_cls_intr_info); 3000 3001 csio_wr_reg32(hw, 0, MPS_INT_CAUSE); 3002 csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */ 3003 if (fat) 3004 csio_hw_fatal_err(hw); 3005 } 3006 3007 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) 3008 3009 /* 3010 * EDC/MC interrupt handler. 3011 */ 3012 static void csio_mem_intr_handler(struct csio_hw *hw, int idx) 3013 { 3014 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 3015 3016 unsigned int addr, cnt_addr, v; 3017 3018 if (idx <= MEM_EDC1) { 3019 addr = EDC_REG(EDC_INT_CAUSE, idx); 3020 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); 3021 } else { 3022 addr = MC_INT_CAUSE; 3023 cnt_addr = MC_ECC_STATUS; 3024 } 3025 3026 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; 3027 if (v & PERR_INT_CAUSE) 3028 csio_fatal(hw, "%s FIFO parity error\n", name[idx]); 3029 if (v & ECC_CE_INT_CAUSE) { 3030 uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr)); 3031 3032 csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr); 3033 csio_warn(hw, "%u %s correctable ECC data error%s\n", 3034 cnt, name[idx], cnt > 1 ? "s" : ""); 3035 } 3036 if (v & ECC_UE_INT_CAUSE) 3037 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); 3038 3039 csio_wr_reg32(hw, v, addr); 3040 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) 3041 csio_hw_fatal_err(hw); 3042 } 3043 3044 /* 3045 * MA interrupt handler. 3046 */ 3047 static void csio_ma_intr_handler(struct csio_hw *hw) 3048 { 3049 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE); 3050 3051 if (status & MEM_PERR_INT_CAUSE) 3052 csio_fatal(hw, "MA parity error, parity status %#x\n", 3053 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS)); 3054 if (status & MEM_WRAP_INT_CAUSE) { 3055 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS); 3056 csio_fatal(hw, 3057 "MA address wrap-around error by client %u to address %#x\n", 3058 MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4); 3059 } 3060 csio_wr_reg32(hw, status, MA_INT_CAUSE); 3061 csio_hw_fatal_err(hw); 3062 } 3063 3064 /* 3065 * SMB interrupt handler. 3066 */ 3067 static void csio_smb_intr_handler(struct csio_hw *hw) 3068 { 3069 static struct intr_info smb_intr_info[] = { 3070 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 3071 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 3072 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 3073 { 0, NULL, 0, 0 } 3074 }; 3075 3076 if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info)) 3077 csio_hw_fatal_err(hw); 3078 } 3079 3080 /* 3081 * NC-SI interrupt handler. 3082 */ 3083 static void csio_ncsi_intr_handler(struct csio_hw *hw) 3084 { 3085 static struct intr_info ncsi_intr_info[] = { 3086 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 3087 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 3088 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 3089 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 3090 { 0, NULL, 0, 0 } 3091 }; 3092 3093 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info)) 3094 csio_hw_fatal_err(hw); 3095 } 3096 3097 /* 3098 * XGMAC interrupt handler. 3099 */ 3100 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) 3101 { 3102 uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port)); 3103 3104 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 3105 if (!v) 3106 return; 3107 3108 if (v & TXFIFO_PRTY_ERR) 3109 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); 3110 if (v & RXFIFO_PRTY_ERR) 3111 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); 3112 csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port)); 3113 csio_hw_fatal_err(hw); 3114 } 3115 3116 /* 3117 * PL interrupt handler. 3118 */ 3119 static void csio_pl_intr_handler(struct csio_hw *hw) 3120 { 3121 static struct intr_info pl_intr_info[] = { 3122 { FATALPERR, "T4 fatal parity error", -1, 1 }, 3123 { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 3124 { 0, NULL, 0, 0 } 3125 }; 3126 3127 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info)) 3128 csio_hw_fatal_err(hw); 3129 } 3130 3131 /* 3132 * csio_hw_slow_intr_handler - control path interrupt handler 3133 * @hw: HW module 3134 * 3135 * Interrupt handler for non-data global interrupt events, e.g., errors. 3136 * The designation 'slow' is because it involves register reads, while 3137 * data interrupts typically don't involve any MMIOs. 3138 */ 3139 int 3140 csio_hw_slow_intr_handler(struct csio_hw *hw) 3141 { 3142 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE); 3143 3144 if (!(cause & CSIO_GLBL_INTR_MASK)) { 3145 CSIO_INC_STATS(hw, n_plint_unexp); 3146 return 0; 3147 } 3148 3149 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause); 3150 3151 CSIO_INC_STATS(hw, n_plint_cnt); 3152 3153 if (cause & CIM) 3154 csio_cim_intr_handler(hw); 3155 3156 if (cause & MPS) 3157 csio_mps_intr_handler(hw); 3158 3159 if (cause & NCSI) 3160 csio_ncsi_intr_handler(hw); 3161 3162 if (cause & PL) 3163 csio_pl_intr_handler(hw); 3164 3165 if (cause & SMB) 3166 csio_smb_intr_handler(hw); 3167 3168 if (cause & XGMAC0) 3169 csio_xgmac_intr_handler(hw, 0); 3170 3171 if (cause & XGMAC1) 3172 csio_xgmac_intr_handler(hw, 1); 3173 3174 if (cause & XGMAC_KR0) 3175 csio_xgmac_intr_handler(hw, 2); 3176 3177 if (cause & XGMAC_KR1) 3178 csio_xgmac_intr_handler(hw, 3); 3179 3180 if (cause & PCIE) 3181 hw->chip_ops->chip_pcie_intr_handler(hw); 3182 3183 if (cause & MC) 3184 csio_mem_intr_handler(hw, MEM_MC); 3185 3186 if (cause & EDC0) 3187 csio_mem_intr_handler(hw, MEM_EDC0); 3188 3189 if (cause & EDC1) 3190 csio_mem_intr_handler(hw, MEM_EDC1); 3191 3192 if (cause & LE) 3193 csio_le_intr_handler(hw); 3194 3195 if (cause & TP) 3196 csio_tp_intr_handler(hw); 3197 3198 if (cause & MA) 3199 csio_ma_intr_handler(hw); 3200 3201 if (cause & PM_TX) 3202 csio_pmtx_intr_handler(hw); 3203 3204 if (cause & PM_RX) 3205 csio_pmrx_intr_handler(hw); 3206 3207 if (cause & ULP_RX) 3208 csio_ulprx_intr_handler(hw); 3209 3210 if (cause & CPL_SWITCH) 3211 csio_cplsw_intr_handler(hw); 3212 3213 if (cause & SGE) 3214 csio_sge_intr_handler(hw); 3215 3216 if (cause & ULP_TX) 3217 csio_ulptx_intr_handler(hw); 3218 3219 /* Clear the interrupts just processed for which we are the master. */ 3220 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE); 3221 csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */ 3222 3223 return 1; 3224 } 3225 3226 /***************************************************************************** 3227 * HW <--> mailbox interfacing routines. 3228 ****************************************************************************/ 3229 /* 3230 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions 3231 * 3232 * @data: Private data pointer. 3233 * 3234 * Called from worker thread context. 3235 */ 3236 static void 3237 csio_mberr_worker(void *data) 3238 { 3239 struct csio_hw *hw = (struct csio_hw *)data; 3240 struct csio_mbm *mbm = &hw->mbm; 3241 LIST_HEAD(cbfn_q); 3242 struct csio_mb *mbp_next; 3243 int rv; 3244 3245 del_timer_sync(&mbm->timer); 3246 3247 spin_lock_irq(&hw->lock); 3248 if (list_empty(&mbm->cbfn_q)) { 3249 spin_unlock_irq(&hw->lock); 3250 return; 3251 } 3252 3253 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q); 3254 mbm->stats.n_cbfnq = 0; 3255 3256 /* Try to start waiting mailboxes */ 3257 if (!list_empty(&mbm->req_q)) { 3258 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list); 3259 list_del_init(&mbp_next->list); 3260 3261 rv = csio_mb_issue(hw, mbp_next); 3262 if (rv != 0) 3263 list_add_tail(&mbp_next->list, &mbm->req_q); 3264 else 3265 CSIO_DEC_STATS(mbm, n_activeq); 3266 } 3267 spin_unlock_irq(&hw->lock); 3268 3269 /* Now callback completions */ 3270 csio_mb_completions(hw, &cbfn_q); 3271 } 3272 3273 /* 3274 * csio_hw_mb_timer - Top-level Mailbox timeout handler. 3275 * 3276 * @data: private data pointer 3277 * 3278 **/ 3279 static void 3280 csio_hw_mb_timer(uintptr_t data) 3281 { 3282 struct csio_hw *hw = (struct csio_hw *)data; 3283 struct csio_mb *mbp = NULL; 3284 3285 spin_lock_irq(&hw->lock); 3286 mbp = csio_mb_tmo_handler(hw); 3287 spin_unlock_irq(&hw->lock); 3288 3289 /* Call back the function for the timed-out Mailbox */ 3290 if (mbp) 3291 mbp->mb_cbfn(hw, mbp); 3292 3293 } 3294 3295 /* 3296 * csio_hw_mbm_cleanup - Cleanup Mailbox module. 3297 * @hw: HW module 3298 * 3299 * Called with lock held, should exit with lock held. 3300 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them 3301 * into a local queue. Drops lock and calls the completions. Holds 3302 * lock and returns. 3303 */ 3304 static void 3305 csio_hw_mbm_cleanup(struct csio_hw *hw) 3306 { 3307 LIST_HEAD(cbfn_q); 3308 3309 csio_mb_cancel_all(hw, &cbfn_q); 3310 3311 spin_unlock_irq(&hw->lock); 3312 csio_mb_completions(hw, &cbfn_q); 3313 spin_lock_irq(&hw->lock); 3314 } 3315 3316 /***************************************************************************** 3317 * Event handling 3318 ****************************************************************************/ 3319 int 3320 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3321 uint16_t len) 3322 { 3323 struct csio_evt_msg *evt_entry = NULL; 3324 3325 if (type >= CSIO_EVT_MAX) 3326 return -EINVAL; 3327 3328 if (len > CSIO_EVT_MSG_SIZE) 3329 return -EINVAL; 3330 3331 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3332 return -EINVAL; 3333 3334 if (list_empty(&hw->evt_free_q)) { 3335 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3336 type, len); 3337 return -ENOMEM; 3338 } 3339 3340 evt_entry = list_first_entry(&hw->evt_free_q, 3341 struct csio_evt_msg, list); 3342 list_del_init(&evt_entry->list); 3343 3344 /* copy event msg and queue the event */ 3345 evt_entry->type = type; 3346 memcpy((void *)evt_entry->data, evt_msg, len); 3347 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3348 3349 CSIO_DEC_STATS(hw, n_evt_freeq); 3350 CSIO_INC_STATS(hw, n_evt_activeq); 3351 3352 return 0; 3353 } 3354 3355 static int 3356 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, 3357 uint16_t len, bool msg_sg) 3358 { 3359 struct csio_evt_msg *evt_entry = NULL; 3360 struct csio_fl_dma_buf *fl_sg; 3361 uint32_t off = 0; 3362 unsigned long flags; 3363 int n, ret = 0; 3364 3365 if (type >= CSIO_EVT_MAX) 3366 return -EINVAL; 3367 3368 if (len > CSIO_EVT_MSG_SIZE) 3369 return -EINVAL; 3370 3371 spin_lock_irqsave(&hw->lock, flags); 3372 if (hw->flags & CSIO_HWF_FWEVT_STOP) { 3373 ret = -EINVAL; 3374 goto out; 3375 } 3376 3377 if (list_empty(&hw->evt_free_q)) { 3378 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", 3379 type, len); 3380 ret = -ENOMEM; 3381 goto out; 3382 } 3383 3384 evt_entry = list_first_entry(&hw->evt_free_q, 3385 struct csio_evt_msg, list); 3386 list_del_init(&evt_entry->list); 3387 3388 /* copy event msg and queue the event */ 3389 evt_entry->type = type; 3390 3391 /* If Payload in SG list*/ 3392 if (msg_sg) { 3393 fl_sg = (struct csio_fl_dma_buf *) evt_msg; 3394 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { 3395 memcpy((void *)((uintptr_t)evt_entry->data + off), 3396 fl_sg->flbufs[n].vaddr, 3397 fl_sg->flbufs[n].len); 3398 off += fl_sg->flbufs[n].len; 3399 } 3400 } else 3401 memcpy((void *)evt_entry->data, evt_msg, len); 3402 3403 list_add_tail(&evt_entry->list, &hw->evt_active_q); 3404 CSIO_DEC_STATS(hw, n_evt_freeq); 3405 CSIO_INC_STATS(hw, n_evt_activeq); 3406 out: 3407 spin_unlock_irqrestore(&hw->lock, flags); 3408 return ret; 3409 } 3410 3411 static void 3412 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) 3413 { 3414 if (evt_entry) { 3415 spin_lock_irq(&hw->lock); 3416 list_del_init(&evt_entry->list); 3417 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3418 CSIO_DEC_STATS(hw, n_evt_activeq); 3419 CSIO_INC_STATS(hw, n_evt_freeq); 3420 spin_unlock_irq(&hw->lock); 3421 } 3422 } 3423 3424 void 3425 csio_evtq_flush(struct csio_hw *hw) 3426 { 3427 uint32_t count; 3428 count = 30; 3429 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { 3430 spin_unlock_irq(&hw->lock); 3431 msleep(2000); 3432 spin_lock_irq(&hw->lock); 3433 } 3434 3435 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING)); 3436 } 3437 3438 static void 3439 csio_evtq_stop(struct csio_hw *hw) 3440 { 3441 hw->flags |= CSIO_HWF_FWEVT_STOP; 3442 } 3443 3444 static void 3445 csio_evtq_start(struct csio_hw *hw) 3446 { 3447 hw->flags &= ~CSIO_HWF_FWEVT_STOP; 3448 } 3449 3450 static void 3451 csio_evtq_cleanup(struct csio_hw *hw) 3452 { 3453 struct list_head *evt_entry, *next_entry; 3454 3455 /* Release outstanding events from activeq to freeq*/ 3456 if (!list_empty(&hw->evt_active_q)) 3457 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q); 3458 3459 hw->stats.n_evt_activeq = 0; 3460 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 3461 3462 /* Freeup event entry */ 3463 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) { 3464 kfree(evt_entry); 3465 CSIO_DEC_STATS(hw, n_evt_freeq); 3466 } 3467 3468 hw->stats.n_evt_freeq = 0; 3469 } 3470 3471 3472 static void 3473 csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, 3474 struct csio_fl_dma_buf *flb, void *priv) 3475 { 3476 __u8 op; 3477 void *msg = NULL; 3478 uint32_t msg_len = 0; 3479 bool msg_sg = 0; 3480 3481 op = ((struct rss_header *) wr)->opcode; 3482 if (op == CPL_FW6_PLD) { 3483 CSIO_INC_STATS(hw, n_cpl_fw6_pld); 3484 if (!flb || !flb->totlen) { 3485 CSIO_INC_STATS(hw, n_cpl_unexp); 3486 return; 3487 } 3488 3489 msg = (void *) flb; 3490 msg_len = flb->totlen; 3491 msg_sg = 1; 3492 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { 3493 3494 CSIO_INC_STATS(hw, n_cpl_fw6_msg); 3495 /* skip RSS header */ 3496 msg = (void *)((uintptr_t)wr + sizeof(__be64)); 3497 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : 3498 sizeof(struct cpl_fw4_msg); 3499 } else { 3500 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); 3501 CSIO_INC_STATS(hw, n_cpl_unexp); 3502 return; 3503 } 3504 3505 /* 3506 * Enqueue event to EventQ. Events processing happens 3507 * in Event worker thread context 3508 */ 3509 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg, 3510 (uint16_t)msg_len, msg_sg)) 3511 CSIO_INC_STATS(hw, n_evt_drop); 3512 } 3513 3514 void 3515 csio_evtq_worker(struct work_struct *work) 3516 { 3517 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work); 3518 struct list_head *evt_entry, *next_entry; 3519 LIST_HEAD(evt_q); 3520 struct csio_evt_msg *evt_msg; 3521 struct cpl_fw6_msg *msg; 3522 struct csio_rnode *rn; 3523 int rv = 0; 3524 uint8_t evtq_stop = 0; 3525 3526 csio_dbg(hw, "event worker thread active evts#%d\n", 3527 hw->stats.n_evt_activeq); 3528 3529 spin_lock_irq(&hw->lock); 3530 while (!list_empty(&hw->evt_active_q)) { 3531 list_splice_tail_init(&hw->evt_active_q, &evt_q); 3532 spin_unlock_irq(&hw->lock); 3533 3534 list_for_each_safe(evt_entry, next_entry, &evt_q) { 3535 evt_msg = (struct csio_evt_msg *) evt_entry; 3536 3537 /* Drop events if queue is STOPPED */ 3538 spin_lock_irq(&hw->lock); 3539 if (hw->flags & CSIO_HWF_FWEVT_STOP) 3540 evtq_stop = 1; 3541 spin_unlock_irq(&hw->lock); 3542 if (evtq_stop) { 3543 CSIO_INC_STATS(hw, n_evt_drop); 3544 goto free_evt; 3545 } 3546 3547 switch (evt_msg->type) { 3548 case CSIO_EVT_FW: 3549 msg = (struct cpl_fw6_msg *)(evt_msg->data); 3550 3551 if ((msg->opcode == CPL_FW6_MSG || 3552 msg->opcode == CPL_FW4_MSG) && 3553 !msg->type) { 3554 rv = csio_mb_fwevt_handler(hw, 3555 msg->data); 3556 if (!rv) 3557 break; 3558 /* Handle any remaining fw events */ 3559 csio_fcoe_fwevt_handler(hw, 3560 msg->opcode, msg->data); 3561 } else if (msg->opcode == CPL_FW6_PLD) { 3562 3563 csio_fcoe_fwevt_handler(hw, 3564 msg->opcode, msg->data); 3565 } else { 3566 csio_warn(hw, 3567 "Unhandled FW msg op %x type %x\n", 3568 msg->opcode, msg->type); 3569 CSIO_INC_STATS(hw, n_evt_drop); 3570 } 3571 break; 3572 3573 case CSIO_EVT_MBX: 3574 csio_mberr_worker(hw); 3575 break; 3576 3577 case CSIO_EVT_DEV_LOSS: 3578 memcpy(&rn, evt_msg->data, sizeof(rn)); 3579 csio_rnode_devloss_handler(rn); 3580 break; 3581 3582 default: 3583 csio_warn(hw, "Unhandled event %x on evtq\n", 3584 evt_msg->type); 3585 CSIO_INC_STATS(hw, n_evt_unexp); 3586 break; 3587 } 3588 free_evt: 3589 csio_free_evt(hw, evt_msg); 3590 } 3591 3592 spin_lock_irq(&hw->lock); 3593 } 3594 hw->flags &= ~CSIO_HWF_FWEVT_PENDING; 3595 spin_unlock_irq(&hw->lock); 3596 } 3597 3598 int 3599 csio_fwevtq_handler(struct csio_hw *hw) 3600 { 3601 int rv; 3602 3603 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) { 3604 CSIO_INC_STATS(hw, n_int_stray); 3605 return -EINVAL; 3606 } 3607 3608 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx, 3609 csio_process_fwevtq_entry, NULL); 3610 return rv; 3611 } 3612 3613 /**************************************************************************** 3614 * Entry points 3615 ****************************************************************************/ 3616 3617 /* Management module */ 3618 /* 3619 * csio_mgmt_req_lookup - Lookup the given IO req exist in Active Q. 3620 * mgmt - mgmt module 3621 * @io_req - io request 3622 * 3623 * Return - 0:if given IO Req exists in active Q. 3624 * -EINVAL :if lookup fails. 3625 */ 3626 int 3627 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) 3628 { 3629 struct list_head *tmp; 3630 3631 /* Lookup ioreq in the ACTIVEQ */ 3632 list_for_each(tmp, &mgmtm->active_q) { 3633 if (io_req == (struct csio_ioreq *)tmp) 3634 return 0; 3635 } 3636 return -EINVAL; 3637 } 3638 3639 #define ECM_MIN_TMO 1000 /* Minimum timeout value for req */ 3640 3641 /* 3642 * csio_mgmts_tmo_handler - MGMT IO Timeout handler. 3643 * @data - Event data. 3644 * 3645 * Return - none. 3646 */ 3647 static void 3648 csio_mgmt_tmo_handler(uintptr_t data) 3649 { 3650 struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data; 3651 struct list_head *tmp; 3652 struct csio_ioreq *io_req; 3653 3654 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n"); 3655 3656 spin_lock_irq(&mgmtm->hw->lock); 3657 3658 list_for_each(tmp, &mgmtm->active_q) { 3659 io_req = (struct csio_ioreq *) tmp; 3660 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); 3661 3662 if (!io_req->tmo) { 3663 /* Dequeue the request from retry Q. */ 3664 tmp = csio_list_prev(tmp); 3665 list_del_init(&io_req->sm.sm_list); 3666 if (io_req->io_cbfn) { 3667 /* io_req will be freed by completion handler */ 3668 io_req->wr_status = -ETIMEDOUT; 3669 io_req->io_cbfn(mgmtm->hw, io_req); 3670 } else { 3671 CSIO_DB_ASSERT(0); 3672 } 3673 } 3674 } 3675 3676 /* If retry queue is not empty, re-arm timer */ 3677 if (!list_empty(&mgmtm->active_q)) 3678 mod_timer(&mgmtm->mgmt_timer, 3679 jiffies + msecs_to_jiffies(ECM_MIN_TMO)); 3680 spin_unlock_irq(&mgmtm->hw->lock); 3681 } 3682 3683 static void 3684 csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) 3685 { 3686 struct csio_hw *hw = mgmtm->hw; 3687 struct csio_ioreq *io_req; 3688 struct list_head *tmp; 3689 uint32_t count; 3690 3691 count = 30; 3692 /* Wait for all outstanding req to complete gracefully */ 3693 while ((!list_empty(&mgmtm->active_q)) && count--) { 3694 spin_unlock_irq(&hw->lock); 3695 msleep(2000); 3696 spin_lock_irq(&hw->lock); 3697 } 3698 3699 /* release outstanding req from ACTIVEQ */ 3700 list_for_each(tmp, &mgmtm->active_q) { 3701 io_req = (struct csio_ioreq *) tmp; 3702 tmp = csio_list_prev(tmp); 3703 list_del_init(&io_req->sm.sm_list); 3704 mgmtm->stats.n_active--; 3705 if (io_req->io_cbfn) { 3706 /* io_req will be freed by completion handler */ 3707 io_req->wr_status = -ETIMEDOUT; 3708 io_req->io_cbfn(mgmtm->hw, io_req); 3709 } 3710 } 3711 } 3712 3713 /* 3714 * csio_mgmt_init - Mgmt module init entry point 3715 * @mgmtsm - mgmt module 3716 * @hw - HW module 3717 * 3718 * Initialize mgmt timer, resource wait queue, active queue, 3719 * completion q. Allocate Egress and Ingress 3720 * WR queues and save off the queue index returned by the WR 3721 * module for future use. Allocate and save off mgmt reqs in the 3722 * mgmt_req_freelist for future use. Make sure their SM is initialized 3723 * to uninit state. 3724 * Returns: 0 - on success 3725 * -ENOMEM - on error. 3726 */ 3727 static int 3728 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) 3729 { 3730 struct timer_list *timer = &mgmtm->mgmt_timer; 3731 3732 init_timer(timer); 3733 timer->function = csio_mgmt_tmo_handler; 3734 timer->data = (unsigned long)mgmtm; 3735 3736 INIT_LIST_HEAD(&mgmtm->active_q); 3737 INIT_LIST_HEAD(&mgmtm->cbfn_q); 3738 3739 mgmtm->hw = hw; 3740 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/ 3741 3742 return 0; 3743 } 3744 3745 /* 3746 * csio_mgmtm_exit - MGMT module exit entry point 3747 * @mgmtsm - mgmt module 3748 * 3749 * This function called during MGMT module uninit. 3750 * Stop timers, free ioreqs allocated. 3751 * Returns: None 3752 * 3753 */ 3754 static void 3755 csio_mgmtm_exit(struct csio_mgmtm *mgmtm) 3756 { 3757 del_timer_sync(&mgmtm->mgmt_timer); 3758 } 3759 3760 3761 /** 3762 * csio_hw_start - Kicks off the HW State machine 3763 * @hw: Pointer to HW module. 3764 * 3765 * It is assumed that the initialization is a synchronous operation. 3766 * So when we return afer posting the event, the HW SM should be in 3767 * the ready state, if there were no errors during init. 3768 */ 3769 int 3770 csio_hw_start(struct csio_hw *hw) 3771 { 3772 spin_lock_irq(&hw->lock); 3773 csio_post_event(&hw->sm, CSIO_HWE_CFG); 3774 spin_unlock_irq(&hw->lock); 3775 3776 if (csio_is_hw_ready(hw)) 3777 return 0; 3778 else 3779 return -EINVAL; 3780 } 3781 3782 int 3783 csio_hw_stop(struct csio_hw *hw) 3784 { 3785 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE); 3786 3787 if (csio_is_hw_removing(hw)) 3788 return 0; 3789 else 3790 return -EINVAL; 3791 } 3792 3793 /* Max reset retries */ 3794 #define CSIO_MAX_RESET_RETRIES 3 3795 3796 /** 3797 * csio_hw_reset - Reset the hardware 3798 * @hw: HW module. 3799 * 3800 * Caller should hold lock across this function. 3801 */ 3802 int 3803 csio_hw_reset(struct csio_hw *hw) 3804 { 3805 if (!csio_is_hw_master(hw)) 3806 return -EPERM; 3807 3808 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) { 3809 csio_dbg(hw, "Max hw reset attempts reached.."); 3810 return -EINVAL; 3811 } 3812 3813 hw->rst_retries++; 3814 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET); 3815 3816 if (csio_is_hw_ready(hw)) { 3817 hw->rst_retries = 0; 3818 hw->stats.n_reset_start = jiffies_to_msecs(jiffies); 3819 return 0; 3820 } else 3821 return -EINVAL; 3822 } 3823 3824 /* 3825 * csio_hw_get_device_id - Caches the Adapter's vendor & device id. 3826 * @hw: HW module. 3827 */ 3828 static void 3829 csio_hw_get_device_id(struct csio_hw *hw) 3830 { 3831 /* Is the adapter device id cached already ?*/ 3832 if (csio_is_dev_id_cached(hw)) 3833 return; 3834 3835 /* Get the PCI vendor & device id */ 3836 pci_read_config_word(hw->pdev, PCI_VENDOR_ID, 3837 &hw->params.pci.vendor_id); 3838 pci_read_config_word(hw->pdev, PCI_DEVICE_ID, 3839 &hw->params.pci.device_id); 3840 3841 csio_dev_id_cached(hw); 3842 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK); 3843 3844 } /* csio_hw_get_device_id */ 3845 3846 /* 3847 * csio_hw_set_description - Set the model, description of the hw. 3848 * @hw: HW module. 3849 * @ven_id: PCI Vendor ID 3850 * @dev_id: PCI Device ID 3851 */ 3852 static void 3853 csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) 3854 { 3855 uint32_t adap_type, prot_type; 3856 3857 if (ven_id == CSIO_VENDOR_ID) { 3858 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); 3859 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); 3860 3861 if (prot_type == CSIO_T4_FCOE_ASIC) { 3862 memcpy(hw->hw_ver, 3863 csio_t4_fcoe_adapters[adap_type].model_no, 16); 3864 memcpy(hw->model_desc, 3865 csio_t4_fcoe_adapters[adap_type].description, 3866 32); 3867 } else if (prot_type == CSIO_T5_FCOE_ASIC) { 3868 memcpy(hw->hw_ver, 3869 csio_t5_fcoe_adapters[adap_type].model_no, 16); 3870 memcpy(hw->model_desc, 3871 csio_t5_fcoe_adapters[adap_type].description, 3872 32); 3873 } else { 3874 char tempName[32] = "Chelsio FCoE Controller"; 3875 memcpy(hw->model_desc, tempName, 32); 3876 } 3877 } 3878 } /* csio_hw_set_description */ 3879 3880 /** 3881 * csio_hw_init - Initialize HW module. 3882 * @hw: Pointer to HW module. 3883 * 3884 * Initialize the members of the HW module. 3885 */ 3886 int 3887 csio_hw_init(struct csio_hw *hw) 3888 { 3889 int rv = -EINVAL; 3890 uint32_t i; 3891 uint16_t ven_id, dev_id; 3892 struct csio_evt_msg *evt_entry; 3893 3894 INIT_LIST_HEAD(&hw->sm.sm_list); 3895 csio_init_state(&hw->sm, csio_hws_uninit); 3896 spin_lock_init(&hw->lock); 3897 INIT_LIST_HEAD(&hw->sln_head); 3898 3899 /* Get the PCI vendor & device id */ 3900 csio_hw_get_device_id(hw); 3901 3902 strcpy(hw->name, CSIO_HW_NAME); 3903 3904 /* Initialize the HW chip ops with T4/T5 specific ops */ 3905 hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops; 3906 3907 /* Set the model & its description */ 3908 3909 ven_id = hw->params.pci.vendor_id; 3910 dev_id = hw->params.pci.device_id; 3911 3912 csio_hw_set_description(hw, ven_id, dev_id); 3913 3914 /* Initialize default log level */ 3915 hw->params.log_level = (uint32_t) csio_dbg_level; 3916 3917 csio_set_fwevt_intr_idx(hw, -1); 3918 csio_set_nondata_intr_idx(hw, -1); 3919 3920 /* Init all the modules: Mailbox, WorkRequest and Transport */ 3921 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer)) 3922 goto err; 3923 3924 rv = csio_wrm_init(csio_hw_to_wrm(hw), hw); 3925 if (rv) 3926 goto err_mbm_exit; 3927 3928 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw); 3929 if (rv) 3930 goto err_wrm_exit; 3931 3932 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw); 3933 if (rv) 3934 goto err_scsim_exit; 3935 /* Pre-allocate evtq and initialize them */ 3936 INIT_LIST_HEAD(&hw->evt_active_q); 3937 INIT_LIST_HEAD(&hw->evt_free_q); 3938 for (i = 0; i < csio_evtq_sz; i++) { 3939 3940 evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL); 3941 if (!evt_entry) { 3942 csio_err(hw, "Failed to initialize eventq"); 3943 goto err_evtq_cleanup; 3944 } 3945 3946 list_add_tail(&evt_entry->list, &hw->evt_free_q); 3947 CSIO_INC_STATS(hw, n_evt_freeq); 3948 } 3949 3950 hw->dev_num = dev_num; 3951 dev_num++; 3952 3953 return 0; 3954 3955 err_evtq_cleanup: 3956 csio_evtq_cleanup(hw); 3957 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 3958 err_scsim_exit: 3959 csio_scsim_exit(csio_hw_to_scsim(hw)); 3960 err_wrm_exit: 3961 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 3962 err_mbm_exit: 3963 csio_mbm_exit(csio_hw_to_mbm(hw)); 3964 err: 3965 return rv; 3966 } 3967 3968 /** 3969 * csio_hw_exit - Un-initialize HW module. 3970 * @hw: Pointer to HW module. 3971 * 3972 */ 3973 void 3974 csio_hw_exit(struct csio_hw *hw) 3975 { 3976 csio_evtq_cleanup(hw); 3977 csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); 3978 csio_scsim_exit(csio_hw_to_scsim(hw)); 3979 csio_wrm_exit(csio_hw_to_wrm(hw), hw); 3980 csio_mbm_exit(csio_hw_to_mbm(hw)); 3981 } 3982