/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/*
 * Table for showing the current message id in use for a particular level
 * Change this table for addition of log/debug messages.
 * ----------------------------------------------------------------------
 * |             Level            |   Last Value Used  |      Holes     |
 * ----------------------------------------------------------------------
 * | Module Init and Probe        |       0x0116       | 0xfa           |
 * | Mailbox commands             |       0x112b       |                |
 * | Device Discovery             |       0x2084       |                |
 * | Queue Command and IO tracing |       0x302f       | 0x3008,0x302d, |
 * |                              |                    | 0x302e         |
 * | DPC Thread                   |       0x401c       |                |
 * | Async Events                 |       0x5057       | 0x5052         |
 * | Timer Routines               |       0x6011       | 0x600e,0x600f  |
 * | User Space Interactions      |       0x709e       |                |
 * | Task Management              |       0x803c       | 0x8025-0x8026  |
 * |                              |                    | 0x800b,0x8039  |
 * | AER/EEH                      |       0x900f       |                |
 * | Virtual Port                 |       0xa007       |                |
 * | ISP82XX Specific             |       0xb052       |                |
 * | MultiQ                       |       0xc00b       |                |
 * | Misc                         |       0xd00b       |                |
 * ----------------------------------------------------------------------
 */

#include "qla_def.h"

#include <linux/delay.h>

static uint32_t ql_dbg_offset = 0x800;

static inline void
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
{
	fw_dump->fw_major_version = htonl(ha->fw_major_version);
	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
	fw_dump->fw_attributes = htonl(ha->fw_attributes);

	fw_dump->vendor = htonl(ha->pdev->vendor);
	fw_dump->device = htonl(ha->pdev->device);
	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
}

static inline void *
qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
{
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	/* Request queue. */
	memcpy(ptr, req->ring, req->length *
	    sizeof(request_t));

	/* Response queue. */
	ptr += req->length * sizeof(request_t);
	memcpy(ptr, rsp->ring, rsp->length *
	    sizeof(response_t));

	return ptr + (rsp->length * sizeof(response_t));
}

static int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
    uint32_t ram_dwords, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, dwords, idx;
	uint16_t mb0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *dump = (uint32_t *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	dwords = GID_LIST_SIZE / 4;
	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
	    cnt += dwords, addr += dwords) {
		if (cnt + dwords > ram_dwords)
			dwords = ram_dwords - cnt;

		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
		WRT_REG_WORD(&reg->mailbox8, MSW(addr));

		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));

		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->host_status);
			if (stat & HSRX_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2 ||
				    stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_REG_WORD(&reg->mailbox0);

					WRT_REG_DWORD(&reg->hccr,
					    HCCRX_CLR_RISC_INT);
					RD_REG_DWORD(&reg->hccr);
					break;
				}

				/* Clear this intr; it wasn't a mailbox intr */
				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			for (idx = 0; idx < dwords; idx++)
				ram[cnt + idx] = swab32(dump[idx]);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
	return rval;
}

static int
qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
    uint32_t cram_size, void **nxt)
{
	int rval;

	/* Code RAM. */
	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
	if (rval != QLA_SUCCESS)
		return rval;

	/* External Memory. */
	return qla24xx_dump_ram(ha, 0x100000, *nxt,
	    ha->fw_memory_size - 0x100000 + 1, nxt);
}

static uint32_t *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
    uint32_t count, uint32_t *buf)
{
	uint32_t __iomem *dmp_reg;

	WRT_REG_DWORD(&reg->iobase_addr, iobase);
	dmp_reg = &reg->iobase_window;
	while (count--)
		*buf++ = htonl(RD_REG_DWORD(dmp_reg++));

	return buf;
}

static inline int
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
{
	int rval = QLA_SUCCESS;
	uint32_t cnt;

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
	for (cnt = 30000;
	    ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	return rval;
}

static int
qla24xx_soft_reset(struct qla_hw_data *ha)
{
	int rval = QLA_SUCCESS;
	uint32_t cnt;
	uint16_t mb0, wd;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);
	/* Wait for firmware to complete NVRAM accesses. */
	mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000 ; cnt && mb0; cnt--) {
		udelay(5);
		mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
		barrier();
	}

	/* Wait for soft-reset to complete. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(10);
	}
	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);            /* PCI Posting. */

	for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	return rval;
}

static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
    uint32_t ram_words, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, words, idx;
	uint16_t mb0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint16_t *dump = (uint16_t *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	words = GID_LIST_SIZE / 2;
	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
	    cnt += words, addr += words) {
		if (cnt + words > ram_words)
			words = ram_words - cnt;

		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));

		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));

		WRT_MAILBOX_REG(ha, reg, 4, words);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
			if (stat & HSR_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					/* Release mailbox registers. */
					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				} else if (stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			for (idx = 0; idx < words; idx++)
				ram[cnt + idx] = swab16(dump[idx]);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
	return rval;
}

static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
    uint16_t *buf)
{
	uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;

	while (count--)
		*buf++ = htons(RD_REG_WORD(dmp_reg++));
}

static inline void *
qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
{
	if (!ha->eft)
		return ptr;

	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
	return ptr + ntohl(ha->fw_dump->eft_size);
}

static inline void *
qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
	uint32_t cnt;
	uint32_t *iter_reg;
	struct qla2xxx_fce_chain *fcec = ptr;

	if (!ha->fce)
		return ptr;

	*last_chain = &fcec->type;
	fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
	    fce_calc_size(ha->fce_bufs));
	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
	fcec->addr_l = htonl(LSD(ha->fce_dma));
	fcec->addr_h = htonl(MSD(ha->fce_dma));

	iter_reg = fcec->eregs;
	for (cnt = 0; cnt < 8; cnt++)
		*iter_reg++ = htonl(ha->fce_mb[cnt]);

	memcpy(iter_reg, ha->fce, ntohl(fcec->size));

	return (char *)iter_reg + ntohl(fcec->size);
}

static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
	uint32_t cnt, que_idx;
	uint8_t que_cnt;
	struct qla2xxx_mq_chain *mq = ptr;
	struct device_reg_25xxmq __iomem *reg;

	if (!ha->mqenable)
		return ptr;

	mq = ptr;
	*last_chain = &mq->type;
	mq->type = __constant_htonl(DUMP_CHAIN_MQ);
	mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));

	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
		ha->max_req_queues : ha->max_rsp_queues;
	mq->count = htonl(que_cnt);
	for (cnt = 0; cnt < que_cnt; cnt++) {
		reg = (struct device_reg_25xxmq *) ((void *)
			ha->mqiobase + cnt * QLA_QUE_PAGE);
		que_idx = cnt * 4;
		mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
		mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
		mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
		mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
	}

	return ptr + sizeof(struct qla2xxx_mq_chain);
}

void
qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
{
	struct qla_hw_data *ha = vha->hw;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xd000,
		    "Failed to dump firmware (%x).\n", rval);
		ha->fw_dumped = 0;
	} else {
		ql_log(ql_log_info, vha, 0xd001,
		    "Firmware dump saved to temp buffer (%ld/%p).\n",
		    vha->host_no, ha->fw_dump);
		ha->fw_dumped = 1;
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}

/**
 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
428 * @ha: HA context 429 * @hardware_locked: Called with the hardware_lock 430 */ 431 void 432 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 433 { 434 int rval; 435 uint32_t cnt; 436 struct qla_hw_data *ha = vha->hw; 437 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 438 uint16_t __iomem *dmp_reg; 439 unsigned long flags; 440 struct qla2300_fw_dump *fw; 441 void *nxt; 442 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 443 444 flags = 0; 445 446 if (!hardware_locked) 447 spin_lock_irqsave(&ha->hardware_lock, flags); 448 449 if (!ha->fw_dump) { 450 ql_log(ql_log_warn, vha, 0xd002, 451 "No buffer available for dump.\n"); 452 goto qla2300_fw_dump_failed; 453 } 454 455 if (ha->fw_dumped) { 456 ql_log(ql_log_warn, vha, 0xd003, 457 "Firmware has been previously dumped (%p) " 458 "-- ignoring request.\n", 459 ha->fw_dump); 460 goto qla2300_fw_dump_failed; 461 } 462 fw = &ha->fw_dump->isp.isp23; 463 qla2xxx_prep_dump(ha, ha->fw_dump); 464 465 rval = QLA_SUCCESS; 466 fw->hccr = htons(RD_REG_WORD(®->hccr)); 467 468 /* Pause RISC. */ 469 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); 470 if (IS_QLA2300(ha)) { 471 for (cnt = 30000; 472 (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && 473 rval == QLA_SUCCESS; cnt--) { 474 if (cnt) 475 udelay(100); 476 else 477 rval = QLA_FUNCTION_TIMEOUT; 478 } 479 } else { 480 RD_REG_WORD(®->hccr); /* PCI Posting. */ 481 udelay(10); 482 } 483 484 if (rval == QLA_SUCCESS) { 485 dmp_reg = ®->flash_address; 486 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 487 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 488 489 dmp_reg = ®->u.isp2300.req_q_in; 490 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++) 491 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 492 493 dmp_reg = ®->u.isp2300.mailbox0; 494 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 495 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 496 497 WRT_REG_WORD(®->ctrl_status, 0x40); 498 qla2xxx_read_window(reg, 32, fw->resp_dma_reg); 499 500 WRT_REG_WORD(®->ctrl_status, 0x50); 501 qla2xxx_read_window(reg, 48, fw->dma_reg); 502 503 WRT_REG_WORD(®->ctrl_status, 0x00); 504 dmp_reg = ®->risc_hw; 505 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 506 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 507 508 WRT_REG_WORD(®->pcr, 0x2000); 509 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); 510 511 WRT_REG_WORD(®->pcr, 0x2200); 512 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); 513 514 WRT_REG_WORD(®->pcr, 0x2400); 515 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); 516 517 WRT_REG_WORD(®->pcr, 0x2600); 518 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); 519 520 WRT_REG_WORD(®->pcr, 0x2800); 521 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); 522 523 WRT_REG_WORD(®->pcr, 0x2A00); 524 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); 525 526 WRT_REG_WORD(®->pcr, 0x2C00); 527 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); 528 529 WRT_REG_WORD(®->pcr, 0x2E00); 530 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); 531 532 WRT_REG_WORD(®->ctrl_status, 0x10); 533 qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg); 534 535 WRT_REG_WORD(®->ctrl_status, 0x20); 536 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); 537 538 WRT_REG_WORD(®->ctrl_status, 0x30); 539 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); 540 541 /* Reset RISC. 
*/ 542 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); 543 for (cnt = 0; cnt < 30000; cnt++) { 544 if ((RD_REG_WORD(®->ctrl_status) & 545 CSR_ISP_SOFT_RESET) == 0) 546 break; 547 548 udelay(10); 549 } 550 } 551 552 if (!IS_QLA2300(ha)) { 553 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && 554 rval == QLA_SUCCESS; cnt--) { 555 if (cnt) 556 udelay(100); 557 else 558 rval = QLA_FUNCTION_TIMEOUT; 559 } 560 } 561 562 /* Get RISC SRAM. */ 563 if (rval == QLA_SUCCESS) 564 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram, 565 sizeof(fw->risc_ram) / 2, &nxt); 566 567 /* Get stack SRAM. */ 568 if (rval == QLA_SUCCESS) 569 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram, 570 sizeof(fw->stack_ram) / 2, &nxt); 571 572 /* Get data SRAM. */ 573 if (rval == QLA_SUCCESS) 574 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram, 575 ha->fw_memory_size - 0x11000 + 1, &nxt); 576 577 if (rval == QLA_SUCCESS) 578 qla2xxx_copy_queues(ha, nxt); 579 580 qla2xxx_dump_post_process(base_vha, rval); 581 582 qla2300_fw_dump_failed: 583 if (!hardware_locked) 584 spin_unlock_irqrestore(&ha->hardware_lock, flags); 585 } 586 587 /** 588 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware. 589 * @ha: HA context 590 * @hardware_locked: Called with the hardware_lock 591 */ 592 void 593 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 594 { 595 int rval; 596 uint32_t cnt, timer; 597 uint16_t risc_address; 598 uint16_t mb0, mb2; 599 struct qla_hw_data *ha = vha->hw; 600 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 601 uint16_t __iomem *dmp_reg; 602 unsigned long flags; 603 struct qla2100_fw_dump *fw; 604 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 605 606 risc_address = 0; 607 mb0 = mb2 = 0; 608 flags = 0; 609 610 if (!hardware_locked) 611 spin_lock_irqsave(&ha->hardware_lock, flags); 612 613 if (!ha->fw_dump) { 614 ql_log(ql_log_warn, vha, 0xd004, 615 "No buffer available for dump.\n"); 616 goto qla2100_fw_dump_failed; 617 } 618 619 if (ha->fw_dumped) { 620 ql_log(ql_log_warn, vha, 0xd005, 621 "Firmware has been previously dumped (%p) " 622 "-- ignoring request.\n", 623 ha->fw_dump); 624 goto qla2100_fw_dump_failed; 625 } 626 fw = &ha->fw_dump->isp.isp21; 627 qla2xxx_prep_dump(ha, ha->fw_dump); 628 629 rval = QLA_SUCCESS; 630 fw->hccr = htons(RD_REG_WORD(®->hccr)); 631 632 /* Pause RISC. 
*/ 633 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); 634 for (cnt = 30000; (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && 635 rval == QLA_SUCCESS; cnt--) { 636 if (cnt) 637 udelay(100); 638 else 639 rval = QLA_FUNCTION_TIMEOUT; 640 } 641 if (rval == QLA_SUCCESS) { 642 dmp_reg = ®->flash_address; 643 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 644 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 645 646 dmp_reg = ®->u.isp2100.mailbox0; 647 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 648 if (cnt == 8) 649 dmp_reg = ®->u_end.isp2200.mailbox8; 650 651 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 652 } 653 654 dmp_reg = ®->u.isp2100.unused_2[0]; 655 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) 656 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 657 658 WRT_REG_WORD(®->ctrl_status, 0x00); 659 dmp_reg = ®->risc_hw; 660 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 661 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 662 663 WRT_REG_WORD(®->pcr, 0x2000); 664 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); 665 666 WRT_REG_WORD(®->pcr, 0x2100); 667 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); 668 669 WRT_REG_WORD(®->pcr, 0x2200); 670 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); 671 672 WRT_REG_WORD(®->pcr, 0x2300); 673 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); 674 675 WRT_REG_WORD(®->pcr, 0x2400); 676 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); 677 678 WRT_REG_WORD(®->pcr, 0x2500); 679 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); 680 681 WRT_REG_WORD(®->pcr, 0x2600); 682 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); 683 684 WRT_REG_WORD(®->pcr, 0x2700); 685 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); 686 687 WRT_REG_WORD(®->ctrl_status, 0x10); 688 qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg); 689 690 WRT_REG_WORD(®->ctrl_status, 0x20); 691 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); 692 693 WRT_REG_WORD(®->ctrl_status, 0x30); 694 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); 695 696 /* Reset the ISP. */ 697 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); 698 } 699 700 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && 701 rval == QLA_SUCCESS; cnt--) { 702 if (cnt) 703 udelay(100); 704 else 705 rval = QLA_FUNCTION_TIMEOUT; 706 } 707 708 /* Pause RISC. */ 709 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) && 710 (RD_REG_WORD(®->mctr) & (BIT_1 | BIT_0)) != 0))) { 711 712 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); 713 for (cnt = 30000; 714 (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && 715 rval == QLA_SUCCESS; cnt--) { 716 if (cnt) 717 udelay(100); 718 else 719 rval = QLA_FUNCTION_TIMEOUT; 720 } 721 if (rval == QLA_SUCCESS) { 722 /* Set memory configuration and timing. */ 723 if (IS_QLA2100(ha)) 724 WRT_REG_WORD(®->mctr, 0xf1); 725 else 726 WRT_REG_WORD(®->mctr, 0xf2); 727 RD_REG_WORD(®->mctr); /* PCI Posting. */ 728 729 /* Release RISC. */ 730 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 731 } 732 } 733 734 if (rval == QLA_SUCCESS) { 735 /* Get RISC SRAM. */ 736 risc_address = 0x1000; 737 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD); 738 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 739 } 740 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS; 741 cnt++, risc_address++) { 742 WRT_MAILBOX_REG(ha, reg, 1, risc_address); 743 WRT_REG_WORD(®->hccr, HCCR_SET_HOST_INT); 744 745 for (timer = 6000000; timer != 0; timer--) { 746 /* Check for pending interrupts. 
*/ 747 if (RD_REG_WORD(®->istatus) & ISR_RISC_INT) { 748 if (RD_REG_WORD(®->semaphore) & BIT_0) { 749 set_bit(MBX_INTERRUPT, 750 &ha->mbx_cmd_flags); 751 752 mb0 = RD_MAILBOX_REG(ha, reg, 0); 753 mb2 = RD_MAILBOX_REG(ha, reg, 2); 754 755 WRT_REG_WORD(®->semaphore, 0); 756 WRT_REG_WORD(®->hccr, 757 HCCR_CLR_RISC_INT); 758 RD_REG_WORD(®->hccr); 759 break; 760 } 761 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 762 RD_REG_WORD(®->hccr); 763 } 764 udelay(5); 765 } 766 767 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 768 rval = mb0 & MBS_MASK; 769 fw->risc_ram[cnt] = htons(mb2); 770 } else { 771 rval = QLA_FUNCTION_FAILED; 772 } 773 } 774 775 if (rval == QLA_SUCCESS) 776 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); 777 778 qla2xxx_dump_post_process(base_vha, rval); 779 780 qla2100_fw_dump_failed: 781 if (!hardware_locked) 782 spin_unlock_irqrestore(&ha->hardware_lock, flags); 783 } 784 785 void 786 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 787 { 788 int rval; 789 uint32_t cnt; 790 uint32_t risc_address; 791 struct qla_hw_data *ha = vha->hw; 792 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 793 uint32_t __iomem *dmp_reg; 794 uint32_t *iter_reg; 795 uint16_t __iomem *mbx_reg; 796 unsigned long flags; 797 struct qla24xx_fw_dump *fw; 798 uint32_t ext_mem_cnt; 799 void *nxt; 800 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 801 802 if (IS_QLA82XX(ha)) 803 return; 804 805 risc_address = ext_mem_cnt = 0; 806 flags = 0; 807 808 if (!hardware_locked) 809 spin_lock_irqsave(&ha->hardware_lock, flags); 810 811 if (!ha->fw_dump) { 812 ql_log(ql_log_warn, vha, 0xd006, 813 "No buffer available for dump.\n"); 814 goto qla24xx_fw_dump_failed; 815 } 816 817 if (ha->fw_dumped) { 818 ql_log(ql_log_warn, vha, 0xd007, 819 "Firmware has been previously dumped (%p) " 820 "-- ignoring request.\n", 821 ha->fw_dump); 822 goto qla24xx_fw_dump_failed; 823 } 824 fw = &ha->fw_dump->isp.isp24; 825 qla2xxx_prep_dump(ha, ha->fw_dump); 826 827 fw->host_status = htonl(RD_REG_DWORD(®->host_status)); 828 829 /* Pause RISC. */ 830 rval = qla24xx_pause_risc(reg); 831 if (rval != QLA_SUCCESS) 832 goto qla24xx_fw_dump_failed_0; 833 834 /* Host interface registers. */ 835 dmp_reg = ®->flash_addr; 836 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 837 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 838 839 /* Disable interrupts. */ 840 WRT_REG_DWORD(®->ictrl, 0); 841 RD_REG_DWORD(®->ictrl); 842 843 /* Shadow registers. */ 844 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 845 RD_REG_DWORD(®->iobase_addr); 846 WRT_REG_DWORD(®->iobase_select, 0xB0000000); 847 fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); 848 849 WRT_REG_DWORD(®->iobase_select, 0xB0100000); 850 fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); 851 852 WRT_REG_DWORD(®->iobase_select, 0xB0200000); 853 fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); 854 855 WRT_REG_DWORD(®->iobase_select, 0xB0300000); 856 fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); 857 858 WRT_REG_DWORD(®->iobase_select, 0xB0400000); 859 fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); 860 861 WRT_REG_DWORD(®->iobase_select, 0xB0500000); 862 fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); 863 864 WRT_REG_DWORD(®->iobase_select, 0xB0600000); 865 fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); 866 867 /* Mailbox registers. 
*/ 868 mbx_reg = ®->mailbox0; 869 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 870 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 871 872 /* Transfer sequence registers. */ 873 iter_reg = fw->xseq_gp_reg; 874 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 875 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 876 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 877 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 878 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 879 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 880 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 881 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 882 883 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg); 884 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 885 886 /* Receive sequence registers. */ 887 iter_reg = fw->rseq_gp_reg; 888 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 889 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 890 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 891 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 892 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 893 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 894 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 895 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 896 897 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg); 898 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 899 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 900 901 /* Command DMA registers. */ 902 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 903 904 /* Queues. */ 905 iter_reg = fw->req0_dma_reg; 906 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 907 dmp_reg = ®->iobase_q; 908 for (cnt = 0; cnt < 7; cnt++) 909 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 910 911 iter_reg = fw->resp0_dma_reg; 912 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 913 dmp_reg = ®->iobase_q; 914 for (cnt = 0; cnt < 7; cnt++) 915 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 916 917 iter_reg = fw->req1_dma_reg; 918 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 919 dmp_reg = ®->iobase_q; 920 for (cnt = 0; cnt < 7; cnt++) 921 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 922 923 /* Transmit DMA registers. */ 924 iter_reg = fw->xmt0_dma_reg; 925 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 926 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 927 928 iter_reg = fw->xmt1_dma_reg; 929 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 930 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 931 932 iter_reg = fw->xmt2_dma_reg; 933 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 934 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 935 936 iter_reg = fw->xmt3_dma_reg; 937 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 938 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 939 940 iter_reg = fw->xmt4_dma_reg; 941 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 942 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 943 944 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 945 946 /* Receive DMA registers. 
*/ 947 iter_reg = fw->rcvt0_data_dma_reg; 948 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 949 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 950 951 iter_reg = fw->rcvt1_data_dma_reg; 952 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 953 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 954 955 /* RISC registers. */ 956 iter_reg = fw->risc_gp_reg; 957 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 958 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 959 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 960 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 961 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 962 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 963 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 964 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 965 966 /* Local memory controller registers. */ 967 iter_reg = fw->lmc_reg; 968 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 969 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 970 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 971 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 972 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 973 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 974 qla24xx_read_window(reg, 0x3060, 16, iter_reg); 975 976 /* Fibre Protocol Module registers. */ 977 iter_reg = fw->fpm_hdw_reg; 978 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 979 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 980 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 981 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 982 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 983 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 984 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 985 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 986 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 987 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 988 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 989 qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 990 991 /* Frame Buffer registers. 
*/ 992 iter_reg = fw->fb_hdw_reg; 993 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 994 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 995 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 996 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 997 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 998 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 999 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 1000 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 1001 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1002 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1003 qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1004 1005 rval = qla24xx_soft_reset(ha); 1006 if (rval != QLA_SUCCESS) 1007 goto qla24xx_fw_dump_failed_0; 1008 1009 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1010 &nxt); 1011 if (rval != QLA_SUCCESS) 1012 goto qla24xx_fw_dump_failed_0; 1013 1014 nxt = qla2xxx_copy_queues(ha, nxt); 1015 1016 qla24xx_copy_eft(ha, nxt); 1017 1018 qla24xx_fw_dump_failed_0: 1019 qla2xxx_dump_post_process(base_vha, rval); 1020 1021 qla24xx_fw_dump_failed: 1022 if (!hardware_locked) 1023 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1024 } 1025 1026 void 1027 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 1028 { 1029 int rval; 1030 uint32_t cnt; 1031 uint32_t risc_address; 1032 struct qla_hw_data *ha = vha->hw; 1033 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1034 uint32_t __iomem *dmp_reg; 1035 uint32_t *iter_reg; 1036 uint16_t __iomem *mbx_reg; 1037 unsigned long flags; 1038 struct qla25xx_fw_dump *fw; 1039 uint32_t ext_mem_cnt; 1040 void *nxt, *nxt_chain; 1041 uint32_t *last_chain = NULL; 1042 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1043 1044 risc_address = ext_mem_cnt = 0; 1045 flags = 0; 1046 1047 if (!hardware_locked) 1048 spin_lock_irqsave(&ha->hardware_lock, flags); 1049 1050 if (!ha->fw_dump) { 1051 ql_log(ql_log_warn, vha, 0xd008, 1052 "No buffer available for dump.\n"); 1053 goto qla25xx_fw_dump_failed; 1054 } 1055 1056 if (ha->fw_dumped) { 1057 ql_log(ql_log_warn, vha, 0xd009, 1058 "Firmware has been previously dumped (%p) " 1059 "-- ignoring request.\n", 1060 ha->fw_dump); 1061 goto qla25xx_fw_dump_failed; 1062 } 1063 fw = &ha->fw_dump->isp.isp25; 1064 qla2xxx_prep_dump(ha, ha->fw_dump); 1065 ha->fw_dump->version = __constant_htonl(2); 1066 1067 fw->host_status = htonl(RD_REG_DWORD(®->host_status)); 1068 1069 /* Pause RISC. */ 1070 rval = qla24xx_pause_risc(reg); 1071 if (rval != QLA_SUCCESS) 1072 goto qla25xx_fw_dump_failed_0; 1073 1074 /* Host/Risc registers. */ 1075 iter_reg = fw->host_risc_reg; 1076 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); 1077 qla24xx_read_window(reg, 0x7010, 16, iter_reg); 1078 1079 /* PCIe registers. */ 1080 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 1081 RD_REG_DWORD(®->iobase_addr); 1082 WRT_REG_DWORD(®->iobase_window, 0x01); 1083 dmp_reg = ®->iobase_c4; 1084 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); 1085 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); 1086 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 1087 fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); 1088 1089 WRT_REG_DWORD(®->iobase_window, 0x00); 1090 RD_REG_DWORD(®->iobase_window); 1091 1092 /* Host interface registers. */ 1093 dmp_reg = ®->flash_addr; 1094 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 1095 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 1096 1097 /* Disable interrupts. 
*/ 1098 WRT_REG_DWORD(®->ictrl, 0); 1099 RD_REG_DWORD(®->ictrl); 1100 1101 /* Shadow registers. */ 1102 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 1103 RD_REG_DWORD(®->iobase_addr); 1104 WRT_REG_DWORD(®->iobase_select, 0xB0000000); 1105 fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1106 1107 WRT_REG_DWORD(®->iobase_select, 0xB0100000); 1108 fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1109 1110 WRT_REG_DWORD(®->iobase_select, 0xB0200000); 1111 fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1112 1113 WRT_REG_DWORD(®->iobase_select, 0xB0300000); 1114 fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1115 1116 WRT_REG_DWORD(®->iobase_select, 0xB0400000); 1117 fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1118 1119 WRT_REG_DWORD(®->iobase_select, 0xB0500000); 1120 fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1121 1122 WRT_REG_DWORD(®->iobase_select, 0xB0600000); 1123 fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1124 1125 WRT_REG_DWORD(®->iobase_select, 0xB0700000); 1126 fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1127 1128 WRT_REG_DWORD(®->iobase_select, 0xB0800000); 1129 fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1130 1131 WRT_REG_DWORD(®->iobase_select, 0xB0900000); 1132 fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1133 1134 WRT_REG_DWORD(®->iobase_select, 0xB0A00000); 1135 fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1136 1137 /* RISC I/O register. */ 1138 WRT_REG_DWORD(®->iobase_addr, 0x0010); 1139 fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); 1140 1141 /* Mailbox registers. */ 1142 mbx_reg = ®->mailbox0; 1143 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 1144 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 1145 1146 /* Transfer sequence registers. */ 1147 iter_reg = fw->xseq_gp_reg; 1148 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 1149 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 1150 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 1151 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 1152 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 1153 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 1154 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 1155 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 1156 1157 iter_reg = fw->xseq_0_reg; 1158 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); 1159 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); 1160 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); 1161 1162 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 1163 1164 /* Receive sequence registers. */ 1165 iter_reg = fw->rseq_gp_reg; 1166 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 1167 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 1168 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 1169 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 1170 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 1171 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 1172 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 1173 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 1174 1175 iter_reg = fw->rseq_0_reg; 1176 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 1177 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 1178 1179 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 1180 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 1181 1182 /* Auxiliary sequence registers. 
*/ 1183 iter_reg = fw->aseq_gp_reg; 1184 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 1185 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 1186 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 1187 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 1188 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 1189 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 1190 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 1191 qla24xx_read_window(reg, 0xB070, 16, iter_reg); 1192 1193 iter_reg = fw->aseq_0_reg; 1194 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 1195 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 1196 1197 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 1198 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 1199 1200 /* Command DMA registers. */ 1201 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 1202 1203 /* Queues. */ 1204 iter_reg = fw->req0_dma_reg; 1205 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 1206 dmp_reg = ®->iobase_q; 1207 for (cnt = 0; cnt < 7; cnt++) 1208 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1209 1210 iter_reg = fw->resp0_dma_reg; 1211 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 1212 dmp_reg = ®->iobase_q; 1213 for (cnt = 0; cnt < 7; cnt++) 1214 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1215 1216 iter_reg = fw->req1_dma_reg; 1217 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 1218 dmp_reg = ®->iobase_q; 1219 for (cnt = 0; cnt < 7; cnt++) 1220 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1221 1222 /* Transmit DMA registers. */ 1223 iter_reg = fw->xmt0_dma_reg; 1224 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 1225 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 1226 1227 iter_reg = fw->xmt1_dma_reg; 1228 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 1229 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 1230 1231 iter_reg = fw->xmt2_dma_reg; 1232 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 1233 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 1234 1235 iter_reg = fw->xmt3_dma_reg; 1236 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 1237 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 1238 1239 iter_reg = fw->xmt4_dma_reg; 1240 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 1241 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 1242 1243 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 1244 1245 /* Receive DMA registers. */ 1246 iter_reg = fw->rcvt0_data_dma_reg; 1247 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 1248 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 1249 1250 iter_reg = fw->rcvt1_data_dma_reg; 1251 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 1252 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 1253 1254 /* RISC registers. */ 1255 iter_reg = fw->risc_gp_reg; 1256 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 1257 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 1258 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 1259 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 1260 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 1261 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 1262 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 1263 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 1264 1265 /* Local memory controller registers. 
*/ 1266 iter_reg = fw->lmc_reg; 1267 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 1268 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 1269 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 1270 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 1271 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 1272 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 1273 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 1274 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 1275 1276 /* Fibre Protocol Module registers. */ 1277 iter_reg = fw->fpm_hdw_reg; 1278 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 1279 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 1280 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 1281 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 1282 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 1283 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 1284 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 1285 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 1286 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 1287 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 1288 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 1289 qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 1290 1291 /* Frame Buffer registers. */ 1292 iter_reg = fw->fb_hdw_reg; 1293 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 1294 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 1295 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 1296 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 1297 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 1298 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 1299 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 1300 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 1301 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1302 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1303 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1304 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 1305 1306 /* Multi queue registers */ 1307 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 1308 &last_chain); 1309 1310 rval = qla24xx_soft_reset(ha); 1311 if (rval != QLA_SUCCESS) 1312 goto qla25xx_fw_dump_failed_0; 1313 1314 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1315 &nxt); 1316 if (rval != QLA_SUCCESS) 1317 goto qla25xx_fw_dump_failed_0; 1318 1319 nxt = qla2xxx_copy_queues(ha, nxt); 1320 1321 nxt = qla24xx_copy_eft(ha, nxt); 1322 1323 /* Chain entries -- started with MQ. 
*/ 1324 qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1325 if (last_chain) { 1326 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1327 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1328 } 1329 1330 qla25xx_fw_dump_failed_0: 1331 qla2xxx_dump_post_process(base_vha, rval); 1332 1333 qla25xx_fw_dump_failed: 1334 if (!hardware_locked) 1335 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1336 } 1337 1338 void 1339 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 1340 { 1341 int rval; 1342 uint32_t cnt; 1343 uint32_t risc_address; 1344 struct qla_hw_data *ha = vha->hw; 1345 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1346 uint32_t __iomem *dmp_reg; 1347 uint32_t *iter_reg; 1348 uint16_t __iomem *mbx_reg; 1349 unsigned long flags; 1350 struct qla81xx_fw_dump *fw; 1351 uint32_t ext_mem_cnt; 1352 void *nxt, *nxt_chain; 1353 uint32_t *last_chain = NULL; 1354 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1355 1356 risc_address = ext_mem_cnt = 0; 1357 flags = 0; 1358 1359 if (!hardware_locked) 1360 spin_lock_irqsave(&ha->hardware_lock, flags); 1361 1362 if (!ha->fw_dump) { 1363 ql_log(ql_log_warn, vha, 0xd00a, 1364 "No buffer available for dump.\n"); 1365 goto qla81xx_fw_dump_failed; 1366 } 1367 1368 if (ha->fw_dumped) { 1369 ql_log(ql_log_warn, vha, 0xd00b, 1370 "Firmware has been previously dumped (%p) " 1371 "-- ignoring request.\n", 1372 ha->fw_dump); 1373 goto qla81xx_fw_dump_failed; 1374 } 1375 fw = &ha->fw_dump->isp.isp81; 1376 qla2xxx_prep_dump(ha, ha->fw_dump); 1377 1378 fw->host_status = htonl(RD_REG_DWORD(®->host_status)); 1379 1380 /* Pause RISC. */ 1381 rval = qla24xx_pause_risc(reg); 1382 if (rval != QLA_SUCCESS) 1383 goto qla81xx_fw_dump_failed_0; 1384 1385 /* Host/Risc registers. */ 1386 iter_reg = fw->host_risc_reg; 1387 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); 1388 qla24xx_read_window(reg, 0x7010, 16, iter_reg); 1389 1390 /* PCIe registers. */ 1391 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 1392 RD_REG_DWORD(®->iobase_addr); 1393 WRT_REG_DWORD(®->iobase_window, 0x01); 1394 dmp_reg = ®->iobase_c4; 1395 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); 1396 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); 1397 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 1398 fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); 1399 1400 WRT_REG_DWORD(®->iobase_window, 0x00); 1401 RD_REG_DWORD(®->iobase_window); 1402 1403 /* Host interface registers. */ 1404 dmp_reg = ®->flash_addr; 1405 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 1406 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 1407 1408 /* Disable interrupts. */ 1409 WRT_REG_DWORD(®->ictrl, 0); 1410 RD_REG_DWORD(®->ictrl); 1411 1412 /* Shadow registers. 
*/ 1413 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 1414 RD_REG_DWORD(®->iobase_addr); 1415 WRT_REG_DWORD(®->iobase_select, 0xB0000000); 1416 fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1417 1418 WRT_REG_DWORD(®->iobase_select, 0xB0100000); 1419 fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1420 1421 WRT_REG_DWORD(®->iobase_select, 0xB0200000); 1422 fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1423 1424 WRT_REG_DWORD(®->iobase_select, 0xB0300000); 1425 fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1426 1427 WRT_REG_DWORD(®->iobase_select, 0xB0400000); 1428 fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1429 1430 WRT_REG_DWORD(®->iobase_select, 0xB0500000); 1431 fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1432 1433 WRT_REG_DWORD(®->iobase_select, 0xB0600000); 1434 fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1435 1436 WRT_REG_DWORD(®->iobase_select, 0xB0700000); 1437 fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1438 1439 WRT_REG_DWORD(®->iobase_select, 0xB0800000); 1440 fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1441 1442 WRT_REG_DWORD(®->iobase_select, 0xB0900000); 1443 fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1444 1445 WRT_REG_DWORD(®->iobase_select, 0xB0A00000); 1446 fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1447 1448 /* RISC I/O register. */ 1449 WRT_REG_DWORD(®->iobase_addr, 0x0010); 1450 fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); 1451 1452 /* Mailbox registers. */ 1453 mbx_reg = ®->mailbox0; 1454 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 1455 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 1456 1457 /* Transfer sequence registers. */ 1458 iter_reg = fw->xseq_gp_reg; 1459 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 1460 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 1461 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 1462 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 1463 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 1464 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 1465 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 1466 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 1467 1468 iter_reg = fw->xseq_0_reg; 1469 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); 1470 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); 1471 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); 1472 1473 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 1474 1475 /* Receive sequence registers. */ 1476 iter_reg = fw->rseq_gp_reg; 1477 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 1478 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 1479 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 1480 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 1481 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 1482 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 1483 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 1484 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 1485 1486 iter_reg = fw->rseq_0_reg; 1487 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 1488 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 1489 1490 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 1491 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 1492 1493 /* Auxiliary sequence registers. 
*/ 1494 iter_reg = fw->aseq_gp_reg; 1495 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 1496 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 1497 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 1498 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 1499 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 1500 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 1501 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 1502 qla24xx_read_window(reg, 0xB070, 16, iter_reg); 1503 1504 iter_reg = fw->aseq_0_reg; 1505 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 1506 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 1507 1508 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 1509 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 1510 1511 /* Command DMA registers. */ 1512 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 1513 1514 /* Queues. */ 1515 iter_reg = fw->req0_dma_reg; 1516 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 1517 dmp_reg = ®->iobase_q; 1518 for (cnt = 0; cnt < 7; cnt++) 1519 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1520 1521 iter_reg = fw->resp0_dma_reg; 1522 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 1523 dmp_reg = ®->iobase_q; 1524 for (cnt = 0; cnt < 7; cnt++) 1525 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1526 1527 iter_reg = fw->req1_dma_reg; 1528 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 1529 dmp_reg = ®->iobase_q; 1530 for (cnt = 0; cnt < 7; cnt++) 1531 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1532 1533 /* Transmit DMA registers. */ 1534 iter_reg = fw->xmt0_dma_reg; 1535 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 1536 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 1537 1538 iter_reg = fw->xmt1_dma_reg; 1539 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 1540 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 1541 1542 iter_reg = fw->xmt2_dma_reg; 1543 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 1544 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 1545 1546 iter_reg = fw->xmt3_dma_reg; 1547 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 1548 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 1549 1550 iter_reg = fw->xmt4_dma_reg; 1551 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 1552 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 1553 1554 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 1555 1556 /* Receive DMA registers. */ 1557 iter_reg = fw->rcvt0_data_dma_reg; 1558 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 1559 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 1560 1561 iter_reg = fw->rcvt1_data_dma_reg; 1562 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 1563 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 1564 1565 /* RISC registers. */ 1566 iter_reg = fw->risc_gp_reg; 1567 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 1568 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 1569 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 1570 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 1571 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 1572 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 1573 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 1574 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 1575 1576 /* Local memory controller registers. 
*/ 1577 iter_reg = fw->lmc_reg; 1578 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 1579 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 1580 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 1581 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 1582 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 1583 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 1584 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 1585 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 1586 1587 /* Fibre Protocol Module registers. */ 1588 iter_reg = fw->fpm_hdw_reg; 1589 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 1590 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 1591 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 1592 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 1593 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 1594 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 1595 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 1596 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 1597 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 1598 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 1599 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 1600 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 1601 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); 1602 qla24xx_read_window(reg, 0x40D0, 16, iter_reg); 1603 1604 /* Frame Buffer registers. */ 1605 iter_reg = fw->fb_hdw_reg; 1606 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 1607 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 1608 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 1609 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 1610 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 1611 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 1612 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 1613 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 1614 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1615 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1616 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1617 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); 1618 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 1619 1620 /* Multi queue registers */ 1621 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 1622 &last_chain); 1623 1624 rval = qla24xx_soft_reset(ha); 1625 if (rval != QLA_SUCCESS) 1626 goto qla81xx_fw_dump_failed_0; 1627 1628 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1629 &nxt); 1630 if (rval != QLA_SUCCESS) 1631 goto qla81xx_fw_dump_failed_0; 1632 1633 nxt = qla2xxx_copy_queues(ha, nxt); 1634 1635 nxt = qla24xx_copy_eft(ha, nxt); 1636 1637 /* Chain entries -- started with MQ. */ 1638 qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1639 if (last_chain) { 1640 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1641 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1642 } 1643 1644 qla81xx_fw_dump_failed_0: 1645 qla2xxx_dump_post_process(base_vha, rval); 1646 1647 qla81xx_fw_dump_failed: 1648 if (!hardware_locked) 1649 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1650 } 1651 1652 /****************************************************************************/ 1653 /* Driver Debug Functions. 
 */
/****************************************************************************/

static inline int
ql_mask_match(uint32_t level)
{
	if (ql2xextended_error_logging == 1)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	return (level & ql2xextended_error_logging) == level;
}

/*
 * This function is for formatting and logging debug information.
 * It is to be used when vha is available. It formats the message
 * and logs it to the messages file.
 * parameters:
 * level: The level of the debug messages to be printed.
 *        If ql2xextended_error_logging value is correctly set,
 *        this message will appear in the messages file.
 * vha:   Pointer to the scsi_qla_host_t.
 * id:    This is a unique identifier for the level. It identifies the
 *        part of the code from where the message originated.
 * msg:   The message to be displayed.
 */
void
ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;

	if (!ql_mask_match(level))
		return;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	if (vha != NULL) {
		const struct pci_dev *pdev = vha->hw->pdev;
		/* <module-name> <pci-name> <msg-id>:<host> Message */
		pr_warn("%s [%s]-%04x:%ld: %pV",
		    QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
		    vha->host_no, &vaf);
	} else {
		pr_warn("%s [%s]-%04x: : %pV",
		    QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
	}

	va_end(va);
}

/*
 * This function is for formatting and logging debug information.
 * It is to be used when vha is not available and pci is available,
 * i.e., before host allocation. It formats the message and logs it
 * to the messages file.
 * parameters:
 * level: The level of the debug messages to be printed.
 *        If ql2xextended_error_logging value is correctly set,
 *        this message will appear in the messages file.
 * pdev:  Pointer to the struct pci_dev.
 * id:    This is a unique id for the level. It identifies the part
 *        of the code from where the message originated.
 * msg:   The message to be displayed.
 */
void
ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
    const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;

	if (pdev == NULL)
		return;
	if (!ql_mask_match(level))
		return;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	/* <module-name> <dev-name>:<msg-id> Message */
	pr_warn("%s [%s]-%04x: : %pV",
	    QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);

	va_end(va);
}
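/*
 * Illustrative usage of the debug helpers above (not part of the driver;
 * the message ids are example values and the level bits are assumed to be
 * the ql_dbg_* category masks from qla_dbg.h). A message is emitted only
 * when its category bit is set in ql2xextended_error_logging:
 *
 *	ql_dbg(ql_dbg_io, vha, 0x3001,
 *	    "Command completed, sp=%p.\n", sp);
 *	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0101,
 *	    "PCI probe started.\n");
 */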
/*
 * This function is for formatting and logging log messages.
 * It is to be used when vha is available. It formats the message
 * and logs it to the messages file. All the messages will be logged
 * irrespective of the value of ql2xextended_error_logging.
 * parameters:
 * level: The level of the log messages to be printed in the
 *        messages file.
 * vha:   Pointer to the scsi_qla_host_t
 * id:    This is a unique id for the level. It identifies the
 *        part of the code from where the message originated.
 * msg:   The message to be displayed.
 */
void
ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char pbuf[128];

	if (level > ql_errlev)
		return;

	if (vha != NULL) {
		const struct pci_dev *pdev = vha->hw->pdev;
		/* <module-name> <msg-id>:<host> Message */
		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
		    QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
	} else {
		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
		    QL_MSGHDR, "0000:00:00.0", id);
	}
	pbuf[sizeof(pbuf) - 1] = 0;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	switch (level) {
	case 0: /* FATAL LOG */
		pr_crit("%s%pV", pbuf, &vaf);
		break;
	case 1:
		pr_err("%s%pV", pbuf, &vaf);
		break;
	case 2:
		pr_warn("%s%pV", pbuf, &vaf);
		break;
	default:
		pr_info("%s%pV", pbuf, &vaf);
		break;
	}

	va_end(va);
}

/*
 * This function is for formatting and logging log messages.
 * It is to be used when vha is not available and pci is available,
 * i.e., before host allocation. It formats the message and logs
 * it to the messages file. All the messages are logged irrespective
 * of the value of ql2xextended_error_logging.
 * parameters:
 * level: The level of the log messages to be printed in the
 *        messages file.
 * pdev:  Pointer to the struct pci_dev.
 * id:    This is a unique id for the level. It identifies the
 *        part of the code from where the message originated.
 * msg:   The message to be displayed.
 */
void
ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
    const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char pbuf[128];

	if (pdev == NULL)
		return;
	if (level > ql_errlev)
		return;

	/* <module-name> <dev-name>:<msg-id> Message */
	snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
	    QL_MSGHDR, dev_name(&(pdev->dev)), id);
	pbuf[sizeof(pbuf) - 1] = 0;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	switch (level) {
	case 0: /* FATAL LOG */
		pr_crit("%s%pV", pbuf, &vaf);
		break;
	case 1:
		pr_err("%s%pV", pbuf, &vaf);
		break;
	case 2:
		pr_warn("%s%pV", pbuf, &vaf);
		break;
	default:
		pr_info("%s%pV", pbuf, &vaf);
		break;
	}

	va_end(va);
}
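/*
 * Illustrative usage of the log helpers above (message ids are example
 * values). Unlike ql_dbg(), these are emitted whenever level <= ql_errlev,
 * regardless of ql2xextended_error_logging:
 *
 *	ql_log(ql_log_warn, vha, 0xd0f0,
 *	    "Unable to allocate firmware dump buffer.\n");
 *	ql_log_pci(ql_log_info, ha->pdev, 0xd0f1,
 *	    "PCI resources configured.\n");
 */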
"----------------------------\n"); 1897 1898 ql_dbg(level, vha, id, " "); 1899 for (cnt = 0; cnt < size;) { 1900 c = *b++; 1901 printk("%02x", (uint32_t) c); 1902 cnt++; 1903 if (!(cnt % 16)) 1904 printk("\n"); 1905 else 1906 printk(" "); 1907 } 1908 if (cnt % 16) 1909 ql_dbg(level, vha, id, "\n"); 1910 } 1911