1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2011 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 8 /* 9 * Table for showing the current message id in use for particular level 10 * Change this table for addition of log/debug messages. 11 * ---------------------------------------------------------------------- 12 * | Level | Last Value Used | Holes | 13 * ---------------------------------------------------------------------- 14 * | Module Init and Probe | 0x0116 | | 15 * | Mailbox commands | 0x1129 | | 16 * | Device Discovery | 0x2083 | | 17 * | Queue Command and IO tracing | 0x302e | 0x3008 | 18 * | DPC Thread | 0x401c | | 19 * | Async Events | 0x5059 | | 20 * | Timer Routines | 0x600d | | 21 * | User Space Interactions | 0x709d | | 22 * | Task Management | 0x8041 | | 23 * | AER/EEH | 0x900f | | 24 * | Virtual Port | 0xa007 | | 25 * | ISP82XX Specific | 0xb051 | | 26 * | MultiQ | 0xc00b | | 27 * | Misc | 0xd00b | | 28 * ---------------------------------------------------------------------- 29 */ 30 31 #include "qla_def.h" 32 33 #include <linux/delay.h> 34 35 static uint32_t ql_dbg_offset = 0x800; 36 37 static inline void 38 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump) 39 { 40 fw_dump->fw_major_version = htonl(ha->fw_major_version); 41 fw_dump->fw_minor_version = htonl(ha->fw_minor_version); 42 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version); 43 fw_dump->fw_attributes = htonl(ha->fw_attributes); 44 45 fw_dump->vendor = htonl(ha->pdev->vendor); 46 fw_dump->device = htonl(ha->pdev->device); 47 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor); 48 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device); 49 } 50 51 static inline void * 52 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr) 53 { 54 struct req_que *req = ha->req_q_map[0]; 55 struct rsp_que *rsp = ha->rsp_q_map[0]; 56 /* Request queue. */ 57 memcpy(ptr, req->ring, req->length * 58 sizeof(request_t)); 59 60 /* Response queue. */ 61 ptr += req->length * sizeof(request_t); 62 memcpy(ptr, rsp->ring, rsp->length * 63 sizeof(response_t)); 64 65 return ptr + (rsp->length * sizeof(response_t)); 66 } 67 68 static int 69 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, 70 uint32_t ram_dwords, void **nxt) 71 { 72 int rval; 73 uint32_t cnt, stat, timer, dwords, idx; 74 uint16_t mb0; 75 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 76 dma_addr_t dump_dma = ha->gid_list_dma; 77 uint32_t *dump = (uint32_t *)ha->gid_list; 78 79 rval = QLA_SUCCESS; 80 mb0 = 0; 81 82 WRT_REG_WORD(®->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); 83 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 84 85 dwords = GID_LIST_SIZE / 4; 86 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS; 87 cnt += dwords, addr += dwords) { 88 if (cnt + dwords > ram_dwords) 89 dwords = ram_dwords - cnt; 90 91 WRT_REG_WORD(®->mailbox1, LSW(addr)); 92 WRT_REG_WORD(®->mailbox8, MSW(addr)); 93 94 WRT_REG_WORD(®->mailbox2, MSW(dump_dma)); 95 WRT_REG_WORD(®->mailbox3, LSW(dump_dma)); 96 WRT_REG_WORD(®->mailbox6, MSW(MSD(dump_dma))); 97 WRT_REG_WORD(®->mailbox7, LSW(MSD(dump_dma))); 98 99 WRT_REG_WORD(®->mailbox4, MSW(dwords)); 100 WRT_REG_WORD(®->mailbox5, LSW(dwords)); 101 WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); 102 103 for (timer = 6000000; timer; timer--) { 104 /* Check for pending interrupts. */ 105 stat = RD_REG_DWORD(®->host_status); 106 if (stat & HSRX_RISC_INT) { 107 stat &= 0xff; 108 109 if (stat == 0x1 || stat == 0x2 || 110 stat == 0x10 || stat == 0x11) { 111 set_bit(MBX_INTERRUPT, 112 &ha->mbx_cmd_flags); 113 114 mb0 = RD_REG_WORD(®->mailbox0); 115 116 WRT_REG_DWORD(®->hccr, 117 HCCRX_CLR_RISC_INT); 118 RD_REG_DWORD(®->hccr); 119 break; 120 } 121 122 /* Clear this intr; it wasn't a mailbox intr */ 123 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 124 RD_REG_DWORD(®->hccr); 125 } 126 udelay(5); 127 } 128 129 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 130 rval = mb0 & MBS_MASK; 131 for (idx = 0; idx < dwords; idx++) 132 ram[cnt + idx] = swab32(dump[idx]); 133 } else { 134 rval = QLA_FUNCTION_FAILED; 135 } 136 } 137 138 *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL; 139 return rval; 140 } 141 142 static int 143 qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram, 144 uint32_t cram_size, void **nxt) 145 { 146 int rval; 147 148 /* Code RAM. */ 149 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt); 150 if (rval != QLA_SUCCESS) 151 return rval; 152 153 /* External Memory. */ 154 return qla24xx_dump_ram(ha, 0x100000, *nxt, 155 ha->fw_memory_size - 0x100000 + 1, nxt); 156 } 157 158 static uint32_t * 159 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, 160 uint32_t count, uint32_t *buf) 161 { 162 uint32_t __iomem *dmp_reg; 163 164 WRT_REG_DWORD(®->iobase_addr, iobase); 165 dmp_reg = ®->iobase_window; 166 while (count--) 167 *buf++ = htonl(RD_REG_DWORD(dmp_reg++)); 168 169 return buf; 170 } 171 172 static inline int 173 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) 174 { 175 int rval = QLA_SUCCESS; 176 uint32_t cnt; 177 178 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_PAUSE); 179 for (cnt = 30000; 180 ((RD_REG_DWORD(®->host_status) & HSRX_RISC_PAUSED) == 0) && 181 rval == QLA_SUCCESS; cnt--) { 182 if (cnt) 183 udelay(100); 184 else 185 rval = QLA_FUNCTION_TIMEOUT; 186 } 187 188 return rval; 189 } 190 191 static int 192 qla24xx_soft_reset(struct qla_hw_data *ha) 193 { 194 int rval = QLA_SUCCESS; 195 uint32_t cnt; 196 uint16_t mb0, wd; 197 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 198 199 /* Reset RISC. */ 200 WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); 201 for (cnt = 0; cnt < 30000; cnt++) { 202 if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) 203 break; 204 205 udelay(10); 206 } 207 208 WRT_REG_DWORD(®->ctrl_status, 209 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); 210 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); 211 212 udelay(100); 213 /* Wait for firmware to complete NVRAM accesses. */ 214 mb0 = (uint32_t) RD_REG_WORD(®->mailbox0); 215 for (cnt = 10000 ; cnt && mb0; cnt--) { 216 udelay(5); 217 mb0 = (uint32_t) RD_REG_WORD(®->mailbox0); 218 barrier(); 219 } 220 221 /* Wait for soft-reset to complete. */ 222 for (cnt = 0; cnt < 30000; cnt++) { 223 if ((RD_REG_DWORD(®->ctrl_status) & 224 CSRX_ISP_SOFT_RESET) == 0) 225 break; 226 227 udelay(10); 228 } 229 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); 230 RD_REG_DWORD(®->hccr); /* PCI Posting. */ 231 232 for (cnt = 30000; RD_REG_WORD(®->mailbox0) != 0 && 233 rval == QLA_SUCCESS; cnt--) { 234 if (cnt) 235 udelay(100); 236 else 237 rval = QLA_FUNCTION_TIMEOUT; 238 } 239 240 return rval; 241 } 242 243 static int 244 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, 245 uint32_t ram_words, void **nxt) 246 { 247 int rval; 248 uint32_t cnt, stat, timer, words, idx; 249 uint16_t mb0; 250 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 251 dma_addr_t dump_dma = ha->gid_list_dma; 252 uint16_t *dump = (uint16_t *)ha->gid_list; 253 254 rval = QLA_SUCCESS; 255 mb0 = 0; 256 257 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED); 258 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 259 260 words = GID_LIST_SIZE / 2; 261 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS; 262 cnt += words, addr += words) { 263 if (cnt + words > ram_words) 264 words = ram_words - cnt; 265 266 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr)); 267 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr)); 268 269 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma)); 270 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma)); 271 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma))); 272 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma))); 273 274 WRT_MAILBOX_REG(ha, reg, 4, words); 275 WRT_REG_WORD(®->hccr, HCCR_SET_HOST_INT); 276 277 for (timer = 6000000; timer; timer--) { 278 /* Check for pending interrupts. */ 279 stat = RD_REG_DWORD(®->u.isp2300.host_status); 280 if (stat & HSR_RISC_INT) { 281 stat &= 0xff; 282 283 if (stat == 0x1 || stat == 0x2) { 284 set_bit(MBX_INTERRUPT, 285 &ha->mbx_cmd_flags); 286 287 mb0 = RD_MAILBOX_REG(ha, reg, 0); 288 289 /* Release mailbox registers. */ 290 WRT_REG_WORD(®->semaphore, 0); 291 WRT_REG_WORD(®->hccr, 292 HCCR_CLR_RISC_INT); 293 RD_REG_WORD(®->hccr); 294 break; 295 } else if (stat == 0x10 || stat == 0x11) { 296 set_bit(MBX_INTERRUPT, 297 &ha->mbx_cmd_flags); 298 299 mb0 = RD_MAILBOX_REG(ha, reg, 0); 300 301 WRT_REG_WORD(®->hccr, 302 HCCR_CLR_RISC_INT); 303 RD_REG_WORD(®->hccr); 304 break; 305 } 306 307 /* clear this intr; it wasn't a mailbox intr */ 308 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 309 RD_REG_WORD(®->hccr); 310 } 311 udelay(5); 312 } 313 314 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 315 rval = mb0 & MBS_MASK; 316 for (idx = 0; idx < words; idx++) 317 ram[cnt + idx] = swab16(dump[idx]); 318 } else { 319 rval = QLA_FUNCTION_FAILED; 320 } 321 } 322 323 *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL; 324 return rval; 325 } 326 327 static inline void 328 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count, 329 uint16_t *buf) 330 { 331 uint16_t __iomem *dmp_reg = ®->u.isp2300.fb_cmd; 332 333 while (count--) 334 *buf++ = htons(RD_REG_WORD(dmp_reg++)); 335 } 336 337 static inline void * 338 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr) 339 { 340 if (!ha->eft) 341 return ptr; 342 343 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size)); 344 return ptr + ntohl(ha->fw_dump->eft_size); 345 } 346 347 static inline void * 348 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 349 { 350 uint32_t cnt; 351 uint32_t *iter_reg; 352 struct qla2xxx_fce_chain *fcec = ptr; 353 354 if (!ha->fce) 355 return ptr; 356 357 *last_chain = &fcec->type; 358 fcec->type = __constant_htonl(DUMP_CHAIN_FCE); 359 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + 360 fce_calc_size(ha->fce_bufs)); 361 fcec->size = htonl(fce_calc_size(ha->fce_bufs)); 362 fcec->addr_l = htonl(LSD(ha->fce_dma)); 363 fcec->addr_h = htonl(MSD(ha->fce_dma)); 364 365 iter_reg = fcec->eregs; 366 for (cnt = 0; cnt < 8; cnt++) 367 *iter_reg++ = htonl(ha->fce_mb[cnt]); 368 369 memcpy(iter_reg, ha->fce, ntohl(fcec->size)); 370 371 return iter_reg; 372 } 373 374 static inline void * 375 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 376 { 377 uint32_t cnt, que_idx; 378 uint8_t que_cnt; 379 struct qla2xxx_mq_chain *mq = ptr; 380 struct device_reg_25xxmq __iomem *reg; 381 382 if (!ha->mqenable) 383 return ptr; 384 385 mq = ptr; 386 *last_chain = &mq->type; 387 mq->type = __constant_htonl(DUMP_CHAIN_MQ); 388 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); 389 390 que_cnt = ha->max_req_queues > ha->max_rsp_queues ? 391 ha->max_req_queues : ha->max_rsp_queues; 392 mq->count = htonl(que_cnt); 393 for (cnt = 0; cnt < que_cnt; cnt++) { 394 reg = (struct device_reg_25xxmq *) ((void *) 395 ha->mqiobase + cnt * QLA_QUE_PAGE); 396 que_idx = cnt * 4; 397 mq->qregs[que_idx] = htonl(RD_REG_DWORD(®->req_q_in)); 398 mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(®->req_q_out)); 399 mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(®->rsp_q_in)); 400 mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(®->rsp_q_out)); 401 } 402 403 return ptr + sizeof(struct qla2xxx_mq_chain); 404 } 405 406 void 407 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) 408 { 409 struct qla_hw_data *ha = vha->hw; 410 411 if (rval != QLA_SUCCESS) { 412 ql_log(ql_log_warn, vha, 0xd000, 413 "Failed to dump firmware (%x).\n", rval); 414 ha->fw_dumped = 0; 415 } else { 416 ql_log(ql_log_info, vha, 0xd001, 417 "Firmware dump saved to temp buffer (%ld/%p).\n", 418 vha->host_no, ha->fw_dump); 419 ha->fw_dumped = 1; 420 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); 421 } 422 } 423 424 /** 425 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. 426 * @ha: HA context 427 * @hardware_locked: Called with the hardware_lock 428 */ 429 void 430 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 431 { 432 int rval; 433 uint32_t cnt; 434 struct qla_hw_data *ha = vha->hw; 435 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 436 uint16_t __iomem *dmp_reg; 437 unsigned long flags; 438 struct qla2300_fw_dump *fw; 439 void *nxt; 440 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 441 442 flags = 0; 443 444 if (!hardware_locked) 445 spin_lock_irqsave(&ha->hardware_lock, flags); 446 447 if (!ha->fw_dump) { 448 ql_log(ql_log_warn, vha, 0xd002, 449 "No buffer available for dump.\n"); 450 goto qla2300_fw_dump_failed; 451 } 452 453 if (ha->fw_dumped) { 454 ql_log(ql_log_warn, vha, 0xd003, 455 "Firmware has been previously dumped (%p) " 456 "-- ignoring request.\n", 457 ha->fw_dump); 458 goto qla2300_fw_dump_failed; 459 } 460 fw = &ha->fw_dump->isp.isp23; 461 qla2xxx_prep_dump(ha, ha->fw_dump); 462 463 rval = QLA_SUCCESS; 464 fw->hccr = htons(RD_REG_WORD(®->hccr)); 465 466 /* Pause RISC. */ 467 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); 468 if (IS_QLA2300(ha)) { 469 for (cnt = 30000; 470 (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && 471 rval == QLA_SUCCESS; cnt--) { 472 if (cnt) 473 udelay(100); 474 else 475 rval = QLA_FUNCTION_TIMEOUT; 476 } 477 } else { 478 RD_REG_WORD(®->hccr); /* PCI Posting. */ 479 udelay(10); 480 } 481 482 if (rval == QLA_SUCCESS) { 483 dmp_reg = ®->flash_address; 484 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 485 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 486 487 dmp_reg = ®->u.isp2300.req_q_in; 488 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++) 489 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 490 491 dmp_reg = ®->u.isp2300.mailbox0; 492 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 493 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 494 495 WRT_REG_WORD(®->ctrl_status, 0x40); 496 qla2xxx_read_window(reg, 32, fw->resp_dma_reg); 497 498 WRT_REG_WORD(®->ctrl_status, 0x50); 499 qla2xxx_read_window(reg, 48, fw->dma_reg); 500 501 WRT_REG_WORD(®->ctrl_status, 0x00); 502 dmp_reg = ®->risc_hw; 503 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 504 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 505 506 WRT_REG_WORD(®->pcr, 0x2000); 507 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); 508 509 WRT_REG_WORD(®->pcr, 0x2200); 510 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); 511 512 WRT_REG_WORD(®->pcr, 0x2400); 513 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); 514 515 WRT_REG_WORD(®->pcr, 0x2600); 516 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); 517 518 WRT_REG_WORD(®->pcr, 0x2800); 519 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); 520 521 WRT_REG_WORD(®->pcr, 0x2A00); 522 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); 523 524 WRT_REG_WORD(®->pcr, 0x2C00); 525 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); 526 527 WRT_REG_WORD(®->pcr, 0x2E00); 528 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); 529 530 WRT_REG_WORD(®->ctrl_status, 0x10); 531 qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg); 532 533 WRT_REG_WORD(®->ctrl_status, 0x20); 534 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); 535 536 WRT_REG_WORD(®->ctrl_status, 0x30); 537 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); 538 539 /* Reset RISC. */ 540 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); 541 for (cnt = 0; cnt < 30000; cnt++) { 542 if ((RD_REG_WORD(®->ctrl_status) & 543 CSR_ISP_SOFT_RESET) == 0) 544 break; 545 546 udelay(10); 547 } 548 } 549 550 if (!IS_QLA2300(ha)) { 551 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && 552 rval == QLA_SUCCESS; cnt--) { 553 if (cnt) 554 udelay(100); 555 else 556 rval = QLA_FUNCTION_TIMEOUT; 557 } 558 } 559 560 /* Get RISC SRAM. */ 561 if (rval == QLA_SUCCESS) 562 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram, 563 sizeof(fw->risc_ram) / 2, &nxt); 564 565 /* Get stack SRAM. */ 566 if (rval == QLA_SUCCESS) 567 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram, 568 sizeof(fw->stack_ram) / 2, &nxt); 569 570 /* Get data SRAM. */ 571 if (rval == QLA_SUCCESS) 572 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram, 573 ha->fw_memory_size - 0x11000 + 1, &nxt); 574 575 if (rval == QLA_SUCCESS) 576 qla2xxx_copy_queues(ha, nxt); 577 578 qla2xxx_dump_post_process(base_vha, rval); 579 580 qla2300_fw_dump_failed: 581 if (!hardware_locked) 582 spin_unlock_irqrestore(&ha->hardware_lock, flags); 583 } 584 585 /** 586 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware. 587 * @ha: HA context 588 * @hardware_locked: Called with the hardware_lock 589 */ 590 void 591 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 592 { 593 int rval; 594 uint32_t cnt, timer; 595 uint16_t risc_address; 596 uint16_t mb0, mb2; 597 struct qla_hw_data *ha = vha->hw; 598 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 599 uint16_t __iomem *dmp_reg; 600 unsigned long flags; 601 struct qla2100_fw_dump *fw; 602 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 603 604 risc_address = 0; 605 mb0 = mb2 = 0; 606 flags = 0; 607 608 if (!hardware_locked) 609 spin_lock_irqsave(&ha->hardware_lock, flags); 610 611 if (!ha->fw_dump) { 612 ql_log(ql_log_warn, vha, 0xd004, 613 "No buffer available for dump.\n"); 614 goto qla2100_fw_dump_failed; 615 } 616 617 if (ha->fw_dumped) { 618 ql_log(ql_log_warn, vha, 0xd005, 619 "Firmware has been previously dumped (%p) " 620 "-- ignoring request.\n", 621 ha->fw_dump); 622 goto qla2100_fw_dump_failed; 623 } 624 fw = &ha->fw_dump->isp.isp21; 625 qla2xxx_prep_dump(ha, ha->fw_dump); 626 627 rval = QLA_SUCCESS; 628 fw->hccr = htons(RD_REG_WORD(®->hccr)); 629 630 /* Pause RISC. */ 631 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); 632 for (cnt = 30000; (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && 633 rval == QLA_SUCCESS; cnt--) { 634 if (cnt) 635 udelay(100); 636 else 637 rval = QLA_FUNCTION_TIMEOUT; 638 } 639 if (rval == QLA_SUCCESS) { 640 dmp_reg = ®->flash_address; 641 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 642 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 643 644 dmp_reg = ®->u.isp2100.mailbox0; 645 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 646 if (cnt == 8) 647 dmp_reg = ®->u_end.isp2200.mailbox8; 648 649 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 650 } 651 652 dmp_reg = ®->u.isp2100.unused_2[0]; 653 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) 654 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 655 656 WRT_REG_WORD(®->ctrl_status, 0x00); 657 dmp_reg = ®->risc_hw; 658 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 659 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); 660 661 WRT_REG_WORD(®->pcr, 0x2000); 662 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); 663 664 WRT_REG_WORD(®->pcr, 0x2100); 665 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); 666 667 WRT_REG_WORD(®->pcr, 0x2200); 668 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); 669 670 WRT_REG_WORD(®->pcr, 0x2300); 671 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); 672 673 WRT_REG_WORD(®->pcr, 0x2400); 674 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); 675 676 WRT_REG_WORD(®->pcr, 0x2500); 677 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); 678 679 WRT_REG_WORD(®->pcr, 0x2600); 680 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); 681 682 WRT_REG_WORD(®->pcr, 0x2700); 683 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); 684 685 WRT_REG_WORD(®->ctrl_status, 0x10); 686 qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg); 687 688 WRT_REG_WORD(®->ctrl_status, 0x20); 689 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); 690 691 WRT_REG_WORD(®->ctrl_status, 0x30); 692 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); 693 694 /* Reset the ISP. */ 695 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); 696 } 697 698 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && 699 rval == QLA_SUCCESS; cnt--) { 700 if (cnt) 701 udelay(100); 702 else 703 rval = QLA_FUNCTION_TIMEOUT; 704 } 705 706 /* Pause RISC. */ 707 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) && 708 (RD_REG_WORD(®->mctr) & (BIT_1 | BIT_0)) != 0))) { 709 710 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); 711 for (cnt = 30000; 712 (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && 713 rval == QLA_SUCCESS; cnt--) { 714 if (cnt) 715 udelay(100); 716 else 717 rval = QLA_FUNCTION_TIMEOUT; 718 } 719 if (rval == QLA_SUCCESS) { 720 /* Set memory configuration and timing. */ 721 if (IS_QLA2100(ha)) 722 WRT_REG_WORD(®->mctr, 0xf1); 723 else 724 WRT_REG_WORD(®->mctr, 0xf2); 725 RD_REG_WORD(®->mctr); /* PCI Posting. */ 726 727 /* Release RISC. */ 728 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 729 } 730 } 731 732 if (rval == QLA_SUCCESS) { 733 /* Get RISC SRAM. */ 734 risc_address = 0x1000; 735 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD); 736 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 737 } 738 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS; 739 cnt++, risc_address++) { 740 WRT_MAILBOX_REG(ha, reg, 1, risc_address); 741 WRT_REG_WORD(®->hccr, HCCR_SET_HOST_INT); 742 743 for (timer = 6000000; timer != 0; timer--) { 744 /* Check for pending interrupts. */ 745 if (RD_REG_WORD(®->istatus) & ISR_RISC_INT) { 746 if (RD_REG_WORD(®->semaphore) & BIT_0) { 747 set_bit(MBX_INTERRUPT, 748 &ha->mbx_cmd_flags); 749 750 mb0 = RD_MAILBOX_REG(ha, reg, 0); 751 mb2 = RD_MAILBOX_REG(ha, reg, 2); 752 753 WRT_REG_WORD(®->semaphore, 0); 754 WRT_REG_WORD(®->hccr, 755 HCCR_CLR_RISC_INT); 756 RD_REG_WORD(®->hccr); 757 break; 758 } 759 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 760 RD_REG_WORD(®->hccr); 761 } 762 udelay(5); 763 } 764 765 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 766 rval = mb0 & MBS_MASK; 767 fw->risc_ram[cnt] = htons(mb2); 768 } else { 769 rval = QLA_FUNCTION_FAILED; 770 } 771 } 772 773 if (rval == QLA_SUCCESS) 774 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); 775 776 qla2xxx_dump_post_process(base_vha, rval); 777 778 qla2100_fw_dump_failed: 779 if (!hardware_locked) 780 spin_unlock_irqrestore(&ha->hardware_lock, flags); 781 } 782 783 void 784 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 785 { 786 int rval; 787 uint32_t cnt; 788 uint32_t risc_address; 789 struct qla_hw_data *ha = vha->hw; 790 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 791 uint32_t __iomem *dmp_reg; 792 uint32_t *iter_reg; 793 uint16_t __iomem *mbx_reg; 794 unsigned long flags; 795 struct qla24xx_fw_dump *fw; 796 uint32_t ext_mem_cnt; 797 void *nxt; 798 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 799 800 if (IS_QLA82XX(ha)) 801 return; 802 803 risc_address = ext_mem_cnt = 0; 804 flags = 0; 805 806 if (!hardware_locked) 807 spin_lock_irqsave(&ha->hardware_lock, flags); 808 809 if (!ha->fw_dump) { 810 ql_log(ql_log_warn, vha, 0xd006, 811 "No buffer available for dump.\n"); 812 goto qla24xx_fw_dump_failed; 813 } 814 815 if (ha->fw_dumped) { 816 ql_log(ql_log_warn, vha, 0xd007, 817 "Firmware has been previously dumped (%p) " 818 "-- ignoring request.\n", 819 ha->fw_dump); 820 goto qla24xx_fw_dump_failed; 821 } 822 fw = &ha->fw_dump->isp.isp24; 823 qla2xxx_prep_dump(ha, ha->fw_dump); 824 825 fw->host_status = htonl(RD_REG_DWORD(®->host_status)); 826 827 /* Pause RISC. */ 828 rval = qla24xx_pause_risc(reg); 829 if (rval != QLA_SUCCESS) 830 goto qla24xx_fw_dump_failed_0; 831 832 /* Host interface registers. */ 833 dmp_reg = ®->flash_addr; 834 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 835 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 836 837 /* Disable interrupts. */ 838 WRT_REG_DWORD(®->ictrl, 0); 839 RD_REG_DWORD(®->ictrl); 840 841 /* Shadow registers. */ 842 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 843 RD_REG_DWORD(®->iobase_addr); 844 WRT_REG_DWORD(®->iobase_select, 0xB0000000); 845 fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); 846 847 WRT_REG_DWORD(®->iobase_select, 0xB0100000); 848 fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); 849 850 WRT_REG_DWORD(®->iobase_select, 0xB0200000); 851 fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); 852 853 WRT_REG_DWORD(®->iobase_select, 0xB0300000); 854 fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); 855 856 WRT_REG_DWORD(®->iobase_select, 0xB0400000); 857 fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); 858 859 WRT_REG_DWORD(®->iobase_select, 0xB0500000); 860 fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); 861 862 WRT_REG_DWORD(®->iobase_select, 0xB0600000); 863 fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); 864 865 /* Mailbox registers. */ 866 mbx_reg = ®->mailbox0; 867 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 868 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 869 870 /* Transfer sequence registers. */ 871 iter_reg = fw->xseq_gp_reg; 872 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 873 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 874 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 875 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 876 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 877 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 878 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 879 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 880 881 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg); 882 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 883 884 /* Receive sequence registers. */ 885 iter_reg = fw->rseq_gp_reg; 886 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 887 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 888 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 889 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 890 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 891 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 892 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 893 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 894 895 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg); 896 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 897 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 898 899 /* Command DMA registers. */ 900 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 901 902 /* Queues. */ 903 iter_reg = fw->req0_dma_reg; 904 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 905 dmp_reg = ®->iobase_q; 906 for (cnt = 0; cnt < 7; cnt++) 907 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 908 909 iter_reg = fw->resp0_dma_reg; 910 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 911 dmp_reg = ®->iobase_q; 912 for (cnt = 0; cnt < 7; cnt++) 913 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 914 915 iter_reg = fw->req1_dma_reg; 916 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 917 dmp_reg = ®->iobase_q; 918 for (cnt = 0; cnt < 7; cnt++) 919 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 920 921 /* Transmit DMA registers. */ 922 iter_reg = fw->xmt0_dma_reg; 923 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 924 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 925 926 iter_reg = fw->xmt1_dma_reg; 927 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 928 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 929 930 iter_reg = fw->xmt2_dma_reg; 931 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 932 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 933 934 iter_reg = fw->xmt3_dma_reg; 935 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 936 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 937 938 iter_reg = fw->xmt4_dma_reg; 939 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 940 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 941 942 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 943 944 /* Receive DMA registers. */ 945 iter_reg = fw->rcvt0_data_dma_reg; 946 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 947 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 948 949 iter_reg = fw->rcvt1_data_dma_reg; 950 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 951 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 952 953 /* RISC registers. */ 954 iter_reg = fw->risc_gp_reg; 955 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 956 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 957 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 958 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 959 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 960 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 961 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 962 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 963 964 /* Local memory controller registers. */ 965 iter_reg = fw->lmc_reg; 966 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 967 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 968 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 969 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 970 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 971 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 972 qla24xx_read_window(reg, 0x3060, 16, iter_reg); 973 974 /* Fibre Protocol Module registers. */ 975 iter_reg = fw->fpm_hdw_reg; 976 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 977 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 978 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 979 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 980 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 981 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 982 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 983 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 984 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 985 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 986 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 987 qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 988 989 /* Frame Buffer registers. */ 990 iter_reg = fw->fb_hdw_reg; 991 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 992 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 993 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 994 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 995 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 996 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 997 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 998 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 999 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1000 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1001 qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1002 1003 rval = qla24xx_soft_reset(ha); 1004 if (rval != QLA_SUCCESS) 1005 goto qla24xx_fw_dump_failed_0; 1006 1007 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1008 &nxt); 1009 if (rval != QLA_SUCCESS) 1010 goto qla24xx_fw_dump_failed_0; 1011 1012 nxt = qla2xxx_copy_queues(ha, nxt); 1013 1014 qla24xx_copy_eft(ha, nxt); 1015 1016 qla24xx_fw_dump_failed_0: 1017 qla2xxx_dump_post_process(base_vha, rval); 1018 1019 qla24xx_fw_dump_failed: 1020 if (!hardware_locked) 1021 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1022 } 1023 1024 void 1025 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 1026 { 1027 int rval; 1028 uint32_t cnt; 1029 uint32_t risc_address; 1030 struct qla_hw_data *ha = vha->hw; 1031 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1032 uint32_t __iomem *dmp_reg; 1033 uint32_t *iter_reg; 1034 uint16_t __iomem *mbx_reg; 1035 unsigned long flags; 1036 struct qla25xx_fw_dump *fw; 1037 uint32_t ext_mem_cnt; 1038 void *nxt, *nxt_chain; 1039 uint32_t *last_chain = NULL; 1040 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1041 1042 risc_address = ext_mem_cnt = 0; 1043 flags = 0; 1044 1045 if (!hardware_locked) 1046 spin_lock_irqsave(&ha->hardware_lock, flags); 1047 1048 if (!ha->fw_dump) { 1049 ql_log(ql_log_warn, vha, 0xd008, 1050 "No buffer available for dump.\n"); 1051 goto qla25xx_fw_dump_failed; 1052 } 1053 1054 if (ha->fw_dumped) { 1055 ql_log(ql_log_warn, vha, 0xd009, 1056 "Firmware has been previously dumped (%p) " 1057 "-- ignoring request.\n", 1058 ha->fw_dump); 1059 goto qla25xx_fw_dump_failed; 1060 } 1061 fw = &ha->fw_dump->isp.isp25; 1062 qla2xxx_prep_dump(ha, ha->fw_dump); 1063 ha->fw_dump->version = __constant_htonl(2); 1064 1065 fw->host_status = htonl(RD_REG_DWORD(®->host_status)); 1066 1067 /* Pause RISC. */ 1068 rval = qla24xx_pause_risc(reg); 1069 if (rval != QLA_SUCCESS) 1070 goto qla25xx_fw_dump_failed_0; 1071 1072 /* Host/Risc registers. */ 1073 iter_reg = fw->host_risc_reg; 1074 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); 1075 qla24xx_read_window(reg, 0x7010, 16, iter_reg); 1076 1077 /* PCIe registers. */ 1078 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 1079 RD_REG_DWORD(®->iobase_addr); 1080 WRT_REG_DWORD(®->iobase_window, 0x01); 1081 dmp_reg = ®->iobase_c4; 1082 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); 1083 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); 1084 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 1085 fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); 1086 1087 WRT_REG_DWORD(®->iobase_window, 0x00); 1088 RD_REG_DWORD(®->iobase_window); 1089 1090 /* Host interface registers. */ 1091 dmp_reg = ®->flash_addr; 1092 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 1093 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 1094 1095 /* Disable interrupts. */ 1096 WRT_REG_DWORD(®->ictrl, 0); 1097 RD_REG_DWORD(®->ictrl); 1098 1099 /* Shadow registers. */ 1100 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 1101 RD_REG_DWORD(®->iobase_addr); 1102 WRT_REG_DWORD(®->iobase_select, 0xB0000000); 1103 fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1104 1105 WRT_REG_DWORD(®->iobase_select, 0xB0100000); 1106 fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1107 1108 WRT_REG_DWORD(®->iobase_select, 0xB0200000); 1109 fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1110 1111 WRT_REG_DWORD(®->iobase_select, 0xB0300000); 1112 fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1113 1114 WRT_REG_DWORD(®->iobase_select, 0xB0400000); 1115 fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1116 1117 WRT_REG_DWORD(®->iobase_select, 0xB0500000); 1118 fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1119 1120 WRT_REG_DWORD(®->iobase_select, 0xB0600000); 1121 fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1122 1123 WRT_REG_DWORD(®->iobase_select, 0xB0700000); 1124 fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1125 1126 WRT_REG_DWORD(®->iobase_select, 0xB0800000); 1127 fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1128 1129 WRT_REG_DWORD(®->iobase_select, 0xB0900000); 1130 fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1131 1132 WRT_REG_DWORD(®->iobase_select, 0xB0A00000); 1133 fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1134 1135 /* RISC I/O register. */ 1136 WRT_REG_DWORD(®->iobase_addr, 0x0010); 1137 fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); 1138 1139 /* Mailbox registers. */ 1140 mbx_reg = ®->mailbox0; 1141 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 1142 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 1143 1144 /* Transfer sequence registers. */ 1145 iter_reg = fw->xseq_gp_reg; 1146 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 1147 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 1148 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 1149 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 1150 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 1151 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 1152 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 1153 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 1154 1155 iter_reg = fw->xseq_0_reg; 1156 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); 1157 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); 1158 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); 1159 1160 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 1161 1162 /* Receive sequence registers. */ 1163 iter_reg = fw->rseq_gp_reg; 1164 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 1165 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 1166 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 1167 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 1168 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 1169 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 1170 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 1171 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 1172 1173 iter_reg = fw->rseq_0_reg; 1174 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 1175 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 1176 1177 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 1178 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 1179 1180 /* Auxiliary sequence registers. */ 1181 iter_reg = fw->aseq_gp_reg; 1182 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 1183 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 1184 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 1185 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 1186 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 1187 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 1188 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 1189 qla24xx_read_window(reg, 0xB070, 16, iter_reg); 1190 1191 iter_reg = fw->aseq_0_reg; 1192 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 1193 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 1194 1195 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 1196 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 1197 1198 /* Command DMA registers. */ 1199 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 1200 1201 /* Queues. */ 1202 iter_reg = fw->req0_dma_reg; 1203 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 1204 dmp_reg = ®->iobase_q; 1205 for (cnt = 0; cnt < 7; cnt++) 1206 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1207 1208 iter_reg = fw->resp0_dma_reg; 1209 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 1210 dmp_reg = ®->iobase_q; 1211 for (cnt = 0; cnt < 7; cnt++) 1212 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1213 1214 iter_reg = fw->req1_dma_reg; 1215 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 1216 dmp_reg = ®->iobase_q; 1217 for (cnt = 0; cnt < 7; cnt++) 1218 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1219 1220 /* Transmit DMA registers. */ 1221 iter_reg = fw->xmt0_dma_reg; 1222 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 1223 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 1224 1225 iter_reg = fw->xmt1_dma_reg; 1226 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 1227 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 1228 1229 iter_reg = fw->xmt2_dma_reg; 1230 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 1231 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 1232 1233 iter_reg = fw->xmt3_dma_reg; 1234 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 1235 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 1236 1237 iter_reg = fw->xmt4_dma_reg; 1238 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 1239 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 1240 1241 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 1242 1243 /* Receive DMA registers. */ 1244 iter_reg = fw->rcvt0_data_dma_reg; 1245 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 1246 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 1247 1248 iter_reg = fw->rcvt1_data_dma_reg; 1249 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 1250 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 1251 1252 /* RISC registers. */ 1253 iter_reg = fw->risc_gp_reg; 1254 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 1255 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 1256 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 1257 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 1258 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 1259 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 1260 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 1261 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 1262 1263 /* Local memory controller registers. */ 1264 iter_reg = fw->lmc_reg; 1265 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 1266 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 1267 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 1268 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 1269 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 1270 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 1271 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 1272 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 1273 1274 /* Fibre Protocol Module registers. */ 1275 iter_reg = fw->fpm_hdw_reg; 1276 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 1277 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 1278 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 1279 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 1280 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 1281 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 1282 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 1283 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 1284 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 1285 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 1286 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 1287 qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 1288 1289 /* Frame Buffer registers. */ 1290 iter_reg = fw->fb_hdw_reg; 1291 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 1292 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 1293 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 1294 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 1295 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 1296 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 1297 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 1298 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 1299 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1300 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1301 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1302 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 1303 1304 /* Multi queue registers */ 1305 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 1306 &last_chain); 1307 1308 rval = qla24xx_soft_reset(ha); 1309 if (rval != QLA_SUCCESS) 1310 goto qla25xx_fw_dump_failed_0; 1311 1312 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1313 &nxt); 1314 if (rval != QLA_SUCCESS) 1315 goto qla25xx_fw_dump_failed_0; 1316 1317 nxt = qla2xxx_copy_queues(ha, nxt); 1318 1319 nxt = qla24xx_copy_eft(ha, nxt); 1320 1321 /* Chain entries -- started with MQ. */ 1322 qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1323 if (last_chain) { 1324 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1325 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1326 } 1327 1328 qla25xx_fw_dump_failed_0: 1329 qla2xxx_dump_post_process(base_vha, rval); 1330 1331 qla25xx_fw_dump_failed: 1332 if (!hardware_locked) 1333 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1334 } 1335 1336 void 1337 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) 1338 { 1339 int rval; 1340 uint32_t cnt; 1341 uint32_t risc_address; 1342 struct qla_hw_data *ha = vha->hw; 1343 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1344 uint32_t __iomem *dmp_reg; 1345 uint32_t *iter_reg; 1346 uint16_t __iomem *mbx_reg; 1347 unsigned long flags; 1348 struct qla81xx_fw_dump *fw; 1349 uint32_t ext_mem_cnt; 1350 void *nxt, *nxt_chain; 1351 uint32_t *last_chain = NULL; 1352 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1353 1354 risc_address = ext_mem_cnt = 0; 1355 flags = 0; 1356 1357 if (!hardware_locked) 1358 spin_lock_irqsave(&ha->hardware_lock, flags); 1359 1360 if (!ha->fw_dump) { 1361 ql_log(ql_log_warn, vha, 0xd00a, 1362 "No buffer available for dump.\n"); 1363 goto qla81xx_fw_dump_failed; 1364 } 1365 1366 if (ha->fw_dumped) { 1367 ql_log(ql_log_warn, vha, 0xd00b, 1368 "Firmware has been previously dumped (%p) " 1369 "-- ignoring request.\n", 1370 ha->fw_dump); 1371 goto qla81xx_fw_dump_failed; 1372 } 1373 fw = &ha->fw_dump->isp.isp81; 1374 qla2xxx_prep_dump(ha, ha->fw_dump); 1375 1376 fw->host_status = htonl(RD_REG_DWORD(®->host_status)); 1377 1378 /* Pause RISC. */ 1379 rval = qla24xx_pause_risc(reg); 1380 if (rval != QLA_SUCCESS) 1381 goto qla81xx_fw_dump_failed_0; 1382 1383 /* Host/Risc registers. */ 1384 iter_reg = fw->host_risc_reg; 1385 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); 1386 qla24xx_read_window(reg, 0x7010, 16, iter_reg); 1387 1388 /* PCIe registers. */ 1389 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 1390 RD_REG_DWORD(®->iobase_addr); 1391 WRT_REG_DWORD(®->iobase_window, 0x01); 1392 dmp_reg = ®->iobase_c4; 1393 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); 1394 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); 1395 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 1396 fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); 1397 1398 WRT_REG_DWORD(®->iobase_window, 0x00); 1399 RD_REG_DWORD(®->iobase_window); 1400 1401 /* Host interface registers. */ 1402 dmp_reg = ®->flash_addr; 1403 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 1404 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); 1405 1406 /* Disable interrupts. */ 1407 WRT_REG_DWORD(®->ictrl, 0); 1408 RD_REG_DWORD(®->ictrl); 1409 1410 /* Shadow registers. */ 1411 WRT_REG_DWORD(®->iobase_addr, 0x0F70); 1412 RD_REG_DWORD(®->iobase_addr); 1413 WRT_REG_DWORD(®->iobase_select, 0xB0000000); 1414 fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1415 1416 WRT_REG_DWORD(®->iobase_select, 0xB0100000); 1417 fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1418 1419 WRT_REG_DWORD(®->iobase_select, 0xB0200000); 1420 fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1421 1422 WRT_REG_DWORD(®->iobase_select, 0xB0300000); 1423 fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1424 1425 WRT_REG_DWORD(®->iobase_select, 0xB0400000); 1426 fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1427 1428 WRT_REG_DWORD(®->iobase_select, 0xB0500000); 1429 fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1430 1431 WRT_REG_DWORD(®->iobase_select, 0xB0600000); 1432 fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1433 1434 WRT_REG_DWORD(®->iobase_select, 0xB0700000); 1435 fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1436 1437 WRT_REG_DWORD(®->iobase_select, 0xB0800000); 1438 fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1439 1440 WRT_REG_DWORD(®->iobase_select, 0xB0900000); 1441 fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1442 1443 WRT_REG_DWORD(®->iobase_select, 0xB0A00000); 1444 fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); 1445 1446 /* RISC I/O register. */ 1447 WRT_REG_DWORD(®->iobase_addr, 0x0010); 1448 fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); 1449 1450 /* Mailbox registers. */ 1451 mbx_reg = ®->mailbox0; 1452 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 1453 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); 1454 1455 /* Transfer sequence registers. */ 1456 iter_reg = fw->xseq_gp_reg; 1457 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); 1458 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); 1459 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); 1460 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); 1461 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); 1462 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); 1463 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); 1464 qla24xx_read_window(reg, 0xBF70, 16, iter_reg); 1465 1466 iter_reg = fw->xseq_0_reg; 1467 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); 1468 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); 1469 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); 1470 1471 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); 1472 1473 /* Receive sequence registers. */ 1474 iter_reg = fw->rseq_gp_reg; 1475 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); 1476 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); 1477 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); 1478 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); 1479 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); 1480 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); 1481 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); 1482 qla24xx_read_window(reg, 0xFF70, 16, iter_reg); 1483 1484 iter_reg = fw->rseq_0_reg; 1485 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); 1486 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); 1487 1488 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); 1489 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); 1490 1491 /* Auxiliary sequence registers. */ 1492 iter_reg = fw->aseq_gp_reg; 1493 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); 1494 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); 1495 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); 1496 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); 1497 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); 1498 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); 1499 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); 1500 qla24xx_read_window(reg, 0xB070, 16, iter_reg); 1501 1502 iter_reg = fw->aseq_0_reg; 1503 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); 1504 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); 1505 1506 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); 1507 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); 1508 1509 /* Command DMA registers. */ 1510 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); 1511 1512 /* Queues. */ 1513 iter_reg = fw->req0_dma_reg; 1514 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); 1515 dmp_reg = ®->iobase_q; 1516 for (cnt = 0; cnt < 7; cnt++) 1517 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1518 1519 iter_reg = fw->resp0_dma_reg; 1520 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); 1521 dmp_reg = ®->iobase_q; 1522 for (cnt = 0; cnt < 7; cnt++) 1523 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1524 1525 iter_reg = fw->req1_dma_reg; 1526 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); 1527 dmp_reg = ®->iobase_q; 1528 for (cnt = 0; cnt < 7; cnt++) 1529 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); 1530 1531 /* Transmit DMA registers. */ 1532 iter_reg = fw->xmt0_dma_reg; 1533 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); 1534 qla24xx_read_window(reg, 0x7610, 16, iter_reg); 1535 1536 iter_reg = fw->xmt1_dma_reg; 1537 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); 1538 qla24xx_read_window(reg, 0x7630, 16, iter_reg); 1539 1540 iter_reg = fw->xmt2_dma_reg; 1541 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); 1542 qla24xx_read_window(reg, 0x7650, 16, iter_reg); 1543 1544 iter_reg = fw->xmt3_dma_reg; 1545 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); 1546 qla24xx_read_window(reg, 0x7670, 16, iter_reg); 1547 1548 iter_reg = fw->xmt4_dma_reg; 1549 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); 1550 qla24xx_read_window(reg, 0x7690, 16, iter_reg); 1551 1552 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); 1553 1554 /* Receive DMA registers. */ 1555 iter_reg = fw->rcvt0_data_dma_reg; 1556 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); 1557 qla24xx_read_window(reg, 0x7710, 16, iter_reg); 1558 1559 iter_reg = fw->rcvt1_data_dma_reg; 1560 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); 1561 qla24xx_read_window(reg, 0x7730, 16, iter_reg); 1562 1563 /* RISC registers. */ 1564 iter_reg = fw->risc_gp_reg; 1565 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); 1566 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); 1567 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); 1568 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); 1569 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); 1570 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); 1571 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); 1572 qla24xx_read_window(reg, 0x0F70, 16, iter_reg); 1573 1574 /* Local memory controller registers. */ 1575 iter_reg = fw->lmc_reg; 1576 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); 1577 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); 1578 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); 1579 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); 1580 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); 1581 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); 1582 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); 1583 qla24xx_read_window(reg, 0x3070, 16, iter_reg); 1584 1585 /* Fibre Protocol Module registers. */ 1586 iter_reg = fw->fpm_hdw_reg; 1587 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); 1588 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); 1589 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); 1590 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); 1591 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); 1592 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); 1593 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); 1594 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); 1595 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); 1596 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); 1597 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); 1598 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); 1599 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); 1600 qla24xx_read_window(reg, 0x40D0, 16, iter_reg); 1601 1602 /* Frame Buffer registers. */ 1603 iter_reg = fw->fb_hdw_reg; 1604 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); 1605 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); 1606 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); 1607 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); 1608 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); 1609 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); 1610 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); 1611 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); 1612 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); 1613 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); 1614 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1615 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); 1616 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 1617 1618 /* Multi queue registers */ 1619 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, 1620 &last_chain); 1621 1622 rval = qla24xx_soft_reset(ha); 1623 if (rval != QLA_SUCCESS) 1624 goto qla81xx_fw_dump_failed_0; 1625 1626 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), 1627 &nxt); 1628 if (rval != QLA_SUCCESS) 1629 goto qla81xx_fw_dump_failed_0; 1630 1631 nxt = qla2xxx_copy_queues(ha, nxt); 1632 1633 nxt = qla24xx_copy_eft(ha, nxt); 1634 1635 /* Chain entries -- started with MQ. */ 1636 qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1637 if (last_chain) { 1638 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1639 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1640 } 1641 1642 qla81xx_fw_dump_failed_0: 1643 qla2xxx_dump_post_process(base_vha, rval); 1644 1645 qla81xx_fw_dump_failed: 1646 if (!hardware_locked) 1647 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1648 } 1649 1650 /****************************************************************************/ 1651 /* Driver Debug Functions. */ 1652 /****************************************************************************/ 1653 /* 1654 * This function is for formatting and logging debug information. 1655 * It is to be used when vha is available. It formats the message 1656 * and logs it to the messages file. 1657 * parameters: 1658 * level: The level of the debug messages to be printed. 1659 * If ql2xextended_error_logging value is correctly set, 1660 * this message will appear in the messages file. 1661 * vha: Pointer to the scsi_qla_host_t. 1662 * id: This is a unique identifier for the level. It identifies the 1663 * part of the code from where the message originated. 1664 * msg: The message to be displayed. 1665 */ 1666 void 1667 ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) { 1668 1669 char pbuf[QL_DBG_BUF_LEN]; 1670 va_list ap; 1671 uint32_t len; 1672 struct pci_dev *pdev = NULL; 1673 1674 memset(pbuf, 0, QL_DBG_BUF_LEN); 1675 1676 va_start(ap, msg); 1677 1678 if ((level & ql2xextended_error_logging) == level) { 1679 if (vha != NULL) { 1680 pdev = vha->hw->pdev; 1681 /* <module-name> <pci-name> <msg-id>:<host> Message */ 1682 sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR, 1683 dev_name(&(pdev->dev)), id + ql_dbg_offset, 1684 vha->host_no); 1685 } else 1686 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, 1687 "0000:00:00.0", id + ql_dbg_offset); 1688 1689 len = strlen(pbuf); 1690 vsprintf(pbuf+len, msg, ap); 1691 pr_warning("%s", pbuf); 1692 } 1693 1694 va_end(ap); 1695 1696 } 1697 1698 /* 1699 * This function is for formatting and logging debug information. 1700 * It is to be used when vha is not available and pci is availble, 1701 * i.e., before host allocation. It formats the message and logs it 1702 * to the messages file. 1703 * parameters: 1704 * level: The level of the debug messages to be printed. 1705 * If ql2xextended_error_logging value is correctly set, 1706 * this message will appear in the messages file. 1707 * pdev: Pointer to the struct pci_dev. 1708 * id: This is a unique id for the level. It identifies the part 1709 * of the code from where the message originated. 1710 * msg: The message to be displayed. 1711 */ 1712 void 1713 ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) { 1714 1715 char pbuf[QL_DBG_BUF_LEN]; 1716 va_list ap; 1717 uint32_t len; 1718 1719 if (pdev == NULL) 1720 return; 1721 1722 memset(pbuf, 0, QL_DBG_BUF_LEN); 1723 1724 va_start(ap, msg); 1725 1726 if ((level & ql2xextended_error_logging) == level) { 1727 /* <module-name> <dev-name>:<msg-id> Message */ 1728 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, 1729 dev_name(&(pdev->dev)), id + ql_dbg_offset); 1730 1731 len = strlen(pbuf); 1732 vsprintf(pbuf+len, msg, ap); 1733 pr_warning("%s", pbuf); 1734 } 1735 1736 va_end(ap); 1737 1738 } 1739 1740 /* 1741 * This function is for formatting and logging log messages. 1742 * It is to be used when vha is available. It formats the message 1743 * and logs it to the messages file. All the messages will be logged 1744 * irrespective of value of ql2xextended_error_logging. 1745 * parameters: 1746 * level: The level of the log messages to be printed in the 1747 * messages file. 1748 * vha: Pointer to the scsi_qla_host_t 1749 * id: This is a unique id for the level. It identifies the 1750 * part of the code from where the message originated. 1751 * msg: The message to be displayed. 1752 */ 1753 void 1754 ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) { 1755 1756 char pbuf[QL_DBG_BUF_LEN]; 1757 va_list ap; 1758 uint32_t len; 1759 struct pci_dev *pdev = NULL; 1760 1761 memset(pbuf, 0, QL_DBG_BUF_LEN); 1762 1763 va_start(ap, msg); 1764 1765 if (level <= ql_errlev) { 1766 if (vha != NULL) { 1767 pdev = vha->hw->pdev; 1768 /* <module-name> <msg-id>:<host> Message */ 1769 sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR, 1770 dev_name(&(pdev->dev)), id, vha->host_no); 1771 } else 1772 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, 1773 "0000:00:00.0", id); 1774 1775 len = strlen(pbuf); 1776 vsprintf(pbuf+len, msg, ap); 1777 1778 switch (level) { 1779 case 0: /* FATAL LOG */ 1780 pr_crit("%s", pbuf); 1781 break; 1782 case 1: 1783 pr_err("%s", pbuf); 1784 break; 1785 case 2: 1786 pr_warn("%s", pbuf); 1787 break; 1788 default: 1789 pr_info("%s", pbuf); 1790 break; 1791 } 1792 } 1793 1794 va_end(ap); 1795 } 1796 1797 /* 1798 * This function is for formatting and logging log messages. 1799 * It is to be used when vha is not available and pci is availble, 1800 * i.e., before host allocation. It formats the message and logs 1801 * it to the messages file. All the messages are logged irrespective 1802 * of the value of ql2xextended_error_logging. 1803 * parameters: 1804 * level: The level of the log messages to be printed in the 1805 * messages file. 1806 * pdev: Pointer to the struct pci_dev. 1807 * id: This is a unique id for the level. It identifies the 1808 * part of the code from where the message originated. 1809 * msg: The message to be displayed. 1810 */ 1811 void 1812 ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) { 1813 1814 char pbuf[QL_DBG_BUF_LEN]; 1815 va_list ap; 1816 uint32_t len; 1817 1818 if (pdev == NULL) 1819 return; 1820 1821 memset(pbuf, 0, QL_DBG_BUF_LEN); 1822 1823 va_start(ap, msg); 1824 1825 if (level <= ql_errlev) { 1826 /* <module-name> <dev-name>:<msg-id> Message */ 1827 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, 1828 dev_name(&(pdev->dev)), id); 1829 1830 len = strlen(pbuf); 1831 vsprintf(pbuf+len, msg, ap); 1832 switch (level) { 1833 case 0: /* FATAL LOG */ 1834 pr_crit("%s", pbuf); 1835 break; 1836 case 1: 1837 pr_err("%s", pbuf); 1838 break; 1839 case 2: 1840 pr_warn("%s", pbuf); 1841 break; 1842 default: 1843 pr_info("%s", pbuf); 1844 break; 1845 } 1846 } 1847 1848 va_end(ap); 1849 } 1850 1851 void 1852 ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id) 1853 { 1854 int i; 1855 struct qla_hw_data *ha = vha->hw; 1856 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1857 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 1858 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 1859 uint16_t __iomem *mbx_reg; 1860 1861 if ((level & ql2xextended_error_logging) == level) { 1862 1863 if (IS_QLA82XX(ha)) 1864 mbx_reg = ®82->mailbox_in[0]; 1865 else if (IS_FWI2_CAPABLE(ha)) 1866 mbx_reg = ®24->mailbox0; 1867 else 1868 mbx_reg = MAILBOX_REG(ha, reg, 0); 1869 1870 ql_dbg(level, vha, id, "Mailbox registers:\n"); 1871 for (i = 0; i < 6; i++) 1872 ql_dbg(level, vha, id, 1873 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++)); 1874 } 1875 } 1876 1877 1878 void 1879 ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id, 1880 uint8_t *b, uint32_t size) 1881 { 1882 uint32_t cnt; 1883 uint8_t c; 1884 if ((level & ql2xextended_error_logging) == level) { 1885 1886 ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 " 1887 "9 Ah Bh Ch Dh Eh Fh\n"); 1888 ql_dbg(level, vha, id, "----------------------------------" 1889 "----------------------------\n"); 1890 1891 ql_dbg(level, vha, id, ""); 1892 for (cnt = 0; cnt < size;) { 1893 c = *b++; 1894 printk("%02x", (uint32_t) c); 1895 cnt++; 1896 if (!(cnt % 16)) 1897 printk("\n"); 1898 else 1899 printk(" "); 1900 } 1901 if (cnt % 16) 1902 ql_dbg(level, vha, id, "\n"); 1903 } 1904 } 1905