/*
 *  linux/drivers/scsi/esas2r/esas2r_io.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include "esas2r.h"

void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
{
        struct esas2r_target *t = NULL;
        struct esas2r_request *startrq = rq;
        unsigned long flags;

        if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) {
                if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
                        rq->req_stat = RS_SEL2;
                else
                        rq->req_stat = RS_DEGRADED;
        } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
                t = a->targetdb + rq->target_id;

                if (unlikely(t >= a->targetdb_end
                             || !(t->flags & TF_USED))) {
                        rq->req_stat = RS_SEL;
                } else {
                        /* copy in the target ID. */
                        rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);

                        /*
                         * Test if we want to report RS_SEL for a missing
                         * target.  Note that if AF_DISC_PENDING is set then
                         * this request will go on the defer queue.
                         */
                        if (unlikely(t->target_state != TS_PRESENT
                                     && !(a->flags & AF_DISC_PENDING)))
                                rq->req_stat = RS_SEL;
                }
        }

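        /*
         * Any check above that fails leaves an error status in the request
         * (RS_SEL, RS_SEL2 or RS_DEGRADED); such requests are completed
         * immediately below and never reach the firmware.
         */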
        if (unlikely(rq->req_stat != RS_PENDING)) {
                esas2r_complete_request(a, rq);
                return;
        }

        esas2r_trace("rq=%p", rq);
        esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);

        if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
                esas2r_trace("rq->target_id=%d", rq->target_id);
                esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
        }

        spin_lock_irqsave(&a->queue_lock, flags);

        if (likely(list_empty(&a->defer_list) &&
                   !(a->flags &
                     (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING))))
                esas2r_local_start_request(a, startrq);
        else
                list_add_tail(&startrq->req_list, &a->defer_list);

        spin_unlock_irqrestore(&a->queue_lock, flags);
}

/*
 * Starts the specified request.  All requests have RS_PENDING set when this
 * routine is called.  The caller is usually esas2r_start_request, but
 * esas2r_do_deferred_processes will also start requests that were deferred.
 *
 * The caller must ensure that requests can be started.
 *
 * esas2r_start_request will defer a request if there are already requests
 * waiting or there is a chip reset pending.  Once the reset condition clears,
 * esas2r_do_deferred_processes will call this function to start the request.
 *
 * When a request is started, it is placed on the active list and queued to
 * the controller.
 */
void esas2r_local_start_request(struct esas2r_adapter *a,
                                struct esas2r_request *rq)
{
        esas2r_trace_enter();
        esas2r_trace("rq=%p", rq);
        esas2r_trace("rq->vrq:%p", rq->vrq);
        esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);

        if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
                     && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
                esas2r_lock_set_flags(&a->flags, AF_FLASHING);

        list_add_tail(&rq->req_list, &a->active_list);
        esas2r_start_vda_request(a, rq);
        esas2r_trace_exit();
        return;
}

void esas2r_start_vda_request(struct esas2r_adapter *a,
                              struct esas2r_request *rq)
{
        struct esas2r_inbound_list_source_entry *element;
        u32 dw;

        rq->req_stat = RS_STARTED;

        /*
         * Calculate the inbound list entry location and the current state of
         * the toggle bit.
         */
        a->last_write++;
        if (a->last_write >= a->list_size) {
                a->last_write = 0;
                /* update the toggle bit */
                if (a->flags & AF_COMM_LIST_TOGGLE)
                        esas2r_lock_clear_flags(&a->flags,
                                                AF_COMM_LIST_TOGGLE);
                else
                        esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
        }

        element =
                (struct esas2r_inbound_list_source_entry *)a->inbound_list_md.
                virt_addr
                + a->last_write;

        /* Set the VDA request size if it was never modified */
        if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
                rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));

        element->address = cpu_to_le64(rq->vrq_md->phys_addr);
        element->length = cpu_to_le32(rq->vda_req_sz);

        /* Update the write pointer */
        dw = a->last_write;

        if (a->flags & AF_COMM_LIST_TOGGLE)
                dw |= MU_ILW_TOGGLE;

        esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
        esas2r_trace("dw:%x", dw);
        esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
        esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
}

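/*
 * Illustrative note (the list size used here is hypothetical): with a
 * 256-entry inbound list, last_write advances 0, 1, ..., 255 and then wraps
 * back to 0.  Each wrap flips AF_COMM_LIST_TOGGLE, and the current toggle
 * state is OR'd into the write pointer as MU_ILW_TOGGLE, presumably so the
 * firmware can distinguish newly written entries from stale ones after a
 * wrap.
 */
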
/*
 * Build the scatter/gather list for an I/O request according to the
 * specifications placed in the s/g context.  The caller must initialize
 * the context prior to the initial call by calling esas2r_sgc_init().
 */
bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
                              struct esas2r_sg_context *sgc)
{
        struct esas2r_request *rq = sgc->first_req;
        union atto_vda_req *vrq = rq->vrq;

        while (sgc->length) {
                u32 rem = 0;
                u64 addr;
                u32 len;

                len = (*sgc->get_phys_addr)(sgc, &addr);

                if (unlikely(len == 0))
                        return false;

                /* if current length is more than what's left, stop there */
                if (unlikely(len > sgc->length))
                        len = sgc->length;

another_entry:
                /* limit to a round number less than the maximum length */
                if (len > SGE_LEN_MAX) {
                        /*
                         * Save the remainder of the split.  Whenever we limit
                         * an entry we come back around to build entries out
                         * of the leftover.  We do this to prevent multiple
                         * calls to the get_phys_addr() function for an SGE
                         * that is too large.
                         */
                        rem = len - SGE_LEN_MAX;
                        len = SGE_LEN_MAX;
                }

                /* See if we need to allocate a new SGL */
                if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
                        u8 sgelen;
                        struct esas2r_mem_desc *sgl;

                        /*
                         * If no SGLs are available, return failure.  The
                         * caller can call us later with the current context
                         * to pick up here.
                         */
                        sgl = esas2r_alloc_sgl(a);

                        if (unlikely(sgl == NULL))
                                return false;

                        /* Calculate the length of the last SGE filled in */
                        sgelen = (u8)((u8 *)sgc->sge.a64.curr
                                      - (u8 *)sgc->sge.a64.last);

                        /*
                         * Copy the last SGE filled in to the first entry of
                         * the new SGL to make room for the chain entry.
                         */
                        memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);

                        /* Figure out the new curr pointer in the new segment */
                        sgc->sge.a64.curr =
                                (struct atto_vda_sge *)((u8 *)sgl->virt_addr +
                                                        sgelen);

                        /* Set the limit pointer and build the chain entry */
                        sgc->sge.a64.limit =
                                (struct atto_vda_sge *)((u8 *)sgl->virt_addr
                                                        + sgl_page_size
                                                        - sizeof(struct
                                                                 atto_vda_sge));
                        sgc->sge.a64.last->length = cpu_to_le32(
                                SGE_CHAIN | SGE_ADDR_64);
                        sgc->sge.a64.last->address =
                                cpu_to_le64(sgl->phys_addr);

                        /*
                         * Now, if there was a previous chain entry, then
                         * update it to contain the length of this segment
                         * and the size of this chain.  Otherwise this is the
                         * first SGL, so set the chain_offset in the request.
                         */
                        if (sgc->sge.a64.chain) {
                                sgc->sge.a64.chain->length |=
                                        cpu_to_le32(
                                                ((u8 *)(sgc->sge.a64.last + 1)
                                                 - (u8 *)rq->sg_table->
                                                 virt_addr)
                                                + sizeof(struct atto_vda_sge) *
                                                LOBIT(SGE_CHAIN_SZ));
                        } else {
                                vrq->scsi.chain_offset = (u8)
                                        ((u8 *)sgc->sge.a64.last -
                                         (u8 *)vrq);

                                /*
                                 * This is the first SGL, so set the
                                 * chain_offset and the VDA request size in
                                 * the request.
                                 */
                                rq->vda_req_sz =
                                        (vrq->scsi.chain_offset +
                                         sizeof(struct atto_vda_sge) +
                                         3)
                                        / sizeof(u32);
                        }

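                        /*
                         * Worked example (sizes are illustrative only): if
                         * the chain entry begins 96 bytes into the VDA
                         * request and an SGE is 12 bytes, the request size
                         * becomes (96 + 12 + 3) / 4 = 27 dwords; the "+ 3"
                         * rounds the byte count up to a whole dword.
                         */
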
                        /*
                         * Remember this so when we get a new SGL filled in we
                         * can update the length of this chain entry.
                         */
                        sgc->sge.a64.chain = sgc->sge.a64.last;

                        /* Now link the new SGL onto the primary request. */
                        list_add(&sgl->next_desc, &rq->sg_table_head);
                }

                /* Update last one filled in */
                sgc->sge.a64.last = sgc->sge.a64.curr;

                /* Build the new SGE and update the S/G context */
                sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
                sgc->sge.a64.curr->address = cpu_to_le64(addr);
                sgc->sge.a64.curr++;
                sgc->cur_offset += len;
                sgc->length -= len;

                /*
                 * Check if we previously split an entry.  If so we have to
                 * pick up where we left off.
                 */
                if (rem) {
                        addr += len;
                        len = rem;
                        rem = 0;
                        goto another_entry;
                }
        }

        /* Mark the end of the SGL */
        sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);

        /*
         * If there was a previous chain entry, update the length to indicate
         * the length of this last segment.
         */
        if (sgc->sge.a64.chain) {
                sgc->sge.a64.chain->length |= cpu_to_le32(
                        ((u8 *)(sgc->sge.a64.curr) -
                         (u8 *)rq->sg_table->virt_addr));
        } else {
                u16 reqsize;

                /*
                 * The entire VDA request was not used, so set the size of
                 * the VDA request to be DMA'd.
                 */
                reqsize =
                        ((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
                         + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);

                /*
                 * Only update the request size if it is bigger than what is
                 * already there.  We can come in here twice for some
                 * management commands.
                 */
                if (reqsize > rq->vda_req_sz)
                        rq->vda_req_sz = reqsize;
        }
        return true;
}

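/*
 * Note: which of the two list builders is used depends on the firmware API
 * version negotiated in esas2r_handle_pending_reset() below; a->build_sgl
 * points at esas2r_build_sg_list_sge for version 0 (legacy SGE) firmware
 * and at esas2r_build_sg_list_prd for version 1 firmware.
 */
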
/*
 * Create PRD list for each I-block consumed by the command.  This routine
 * determines how much data is required from each I-block being consumed
 * by the command.  The first and last I-blocks can be partials and all of
 * the I-blocks in between are for a full I-block of data.
 *
 * The interleave size is used to determine the number of bytes in the 1st
 * I-block; the remaining I-blocks carry what remains of the transfer.
 */
static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
                                  struct esas2r_sg_context *sgc)
{
        struct esas2r_request *rq = sgc->first_req;
        u64 addr;
        u32 len;
        struct esas2r_mem_desc *sgl;
        u32 numchain = 1;
        u32 rem = 0;

        while (sgc->length) {
                /* Get the next address/length pair */

                len = (*sgc->get_phys_addr)(sgc, &addr);

                if (unlikely(len == 0))
                        return false;

                /* If current length is more than what's left, stop there */

                if (unlikely(len > sgc->length))
                        len = sgc->length;

another_entry:
                /* Limit to a round number less than the maximum length */

                if (len > PRD_LEN_MAX) {
                        /*
                         * Save the remainder of the split.  Whenever we limit
                         * an entry we come back around to build entries out
                         * of the leftover.  We do this to prevent multiple
                         * calls to the get_phys_addr() function for an SGE
                         * that is too large.
                         */
                        rem = len - PRD_LEN_MAX;
                        len = PRD_LEN_MAX;
                }

                /* See if we need to allocate a new SGL */
                if (sgc->sge.prd.sge_cnt == 0) {
                        if (len == sgc->length) {
                                /*
                                 * We only have 1 PRD entry left.  It can be
                                 * placed where the chain entry would have
                                 * gone.
                                 */

                                /* Build the simple SGE */
                                sgc->sge.prd.curr->ctl_len = cpu_to_le32(
                                        PRD_DATA | len);
                                sgc->sge.prd.curr->address = cpu_to_le64(addr);

                                /* Adjust length related fields */
                                sgc->cur_offset += len;
                                sgc->length -= len;

                                /* We use the reserved chain entry for data */
                                numchain = 0;

                                break;
                        }

                        if (sgc->sge.prd.chain) {
                                /*
                                 * Record the entry count of the current
                                 * (now full) SGL in the previous chain
                                 * entry.
                                 */
                                sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
                                        sgc->sge.prd.sgl_max_cnt);
                        }

                        /*
                         * If no SGLs are available, return failure.  The
                         * caller can call us later with the current context
                         * to pick up here.
                         */

                        sgl = esas2r_alloc_sgl(a);

                        if (unlikely(sgl == NULL))
                                return false;

                        /*
                         * Link the new SGL onto the chain.
                         * They are in reverse order.
                         */
                        list_add(&sgl->next_desc, &rq->sg_table_head);

                        /*
                         * An SGL was just filled in and we are starting
                         * a new SGL.  Prime the chain of the ending SGL with
                         * info that points to the new SGL.  The length gets
                         * filled in when the new SGL is filled or ended.
                         */

                        sgc->sge.prd.chain = sgc->sge.prd.curr;

                        sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
                        sgc->sge.prd.chain->address =
                                cpu_to_le64(sgl->phys_addr);

                        /*
                         * Start a new segment.
                         * Take one away and save for the chain SGE.
                         */

                        sgc->sge.prd.curr =
                                (struct atto_physical_region_description *)sgl
                                ->virt_addr;
                        sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
                }

                sgc->sge.prd.sge_cnt--;

                /* Build the simple SGE */
                sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
                sgc->sge.prd.curr->address = cpu_to_le64(addr);

                /* Used another element.  Point to the next one */

                sgc->sge.prd.curr++;

                /* Adjust length related fields */

                sgc->cur_offset += len;
                sgc->length -= len;

                /*
                 * Check if we previously split an entry.  If so we have to
                 * pick up where we left off.
                 */

                if (rem) {
                        addr += len;
                        len = rem;
                        rem = 0;
                        goto another_entry;
                }
        }

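        /*
         * Patch the entry count into the last chain entry (if any):
         * sgl_max_cnt - sge_cnt is the number of PRD slots consumed in the
         * final SGL, and numchain adjusts for whether the reserved chain
         * slot was used for data above.
         */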
        if (!list_empty(&rq->sg_table_head)) {
                if (sgc->sge.prd.chain) {
                        sgc->sge.prd.chain->ctl_len |=
                                cpu_to_le32(sgc->sge.prd.sgl_max_cnt
                                            - sgc->sge.prd.sge_cnt
                                            - numchain);
                }
        }

        return true;
}

bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
                              struct esas2r_sg_context *sgc)
{
        struct esas2r_request *rq = sgc->first_req;
        u32 len = sgc->length;
        struct esas2r_target *t = a->targetdb + rq->target_id;
        u8 is_i_o = 0;
        u16 reqsize;
        struct atto_physical_region_description *curr_iblk_chn;
        u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];

        /*
         * Extract the LBA from the command so we can determine
         * the I-Block boundary.
         */

        if (rq->vrq->scsi.function == VDA_FUNC_SCSI
            && t->target_state == TS_PRESENT
            && !(t->flags & TF_PASS_THRU)) {
                u32 lbalo = 0;

                switch (rq->vrq->scsi.cdb[0]) {
                case READ_16:
                case WRITE_16:
                {
                        lbalo = MAKEDWORD(MAKEWORD(cdb[9], cdb[8]),
                                          MAKEWORD(cdb[7], cdb[6]));
                        is_i_o = 1;
                        break;
                }

                case READ_12:
                case WRITE_12:
                case READ_10:
                case WRITE_10:
                {
                        lbalo = MAKEDWORD(MAKEWORD(cdb[5], cdb[4]),
                                          MAKEWORD(cdb[3], cdb[2]));
                        is_i_o = 1;
                        break;
                }

                case READ_6:
                case WRITE_6:
                {
                        lbalo = MAKEDWORD(MAKEWORD(cdb[3], cdb[2]),
                                          MAKEWORD(cdb[1] & 0x1F, 0));
                        is_i_o = 1;
                        break;
                }

                default:
                        break;
                }

                if (is_i_o) {
                        u32 startlba;

                        rq->vrq->scsi.iblk_cnt_prd = 0;

                        /* Determine the size of the 1st I-block PRD list */
                        startlba = t->inter_block - (lbalo & (t->inter_block -
                                                              1));
                        sgc->length = startlba * t->block_size;

                        /*
                         * Check if the 1st I-block chain starts at the
                         * base of an I-block.
                         */
                        if ((lbalo & (t->inter_block - 1)) == 0)
                                rq->flags |= RF_1ST_IBLK_BASE;

                        if (sgc->length > len)
                                sgc->length = len;
                } else {
                        sgc->length = len;
                }
        } else {
                sgc->length = len;
        }

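        /*
         * Worked example (geometry values are hypothetical): with an
         * interleave of 256 blocks and 512-byte blocks, a request starting
         * at LBA 250 has 256 - (250 & 255) = 6 blocks left in its first
         * I-block, so the first PRD list covers 6 * 512 = 3072 bytes
         * (further capped at the transfer length above).
         */
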
        /* get our starting chain address */

        curr_iblk_chn =
                (struct atto_physical_region_description *)sgc->sge.a64.curr;

        sgc->sge.prd.sgl_max_cnt = sgl_page_size /
                                   sizeof(struct
                                          atto_physical_region_description);

        /* create all of the I-block PRD lists */

        while (len) {
                sgc->sge.prd.sge_cnt = 0;
                sgc->sge.prd.chain = NULL;
                sgc->sge.prd.curr = curr_iblk_chn;

                /* increment to the next I-Block */

                len -= sgc->length;

                /* go build the next I-Block PRD list */

                if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
                        return false;

                curr_iblk_chn++;

                if (is_i_o) {
                        rq->vrq->scsi.iblk_cnt_prd++;

                        if (len > t->inter_byte)
                                sgc->length = t->inter_byte;
                        else
                                sgc->length = len;
                }
        }

        /* figure out how much of the VDA request was used */

        reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
                  / sizeof(u32);

        /*
         * Only update the request size if it is bigger than what is
         * already there.  We can come in here twice for some management
         * commands.
         */
        if (reqsize > rq->vda_req_sz)
                rq->vda_req_sz = reqsize;

        return true;
}

static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
{
        u32 delta = currtime - a->chip_init_time;

        if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
                /* Wait before accessing registers */
        } else if (delta >= ESAS2R_CHPRST_TIME) {
                /*
                 * The last reset failed, so try again.  Reset
                 * processing will give up after three tries.
                 */
                esas2r_local_reset_adapter(a);
        } else {
                /* We can now see if the firmware is ready */
                u32 doorbell;

                doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
                if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
                        esas2r_force_interrupt(a);
                } else {
                        u32 ver = (doorbell & DRBL_FW_VER_MSK);

                        /* Driver supports API version 0 and 1 */
                        esas2r_write_register_dword(a, MU_DOORBELL_OUT,
                                                    doorbell);
                        if (ver == DRBL_FW_VER_0) {
                                esas2r_lock_set_flags(&a->flags,
                                                      AF_CHPRST_DETECTED);
                                esas2r_lock_set_flags(&a->flags,
                                                      AF_LEGACY_SGE_MODE);

                                a->max_vdareq_size = 128;
                                a->build_sgl = esas2r_build_sg_list_sge;
                        } else if (ver == DRBL_FW_VER_1) {
                                esas2r_lock_set_flags(&a->flags,
                                                      AF_CHPRST_DETECTED);
                                esas2r_lock_clear_flags(&a->flags,
                                                        AF_LEGACY_SGE_MODE);

                                a->max_vdareq_size = 1024;
                                a->build_sgl = esas2r_build_sg_list_prd;
                        } else {
                                esas2r_local_reset_adapter(a);
                        }
                }
        }
}


/* This function must be called once per timer tick */
void esas2r_timer_tick(struct esas2r_adapter *a)
{
        u32 currtime = jiffies_to_msecs(jiffies);
        u32 deltatime = currtime - a->last_tick_time;

        a->last_tick_time = currtime;

        /* count down the uptime */
        if (a->chip_uptime
            && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
                if (deltatime >= a->chip_uptime)
                        a->chip_uptime = 0;
                else
                        a->chip_uptime -= deltatime;
        }

        if (a->flags & AF_CHPRST_PENDING) {
                if (!(a->flags & AF_CHPRST_NEEDED)
                    && !(a->flags & AF_CHPRST_DETECTED))
                        esas2r_handle_pending_reset(a, currtime);
        } else {
                if (a->flags & AF_DISC_PENDING)
                        esas2r_disc_check_complete(a);

                if (a->flags & AF_HEARTBEAT_ENB) {
                        if (a->flags & AF_HEARTBEAT) {
                                if ((currtime - a->heartbeat_time) >=
                                    ESAS2R_HEARTBEAT_TIME) {
                                        esas2r_lock_clear_flags(&a->flags,
                                                                AF_HEARTBEAT);
                                        esas2r_hdebug("heartbeat failed");
                                        esas2r_log(ESAS2R_LOG_CRIT,
                                                   "heartbeat failed");
                                        esas2r_bugon();
                                        esas2r_local_reset_adapter(a);
                                }
                        } else {
                                esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT);
                                a->heartbeat_time = currtime;
                                esas2r_force_interrupt(a);
                        }
                }
        }

        if (atomic_read(&a->disable_cnt) == 0)
                esas2r_do_deferred_processes(a);
}

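/*
 * Heartbeat note: esas2r_timer_tick() above sets AF_HEARTBEAT and forces an
 * interrupt; the flag is expected to be cleared elsewhere (presumably by the
 * interrupt path) before ESAS2R_HEARTBEAT_TIME elapses.  If it is not, the
 * adapter is treated as hung and reset.
 */
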
/*
 * Send the specified task management function to the target and LUN
 * specified in rqaux.  In addition, immediately abort any commands that
 * are queued but not sent to the device according to the rules specified
 * by the task management function.
 */
bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
                           struct esas2r_request *rqaux, u8 task_mgt_func)
{
        u16 targetid = rqaux->target_id;
        u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
        bool ret = false;
        struct esas2r_request *rq;
        struct list_head *next, *element;
        unsigned long flags;

        LIST_HEAD(comp_list);

        esas2r_trace_enter();
        esas2r_trace("rqaux:%p", rqaux);
        esas2r_trace("task_mgt_func:%x", task_mgt_func);
        spin_lock_irqsave(&a->queue_lock, flags);

        /* search the defer queue looking for requests for the device */
        list_for_each_safe(element, next, &a->defer_list) {
                rq = list_entry(element, struct esas2r_request, req_list);

                if (rq->vrq->scsi.function == VDA_FUNC_SCSI
                    && rq->target_id == targetid
                    && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
                        || task_mgt_func == 0x20)) { /* target reset */
                        /* Found a request affected by the task management */
                        if (rq->req_stat == RS_PENDING) {
                                /*
                                 * The request is pending or waiting.  We can
                                 * safely complete the request now.
                                 */
                                if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
                                        list_add_tail(&rq->comp_list,
                                                      &comp_list);
                        }
                }
        }

        /* Send the task management request to the firmware */
        rqaux->sense_len = 0;
        rqaux->vrq->scsi.length = 0;
        rqaux->target_id = targetid;
        rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
        memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
        rqaux->vrq->scsi.flags |=
                cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));

        if (a->flags & AF_FLASHING) {
                /* Assume success.  If there are active requests, return busy */
                rqaux->req_stat = RS_SUCCESS;

                list_for_each_safe(element, next, &a->active_list) {
                        rq = list_entry(element, struct esas2r_request,
                                        req_list);
                        if (rq->vrq->scsi.function == VDA_FUNC_SCSI
                            && rq->target_id == targetid
                            && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
                                || task_mgt_func == 0x20))  /* target reset */
                                rqaux->req_stat = RS_BUSY;
                }

                ret = true;
        }

        spin_unlock_irqrestore(&a->queue_lock, flags);

        if (!(a->flags & AF_FLASHING))
                esas2r_start_request(a, rqaux);

        esas2r_comp_list_drain(a, &comp_list);

        if (atomic_read(&a->disable_cnt) == 0)
                esas2r_do_deferred_processes(a);

        esas2r_trace_exit();

        return ret;
}

void esas2r_reset_bus(struct esas2r_adapter *a)
{
        esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");

        if (!(a->flags & AF_DEGRADED_MODE)
            && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
                esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED);
                esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING);
                esas2r_lock_set_flags(&a->flags, AF_OS_RESET);

                esas2r_schedule_tasklet(a);
        }
}

bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
                          u8 status)
{
        esas2r_trace_enter();
        esas2r_trace("rq:%p", rq);
        list_del_init(&rq->req_list);
        if (rq->timeout > RQ_MAX_TIMEOUT) {
                /*
                 * The request timed out, but we could not abort it because a
                 * chip reset occurred.  Return busy status.
                 */
                rq->req_stat = RS_BUSY;
                esas2r_trace_exit();
                return true;
        }

        rq->req_stat = status;
        esas2r_trace_exit();
        return true;
}