/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		printk(f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		printk(f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		printk(f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		printk(f, ## a); \
} while (0)
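/* Note: esp_debug is a bitmask of the ESP_DEBUG_* categories above;
 * for example, esp_debug = ESP_DEBUG_INTR | ESP_DEBUG_RESET (0x5)
 * traces interrupt handling and bus resets only.  There is no
 * module_param hook for it in this file, so it is set at build time.
 */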
#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	printk(KERN_INFO PFX "esp%d: Dumping command log\n",
	       esp->host->unique_id);
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
		       esp->host->unique_id, idx,
		       p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");

		printk("val[%02x] sreg[%02x] seqreg[%02x] "
		       "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
		       p->val, p->sreg, p->seqreg,
		       p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: ESP_FF_FBYTES "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}
	}
}

static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for the fast ESP chip variants.
	 */
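	/* Note: esp->ccycle appears to be the input clock period in
	 * picoseconds, so (N * ccycle) / 1000 is N clock cycles in ns,
	 * and the (x + 3) >> 2 rounding below converts that into the
	 * 4ns units used by SDTR transfer period factors.  E.g. a
	 * 40MHz clock gives ccycle = 25000, so the 4-cycle fast
	 * minimum is 100ns, a period factor of 25.  (Units inferred
	 * from esp_msgin_sdtr(), which converts back via ccycle/1000.)
	 */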
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}
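/* The data-pointer bookkeeping keeps two byte counts per command:
 * cur_residue (bytes left in the current scatterlist element) and
 * tot_residue (bytes left in the whole transfer).  The DMA address
 * above is derived from them, e.g. a 4KB element with cur_residue of
 * 1024 means the next burst starts 3072 bytes into that element.
 * esp_cur_dma_len() and esp_advance_dma() below work the same way.
 */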
static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
		       esp->host->unique_id);
		printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
		       "len[%u]\n",
		       esp->host->unique_id,
		       p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	int dir = cmd->sc_data_direction;

	if (dir == DMA_NONE)
		return;

	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}

static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (cmd->cmd_len == 6 ||
	    cmd->cmd_len == 10 ||
	    cmd->cmd_len == 12) {
		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
	} else {
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}
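/* Worked example for the non-FASHME clamp below: dma_addr 0x01fffff0,
 * dma_len 0x100.  base = 0xfffff0, end = 0x10000f0, which crosses the
 * 16MB (1 << 24) boundary, so end is clamped to 0x1000000 and only
 * 0x10 bytes are programmed; the remainder goes out in a later burst
 * once esp_advance_dma() has moved the data pointers forward.
 */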
static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts. */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* ESP chip limits other variants by 16-bits of transfer
		 * count.  Actually on FAS100A and FAS236 we could get
		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care". */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}
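/* Per-LUN tag allocation walkthrough for esp_alloc_lun_tag() above:
 * an untagged command arriving while tagged commands are in flight
 * sets lp->hold, which plugs the queue (every later allocation fails
 * with -EBUSY).  Once num_tagged drains to zero the held untagged
 * command gets the LUN to itself, clears the hold, and tagged
 * queueing resumes after it completes.  This gives untagged commands
 * a fairness guarantee without a separate wait queue.
 */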
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("esp%d: Doing auto-sense for "
				  "tgt[%d] lun[%d]\n",
				  esp->host->unique_id, tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}

static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
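/* Selection overview for the function below: the normal "fast" path
 * packs IDENTIFY (plus an optional tag message) and the CDB into the
 * command block and issues select-with-ATN (ESP_CMD_SELA) or
 * select-with-ATN3 (ESP_CMD_SA3), letting the chip sequence the
 * message-out and command phases itself.  Odd-length CDBs,
 * negotiation messages, and tagged commands on ESP100 fall back to
 * the "slow" path (ESP_FLAG_DOING_SLOWCMD): select-with-ATN-and-stop
 * (ESP_CMD_SELAS), with the event loop feeding each phase by hand.
 */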
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, start_cmd);
}

static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
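/* Reminder on the scsi_cmnd result word used below and in
 * compose_result(): byte 0 is the SCSI status, byte 1 the message
 * byte, byte 2 the host byte (DID_*), and byte 3 the driver byte.
 * So the autosense completion below reports CHECK_CONDITION status,
 * COMMAND_COMPLETE message, DID_OK host status, and DRIVER_SENSE to
 * flag that valid sense data is already in cmd->sense_buffer.
 */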
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return (status | (message << 8) | (driver_code << 16));
}

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
		       esp->host->unique_id, esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register
		 * cannot be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				printk(KERN_ERR PFX "esp%d: Spurious irq, "
				       "sreg=%02x.\n",
				       esp->host->unique_id, esp->sreg);
				return -1;
			}

			printk(KERN_ERR PFX "esp%d: DMA error\n",
			       esp->host->unique_id);

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("ESP: esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp_log_reconnect("ESP: reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
		       esp->host->unique_id);
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
		       esp->host->unique_id, esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
		       "type %02x.\n",
		       esp->host->unique_id, esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
		       "tag %02x.\n",
		       esp->host->unique_id, esp->command_block[1]);
		return NULL;
	}

	return ent;
}

static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
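		/* E.g. with our scsi_id_mask 0x80 (host ID 7) and
		 * target 3 reselecting, bits reads 0x88: clearing our
		 * own bit leaves 0x08, the (bits & (bits - 1)) test
		 * below confirms exactly one bit remains, and ffs()
		 * recovers the reconnecting target's ID.
		 */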
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
		       "tgt[%u] lun[%u]\n",
		       esp->host->unique_id, target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;
	u8 orig_select_state;

	orig_select_state = esp->select_state;

	/* No longer selecting. */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	printk("ESP: Unexpected selection completion ireg[%x].\n",
	       esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Asynchronous data transfer (offset zero), always
		 * flush fifo.
		 */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
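/* Accounting example for the function above: if data_dma_len was
 * 4096, the transfer counter still holds 512 (ecount), and two bytes
 * linger in the FIFO on a data-out transfer, the target actually
 * consumed 4096 - 512 - 2 = 3582 bytes.  The FIFO residue only counts
 * against data-out; on data-in (ESP_CMD_FLAG_WRITE set) anything in
 * the FIFO has already been drained to memory by the DMA engine.
 */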
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
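/* SDTR arithmetic sanity check for esp_msgin_sdtr() above, assuming
 * ccycle is in picoseconds: with a 40MHz clock, one_clock is 25ns.
 * A target answering with period factor 25 (100ns) and offset 15
 * yields stp = DIV_ROUND_UP(25 << 2, 25) = 4 clocks per byte, i.e.
 * fast-10 timing, which esp_setsync() then programs into
 * ESP_STP/ESP_SOFF.
 */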
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	printk("ESP: Unexpected extended msg type %x\n",
	       esp->msg_in[2]);

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		printk("ESP: Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
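/* esp_process_event() below is the heart of the driver: a state
 * machine keyed off esp->event.  ESP_EVENT_CHECK_PHASE decodes the
 * bus phase bits of the status register into a specific event and
 * loops back to the top ("goto again") so several phases can be
 * walked in one pass.  Returning 0 tells the caller's loop in
 * __esp_interrupt() to invoke it again (e.g. after a reset has been
 * scheduled); returning 1 ends processing for this interrupt.
 */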
static int esp_process_event(struct esp *esp)
{
	int write;

again:
	write = 0;
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			printk("ESP: Unexpected phase, sreg=%02x\n",
			       esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;
		break;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthru */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
			       esp->host->unique_id);
			printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
			       esp->host->unique_id,
			       (unsigned long long)esp_cur_dma_addr(ent, cmd),
			       esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
				  "write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			printk("ESP: data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			printk("ESP: data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			printk("ESP: Unexpected message %x in status\n",
			       ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("ESP: Command done status[%x] "
					"message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("ESP: Disconnecting tgt[%d] "
					   "tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			printk("ESP: Unexpected message %x in freebus\n",
			       ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo. */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);
		}

		if (!(esp->ireg & ESP_INTR_DC)) {
			if (esp->rev != FASHME)
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("ESP: Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			printk("ESP: MSGIN neither BSERV nor FDONE, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       esp->cmd_bytes_left, 16, 0,
				       ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;
		break;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		printk("ESP: Unexpected event %x, resetting\n",
		       esp->event);
		esp_schedule_reset(esp);
		return 0;
		break;
	}
	return 1;
}

static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}

static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers. */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
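/* Interrupt handling note: whenever the state machine expects the
 * next interrupt to arrive almost immediately (e.g. the status byte
 * following a data phase) it sets ESP_FLAG_QUICKIRQ_CHECK, and
 * scsi_esp_intr() below then busy-polls irq_pending() for up to
 * ESP_QUICKIRQ_LIMIT iterations so a whole command can often finish
 * without taking another hardware interrupt.
 */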
/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	esp->sreg = esp_read8(ESP_STATUS);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		printk("ESP: unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}

irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);

static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write8(esp->config2, ESP_CFG2);

	val = esp_read8(ESP_CFG2);
	val &= ~ESP_CONFIG2_MAGIC;
	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		/* If what we write to cfg2 does not come back, cfg2 is not
		 * implemented, therefore this must be a plain esp100.
		 */
		esp->rev = ESP100;
	} else {
		esp->config2 = 0;
		esp_set_all_config3(esp, 5);
		esp->prev_cfg3 = 5;
		esp_write8(esp->config2, ESP_CFG2);
		esp_write8(0, ESP_CFG3);
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		val = esp_read8(ESP_CFG3);
		if (val != 5) {
			/* The cfg2 register is implemented, however
			 * cfg3 is not, must be esp100a.
			 */
			esp->rev = ESP100A;
		} else {
			esp_set_all_config3(esp, 0);
			esp->prev_cfg3 = 0;
			esp_write8(esp->prev_cfg3, ESP_CFG3);

			/* All of cfg{1,2,3} implemented, must be one of
			 * the fas variants, figure out which one.
			 */
			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
				esp->rev = FAST;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				esp->rev = ESP236;
			}
			esp->config2 = 0;
			esp_write8(esp->config2, ESP_CFG2);
		}
	}
}
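/* The probe above is a classic write-then-readback ladder: cfg2 not
 * readable -> ESP100; cfg2 readable but cfg3 not -> ESP100A; all
 * three config registers present -> ESP236 or a FAST part, decided
 * by the clock conversion factor.  The FAST guess is refined into
 * FAS236/FAS100A/FASHME later, in esp_reset_esp(), where the UID
 * register can be read reliably.
 */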
static void esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	/* Start with a clear state, domain validation (via ->slave_configure,
	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
	 * commands.
	 */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}

static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and
	 *    try to select a target forever if you let it.  This value tells
	 *    the ESP when it has taken too long to negotiate and that it
	 *    should interrupt the CPU so we can see what happened.  The
	 *    value is computed as follows (from NCR/Symbios chip docs):
	 *
	 *		(Time Out Period) * (Input Clock)
	 *	STO = ----------------------------------
	 *		(8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
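	/* Worked example (illustrative numbers only, not from a datasheet):
	 * with a 40MHz crystal, ccf = ((40000000 / 1000000) + 4) / 5 = 8,
	 * which is encoded as a cfact register value of 0 below.  The 250ms
	 * selection time-out then works out to:
	 *
	 *	STO = (0.25 * 40000000) / (8192 * 8) ~= 153
	 */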
	fhz = esp->cfreq;

	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}

static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

static struct scsi_transport_template *esp_transport_template;

int scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
	       esp->host->unique_id, esp->regs, esp->dma_regs,
	       esp->host->irq);
	printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
	       esp->host->unique_id, esp_chip_names[esp->rev],
	       esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);

void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);

static int esp_target_alloc(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = starget;

	return 0;
}

static void esp_target_destroy(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = NULL;
}

static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}

static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	int goal_tags, queue_depth;

	goal_tags = 0;

	if (dev->tagged_supported) {
		/* XXX make this configurable somehow XXX */
		goal_tags = ESP_DEFAULT_TAGS;

		if (goal_tags > ESP_MAX_TAG)
			goal_tags = ESP_MAX_TAG;
	}

	queue_depth = goal_tags;
	if (queue_depth < dev->host->cmd_per_lun)
		queue_depth = dev->host->cmd_per_lun;

	if (goal_tags) {
		scsi_set_tag_type(dev, MSG_ORDERED_TAG);
		scsi_activate_tcq(dev, queue_depth);
	} else {
		scsi_deactivate_tcq(dev, queue_depth);
	}
	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}
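/* For instance, if ESP_DEFAULT_TAGS were 16 (see esp_scsi.h for the
 * real value), a tagged device would get ordered-tag TCQ at depth 16,
 * while an untagged device would fall back to the host's cmd_per_lun
 * of 2 set in scsi_esp_register().
 */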
static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}

static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
	       esp->host->unique_id, cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is of questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyway, this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}
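	/* Drop the lock before sleeping: wait_for_completion_timeout()
	 * blocks, and the command tear-down path that signals ent->eh_done
	 * itself runs under host->lock from the interrupt handler.
	 */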
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}

static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple...  We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
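/* Note that the midlayer escalates through these handlers in order:
 * it first tries to abort the individual command, then the bus reset
 * above, and only then the full host reset below.
 */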
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}

struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
	struct esp *esp = shost_priv(host);
	enum spi_signal_type type;

	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
		type = SPI_SIGNAL_HVD;
	else
		type = SPI_SIGNAL_SE;

	spi_signalling(host) = type;
}

static void esp_set_offset(struct scsi_target *target, int offset)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
		tp->nego_goal_offset = 0;
	else
		tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_width = (width ? 1 : 0);
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};

static int __init esp_init(void)
{
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}

static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
);

module_init(esp_init);
module_exit(esp_exit);
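/* Usage sketch: how a bus front-end ("glue") driver would typically wire
 * itself to this core.  This is an illustrative outline only; the names
 * example_esp_probe and example_esp_ops are hypothetical, and a real glue
 * driver also maps its registers and sets up DMA.  It is therefore
 * compiled out with #if 0.
 */
#if 0
static const struct esp_driver_ops example_esp_ops; /* bus register/DMA ops */

static int example_esp_probe(struct device *dev, void __iomem *regs,
			     void __iomem *dma_regs, int irq)
{
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	/* scsi_esp_template supplies queuecommand and the EH handlers. */
	host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp));
	if (!host)
		return -ENOMEM;

	host->irq = irq;
	esp = shost_priv(host);
	esp->host = host;
	esp->regs = regs;
	esp->dma_regs = dma_regs;
	esp->ops = &example_esp_ops;
	esp->scsi_id = 7;		/* usually read from PROM/NVRAM */
	esp->cfreq = 40000000;		/* input crystal clock, in Hz */

	/* scsi_esp_intr runs the whole state machine under host->lock. */
	err = request_irq(irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
	if (err < 0)
		goto fail_put_host;

	/* Probes the chip revision, resets it, and registers with the
	 * SCSI midlayer.
	 */
	err = scsi_esp_register(esp, dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(irq, esp);
fail_put_host:
	scsi_host_put(host);
	return err;
}
#endif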