/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

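/* Feed a command block to the chip.  With ESP_FLAG_USE_FIFO the bytes
 * are pushed through the FIFO by PIO; otherwise the block goes out via
 * the external DMA engine with ESP_CMD_DMA added to the chip command.
 */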
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_FBYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}

static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID code for the fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
	case PCSCSI:
		/* Fast 236, AM53c974 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

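/* Map the command's scatterlist for DMA and prime the residue
 * accounting (cur_sg, cur_residue, tot_residue) used by the
 * data-phase code to track transfer progress.
 */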
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	int dir = cmd->sc_data_direction;

	if (dir == DMA_NONE)
		return;

	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}

static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

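/* Only the standard 6-, 10- and 12-byte CDB lengths can be handed to
 * the chip in one go during selection; anything else is marked
 * ESP_FLAG_DOING_SLOWCMD and delivered one bus phase at a time.
 */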
static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (cmd->cmd_len == 6 ||
	    cmd->cmd_len == 10 ||
	    cmd->cmd_len == 12) {
		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
	} else {
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts. */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* The other ESP chip variants are limited to 16 bits of
		 * transfer count.  Actually on FAS100A and FAS236 we could
		 * get 24 bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}

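/* Example for the non-FASHME branch above: dma_addr 0x00fffff8 with
 * dma_len 0x100 gives base 0xfffff8 and end 0x10000f8, which gets
 * clamped to 0x1000000, so only 8 bytes may move before the 24-bit
 * boundary is hit.
 */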
static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care". */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}

static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}

static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

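/* Completion path: drop the DMA mapping and the lun tag, report the
 * result to the midlayer, then try to start the next queued command.
 */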
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return (status | (message << 8) | (driver_code << 16));
}

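/* For example, a CHECK CONDITION status (0x02) with message
 * COMMAND_COMPLETE (0x00) and driver code DID_OK (0x00) composes to
 * the result value 0x00000002.
 */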
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

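/* DEF_SCSI_QCMD() generates esp_queuecommand(), a wrapper that calls
 * esp_queuecommand_lck() with the host lock held.
 */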
static DEF_SCSI_QCMD(esp_queuecommand)

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}

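/* A target has reselected us.  Work out which target and lun from the
 * FIFO (FASHME) or the sampled arbitration bits (older chips), find
 * the disconnected command and make it the active one again.
 */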
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting. */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

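/* Work out how many bytes actually crossed the bus in the data phase
 * that just ended, from the chip's transfer counter and the FIFO
 * leftovers.  Returns a negative value if the ESP100 sync-transfer
 * bug is detected.
 */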
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/*
	 * The am53c974 has a DMA peculiarity.  The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0'.  When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}

static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

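/* The target rejected our last message.  If it was a wide nego, fall
 * back and try sync; if it was a sync nego, fall back to async;
 * anything else earns a reset.
 */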
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

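/* Worked example for the stp computation above: assuming a 40MHz
 * input clock, one_clock is 25 (ns) and an SDTR period byte of 25
 * (100ns, the byte being in 4ns units) gives
 * stp = DIV_ROUND_UP(25 << 2, 25) = 4 clocks per byte.
 */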
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

static int esp_process_event(struct esp *esp)
{
	int write, i;

again:
	write = 0;
	esp_log_event("process event %d phase %x\n",
		      esp->event, esp->sreg & ESP_STAT_PMASK);
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected phase, sreg=%02x\n",
				     esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthru */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			shost_printk(KERN_ERR, esp->host,
				     "DMA length is zero!\n");
			shost_printk(KERN_ERR, esp->host,
				     "cur adr[%08llx] len[%08x]\n",
				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
				     esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in status\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("Command done status[%x] message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in freebus\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo. */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
				for (i = 0; i < esp->msg_out_len; i++)
					esp_write8(esp->msg_out[i], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);

			/* XXX if the chip went into disconnected mode,
			 * we can't run the phase state machine anyway.
			 */
			if (!(esp->ireg & ESP_INTR_DC))
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}

		esp->msg_out_len = 0;

		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			/* Check whether a bus reset is to be done next */
			if (esp->event == ESP_EVENT_RESET)
				return 0;

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "MSGIN neither BSERV nor FDONE, resetting");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected event %x, resetting\n", esp->event);
		esp_schedule_reset(esp);
		return 0;
	}
	return 1;
}

static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}

static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}

static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers. */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}

/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/*
	 * Once INTRPT is read STATUS and SSTEP are cleared.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (esp->ireg & ESP_INTR_RSEL) {
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		} else {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}

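/* Exported interrupt entry point.  Bus front-end drivers register it
 * themselves; illustratively (flags depend on the bus glue):
 *
 *	err = request_irq(esp->host->irq, scsi_esp_intr, IRQF_SHARED,
 *			  "ESP", esp);
 */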
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);

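/* Deduce the chip revision by probing which config registers exist:
 * no cfg2 means ESP100, cfg2 without cfg3 means ESP100A, and all
 * three mean an ESP236 or FAS variant, told apart by the clock
 * conversion factor.
 */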
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/*
			 * If what we write to cfg2 does not come back,
			 * cfg2 is not implemented.
			 * Therefore this must be a plain esp100.
			 */
			esp->rev = ESP100;
			return;
		}
	}

	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* The cfg2 register is implemented but cfg3 is not,
		 * so this must be an esp100a.
		 */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* All of cfg{1,2,3} implemented, must be one of
		 * the fas variants, figure out which one.
		 */
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}
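/* A minimal sketch of the write/read-back probe idiom esp_get_revision()
 * relies on, with hypothetical names (reg, want, mask):
 *
 *	esp_write8(want, reg);
 *	if ((esp_read8(reg) & ~mask) != want)
 *		...the register is not implemented on this chip...
 *
 * Detection proceeds from cfg2 to cfg3: no cfg2 means ESP100, cfg2
 * without cfg3 means ESP100A, and all three config registers present
 * means one of the FAS variants.
 */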
static void esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	/* Start with a clear state, domain validation (via ->slave_configure,
	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
	 * commands.
	 */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}

static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) * (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fhz = esp->cfreq;

	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHz.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}
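/* Worked example (added for illustration, not in the original): with a
 * 40MHz crystal, ccf = ((40000000 / 1000000) + 4) / 5 = 8, which is
 * encoded as cfact = 0.  Plugging the 250ms ESP_BUS_TIMEOUT into the
 * STO formula above gives
 *
 *	STO = (0.25 * 40000000) / (8192 * 8) ~= 153
 *
 * Because ccf rounds the clock up to a multiple of 5MHz, clock/ccf is
 * always roughly 5MHz, so the selection time-out register value lands
 * near 153 for any supported crystal.
 */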
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
	"AM53C974",
};

static struct scsi_transport_template *esp_transport_template;

int scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);

void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);

static int esp_target_alloc(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = starget;

	return 0;
}

static void esp_target_destroy(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = NULL;
}

static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}

static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];

	if (dev->tagged_supported)
		scsi_change_queue_depth(dev, esp->num_tags);

	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}

static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}
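/* Minimal usage sketch (illustrative, modeled on how in-tree front ends
 * such as the sun_esp glue drive this library; esp_my_ops and the error
 * handling are hypothetical):
 *
 *	struct Scsi_Host *host;
 *	struct esp *esp;
 *
 *	host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp));
 *	if (!host)
 *		return -ENOMEM;
 *	esp = shost_priv(host);
 *
 *	esp->host = host;
 *	esp->dev = dev;
 *	esp->ops = &esp_my_ops;   (chip access and DMA callbacks)
 *	...map registers, set esp->regs, esp->scsi_id, esp->cfreq...
 *
 *	err = scsi_esp_register(esp, dev);
 *
 * scsi_esp_register() then probes the chip revision, resets it, and
 * adds and scans the Scsi_Host as seen above.
 */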
2540 */ 2541 list_del(&ent->list); 2542 2543 cmd->result = DID_ABORT << 16; 2544 cmd->scsi_done(cmd); 2545 2546 esp_put_ent(esp, ent); 2547 2548 goto out_success; 2549 } 2550 2551 init_completion(&eh_done); 2552 2553 ent = esp->active_cmd; 2554 if (ent && ent->cmd == cmd) { 2555 /* Command is the currently active command on 2556 * the bus. If we already have an output message 2557 * pending, no dice. 2558 */ 2559 if (esp->msg_out_len) 2560 goto out_failure; 2561 2562 /* Send out an abort, encouraging the target to 2563 * go to MSGOUT phase by asserting ATN. 2564 */ 2565 esp->msg_out[0] = ABORT_TASK_SET; 2566 esp->msg_out_len = 1; 2567 ent->eh_done = &eh_done; 2568 2569 scsi_esp_cmd(esp, ESP_CMD_SATN); 2570 } else { 2571 /* The command is disconnected. This is not easy to 2572 * abort. For now we fail and let the scsi error 2573 * handling layer go try a scsi bus reset or host 2574 * reset. 2575 * 2576 * What we could do is put together a scsi command 2577 * solely for the purpose of sending an abort message 2578 * to the target. Coming up with all the code to 2579 * cook up scsi commands, special case them everywhere, 2580 * etc. is for questionable gain and it would be better 2581 * if the generic scsi error handling layer could do at 2582 * least some of that for us. 2583 * 2584 * Anyways this is an area for potential future improvement 2585 * in this driver. 2586 */ 2587 goto out_failure; 2588 } 2589 2590 spin_unlock_irqrestore(esp->host->host_lock, flags); 2591 2592 if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) { 2593 spin_lock_irqsave(esp->host->host_lock, flags); 2594 ent->eh_done = NULL; 2595 spin_unlock_irqrestore(esp->host->host_lock, flags); 2596 2597 return FAILED; 2598 } 2599 2600 return SUCCESS; 2601 2602 out_success: 2603 spin_unlock_irqrestore(esp->host->host_lock, flags); 2604 return SUCCESS; 2605 2606 out_failure: 2607 /* XXX This might be a good location to set ESP_TGT_BROKEN 2608 * XXX since we know which target/lun in particular is 2609 * XXX causing trouble. 2610 */ 2611 spin_unlock_irqrestore(esp->host->host_lock, flags); 2612 return FAILED; 2613 } 2614 2615 static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd) 2616 { 2617 struct esp *esp = shost_priv(cmd->device->host); 2618 struct completion eh_reset; 2619 unsigned long flags; 2620 2621 init_completion(&eh_reset); 2622 2623 spin_lock_irqsave(esp->host->host_lock, flags); 2624 2625 esp->eh_reset = &eh_reset; 2626 2627 /* XXX This is too simple... We should add lots of 2628 * XXX checks here so that if we find that the chip is 2629 * XXX very wedged we return failure immediately so 2630 * XXX that we can perform a full chip reset. 2631 */ 2632 esp->flags |= ESP_FLAG_RESETTING; 2633 scsi_esp_cmd(esp, ESP_CMD_RS); 2634 2635 spin_unlock_irqrestore(esp->host->host_lock, flags); 2636 2637 ssleep(esp_bus_reset_settle); 2638 2639 if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) { 2640 spin_lock_irqsave(esp->host->host_lock, flags); 2641 esp->eh_reset = NULL; 2642 spin_unlock_irqrestore(esp->host->host_lock, flags); 2643 2644 return FAILED; 2645 } 2646 2647 return SUCCESS; 2648 } 2649 2650 /* All bets are off, reset the entire device. 
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}

struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
	struct esp *esp = shost_priv(host);
	enum spi_signal_type type;

	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
		type = SPI_SIGNAL_HVD;
	else
		type = SPI_SIGNAL_SE;

	spi_signalling(host) = type;
}

static void esp_set_offset(struct scsi_target *target, int offset)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
		tp->nego_goal_offset = 0;
	else
		tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_width = (width ? 1 : 0);
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};

static int __init esp_init(void)
{
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}

static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
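/* Illustrative note (not in the original): the spi_function_template
 * hooks above back the sysfs attributes created by the SPI transport
 * class, so something along the lines of
 *
 *	echo <period> > /sys/class/spi_transport/target<h>:<c>:<i>/period
 *
 * ends up in esp_set_period().  The setters only record negotiation
 * goals and raise ESP_TGT_CHECK_NEGO; the new parameters are actually
 * negotiated on the next command issued to that target.
 */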
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
		 "ESP bitmapped debugging message enable value:\n"
		 "	0x00000001	Log interrupt events\n"
		 "	0x00000002	Log scsi commands\n"
		 "	0x00000004	Log resets\n"
		 "	0x00000008	Log message in events\n"
		 "	0x00000010	Log message out events\n"
		 "	0x00000020	Log command completion\n"
		 "	0x00000040	Log disconnects\n"
		 "	0x00000080	Log data start\n"
		 "	0x00000100	Log data done\n"
		 "	0x00000200	Log reconnects\n"
		 "	0x00000400	Log auto-sense data\n"
		 "	0x00000800	Log driver state events\n"
		 "	0x00001000	Log ESP chip commands\n"
);

module_init(esp_init);
module_exit(esp_exit);
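/* Usage note (illustrative): when this core is built modular, the debug
 * mask can be given at load time, e.g. (assuming the module is named
 * esp_scsi, per the usual drivers/scsi Makefile):
 *
 *	modprobe esp_scsi esp_debug=0x0201
 *
 * which enables interrupt (0x0001) plus reconnect (0x0200) logging; the
 * equivalent for a built-in core is esp_scsi.esp_debug=0x0201 on the
 * kernel command line.
 */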