// SPDX-License-Identifier: GPL-2.0-only
/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)
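/* Note: esp_debug is a bitmask built from the ESP_DEBUG_* values above.
 * For example, esp_debug = 0x801 (ESP_DEBUG_INTR | ESP_DEBUG_EVENT)
 * enables the esp_log_intr() and esp_log_event() trace points while
 * leaving all of the other logging macros silent.
 */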
#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_FBYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}

static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}
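/* Note: scsi_esp_cmd() and esp_event() above record into a ring buffer
 * indexed by esp_event_cur; the "& (ESP_EVENT_LOG_SZ - 1)" wrap assumes
 * ESP_EVENT_LOG_SZ is a power of two (with the expected size of 32,
 * index 31 + 1 wraps back to slot 0), and esp_dump_cmd_log() walks the
 * entries oldest-first starting from that same cursor.
 */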
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for the fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));

		if (family_code == ESP_UID_F236) {
			esp->rev = FAS236;
		} else if (family_code == ESP_UID_HME) {
			esp->rev = FASHME; /* Version is usually '5'. */
		} else if (family_code == ESP_UID_FSC) {
			esp->rev = FSC;
			/* Enable Active Negation */
			esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
		} else {
			esp->rev = FAS100A;
		}
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */
	case FAS236:
	case PCSCSI:
	case FSC:
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;
	struct scatterlist *s;

	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		/*
		 * For pseudo DMA and PIO we need the virtual address instead
		 * of a dma address, so perform an identity mapping.
		 */
386 */ 387 spriv->num_sg = scsi_sg_count(cmd); 388 389 scsi_for_each_sg(cmd, s, spriv->num_sg, i) { 390 s->dma_address = (uintptr_t)sg_virt(s); 391 total += sg_dma_len(s); 392 } 393 } else { 394 spriv->num_sg = scsi_dma_map(cmd); 395 scsi_for_each_sg(cmd, s, spriv->num_sg, i) 396 total += sg_dma_len(s); 397 } 398 spriv->cur_residue = sg_dma_len(sg); 399 spriv->prv_sg = NULL; 400 spriv->cur_sg = sg; 401 spriv->tot_residue = total; 402 } 403 404 static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent, 405 struct scsi_cmnd *cmd) 406 { 407 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); 408 409 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { 410 return ent->sense_dma + 411 (ent->sense_ptr - cmd->sense_buffer); 412 } 413 414 return sg_dma_address(p->cur_sg) + 415 (sg_dma_len(p->cur_sg) - 416 p->cur_residue); 417 } 418 419 static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent, 420 struct scsi_cmnd *cmd) 421 { 422 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); 423 424 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { 425 return SCSI_SENSE_BUFFERSIZE - 426 (ent->sense_ptr - cmd->sense_buffer); 427 } 428 return p->cur_residue; 429 } 430 431 static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent, 432 struct scsi_cmnd *cmd, unsigned int len) 433 { 434 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); 435 436 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { 437 ent->sense_ptr += len; 438 return; 439 } 440 441 p->cur_residue -= len; 442 p->tot_residue -= len; 443 if (p->cur_residue < 0 || p->tot_residue < 0) { 444 shost_printk(KERN_ERR, esp->host, 445 "Data transfer overflow.\n"); 446 shost_printk(KERN_ERR, esp->host, 447 "cur_residue[%d] tot_residue[%d] len[%u]\n", 448 p->cur_residue, p->tot_residue, len); 449 p->cur_residue = 0; 450 p->tot_residue = 0; 451 } 452 if (!p->cur_residue && p->tot_residue) { 453 p->prv_sg = p->cur_sg; 454 p->cur_sg = sg_next(p->cur_sg); 455 p->cur_residue = sg_dma_len(p->cur_sg); 456 } 457 } 458 459 static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd) 460 { 461 if (!(esp->flags & ESP_FLAG_NO_DMA_MAP)) 462 scsi_dma_unmap(cmd); 463 } 464 465 static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) 466 { 467 struct scsi_cmnd *cmd = ent->cmd; 468 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); 469 470 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { 471 ent->saved_sense_ptr = ent->sense_ptr; 472 return; 473 } 474 ent->saved_cur_residue = spriv->cur_residue; 475 ent->saved_prv_sg = spriv->prv_sg; 476 ent->saved_cur_sg = spriv->cur_sg; 477 ent->saved_tot_residue = spriv->tot_residue; 478 } 479 480 static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent) 481 { 482 struct scsi_cmnd *cmd = ent->cmd; 483 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); 484 485 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { 486 ent->sense_ptr = ent->saved_sense_ptr; 487 return; 488 } 489 spriv->cur_residue = ent->saved_cur_residue; 490 spriv->prv_sg = ent->saved_prv_sg; 491 spriv->cur_sg = ent->saved_cur_sg; 492 spriv->tot_residue = ent->saved_tot_residue; 493 } 494 495 static void esp_write_tgt_config3(struct esp *esp, int tgt) 496 { 497 if (esp->rev > ESP100A) { 498 u8 val = esp->target[tgt].esp_config3; 499 500 if (val != esp->prev_cfg3) { 501 esp->prev_cfg3 = val; 502 esp_write8(val, ESP_CFG3); 503 } 504 } 505 } 506 507 static void esp_write_tgt_sync(struct esp *esp, int tgt) 508 { 509 u8 off = esp->target[tgt].esp_offset; 510 u8 per = esp->target[tgt].esp_period; 511 512 if (off != esp->prev_soff) { 513 esp->prev_soff = off; 514 esp_write8(off, ESP_SOFF); 515 
static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts. */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* ESP chip limits other variants by 16-bits of transfer
		 * count.  Actually on FAS100A and FAS236 we could get
		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care". */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
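/* A worked example of the queue-plugging policy above: if an untagged
 * command arrives while, say, three tagged commands are still
 * outstanding, lp->hold is set and -EBUSY is returned.  The LUN stays
 * plugged while esp_free_lun_tag() below drops num_tagged toward zero;
 * once it hits zero the untagged command is admitted as
 * lp->non_tagged_cmd and the hold is released.
 */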
static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}

static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	ent->sense_ptr = ent->cmd->sense_buffer;
	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		ent->sense_dma = (uintptr_t)ent->sense_ptr;
		return;
	}

	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		dma_unmap_single(esp->dev, ent->sense_dma,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	ent->sense_ptr = NULL;
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}

static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for asynchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* If there are multiple message bytes, use Select and Stop */
		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		select_and_stop = true;
	}

	if (select_and_stop) {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}

static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
921 */ 922 cmd->result = ((DRIVER_SENSE << 24) | 923 (DID_OK << 16) | 924 (COMMAND_COMPLETE << 8) | 925 (SAM_STAT_CHECK_CONDITION << 0)); 926 927 ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE; 928 if (esp_debug & ESP_DEBUG_AUTOSENSE) { 929 int i; 930 931 printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ", 932 esp->host->unique_id, tgt, lun); 933 for (i = 0; i < 18; i++) 934 printk("%02x ", cmd->sense_buffer[i]); 935 printk("]\n"); 936 } 937 } 938 939 cmd->scsi_done(cmd); 940 941 list_del(&ent->list); 942 esp_put_ent(esp, ent); 943 944 esp_maybe_execute_command(esp); 945 } 946 947 static unsigned int compose_result(unsigned int status, unsigned int message, 948 unsigned int driver_code) 949 { 950 return (status | (message << 8) | (driver_code << 16)); 951 } 952 953 static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) 954 { 955 struct scsi_device *dev = ent->cmd->device; 956 struct esp_lun_data *lp = dev->hostdata; 957 958 scsi_track_queue_full(dev, lp->num_tagged - 1); 959 } 960 961 static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 962 { 963 struct scsi_device *dev = cmd->device; 964 struct esp *esp = shost_priv(dev->host); 965 struct esp_cmd_priv *spriv; 966 struct esp_cmd_entry *ent; 967 968 ent = esp_get_ent(esp); 969 if (!ent) 970 return SCSI_MLQUEUE_HOST_BUSY; 971 972 ent->cmd = cmd; 973 974 cmd->scsi_done = done; 975 976 spriv = ESP_CMD_PRIV(cmd); 977 spriv->num_sg = 0; 978 979 list_add_tail(&ent->list, &esp->queued_cmds); 980 981 esp_maybe_execute_command(esp); 982 983 return 0; 984 } 985 986 static DEF_SCSI_QCMD(esp_queuecommand) 987 988 static int esp_check_gross_error(struct esp *esp) 989 { 990 if (esp->sreg & ESP_STAT_SPAM) { 991 /* Gross Error, could be one of: 992 * - top of fifo overwritten 993 * - top of command register overwritten 994 * - DMA programmed with wrong direction 995 * - improper phase change 996 */ 997 shost_printk(KERN_ERR, esp->host, 998 "Gross error sreg[%02x]\n", esp->sreg); 999 /* XXX Reset the chip. XXX */ 1000 return 1; 1001 } 1002 return 0; 1003 } 1004 1005 static int esp_check_spur_intr(struct esp *esp) 1006 { 1007 switch (esp->rev) { 1008 case ESP100: 1009 case ESP100A: 1010 /* The interrupt pending bit of the status register cannot 1011 * be trusted on these revisions. 1012 */ 1013 esp->sreg &= ~ESP_STAT_INTR; 1014 break; 1015 1016 default: 1017 if (!(esp->sreg & ESP_STAT_INTR)) { 1018 if (esp->ireg & ESP_INTR_SR) 1019 return 1; 1020 1021 /* If the DMA is indicating interrupt pending and the 1022 * ESP is not, the only possibility is a DMA error. 1023 */ 1024 if (!esp->ops->dma_error(esp)) { 1025 shost_printk(KERN_ERR, esp->host, 1026 "Spurious irq, sreg=%02x.\n", 1027 esp->sreg); 1028 return -1; 1029 } 1030 1031 shost_printk(KERN_ERR, esp->host, "DMA error\n"); 1032 1033 /* XXX Reset the chip. XXX */ 1034 return -1; 1035 } 1036 break; 1037 } 1038 1039 return 0; 1040 } 1041 1042 static void esp_schedule_reset(struct esp *esp) 1043 { 1044 esp_log_reset("esp_schedule_reset() from %ps\n", 1045 __builtin_return_address(0)); 1046 esp->flags |= ESP_FLAG_RESETTING; 1047 esp_event(esp, ESP_EVENT_RESET); 1048 } 1049 1050 /* In order to avoid having to add a special half-reconnected state 1051 * into the driver we just sit here and poll through the rest of 1052 * the reselection process to get the tag message bytes. 
1053 */ 1054 static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, 1055 struct esp_lun_data *lp) 1056 { 1057 struct esp_cmd_entry *ent; 1058 int i; 1059 1060 if (!lp->num_tagged) { 1061 shost_printk(KERN_ERR, esp->host, 1062 "Reconnect w/num_tagged==0\n"); 1063 return NULL; 1064 } 1065 1066 esp_log_reconnect("reconnect tag, "); 1067 1068 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { 1069 if (esp->ops->irq_pending(esp)) 1070 break; 1071 } 1072 if (i == ESP_QUICKIRQ_LIMIT) { 1073 shost_printk(KERN_ERR, esp->host, 1074 "Reconnect IRQ1 timeout\n"); 1075 return NULL; 1076 } 1077 1078 esp->sreg = esp_read8(ESP_STATUS); 1079 esp->ireg = esp_read8(ESP_INTRPT); 1080 1081 esp_log_reconnect("IRQ(%d:%x:%x), ", 1082 i, esp->ireg, esp->sreg); 1083 1084 if (esp->ireg & ESP_INTR_DC) { 1085 shost_printk(KERN_ERR, esp->host, 1086 "Reconnect, got disconnect.\n"); 1087 return NULL; 1088 } 1089 1090 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { 1091 shost_printk(KERN_ERR, esp->host, 1092 "Reconnect, not MIP sreg[%02x].\n", esp->sreg); 1093 return NULL; 1094 } 1095 1096 /* DMA in the tag bytes... */ 1097 esp->command_block[0] = 0xff; 1098 esp->command_block[1] = 0xff; 1099 esp->ops->send_dma_cmd(esp, esp->command_block_dma, 1100 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI); 1101 1102 /* ACK the message. */ 1103 scsi_esp_cmd(esp, ESP_CMD_MOK); 1104 1105 for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) { 1106 if (esp->ops->irq_pending(esp)) { 1107 esp->sreg = esp_read8(ESP_STATUS); 1108 esp->ireg = esp_read8(ESP_INTRPT); 1109 if (esp->ireg & ESP_INTR_FDONE) 1110 break; 1111 } 1112 udelay(1); 1113 } 1114 if (i == ESP_RESELECT_TAG_LIMIT) { 1115 shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n"); 1116 return NULL; 1117 } 1118 esp->ops->dma_drain(esp); 1119 esp->ops->dma_invalidate(esp); 1120 1121 esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n", 1122 i, esp->ireg, esp->sreg, 1123 esp->command_block[0], 1124 esp->command_block[1]); 1125 1126 if (esp->command_block[0] < SIMPLE_QUEUE_TAG || 1127 esp->command_block[0] > ORDERED_QUEUE_TAG) { 1128 shost_printk(KERN_ERR, esp->host, 1129 "Reconnect, bad tag type %02x.\n", 1130 esp->command_block[0]); 1131 return NULL; 1132 } 1133 1134 ent = lp->tagged_cmds[esp->command_block[1]]; 1135 if (!ent) { 1136 shost_printk(KERN_ERR, esp->host, 1137 "Reconnect, no entry for tag %02x.\n", 1138 esp->command_block[1]); 1139 return NULL; 1140 } 1141 1142 return ent; 1143 } 1144 1145 static int esp_reconnect(struct esp *esp) 1146 { 1147 struct esp_cmd_entry *ent; 1148 struct esp_target_data *tp; 1149 struct esp_lun_data *lp; 1150 struct scsi_device *dev; 1151 int target, lun; 1152 1153 BUG_ON(esp->active_cmd); 1154 if (esp->rev == FASHME) { 1155 /* FASHME puts the target and lun numbers directly 1156 * into the fifo. 1157 */ 1158 target = esp->fifo[0]; 1159 lun = esp->fifo[1] & 0x7; 1160 } else { 1161 u8 bits = esp_read8(ESP_FDATA); 1162 1163 /* Older chips put the lun directly into the fifo, but 1164 * the target is given as a sample of the arbitration 1165 * lines on the bus at reselection time. So we should 1166 * see the ID of the ESP and the one reconnecting target 1167 * set in the bitmap. 
1168 */ 1169 if (!(bits & esp->scsi_id_mask)) 1170 goto do_reset; 1171 bits &= ~esp->scsi_id_mask; 1172 if (!bits || (bits & (bits - 1))) 1173 goto do_reset; 1174 1175 target = ffs(bits) - 1; 1176 lun = (esp_read8(ESP_FDATA) & 0x7); 1177 1178 scsi_esp_cmd(esp, ESP_CMD_FLUSH); 1179 if (esp->rev == ESP100) { 1180 u8 ireg = esp_read8(ESP_INTRPT); 1181 /* This chip has a bug during reselection that can 1182 * cause a spurious illegal-command interrupt, which 1183 * we simply ACK here. Another possibility is a bus 1184 * reset so we must check for that. 1185 */ 1186 if (ireg & ESP_INTR_SR) 1187 goto do_reset; 1188 } 1189 scsi_esp_cmd(esp, ESP_CMD_NULL); 1190 } 1191 1192 esp_write_tgt_sync(esp, target); 1193 esp_write_tgt_config3(esp, target); 1194 1195 scsi_esp_cmd(esp, ESP_CMD_MOK); 1196 1197 if (esp->rev == FASHME) 1198 esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT, 1199 ESP_BUSID); 1200 1201 tp = &esp->target[target]; 1202 dev = __scsi_device_lookup_by_target(tp->starget, lun); 1203 if (!dev) { 1204 shost_printk(KERN_ERR, esp->host, 1205 "Reconnect, no lp tgt[%u] lun[%u]\n", 1206 target, lun); 1207 goto do_reset; 1208 } 1209 lp = dev->hostdata; 1210 1211 ent = lp->non_tagged_cmd; 1212 if (!ent) { 1213 ent = esp_reconnect_with_tag(esp, lp); 1214 if (!ent) 1215 goto do_reset; 1216 } 1217 1218 esp->active_cmd = ent; 1219 1220 esp_event(esp, ESP_EVENT_CHECK_PHASE); 1221 esp_restore_pointers(esp, ent); 1222 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; 1223 return 1; 1224 1225 do_reset: 1226 esp_schedule_reset(esp); 1227 return 0; 1228 } 1229 1230 static int esp_finish_select(struct esp *esp) 1231 { 1232 struct esp_cmd_entry *ent; 1233 struct scsi_cmnd *cmd; 1234 1235 /* No longer selecting. */ 1236 esp->select_state = ESP_SELECT_NONE; 1237 1238 esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS; 1239 ent = esp->active_cmd; 1240 cmd = ent->cmd; 1241 1242 if (esp->ops->dma_error(esp)) { 1243 /* If we see a DMA error during or as a result of selection, 1244 * all bets are off. 1245 */ 1246 esp_schedule_reset(esp); 1247 esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16)); 1248 return 0; 1249 } 1250 1251 esp->ops->dma_invalidate(esp); 1252 1253 if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { 1254 struct esp_target_data *tp = &esp->target[cmd->device->id]; 1255 1256 /* Carefully back out of the selection attempt. Release 1257 * resources (such as DMA mapping & TAG) and reset state (such 1258 * as message out and command delivery variables). 1259 */ 1260 if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { 1261 esp_unmap_dma(esp, cmd); 1262 esp_free_lun_tag(ent, cmd->device->hostdata); 1263 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE); 1264 esp->cmd_bytes_ptr = NULL; 1265 esp->cmd_bytes_left = 0; 1266 } else { 1267 esp_unmap_sense(esp, ent); 1268 } 1269 1270 /* Now that the state is unwound properly, put back onto 1271 * the issue queue. This command is no longer active. 1272 */ 1273 list_move(&ent->list, &esp->queued_cmds); 1274 esp->active_cmd = NULL; 1275 1276 /* Return value ignored by caller, it directly invokes 1277 * esp_reconnect(). 1278 */ 1279 return 0; 1280 } 1281 1282 if (esp->ireg == ESP_INTR_DC) { 1283 struct scsi_device *dev = cmd->device; 1284 1285 /* Disconnect. Make sure we re-negotiate sync and 1286 * wide parameters if this target starts responding 1287 * again in the future. 
1288 */ 1289 esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO; 1290 1291 scsi_esp_cmd(esp, ESP_CMD_ESEL); 1292 esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16)); 1293 return 1; 1294 } 1295 1296 if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { 1297 /* Selection successful. On pre-FAST chips we have 1298 * to do a NOP and possibly clean out the FIFO. 1299 */ 1300 if (esp->rev <= ESP236) { 1301 int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; 1302 1303 scsi_esp_cmd(esp, ESP_CMD_NULL); 1304 1305 if (!fcnt && 1306 (!esp->prev_soff || 1307 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) 1308 esp_flush_fifo(esp); 1309 } 1310 1311 /* If we are doing a Select And Stop command, negotiation, etc. 1312 * we'll do the right thing as we transition to the next phase. 1313 */ 1314 esp_event(esp, ESP_EVENT_CHECK_PHASE); 1315 return 0; 1316 } 1317 1318 shost_printk(KERN_INFO, esp->host, 1319 "Unexpected selection completion ireg[%x]\n", esp->ireg); 1320 esp_schedule_reset(esp); 1321 return 0; 1322 } 1323 1324 static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, 1325 struct scsi_cmnd *cmd) 1326 { 1327 int fifo_cnt, ecount, bytes_sent, flush_fifo; 1328 1329 fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; 1330 if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) 1331 fifo_cnt <<= 1; 1332 1333 ecount = 0; 1334 if (!(esp->sreg & ESP_STAT_TCNT)) { 1335 ecount = ((unsigned int)esp_read8(ESP_TCLOW) | 1336 (((unsigned int)esp_read8(ESP_TCMED)) << 8)); 1337 if (esp->rev == FASHME) 1338 ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; 1339 if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB)) 1340 ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16; 1341 } 1342 1343 bytes_sent = esp->data_dma_len; 1344 bytes_sent -= ecount; 1345 bytes_sent -= esp->send_cmd_residual; 1346 1347 /* 1348 * The am53c974 has a DMA 'pecularity'. The doc states: 1349 * In some odd byte conditions, one residual byte will 1350 * be left in the SCSI FIFO, and the FIFO Flags will 1351 * never count to '0 '. When this happens, the residual 1352 * byte should be retrieved via PIO following completion 1353 * of the BLAST operation. 1354 */ 1355 if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) { 1356 size_t count = 1; 1357 size_t offset = bytes_sent; 1358 u8 bval = esp_read8(ESP_FDATA); 1359 1360 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) 1361 ent->sense_ptr[bytes_sent] = bval; 1362 else { 1363 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); 1364 u8 *ptr; 1365 1366 ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg, 1367 &offset, &count); 1368 if (likely(ptr)) { 1369 *(ptr + offset) = bval; 1370 scsi_kunmap_atomic_sg(ptr); 1371 } 1372 } 1373 bytes_sent += fifo_cnt; 1374 ent->flags &= ~ESP_CMD_FLAG_RESIDUAL; 1375 } 1376 if (!(ent->flags & ESP_CMD_FLAG_WRITE)) 1377 bytes_sent -= fifo_cnt; 1378 1379 flush_fifo = 0; 1380 if (!esp->prev_soff) { 1381 /* Synchronous data transfer, always flush fifo. */ 1382 flush_fifo = 1; 1383 } else { 1384 if (esp->rev == ESP100) { 1385 u32 fflags, phase; 1386 1387 /* ESP100 has a chip bug where in the synchronous data 1388 * phase it can mistake a final long REQ pulse from the 1389 * target as an extra data byte. Fun. 1390 * 1391 * To detect this case we resample the status register 1392 * and fifo flags. If we're still in a data phase and 1393 * we see spurious chunks in the fifo, we return error 1394 * to the caller which should reset and set things up 1395 * such that we only try future transfers to this 1396 * target in synchronous mode. 
1397 */ 1398 esp->sreg = esp_read8(ESP_STATUS); 1399 phase = esp->sreg & ESP_STAT_PMASK; 1400 fflags = esp_read8(ESP_FFLAGS); 1401 1402 if ((phase == ESP_DOP && 1403 (fflags & ESP_FF_ONOTZERO)) || 1404 (phase == ESP_DIP && 1405 (fflags & ESP_FF_FBYTES))) 1406 return -1; 1407 } 1408 if (!(ent->flags & ESP_CMD_FLAG_WRITE)) 1409 flush_fifo = 1; 1410 } 1411 1412 if (flush_fifo) 1413 esp_flush_fifo(esp); 1414 1415 return bytes_sent; 1416 } 1417 1418 static void esp_setsync(struct esp *esp, struct esp_target_data *tp, 1419 u8 scsi_period, u8 scsi_offset, 1420 u8 esp_stp, u8 esp_soff) 1421 { 1422 spi_period(tp->starget) = scsi_period; 1423 spi_offset(tp->starget) = scsi_offset; 1424 spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0; 1425 1426 if (esp_soff) { 1427 esp_stp &= 0x1f; 1428 esp_soff |= esp->radelay; 1429 if (esp->rev >= FAS236) { 1430 u8 bit = ESP_CONFIG3_FSCSI; 1431 if (esp->rev >= FAS100A) 1432 bit = ESP_CONFIG3_FAST; 1433 1434 if (scsi_period < 50) { 1435 if (esp->rev == FASHME) 1436 esp_soff &= ~esp->radelay; 1437 tp->esp_config3 |= bit; 1438 } else { 1439 tp->esp_config3 &= ~bit; 1440 } 1441 esp->prev_cfg3 = tp->esp_config3; 1442 esp_write8(esp->prev_cfg3, ESP_CFG3); 1443 } 1444 } 1445 1446 tp->esp_period = esp->prev_stp = esp_stp; 1447 tp->esp_offset = esp->prev_soff = esp_soff; 1448 1449 esp_write8(esp_soff, ESP_SOFF); 1450 esp_write8(esp_stp, ESP_STP); 1451 1452 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); 1453 1454 spi_display_xfer_agreement(tp->starget); 1455 } 1456 1457 static void esp_msgin_reject(struct esp *esp) 1458 { 1459 struct esp_cmd_entry *ent = esp->active_cmd; 1460 struct scsi_cmnd *cmd = ent->cmd; 1461 struct esp_target_data *tp; 1462 int tgt; 1463 1464 tgt = cmd->device->id; 1465 tp = &esp->target[tgt]; 1466 1467 if (tp->flags & ESP_TGT_NEGO_WIDE) { 1468 tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE); 1469 1470 if (!esp_need_to_nego_sync(tp)) { 1471 tp->flags &= ~ESP_TGT_CHECK_NEGO; 1472 scsi_esp_cmd(esp, ESP_CMD_RATN); 1473 } else { 1474 esp->msg_out_len = 1475 spi_populate_sync_msg(&esp->msg_out[0], 1476 tp->nego_goal_period, 1477 tp->nego_goal_offset); 1478 tp->flags |= ESP_TGT_NEGO_SYNC; 1479 scsi_esp_cmd(esp, ESP_CMD_SATN); 1480 } 1481 return; 1482 } 1483 1484 if (tp->flags & ESP_TGT_NEGO_SYNC) { 1485 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); 1486 tp->esp_period = 0; 1487 tp->esp_offset = 0; 1488 esp_setsync(esp, tp, 0, 0, 0, 0); 1489 scsi_esp_cmd(esp, ESP_CMD_RATN); 1490 return; 1491 } 1492 1493 shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n"); 1494 esp_schedule_reset(esp); 1495 } 1496 1497 static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp) 1498 { 1499 u8 period = esp->msg_in[3]; 1500 u8 offset = esp->msg_in[4]; 1501 u8 stp; 1502 1503 if (!(tp->flags & ESP_TGT_NEGO_SYNC)) 1504 goto do_reject; 1505 1506 if (offset > 15) 1507 goto do_reject; 1508 1509 if (offset) { 1510 int one_clock; 1511 1512 if (period > esp->max_period) { 1513 period = offset = 0; 1514 goto do_sdtr; 1515 } 1516 if (period < esp->min_period) 1517 goto do_reject; 1518 1519 one_clock = esp->ccycle / 1000; 1520 stp = DIV_ROUND_UP(period << 2, one_clock); 1521 if (stp && esp->rev >= FAS236) { 1522 if (stp >= 50) 1523 stp--; 1524 } 1525 } else { 1526 stp = 0; 1527 } 1528 1529 esp_setsync(esp, tp, period, offset, stp, offset); 1530 return; 1531 1532 do_reject: 1533 esp->msg_out[0] = MESSAGE_REJECT; 1534 esp->msg_out_len = 1; 1535 scsi_esp_cmd(esp, ESP_CMD_SATN); 1536 return; 1537 1538 do_sdtr: 1539 tp->nego_goal_period = 
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
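/* Extended messages arrive framed as msg_in[0] = EXTENDED_MESSAGE,
 * msg_in[1] = length, msg_in[2] = type (e.g. EXTENDED_SDTR), which is
 * why esp_msgin_process() below keeps returning "more bytes needed"
 * until msg_in[1] + 2 bytes have accumulated before dispatching to
 * esp_msgin_extended().
 */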
1624 */ 1625 static int esp_msgin_process(struct esp *esp) 1626 { 1627 u8 msg0 = esp->msg_in[0]; 1628 int len = esp->msg_in_len; 1629 1630 if (msg0 & 0x80) { 1631 /* Identify */ 1632 shost_printk(KERN_INFO, esp->host, 1633 "Unexpected msgin identify\n"); 1634 return 0; 1635 } 1636 1637 switch (msg0) { 1638 case EXTENDED_MESSAGE: 1639 if (len == 1) 1640 return 1; 1641 if (len < esp->msg_in[1] + 2) 1642 return 1; 1643 esp_msgin_extended(esp); 1644 return 0; 1645 1646 case IGNORE_WIDE_RESIDUE: { 1647 struct esp_cmd_entry *ent; 1648 struct esp_cmd_priv *spriv; 1649 if (len == 1) 1650 return 1; 1651 1652 if (esp->msg_in[1] != 1) 1653 goto do_reject; 1654 1655 ent = esp->active_cmd; 1656 spriv = ESP_CMD_PRIV(ent->cmd); 1657 1658 if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) { 1659 spriv->cur_sg = spriv->prv_sg; 1660 spriv->cur_residue = 1; 1661 } else 1662 spriv->cur_residue++; 1663 spriv->tot_residue++; 1664 return 0; 1665 } 1666 case NOP: 1667 return 0; 1668 case RESTORE_POINTERS: 1669 esp_restore_pointers(esp, esp->active_cmd); 1670 return 0; 1671 case SAVE_POINTERS: 1672 esp_save_pointers(esp, esp->active_cmd); 1673 return 0; 1674 1675 case COMMAND_COMPLETE: 1676 case DISCONNECT: { 1677 struct esp_cmd_entry *ent = esp->active_cmd; 1678 1679 ent->message = msg0; 1680 esp_event(esp, ESP_EVENT_FREE_BUS); 1681 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; 1682 return 0; 1683 } 1684 case MESSAGE_REJECT: 1685 esp_msgin_reject(esp); 1686 return 0; 1687 1688 default: 1689 do_reject: 1690 esp->msg_out[0] = MESSAGE_REJECT; 1691 esp->msg_out_len = 1; 1692 scsi_esp_cmd(esp, ESP_CMD_SATN); 1693 return 0; 1694 } 1695 } 1696 1697 static int esp_process_event(struct esp *esp) 1698 { 1699 int write, i; 1700 1701 again: 1702 write = 0; 1703 esp_log_event("process event %d phase %x\n", 1704 esp->event, esp->sreg & ESP_STAT_PMASK); 1705 switch (esp->event) { 1706 case ESP_EVENT_CHECK_PHASE: 1707 switch (esp->sreg & ESP_STAT_PMASK) { 1708 case ESP_DOP: 1709 esp_event(esp, ESP_EVENT_DATA_OUT); 1710 break; 1711 case ESP_DIP: 1712 esp_event(esp, ESP_EVENT_DATA_IN); 1713 break; 1714 case ESP_STATP: 1715 esp_flush_fifo(esp); 1716 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ); 1717 esp_event(esp, ESP_EVENT_STATUS); 1718 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; 1719 return 1; 1720 1721 case ESP_MOP: 1722 esp_event(esp, ESP_EVENT_MSGOUT); 1723 break; 1724 1725 case ESP_MIP: 1726 esp_event(esp, ESP_EVENT_MSGIN); 1727 break; 1728 1729 case ESP_CMDP: 1730 esp_event(esp, ESP_EVENT_CMD_START); 1731 break; 1732 1733 default: 1734 shost_printk(KERN_INFO, esp->host, 1735 "Unexpected phase, sreg=%02x\n", 1736 esp->sreg); 1737 esp_schedule_reset(esp); 1738 return 0; 1739 } 1740 goto again; 1741 1742 case ESP_EVENT_DATA_IN: 1743 write = 1; 1744 /* fallthru */ 1745 1746 case ESP_EVENT_DATA_OUT: { 1747 struct esp_cmd_entry *ent = esp->active_cmd; 1748 struct scsi_cmnd *cmd = ent->cmd; 1749 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd); 1750 unsigned int dma_len = esp_cur_dma_len(ent, cmd); 1751 1752 if (esp->rev == ESP100) 1753 scsi_esp_cmd(esp, ESP_CMD_NULL); 1754 1755 if (write) 1756 ent->flags |= ESP_CMD_FLAG_WRITE; 1757 else 1758 ent->flags &= ~ESP_CMD_FLAG_WRITE; 1759 1760 if (esp->ops->dma_length_limit) 1761 dma_len = esp->ops->dma_length_limit(esp, dma_addr, 1762 dma_len); 1763 else 1764 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len); 1765 1766 esp->data_dma_len = dma_len; 1767 1768 if (!dma_len) { 1769 shost_printk(KERN_ERR, esp->host, 1770 "DMA length is zero!\n"); 1771 shost_printk(KERN_ERR, esp->host, 1772 "cur adr[%08llx] 
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force async mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in status\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("Command done status[%x] message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in freebus\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo.  */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
				for (i = 0; i < esp->msg_out_len; i++)
					esp_write8(esp->msg_out[i], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);

			/* XXX if the chip went into disconnected mode,
			 * we can't run the phase state machine anyway.
			 */
			if (!(esp->ireg & ESP_INTR_DC))
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}

		esp->msg_out_len = 0;

		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			/* Check whether a bus reset is to be done next */
			if (esp->event == ESP_EVENT_RESET)
				return 0;

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "MSGIN neither BSERV nor FDONE, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;
	default:
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected event %x, resetting\n", esp->event);
		esp_schedule_reset(esp);
		return 0;
	}
	return 1;
}

static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
		esp_unmap_sense(esp, ent);

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}

static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}

static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2112 */ 2113 esp->sreg = esp_read8(ESP_STATUS); 2114 esp->seqreg = esp_read8(ESP_SSTEP); 2115 esp->ireg = esp_read8(ESP_INTRPT); 2116 2117 if (esp->flags & ESP_FLAG_RESETTING) { 2118 finish_reset = 1; 2119 } else { 2120 if (esp_check_gross_error(esp)) 2121 return; 2122 2123 finish_reset = esp_check_spur_intr(esp); 2124 if (finish_reset < 0) 2125 return; 2126 } 2127 2128 if (esp->ireg & ESP_INTR_SR) 2129 finish_reset = 1; 2130 2131 if (finish_reset) { 2132 esp_reset_cleanup(esp); 2133 if (esp->eh_reset) { 2134 complete(esp->eh_reset); 2135 esp->eh_reset = NULL; 2136 } 2137 return; 2138 } 2139 2140 phase = (esp->sreg & ESP_STAT_PMASK); 2141 if (esp->rev == FASHME) { 2142 if (((phase != ESP_DIP && phase != ESP_DOP) && 2143 esp->select_state == ESP_SELECT_NONE && 2144 esp->event != ESP_EVENT_STATUS && 2145 esp->event != ESP_EVENT_DATA_DONE) || 2146 (esp->ireg & ESP_INTR_RSEL)) { 2147 esp->sreg2 = esp_read8(ESP_STATUS2); 2148 if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || 2149 (esp->sreg2 & ESP_STAT2_F1BYTE)) 2150 hme_read_fifo(esp); 2151 } 2152 } 2153 2154 esp_log_intr("intr sreg[%02x] seqreg[%02x] " 2155 "sreg2[%02x] ireg[%02x]\n", 2156 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); 2157 2158 intr_done = 0; 2159 2160 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { 2161 shost_printk(KERN_INFO, esp->host, 2162 "unexpected IREG %02x\n", esp->ireg); 2163 if (esp->ireg & ESP_INTR_IC) 2164 esp_dump_cmd_log(esp); 2165 2166 esp_schedule_reset(esp); 2167 } else { 2168 if (esp->ireg & ESP_INTR_RSEL) { 2169 if (esp->active_cmd) 2170 (void) esp_finish_select(esp); 2171 intr_done = esp_reconnect(esp); 2172 } else { 2173 /* Some combination of FDONE, BSERV, DC. */ 2174 if (esp->select_state != ESP_SELECT_NONE) 2175 intr_done = esp_finish_select(esp); 2176 } 2177 } 2178 while (!intr_done) 2179 intr_done = esp_process_event(esp); 2180 } 2181 2182 irqreturn_t scsi_esp_intr(int irq, void *dev_id) 2183 { 2184 struct esp *esp = dev_id; 2185 unsigned long flags; 2186 irqreturn_t ret; 2187 2188 spin_lock_irqsave(esp->host->host_lock, flags); 2189 ret = IRQ_NONE; 2190 if (esp->ops->irq_pending(esp)) { 2191 ret = IRQ_HANDLED; 2192 for (;;) { 2193 int i; 2194 2195 __esp_interrupt(esp); 2196 if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK)) 2197 break; 2198 esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK; 2199 2200 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { 2201 if (esp->ops->irq_pending(esp)) 2202 break; 2203 } 2204 if (i == ESP_QUICKIRQ_LIMIT) 2205 break; 2206 } 2207 } 2208 spin_unlock_irqrestore(esp->host->host_lock, flags); 2209 2210 return ret; 2211 } 2212 EXPORT_SYMBOL(scsi_esp_intr); 2213 2214 static void esp_get_revision(struct esp *esp) 2215 { 2216 u8 val; 2217 2218 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); 2219 if (esp->config2 == 0) { 2220 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); 2221 esp_write8(esp->config2, ESP_CFG2); 2222 2223 val = esp_read8(ESP_CFG2); 2224 val &= ~ESP_CONFIG2_MAGIC; 2225 2226 esp->config2 = 0; 2227 if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { 2228 /* 2229 * If what we write to cfg2 does not come back, 2230 * cfg2 is not implemented. 2231 * Therefore this must be a plain esp100. 
2232 */ 2233 esp->rev = ESP100; 2234 return; 2235 } 2236 } 2237 2238 esp_set_all_config3(esp, 5); 2239 esp->prev_cfg3 = 5; 2240 esp_write8(esp->config2, ESP_CFG2); 2241 esp_write8(0, ESP_CFG3); 2242 esp_write8(esp->prev_cfg3, ESP_CFG3); 2243 2244 val = esp_read8(ESP_CFG3); 2245 if (val != 5) { 2246 /* The cfg2 register is implemented, however 2247 * cfg3 is not, must be esp100a. 2248 */ 2249 esp->rev = ESP100A; 2250 } else { 2251 esp_set_all_config3(esp, 0); 2252 esp->prev_cfg3 = 0; 2253 esp_write8(esp->prev_cfg3, ESP_CFG3); 2254 2255 /* All of cfg{1,2,3} implemented, must be one of 2256 * the fas variants, figure out which one. 2257 */ 2258 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) { 2259 esp->rev = FAST; 2260 esp->sync_defp = SYNC_DEFP_FAST; 2261 } else { 2262 esp->rev = ESP236; 2263 } 2264 } 2265 } 2266 2267 static void esp_init_swstate(struct esp *esp) 2268 { 2269 int i; 2270 2271 INIT_LIST_HEAD(&esp->queued_cmds); 2272 INIT_LIST_HEAD(&esp->active_cmds); 2273 INIT_LIST_HEAD(&esp->esp_cmd_pool); 2274 2275 /* Start with a clear state, domain validation (via ->slave_configure, 2276 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged 2277 * commands. 2278 */ 2279 for (i = 0 ; i < ESP_MAX_TARGET; i++) { 2280 esp->target[i].flags = 0; 2281 esp->target[i].nego_goal_period = 0; 2282 esp->target[i].nego_goal_offset = 0; 2283 esp->target[i].nego_goal_width = 0; 2284 esp->target[i].nego_goal_tags = 0; 2285 } 2286 } 2287 2288 /* This places the ESP into a known state at boot time. */ 2289 static void esp_bootup_reset(struct esp *esp) 2290 { 2291 u8 val; 2292 2293 /* Reset the DMA */ 2294 esp->ops->reset_dma(esp); 2295 2296 /* Reset the ESP */ 2297 esp_reset_esp(esp); 2298 2299 /* Reset the SCSI bus, but tell ESP not to generate an irq */ 2300 val = esp_read8(ESP_CFG1); 2301 val |= ESP_CONFIG1_SRRDISAB; 2302 esp_write8(val, ESP_CFG1); 2303 2304 scsi_esp_cmd(esp, ESP_CMD_RS); 2305 udelay(400); 2306 2307 esp_write8(esp->config1, ESP_CFG1); 2308 2309 /* Eat any bitrot in the chip and we are done... */ 2310 esp_read8(ESP_INTRPT); 2311 } 2312 2313 static void esp_set_clock_params(struct esp *esp) 2314 { 2315 int fhz; 2316 u8 ccf; 2317 2318 /* This is getting messy but it has to be done correctly or else 2319 * you get weird behavior all over the place. We are trying to 2320 * basically figure out three pieces of information. 2321 * 2322 * a) Clock Conversion Factor 2323 * 2324 * This is a representation of the input crystal clock frequency 2325 * going into the ESP on this machine. Any operation whose timing 2326 * is longer than 400ns depends on this value being correct. For 2327 * example, you'll get blips for arbitration/selection during high 2328 * load or with multiple targets if this is not set correctly. 2329 * 2330 * b) Selection Time-Out 2331 * 2332 * The ESP isn't very bright and will arbitrate for the bus and try 2333 * to select a target forever if you let it. This value tells the 2334 * ESP when it has taken too long to negotiate and that it should 2335 * interrupt the CPU so we can see what happened. The value is 2336 * computed as follows (from NCR/Symbios chip docs). 2337 * 2338 * (Time Out Period) * (Input Clock) 2339 * STO = ---------------------------------- 2340 * (8192) * (Clock Conversion Factor) 2341 * 2342 * We use a time out period of 250ms (ESP_BUS_TIMEOUT). 
2343 * 2344 * c) Imperical constants for synchronous offset and transfer period 2345 * register values 2346 * 2347 * This entails the smallest and largest sync period we could ever 2348 * handle on this ESP. 2349 */ 2350 fhz = esp->cfreq; 2351 2352 ccf = ((fhz / 1000000) + 4) / 5; 2353 if (ccf == 1) 2354 ccf = 2; 2355 2356 /* If we can't find anything reasonable, just assume 20MHZ. 2357 * This is the clock frequency of the older sun4c's where I've 2358 * been unable to find the clock-frequency PROM property. All 2359 * other machines provide useful values it seems. 2360 */ 2361 if (fhz <= 5000000 || ccf < 1 || ccf > 8) { 2362 fhz = 20000000; 2363 ccf = 4; 2364 } 2365 2366 esp->cfact = (ccf == 8 ? 0 : ccf); 2367 esp->cfreq = fhz; 2368 esp->ccycle = ESP_HZ_TO_CYCLE(fhz); 2369 esp->ctick = ESP_TICK(ccf, esp->ccycle); 2370 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf); 2371 esp->sync_defp = SYNC_DEFP_SLOW; 2372 } 2373 2374 static const char *esp_chip_names[] = { 2375 "ESP100", 2376 "ESP100A", 2377 "ESP236", 2378 "FAS236", 2379 "AM53C974", 2380 "53CF9x-2", 2381 "FAS100A", 2382 "FAST", 2383 "FASHME", 2384 }; 2385 2386 static struct scsi_transport_template *esp_transport_template; 2387 2388 int scsi_esp_register(struct esp *esp) 2389 { 2390 static int instance; 2391 int err; 2392 2393 if (!esp->num_tags) 2394 esp->num_tags = ESP_DEFAULT_TAGS; 2395 esp->host->transportt = esp_transport_template; 2396 esp->host->max_lun = ESP_MAX_LUN; 2397 esp->host->cmd_per_lun = 2; 2398 esp->host->unique_id = instance; 2399 2400 esp_set_clock_params(esp); 2401 2402 esp_get_revision(esp); 2403 2404 esp_init_swstate(esp); 2405 2406 esp_bootup_reset(esp); 2407 2408 dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n", 2409 esp->host->unique_id, esp->regs, esp->dma_regs, 2410 esp->host->irq); 2411 dev_printk(KERN_INFO, esp->dev, 2412 "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n", 2413 esp->host->unique_id, esp_chip_names[esp->rev], 2414 esp->cfreq / 1000000, esp->cfact, esp->scsi_id); 2415 2416 /* Let the SCSI bus reset settle. 
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"AM53C974",
	"53CF9x-2",
	"FAS100A",
	"FAST",
	"FASHME",
};

static struct scsi_transport_template *esp_transport_template;

int scsi_esp_register(struct esp *esp)
{
	static int instance;
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, esp->dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, esp->dev);
	if (err)
		return err;

	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);

void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);

static int esp_target_alloc(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = starget;

	return 0;
}

static void esp_target_destroy(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = NULL;
}

static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}

static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];

	if (dev->tagged_supported)
		scsi_change_queue_depth(dev, esp->num_tags);

	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}

static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}
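/* Abort strategy, sketched: if the command is still sitting on
 * queued_cmds it has never touched the bus, so we simply unlink it
 * and complete it with DID_ABORT.  If it is the currently active
 * command, we queue an ABORT_TASK_SET message and assert ATN so the
 * target enters message-out phase, then wait on a completion.  A
 * disconnected command cannot be reached from here, so we return
 * FAILED and let the midlayer escalate to a bus or host reset.
 */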
2543 */ 2544 list_del(&ent->list); 2545 2546 cmd->result = DID_ABORT << 16; 2547 cmd->scsi_done(cmd); 2548 2549 esp_put_ent(esp, ent); 2550 2551 goto out_success; 2552 } 2553 2554 init_completion(&eh_done); 2555 2556 ent = esp->active_cmd; 2557 if (ent && ent->cmd == cmd) { 2558 /* Command is the currently active command on 2559 * the bus. If we already have an output message 2560 * pending, no dice. 2561 */ 2562 if (esp->msg_out_len) 2563 goto out_failure; 2564 2565 /* Send out an abort, encouraging the target to 2566 * go to MSGOUT phase by asserting ATN. 2567 */ 2568 esp->msg_out[0] = ABORT_TASK_SET; 2569 esp->msg_out_len = 1; 2570 ent->eh_done = &eh_done; 2571 2572 scsi_esp_cmd(esp, ESP_CMD_SATN); 2573 } else { 2574 /* The command is disconnected. This is not easy to 2575 * abort. For now we fail and let the scsi error 2576 * handling layer go try a scsi bus reset or host 2577 * reset. 2578 * 2579 * What we could do is put together a scsi command 2580 * solely for the purpose of sending an abort message 2581 * to the target. Coming up with all the code to 2582 * cook up scsi commands, special case them everywhere, 2583 * etc. is for questionable gain and it would be better 2584 * if the generic scsi error handling layer could do at 2585 * least some of that for us. 2586 * 2587 * Anyways this is an area for potential future improvement 2588 * in this driver. 2589 */ 2590 goto out_failure; 2591 } 2592 2593 spin_unlock_irqrestore(esp->host->host_lock, flags); 2594 2595 if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) { 2596 spin_lock_irqsave(esp->host->host_lock, flags); 2597 ent->eh_done = NULL; 2598 spin_unlock_irqrestore(esp->host->host_lock, flags); 2599 2600 return FAILED; 2601 } 2602 2603 return SUCCESS; 2604 2605 out_success: 2606 spin_unlock_irqrestore(esp->host->host_lock, flags); 2607 return SUCCESS; 2608 2609 out_failure: 2610 /* XXX This might be a good location to set ESP_TGT_BROKEN 2611 * XXX since we know which target/lun in particular is 2612 * XXX causing trouble. 2613 */ 2614 spin_unlock_irqrestore(esp->host->host_lock, flags); 2615 return FAILED; 2616 } 2617 2618 static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd) 2619 { 2620 struct esp *esp = shost_priv(cmd->device->host); 2621 struct completion eh_reset; 2622 unsigned long flags; 2623 2624 init_completion(&eh_reset); 2625 2626 spin_lock_irqsave(esp->host->host_lock, flags); 2627 2628 esp->eh_reset = &eh_reset; 2629 2630 /* XXX This is too simple... We should add lots of 2631 * XXX checks here so that if we find that the chip is 2632 * XXX very wedged we return failure immediately so 2633 * XXX that we can perform a full chip reset. 2634 */ 2635 esp->flags |= ESP_FLAG_RESETTING; 2636 scsi_esp_cmd(esp, ESP_CMD_RS); 2637 2638 spin_unlock_irqrestore(esp->host->host_lock, flags); 2639 2640 ssleep(esp_bus_reset_settle); 2641 2642 if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) { 2643 spin_lock_irqsave(esp->host->host_lock, flags); 2644 esp->eh_reset = NULL; 2645 spin_unlock_irqrestore(esp->host->host_lock, flags); 2646 2647 return FAILED; 2648 } 2649 2650 return SUCCESS; 2651 } 2652 2653 /* All bets are off, reset the entire device. 
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple...  We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}

/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}

struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
	struct esp *esp = shost_priv(host);
	enum spi_signal_type type;

	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
		type = SPI_SIGNAL_HVD;
	else
		type = SPI_SIGNAL_SE;

	spi_signalling(host) = type;
}

static void esp_set_offset(struct scsi_target *target, int offset)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
		tp->nego_goal_offset = 0;
	else
		tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_width = (width ? 1 : 0);
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};

static int __init esp_init(void)
{
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}

static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
"	0x00000800	Log events\n"
"	0x00001000	Log commands\n"
);
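/* Example (illustrative): passing esp_debug=0x0c as a module
 * parameter enables reset (0x04) and message-in (0x08) logging; any
 * of the bits listed above may be ORed together.  The exact module
 * name to pass the parameter to depends on how this core is built.
 */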
module_init(esp_init);
module_exit(esp_exit);

#ifdef CONFIG_SCSI_ESP_PIO
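/* Both pollers below busy-wait with udelay(1) for up to 500000
 * iterations, i.e. roughly half a second in the worst case, before
 * reporting a timeout.
 */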
static inline unsigned int esp_wait_for_fifo(struct esp *esp)
{
	int i = 500000;

	do {
		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

		if (fbytes)
			return fbytes;

		udelay(1);
	} while (--i);

	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
		     esp_read8(ESP_STATUS));
	return 0;
}

static inline int esp_wait_for_intr(struct esp *esp)
{
	int i = 500000;

	do {
		esp->sreg = esp_read8(ESP_STATUS);
		if (esp->sreg & ESP_STAT_INTR)
			return 0;

		udelay(1);
	} while (--i);

	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
		     esp->sreg);
	return 1;
}

#define ESP_FIFO_SIZE 16

void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
		      u32 dma_count, int write, u8 cmd)
{
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	cmd &= ~ESP_CMD_DMA;
	esp->send_cmd_error = 0;

	if (write) {
		u8 *dst = (u8 *)addr;
		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);

		scsi_esp_cmd(esp, cmd);

		while (1) {
			if (!esp_wait_for_fifo(esp))
				break;

			*dst++ = readb(esp->fifo_reg);
			--esp_count;

			if (!esp_count)
				break;

			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & mask) {
				esp->send_cmd_error = 1;
				break;
			}

			if (phase == ESP_MIP)
				esp_write8(ESP_CMD_MOK, ESP_CMD);

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	} else {
		unsigned int n = ESP_FIFO_SIZE;
		u8 *src = (u8 *)addr;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (n > esp_count)
			n = esp_count;
		writesb(esp->fifo_reg, src, n);
		src += n;
		esp_count -= n;

		scsi_esp_cmd(esp, cmd);

		while (esp_count) {
			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ~ESP_INTR_BSERV) {
				esp->send_cmd_error = 1;
				break;
			}

			n = ESP_FIFO_SIZE -
			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);

			if (n > esp_count)
				n = esp_count;
			writesb(esp->fifo_reg, src, n);
			src += n;
			esp_count -= n;

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	}

	esp->send_cmd_residual = esp_count;
}
EXPORT_SYMBOL(esp_send_pio_cmd);
#endif