/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
 *
 * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net)
 *
 * A lot of this driver was directly stolen from Erik H. Moe's PCI
 * Qlogic ISP driver.  Mucho kudos to him for this code.
 *
 * An even bigger kudos to John Grana at Performance Technologies
 * for providing me with the hardware to write this driver, you rule
 * John you really do.
 *
 * May, 2, 1997: Added support for QLGC,isp --jj
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/byteorder.h>

#include "qlogicpti.h"

#include <asm/dma.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/irq.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#define MAX_TARGETS	16
#define MAX_LUNS	8	/* 32 for 1.31 F/W */

#define DEFAULT_LOOP_COUNT	10000

#include "qlogicpti_asm.c"

static struct qlogicpti *qptichain = NULL;
static DEFINE_SPINLOCK(qptichain_lock);

#define PACKB(a, b)			(((a)<<4)|(b))

static const u_char mbox_param[] = {
	PACKB(1, 1),	/* MBOX_NO_OP */
	PACKB(5, 5),	/* MBOX_LOAD_RAM */
	PACKB(2, 0),	/* MBOX_EXEC_FIRMWARE */
	PACKB(5, 5),	/* MBOX_DUMP_RAM */
	PACKB(3, 3),	/* MBOX_WRITE_RAM_WORD */
	PACKB(2, 3),	/* MBOX_READ_RAM_WORD */
	PACKB(6, 6),	/* MBOX_MAILBOX_REG_TEST */
	PACKB(2, 3),	/* MBOX_VERIFY_CHECKSUM */
	PACKB(1, 3),	/* MBOX_ABOUT_FIRMWARE */
	PACKB(0, 0),	/* 0x0009 */
	PACKB(0, 0),	/* 0x000a */
	PACKB(0, 0),	/* 0x000b */
	PACKB(0, 0),	/* 0x000c */
	PACKB(0, 0),	/* 0x000d */
	PACKB(1, 2),	/* MBOX_CHECK_FIRMWARE */
	PACKB(0, 0),	/* 0x000f */
	PACKB(5, 5),	/* MBOX_INIT_REQ_QUEUE */
	PACKB(6, 6),	/* MBOX_INIT_RES_QUEUE */
	PACKB(4, 4),	/* MBOX_EXECUTE_IOCB */
	PACKB(2, 2),	/* MBOX_WAKE_UP */
	PACKB(1, 6),	/* MBOX_STOP_FIRMWARE */
	PACKB(4, 4),	/* MBOX_ABORT */
	PACKB(2, 2),	/* MBOX_ABORT_DEVICE */
	PACKB(3, 3),	/* MBOX_ABORT_TARGET */
	PACKB(2, 2),	/* MBOX_BUS_RESET */
	PACKB(2, 3),	/* MBOX_STOP_QUEUE */
	PACKB(2, 3),	/* MBOX_START_QUEUE */
	PACKB(2, 3),	/* MBOX_SINGLE_STEP_QUEUE */
	PACKB(2, 3),	/* MBOX_ABORT_QUEUE */
	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_STATUS */
	PACKB(0, 0),	/* 0x001e */
	PACKB(1, 3),	/* MBOX_GET_FIRMWARE_STATUS */
	PACKB(1, 2),	/* MBOX_GET_INIT_SCSI_ID */
	PACKB(1, 2),	/* MBOX_GET_SELECT_TIMEOUT */
	PACKB(1, 3),	/* MBOX_GET_RETRY_COUNT */
	PACKB(1, 2),	/* MBOX_GET_TAG_AGE_LIMIT */
	PACKB(1, 2),	/* MBOX_GET_CLOCK_RATE */
	PACKB(1, 2),	/* MBOX_GET_ACT_NEG_STATE */
	PACKB(1, 2),	/* MBOX_GET_ASYNC_DATA_SETUP_TIME */
	PACKB(1, 3),	/* MBOX_GET_SBUS_PARAMS */
	PACKB(2, 4),	/* MBOX_GET_TARGET_PARAMS */
	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_PARAMS */
	PACKB(0, 0),	/* 0x002a */
	PACKB(0, 0),	/* 0x002b */
	PACKB(0, 0),	/* 0x002c */
	PACKB(0, 0),	/* 0x002d */
	PACKB(0, 0),	/* 0x002e */
	PACKB(0, 0),	/* 0x002f */
	PACKB(2, 2),	/* MBOX_SET_INIT_SCSI_ID */
	PACKB(2, 2),	/* MBOX_SET_SELECT_TIMEOUT */
	PACKB(3, 3),	/* MBOX_SET_RETRY_COUNT */
	PACKB(2, 2),	/* MBOX_SET_TAG_AGE_LIMIT */
	PACKB(2, 2),	/* MBOX_SET_CLOCK_RATE */
	PACKB(2, 2),	/* MBOX_SET_ACTIVE_NEG_STATE */
	PACKB(2, 2),	/* MBOX_SET_ASYNC_DATA_SETUP_TIME */
	PACKB(3, 3),	/* MBOX_SET_SBUS_CONTROL_PARAMS */
	PACKB(4, 4),	/* MBOX_SET_TARGET_PARAMS */
	PACKB(4, 4),	/* MBOX_SET_DEV_QUEUE_PARAMS */
	PACKB(0, 0),	/* 0x003a */
	PACKB(0, 0),	/* 0x003b */
	PACKB(0, 0),	/* 0x003c */
	PACKB(0, 0),	/* 0x003d */
	PACKB(0, 0),	/* 0x003e */
	PACKB(0, 0),	/* 0x003f */
	PACKB(0, 0),	/* 0x0040 */
	PACKB(0, 0),	/* 0x0041 */
	PACKB(0, 0)	/* 0x0042 */
};

#define MAX_MBOX_COMMAND	ARRAY_SIZE(mbox_param)

/* queue lengths _must_ be a power of two: */
#define QUEUE_DEPTH(in, out, ql)	((in - out) & (ql))
#define REQ_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, \
						    QLOGICPTI_REQ_QUEUE_LEN)
#define RES_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, RES_QUEUE_LEN)

static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti)
{
	sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB,
		    qpti->qregs + SBUS_CTRL);
}

static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti)
{
	sbus_writew(0, qpti->qregs + SBUS_CTRL);
}

static inline void set_sbus_cfg1(struct qlogicpti *qpti)
{
	u16 val;
	u8 bursts = qpti->bursts;

#if 0	/* It appears that at least PTI cards do not support
	 * 64-byte bursts and that setting the B64 bit actually
	 * is a nop and the chip ends up using the smallest burst
	 * size. -DaveM
	 */
	if (sbus_can_burst64() && (bursts & DMA_BURST64)) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
	} else
#endif
	if (bursts & DMA_BURST32) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32);
	} else if (bursts & DMA_BURST16) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16);
	} else if (bursts & DMA_BURST8) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8);
	} else {
		val = 0; /* No sbus bursts for you... */
	}
	sbus_writew(val, qpti->qregs + SBUS_CFG1);
}

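/* Issue a mailbox command and collect its reply.  The upper nibble of
 * mbox_param[] tells how many of param[0..5] are written into MBOX0..MBOX5,
 * the lower nibble how many are read back once the RISC has answered.
 * Returns 0 when the handshake completes (callers still check param[0]
 * for MBOX_COMMAND_COMPLETE), or 1 if the opcode has no parameter
 * descriptor.  The polling loops only warn on timeout.
 */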
static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force)
{
	int loop_count;
	u16 tmp;

	if (mbox_param[param[0]] == 0)
		return 1;

	/* Set SBUS semaphore. */
	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
	tmp |= SBUS_SEMAPHORE_LCK;
	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);

	/* Wait for host IRQ bit to clear. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) {
		barrier();
		cpu_relax();
	}
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n",
		       qpti->qpti_id);

	/* Write mailbox command registers. */
	switch (mbox_param[param[0]] >> 4) {
	case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
	case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
	case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
	case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
	case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
	case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
	}

	/* Clear RISC interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_CRIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Clear SBUS semaphore. */
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);

	/* Set HOST interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_SHIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Wait for HOST interrupt clears. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count &&
	       (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n",
		       qpti->qpti_id, param[0]);

	/* Wait for SBUS semaphore to get set. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count &&
	       !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) {
		udelay(20);

		/* Workaround for some buggy chips. */
		if (sbus_readw(qpti->qregs + MBOX0) & 0x4000)
			break;
	}
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n",
		       qpti->qpti_id, param[0]);

	/* Wait for MBOX busy condition to go away. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n",
		       qpti->qpti_id, param[0]);

	/* Read back output parameters. */
	switch (mbox_param[param[0]] & 0xf) {
	case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
	case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
	case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
	case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
	case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
	case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
	}

	/* Clear RISC interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_CRIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Release SBUS semaphore. */
	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
	tmp &= ~(SBUS_SEMAPHORE_LCK);
	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);

	/* We're done. */
	return 0;
}

static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti)
{
	int i;

	qpti->host_param.initiator_scsi_id = qpti->scsi_id;
	qpti->host_param.bus_reset_delay = 3;
	qpti->host_param.retry_count = 0;
	qpti->host_param.retry_delay = 5;
	qpti->host_param.async_data_setup_time = 3;
	qpti->host_param.req_ack_active_negation = 1;
	qpti->host_param.data_line_active_negation = 1;
	qpti->host_param.data_dma_burst_enable = 1;
	qpti->host_param.command_dma_burst_enable = 1;
	qpti->host_param.tag_aging = 8;
	qpti->host_param.selection_timeout = 250;
	qpti->host_param.max_queue_depth = 256;

	for(i = 0; i < MAX_TARGETS; i++) {
		/*
		 * disconnect, parity, arq, reneg on reset, and, oddly enough
		 * tags...the midlayer's notion of tagged support has to match
		 * our device settings, and since we base whether we enable a
		 * tag on a per-cmnd basis upon what the midlayer sez, we
		 * actually enable the capability here.
		 */
		qpti->dev_param[i].device_flags = 0xcd;
		qpti->dev_param[i].execution_throttle = 16;
		if (qpti->ultra) {
			qpti->dev_param[i].synchronous_period = 12;
			qpti->dev_param[i].synchronous_offset = 8;
		} else {
			qpti->dev_param[i].synchronous_period = 25;
			qpti->dev_param[i].synchronous_offset = 12;
		}
		qpti->dev_param[i].device_enable = 1;
	}
}

static int qlogicpti_reset_hardware(struct Scsi_Host *host)
{
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	u_short param[6];
	unsigned short risc_code_addr;
	int loop_count, i;
	unsigned long flags;

	risc_code_addr = 0x1000;	/* all load addresses are at 0x1000 */

	spin_lock_irqsave(host->host_lock, flags);

	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);

	/* Only reset the scsi bus if it is not free. */
	if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) {
		sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE);
		sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD);
		udelay(400);
	}

	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);

	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n",
		       qpti->qpti_id);

	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
	set_sbus_cfg1(qpti);
	qlogicpti_enable_irqs(qpti);

	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
		qpti->ultra = 1;
		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
			    qpti->qregs + RISC_MTREG);
	} else {
		qpti->ultra = 0;
		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
			    qpti->qregs + RISC_MTREG);
	}

	/* reset adapter and per-device default values. */
	/* do it after finding out whether we're ultra mode capable */
	qlogicpti_set_hostdev_defaults(qpti);

	/* Release the RISC processor. */
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Get RISC to start executing the firmware code. */
	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Set initiator scsi ID. */
	param[0] = MBOX_SET_INIT_SCSI_ID;
	param[1] = qpti->host_param.initiator_scsi_id;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Initialize state of the queues, both hw and sw. */
	qpti->req_in_ptr = qpti->res_out_ptr = 0;

	param[0] = MBOX_INIT_RES_QUEUE;
	param[1] = RES_QUEUE_LEN + 1;
	param[2] = (u_short) (qpti->res_dvma >> 16);
	param[3] = (u_short) (qpti->res_dvma & 0xffff);
	param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	param[0] = MBOX_INIT_REQ_QUEUE;
	param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1;
	param[2] = (u_short) (qpti->req_dvma >> 16);
	param[3] = (u_short) (qpti->req_dvma & 0xffff);
	param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	param[0] = MBOX_SET_RETRY_COUNT;
	param[1] = qpti->host_param.retry_count;
	param[2] = qpti->host_param.retry_delay;
	qlogicpti_mbox_command(qpti, param, 0);

	param[0] = MBOX_SET_TAG_AGE_LIMIT;
	param[1] = qpti->host_param.tag_aging;
	qlogicpti_mbox_command(qpti, param, 0);

	for (i = 0; i < MAX_TARGETS; i++) {
		param[0] = MBOX_GET_DEV_QUEUE_PARAMS;
		param[1] = (i << 8);
		qlogicpti_mbox_command(qpti, param, 0);
	}

	param[0] = MBOX_GET_FIRMWARE_STATUS;
	qlogicpti_mbox_command(qpti, param, 0);

	param[0] = MBOX_SET_SELECT_TIMEOUT;
	param[1] = qpti->host_param.selection_timeout;
	qlogicpti_mbox_command(qpti, param, 0);

	for (i = 0; i < MAX_TARGETS; i++) {
		param[0] = MBOX_SET_TARGET_PARAMS;
		param[1] = (i << 8);
		param[2] = (qpti->dev_param[i].device_flags << 8);
		/*
		 * Since we're now loading 1.31 f/w, force narrow/async.
		 */
		param[2] |= 0xc0;
		param[3] = 0;	/* no offset, we do not have sync mode yet */
		qlogicpti_mbox_command(qpti, param, 0);
	}

	/*
	 * Always (sigh) do an initial bus reset (kicks f/w).
	 */
	param[0] = MBOX_BUS_RESET;
	param[1] = qpti->host_param.bus_reset_delay;
	qlogicpti_mbox_command(qpti, param, 0);
	qpti->send_marker = 1;

	spin_unlock_irqrestore(host->host_lock, flags);
	return 0;
}

#define PTI_RESET_LIMIT 400

static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
{
	struct Scsi_Host *host = qpti->qhost;
	unsigned short csum = 0;
	unsigned short param[6];
	unsigned short *risc_code, risc_code_addr, risc_code_length;
	unsigned long flags;
	int i, timeout;

	risc_code = &sbus_risc_code01[0];
	risc_code_addr = 0x1000;	/* all f/w modules load at 0x1000 */
	risc_code_length = sbus_risc_code_length01;

	spin_lock_irqsave(host->host_lock, flags);

	/* Verify the checksum twice, once before loading it, and once
	 * afterwards via the mailbox commands.
	 */
	for (i = 0; i < risc_code_length; i++)
		csum += risc_code[i];
	if (csum) {
		spin_unlock_irqrestore(host->host_lock, flags);
		printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!",
		       qpti->qpti_id);
		return 1;
	}
	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
	timeout = PTI_RESET_LIMIT;
	while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
		udelay(20);
	if (!timeout) {
		spin_unlock_irqrestore(host->host_lock, flags);
		printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id);
		return 1;
	}

	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
	mdelay(1);

	sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL);
	set_sbus_cfg1(qpti);
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);

	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
		qpti->ultra = 1;
		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
			    qpti->qregs + RISC_MTREG);
	} else {
		qpti->ultra = 0;
		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
			    qpti->qregs + RISC_MTREG);
	}

	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Pin lines are only stable while RISC is paused. */
	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
	if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE)
		qpti->differential = 1;
	else
		qpti->differential = 0;
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* This shouldn't be necessary- we've reset things so we should be
	   running from the ROM now.. */

	param[0] = MBOX_STOP_FIRMWARE;
	param[1] = param[2] = param[3] = param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Load it up.. */
	for (i = 0; i < risc_code_length; i++) {
		param[0] = MBOX_WRITE_RAM_WORD;
		param[1] = risc_code_addr + i;
		param[2] = risc_code[i];
		if (qlogicpti_mbox_command(qpti, param, 1) ||
		    param[0] != MBOX_COMMAND_COMPLETE) {
			printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
			       qpti->qpti_id);
			spin_unlock_irqrestore(host->host_lock, flags);
			return 1;
		}
	}

	/* Reset the ISP again. */
	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
	mdelay(1);

	qlogicpti_enable_irqs(qpti);
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Ask ISP to verify the checksum of the new code. */
	param[0] = MBOX_VERIFY_CHECKSUM;
	param[1] = risc_code_addr;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Start using newly downloaded firmware. */
	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr;
	qlogicpti_mbox_command(qpti, param, 1);

	param[0] = MBOX_ABOUT_FIRMWARE;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Snag the major and minor revisions from the result. */
	qpti->fware_majrev = param[1];
	qpti->fware_minrev = param[2];
	qpti->fware_micrev = param[3];

	/* Set the clock rate */
	param[0] = MBOX_SET_CLOCK_RATE;
	param[1] = qpti->clock;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	if (qpti->is_pti != 0) {
		/* Load scsi initiator ID and interrupt level into sbus static ram. */
		param[0] = MBOX_WRITE_RAM_WORD;
		param[1] = 0xff80;
		param[2] = (unsigned short) qpti->scsi_id;
		qlogicpti_mbox_command(qpti, param, 1);

		param[0] = MBOX_WRITE_RAM_WORD;
		param[1] = 0xff00;
		param[2] = (unsigned short) 3;
		qlogicpti_mbox_command(qpti, param, 1);
	}

	spin_unlock_irqrestore(host->host_lock, flags);
	return 0;
}

static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
{
	int curstat = sbus_readb(qpti->sreg);

	curstat &= 0xf0;
	if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE))
		printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id);
	if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER))
		printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id);
	if (curstat != qpti->swsreg) {
		int error = 0;
		if (curstat & SREG_FUSE) {
			error++;
			printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id);
		}
		if (curstat & SREG_TPOWER) {
			error++;
			printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id);
		}
		if (qpti->differential &&
		    (curstat & SREG_DSENSE) != SREG_DSENSE) {
			error++;
			printk("qlogicpti%d: You have a single ended device on a "
			       "differential bus!  Please fix!\n", qpti->qpti_id);
		}
		qpti->swsreg = curstat;
		return error;
	}
	return 0;
}

static irqreturn_t qpti_intr(int irq, void *dev_id);

static void __devinit qpti_chain_add(struct qlogicpti *qpti)
{
	spin_lock_irq(&qptichain_lock);
	if (qptichain != NULL) {
		struct qlogicpti *qlink = qptichain;

		while(qlink->next)
			qlink = qlink->next;
		qlink->next = qpti;
	} else {
		qptichain = qpti;
	}
	qpti->next = NULL;
	spin_unlock_irq(&qptichain_lock);
}

static void __devexit qpti_chain_del(struct qlogicpti *qpti)
{
	spin_lock_irq(&qptichain_lock);
	if (qptichain == qpti) {
		qptichain = qpti->next;
	} else {
		struct qlogicpti *qlink = qptichain;
		while(qlink->next != qpti)
			qlink = qlink->next;
		qlink->next = qpti->next;
	}
	qpti->next = NULL;
	spin_unlock_irq(&qptichain_lock);
}

static int __devinit qpti_map_regs(struct qlogicpti *qpti)
{
	struct of_device *op = qpti->op;

	qpti->qregs = of_ioremap(&op->resource[0], 0,
				 resource_size(&op->resource[0]),
				 "PTI Qlogic/ISP");
	if (!qpti->qregs) {
		printk("PTI: Qlogic/ISP registers are unmappable\n");
		return -1;
	}
	if (qpti->is_pti) {
		qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096),
					sizeof(unsigned char),
					"PTI Qlogic/ISP statreg");
		if (!qpti->sreg) {
			printk("PTI: Qlogic/ISP status register is unmappable\n");
			return -1;
		}
	}
	return 0;
}

static int __devinit qpti_register_irq(struct qlogicpti *qpti)
{
	struct of_device *op = qpti->op;

	qpti->qhost->irq = qpti->irq = op->irqs[0];

	/* We used to try various overly-clever things to
	 * reduce the interrupt processing overhead on
	 * sun4c/sun4m when multiple PTI's shared the
	 * same IRQ.  It was too complex and messy to
	 * sanely maintain.
	 */
	if (request_irq(qpti->irq, qpti_intr,
			IRQF_SHARED, "Qlogic/PTI", qpti))
		goto fail;

	printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);

	return 0;

fail:
	printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id);
	return -1;
}

static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
{
	struct of_device *op = qpti->op;
	struct device_node *dp;

	dp = op->node;

	qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1);
	if (qpti->scsi_id == -1)
		qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id",
						      -1);
	if (qpti->scsi_id == -1)
		qpti->scsi_id =
			of_getintprop_default(dp->parent,
					      "scsi-initiator-id", 7);
	qpti->qhost->this_id = qpti->scsi_id;
	qpti->qhost->max_sectors = 64;

	printk("SCSI ID %d ", qpti->scsi_id);
}

static void qpti_get_bursts(struct qlogicpti *qpti)
{
	struct of_device *op = qpti->op;
	u8 bursts, bmask;

	bursts = of_getintprop_default(op->node, "burst-sizes", 0xff);
	bmask = of_getintprop_default(op->node->parent, "burst-sizes", 0xff);
	if (bmask != 0xff)
		bursts &= bmask;
	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	qpti->bursts = bursts;
}

static void qpti_get_clock(struct qlogicpti *qpti)
{
	unsigned int cfreq;

	/* Check for what the clock input to this card is.
	 * Default to 40MHz.
	 */
	cfreq = prom_getintdefault(qpti->prom_node, "clock-frequency", 40000000);
	qpti->clock = (cfreq + 500000)/1000000;
	if (qpti->clock == 0) /* bullshit */
		qpti->clock = 40;
}

/* The request and response queues must each be aligned
 * on a page boundary.
 */
static int __devinit qpti_map_queues(struct qlogicpti *qpti)
{
	struct of_device *op = qpti->op;

#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	qpti->res_cpu = dma_alloc_coherent(&op->dev,
					   QSIZE(RES_QUEUE_LEN),
					   &qpti->res_dvma, GFP_ATOMIC);
	if (qpti->res_cpu == NULL ||
	    qpti->res_dvma == 0) {
		printk("QPTI: Cannot map response queue.\n");
		return -1;
	}

	qpti->req_cpu = dma_alloc_coherent(&op->dev,
					   QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
					   &qpti->req_dvma, GFP_ATOMIC);
	if (qpti->req_cpu == NULL ||
	    qpti->req_dvma == 0) {
		dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN),
				  qpti->res_cpu, qpti->res_dvma);
		printk("QPTI: Cannot map request queue.\n");
		return -1;
	}
	memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN));
	memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN));
	return 0;
}

const char *qlogicpti_info(struct Scsi_Host *host)
{
	static char buf[80];
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;

	sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p",
		qpti->qhost->irq, qpti->qregs);
	return buf;
}

/* I am a certified frobtronicist. */
static inline void marker_frob(struct Command_Entry *cmd)
{
	struct Marker_Entry *marker = (struct Marker_Entry *) cmd;

	memset(marker, 0, sizeof(struct Marker_Entry));
	marker->hdr.entry_cnt = 1;
	marker->hdr.entry_type = ENTRY_MARKER;
	marker->modifier = SYNC_ALL;
	marker->rsvd = 0;
}

static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
			    struct qlogicpti *qpti)
{
	memset(cmd, 0, sizeof(struct Command_Entry));
	cmd->hdr.entry_cnt = 1;
	cmd->hdr.entry_type = ENTRY_COMMAND;
	cmd->target_id = Cmnd->device->id;
	cmd->target_lun = Cmnd->device->lun;
	cmd->cdb_length = Cmnd->cmd_len;
	cmd->control_flags = 0;
	if (Cmnd->device->tagged_supported) {
		if (qpti->cmd_count[Cmnd->device->id] == 0)
			qpti->tag_ages[Cmnd->device->id] = jiffies;
		if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) {
			cmd->control_flags = CFLAG_ORDERED_TAG;
			qpti->tag_ages[Cmnd->device->id] = jiffies;
		} else
			cmd->control_flags = CFLAG_SIMPLE_TAG;
	}
	if ((Cmnd->cmnd[0] == WRITE_6) ||
	    (Cmnd->cmnd[0] == WRITE_10) ||
	    (Cmnd->cmnd[0] == WRITE_12))
		cmd->control_flags |= CFLAG_WRITE;
	else
		cmd->control_flags |= CFLAG_READ;
	cmd->time_out = 30;
	memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
}

/* Do it to it baby. */
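/* Map the command's scatterlist for DMA and fill in the data segments:
 * the first four land in the Command_Entry itself, the remainder spill
 * into Continuation_Entry slots taken from the request queue.  The queue
 * index is stored in cmd->handle and in cmd_slots[] so the completion
 * path can find the scsi_cmnd again.  Returns the updated request-queue
 * in pointer, or -1 if the queue fills up while the entry is being built.
 */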
static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
			   struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
{
	struct dataseg *ds;
	struct scatterlist *sg, *s;
	int i, n;

	if (scsi_bufflen(Cmnd)) {
		int sg_count;

		sg = scsi_sglist(Cmnd);
		sg_count = dma_map_sg(&qpti->op->dev, sg,
				      scsi_sg_count(Cmnd),
				      Cmnd->sc_data_direction);

		ds = cmd->dataseg;
		cmd->segment_cnt = sg_count;

		/* Fill in first four sg entries: */
		n = sg_count;
		if (n > 4)
			n = 4;
		for_each_sg(sg, s, n, i) {
			ds[i].d_base = sg_dma_address(s);
			ds[i].d_count = sg_dma_len(s);
		}
		sg_count -= 4;
		sg = s;
		while (sg_count > 0) {
			struct Continuation_Entry *cont;

			++cmd->hdr.entry_cnt;
			cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr];
			in_ptr = NEXT_REQ_PTR(in_ptr);
			if (in_ptr == out_ptr)
				return -1;

			cont->hdr.entry_type = ENTRY_CONTINUATION;
			cont->hdr.entry_cnt = 0;
			cont->hdr.sys_def_1 = 0;
			cont->hdr.flags = 0;
			cont->reserved = 0;
			ds = cont->dataseg;
			n = sg_count;
			if (n > 7)
				n = 7;
			for_each_sg(sg, s, n, i) {
				ds[i].d_base = sg_dma_address(s);
				ds[i].d_count = sg_dma_len(s);
			}
			sg_count -= n;
			sg = s;
		}
	} else {
		cmd->dataseg[0].d_base = 0;
		cmd->dataseg[0].d_count = 0;
		cmd->segment_cnt = 1; /* Shouldn't this be 0? */
	}

	/* Committed, record Scsi_Cmd so we can find it later. */
	cmd->handle = in_ptr;
	qpti->cmd_slots[in_ptr] = Cmnd;

	qpti->cmd_count[Cmnd->device->id]++;
	sbus_writew(in_ptr, qpti->qregs + MBOX4);
	qpti->req_in_ptr = in_ptr;

	return in_ptr;
}

static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr)
{
	/* Temporary workaround until bug is found and fixed (one bug has been found
	   already, but fixing it makes things even worse) -jj */
	int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
	host->can_queue = host->host_busy + num_free;
	host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}

static int qlogicpti_slave_configure(struct scsi_device *sdev)
{
	struct qlogicpti *qpti = shost_priv(sdev->host);
	int tgt = sdev->id;
	u_short param[6];

	/* tags handled in midlayer */
	/* enable sync mode? */
	if (sdev->sdtr) {
		qpti->dev_param[tgt].device_flags |= 0x10;
	} else {
		qpti->dev_param[tgt].synchronous_offset = 0;
		qpti->dev_param[tgt].synchronous_period = 0;
	}
	/* are we wide capable? */
	if (sdev->wdtr)
		qpti->dev_param[tgt].device_flags |= 0x20;

	param[0] = MBOX_SET_TARGET_PARAMS;
	param[1] = (tgt << 8);
	param[2] = (qpti->dev_param[tgt].device_flags << 8);
	if (qpti->dev_param[tgt].device_flags & 0x10) {
		param[3] = (qpti->dev_param[tgt].synchronous_offset << 8) |
			qpti->dev_param[tgt].synchronous_period;
	} else {
		param[3] = 0;
	}
	qlogicpti_mbox_command(qpti, param, 0);
	return 0;
}

/*
 * The middle SCSI layer ensures that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (though the
 * interrupt handler may call this routine as part of
 * request-completion handling).
 *
 * "This code must fly."  -davem
 */
static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	struct Command_Entry *cmd;
	u_int out_ptr;
	int in_ptr;

	Cmnd->scsi_done = done;

	in_ptr = qpti->req_in_ptr;
	cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
	out_ptr = sbus_readw(qpti->qregs + MBOX4);
	in_ptr = NEXT_REQ_PTR(in_ptr);
	if (in_ptr == out_ptr)
		goto toss_command;

	if (qpti->send_marker) {
		marker_frob(cmd);
		qpti->send_marker = 0;
		if (NEXT_REQ_PTR(in_ptr) == out_ptr) {
			sbus_writew(in_ptr, qpti->qregs + MBOX4);
			qpti->req_in_ptr = in_ptr;
			goto toss_command;
		}
		cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
		in_ptr = NEXT_REQ_PTR(in_ptr);
	}
	cmd_frob(cmd, Cmnd, qpti);
	if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1)
		goto toss_command;

	update_can_queue(host, in_ptr, out_ptr);

	return 0;

toss_command:
	printk(KERN_EMERG "qlogicpti%d: request queue overflow\n",
	       qpti->qpti_id);

	/* Unfortunately, unless you use the new EH code, which
	 * we don't, the midlayer will ignore the return value,
	 * which is insane.  We pick up the pieces like this.
	 */
	Cmnd->result = DID_BUS_BUSY;
	done(Cmnd);
	return 1;
}

static int qlogicpti_return_status(struct Status_Entry *sts, int id)
{
	int host_status = DID_ERROR;

	switch (sts->completion_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;
	case CS_INCOMPLETE:
		if (!(sts->state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(sts->state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(sts->state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;
	case CS_DMA_ERROR:
	case CS_TRANSPORT_ERROR:
		host_status = DID_ERROR;
		break;
	case CS_RESET_OCCURRED:
	case CS_BUS_RESET:
		host_status = DID_RESET;
		break;
	case CS_ABORTED:
		host_status = DID_ABORT;
		break;
	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;
	case CS_DATA_OVERRUN:
	case CS_COMMAND_OVERRUN:
	case CS_STATUS_OVERRUN:
	case CS_BAD_MESSAGE:
	case CS_NO_MESSAGE_OUT:
	case CS_EXT_ID_FAILED:
	case CS_IDE_MSG_FAILED:
	case CS_ABORT_MSG_FAILED:
	case CS_NOP_MSG_FAILED:
	case CS_PARITY_ERROR_MSG_FAILED:
	case CS_DEVICE_RESET_MSG_FAILED:
	case CS_ID_MSG_FAILED:
	case CS_UNEXP_BUS_FREE:
		host_status = DID_ERROR;
		break;
	case CS_DATA_UNDERRUN:
		host_status = DID_OK;
		break;
	default:
		printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n",
		       id, sts->completion_status);
		host_status = DID_ERROR;
		break;
	}

	return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
}

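/* Service a RISC interrupt: note any asynchronous event posted through the
 * mailbox (bus resets force a new marker), then drain the response queue,
 * translating each Status_Entry into a SCSI result and unmapping the
 * command's DMA.  Completed commands are chained together through
 * host_scribble and returned so qpti_intr() can invoke scsi_done on each.
 */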
static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
{
	struct scsi_cmnd *Cmnd, *done_queue = NULL;
	struct Status_Entry *sts;
	u_int in_ptr, out_ptr;

	if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
		return NULL;

	in_ptr = sbus_readw(qpti->qregs + MBOX5);
	sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
	if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
		switch (sbus_readw(qpti->qregs + MBOX0)) {
		case ASYNC_SCSI_BUS_RESET:
		case EXECUTION_TIMEOUT_RESET:
			qpti->send_marker = 1;
			break;
		case INVALID_COMMAND:
		case HOST_INTERFACE_ERROR:
		case COMMAND_ERROR:
		case COMMAND_PARAM_ERROR:
			break;
		};
		sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
	}

	/* This looks like a network driver! */
	out_ptr = qpti->res_out_ptr;
	while (out_ptr != in_ptr) {
		u_int cmd_slot;

		sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr];
		out_ptr = NEXT_RES_PTR(out_ptr);

		/* We store an index in the handle, not the pointer in
		 * some form.  This avoids problems due to the fact
		 * that the handle provided is only 32-bits. -DaveM
		 */
		cmd_slot = sts->handle;
		Cmnd = qpti->cmd_slots[cmd_slot];
		qpti->cmd_slots[cmd_slot] = NULL;

		if (sts->completion_status == CS_RESET_OCCURRED ||
		    sts->completion_status == CS_ABORTED ||
		    (sts->status_flags & STF_BUS_RESET))
			qpti->send_marker = 1;

		if (sts->state_flags & SF_GOT_SENSE)
			memcpy(Cmnd->sense_buffer, sts->req_sense_data,
			       SCSI_SENSE_BUFFERSIZE);

		if (sts->hdr.entry_type == ENTRY_STATUS)
			Cmnd->result =
				qlogicpti_return_status(sts, qpti->qpti_id);
		else
			Cmnd->result = DID_ERROR << 16;

		if (scsi_bufflen(Cmnd))
			dma_unmap_sg(&qpti->op->dev,
				     scsi_sglist(Cmnd), scsi_sg_count(Cmnd),
				     Cmnd->sc_data_direction);

		qpti->cmd_count[Cmnd->device->id]--;
		sbus_writew(out_ptr, qpti->qregs + MBOX5);
		Cmnd->host_scribble = (unsigned char *) done_queue;
		done_queue = Cmnd;
	}
	qpti->res_out_ptr = out_ptr;

	return done_queue;
}

static irqreturn_t qpti_intr(int irq, void *dev_id)
{
	struct qlogicpti *qpti = dev_id;
	unsigned long flags;
	struct scsi_cmnd *dq;

	spin_lock_irqsave(qpti->qhost->host_lock, flags);
	dq = qlogicpti_intr_handler(qpti);

	if (dq != NULL) {
		do {
			struct scsi_cmnd *next;

			next = (struct scsi_cmnd *) dq->host_scribble;
			dq->scsi_done(dq);
			dq = next;
		} while (dq != NULL);
	}
	spin_unlock_irqrestore(qpti->qhost->host_lock, flags);

	return IRQ_HANDLED;
}

static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
{
	u_short param[6];
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	int return_status = SUCCESS;
	u32 cmd_cookie;
	int i;

	printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n",
	       qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun);

	qlogicpti_disable_irqs(qpti);

	/* Find the 32-bit cookie we gave to the firmware for
	 * this command.
	 */
	for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++)
		if (qpti->cmd_slots[i] == Cmnd)
			break;
	cmd_cookie = i;

	param[0] = MBOX_ABORT;
	param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun;
	param[2] = cmd_cookie >> 16;
	param[3] = cmd_cookie & 0xffff;
	if (qlogicpti_mbox_command(qpti, param, 0) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n",
		       qpti->qpti_id, param[0]);
		return_status = FAILED;
	}

	qlogicpti_enable_irqs(qpti);

	return return_status;
}

static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
{
	u_short param[6];
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	int return_status = SUCCESS;

	printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n",
	       qpti->qpti_id);

	qlogicpti_disable_irqs(qpti);

	param[0] = MBOX_BUS_RESET;
	param[1] = qpti->host_param.bus_reset_delay;
	if (qlogicpti_mbox_command(qpti, param, 0) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: scsi bus reset failure: %x\n",
		       qpti->qpti_id, param[0]);
		return_status = FAILED;
	}

	qlogicpti_enable_irqs(qpti);

	return return_status;
}

static struct scsi_host_template qpti_template = {
	.module			= THIS_MODULE,
	.name			= "qlogicpti",
	.info			= qlogicpti_info,
	.queuecommand		= qlogicpti_queuecommand,
	.slave_configure	= qlogicpti_slave_configure,
	.eh_abort_handler	= qlogicpti_abort,
	.eh_bus_reset_handler	= qlogicpti_reset,
	.can_queue		= QLOGICPTI_REQ_QUEUE_LEN,
	.this_id		= 7,
	.sg_tablesize		= QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};

static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
	struct scsi_host_template *tpnt = match->data;
	struct device_node *dp = op->node;
	struct Scsi_Host *host;
	struct qlogicpti *qpti;
	static int nqptis;
	const char *fcode;

	/* Sometimes Antares cards come up not completely
	 * setup, and we get a report of a zero IRQ.
	 */
	if (op->irqs[0] == 0)
		return -ENODEV;

	host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
	if (!host)
		return -ENOMEM;

	qpti = shost_priv(host);

	host->max_id = MAX_TARGETS;
	qpti->qhost = host;
	qpti->op = op;
	qpti->qpti_id = nqptis;
	strcpy(qpti->prom_name, op->node->name);
	qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");

	if (qpti_map_regs(qpti) < 0)
		goto fail_unlink;

	if (qpti_register_irq(qpti) < 0)
		goto fail_unmap_regs;

	qpti_get_scsi_id(qpti);
	qpti_get_bursts(qpti);
	qpti_get_clock(qpti);

	/* Clear out scsi_cmnd array. */
	memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));

	if (qpti_map_queues(qpti) < 0)
		goto fail_free_irq;

	/* Load the firmware. */
	if (qlogicpti_load_firmware(qpti))
		goto fail_unmap_queues;
	if (qpti->is_pti) {
		/* Check the PTI status reg. */
		if (qlogicpti_verify_tmon(qpti))
			goto fail_unmap_queues;
	}

	/* Reset the ISP and init res/req queues. */
	if (qlogicpti_reset_hardware(host))
		goto fail_unmap_queues;

	printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
	       qpti->fware_minrev, qpti->fware_micrev);

	fcode = of_get_property(dp, "isp-fcode", NULL);
	if (fcode && fcode[0])
		printk("(FCode %s)", fcode);
	if (of_find_property(dp, "differential", NULL) != NULL)
		qpti->differential = 1;

	printk("\nqlogicpti%d: [%s Wide, using %s interface]\n",
	       qpti->qpti_id,
	       (qpti->ultra ? "Ultra" : "Fast"),
	       (qpti->differential ? "differential" : "single ended"));

	if (scsi_add_host(host, &op->dev)) {
		printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id);
		goto fail_unmap_queues;
	}

	dev_set_drvdata(&op->dev, qpti);

	qpti_chain_add(qpti);

	scsi_scan_host(host);
	nqptis++;

	return 0;

fail_unmap_queues:
#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	dma_free_coherent(&op->dev,
			  QSIZE(RES_QUEUE_LEN),
			  qpti->res_cpu, qpti->res_dvma);
	dma_free_coherent(&op->dev,
			  QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
			  qpti->req_cpu, qpti->req_dvma);
#undef QSIZE

fail_unmap_regs:
	of_iounmap(&op->resource[0], qpti->qregs,
		   resource_size(&op->resource[0]));
	if (qpti->is_pti)
		of_iounmap(&op->resource[0], qpti->sreg,
			   sizeof(unsigned char));

fail_free_irq:
	free_irq(qpti->irq, qpti);

fail_unlink:
	scsi_host_put(host);

	return -ENODEV;
}

static int __devexit qpti_sbus_remove(struct of_device *op)
{
	struct qlogicpti *qpti = dev_get_drvdata(&op->dev);

	qpti_chain_del(qpti);

	scsi_remove_host(qpti->qhost);

	/* Shut up the card. */
	sbus_writew(0, qpti->qregs + SBUS_CTRL);

	/* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
	free_irq(qpti->irq, qpti);

#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	dma_free_coherent(&op->dev,
			  QSIZE(RES_QUEUE_LEN),
			  qpti->res_cpu, qpti->res_dvma);
	dma_free_coherent(&op->dev,
			  QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
			  qpti->req_cpu, qpti->req_dvma);
#undef QSIZE

	of_iounmap(&op->resource[0], qpti->qregs,
		   resource_size(&op->resource[0]));
	if (qpti->is_pti)
		of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char));

	scsi_host_put(qpti->qhost);

	return 0;
}

static const struct of_device_id qpti_match[] = {
	{
		.name = "ptisp",
		.data = &qpti_template,
	},
	{
		.name = "PTI,ptisp",
		.data = &qpti_template,
	},
	{
		.name = "QLGC,isp",
		.data = &qpti_template,
	},
	{
		.name = "SUNW,isp",
		.data = &qpti_template,
	},
	{},
};
MODULE_DEVICE_TABLE(of, qpti_match);

static struct of_platform_driver qpti_sbus_driver = {
	.name		= "qpti",
	.match_table	= qpti_match,
	.probe		= qpti_sbus_probe,
	.remove		= __devexit_p(qpti_sbus_remove),
};

static int __init qpti_init(void)
{
	return of_register_driver(&qpti_sbus_driver, &of_bus_type);
}

static void __exit qpti_exit(void)
{
	of_unregister_driver(&qpti_sbus_driver);
}

MODULE_DESCRIPTION("QlogicISP SBUS driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.1");

module_init(qpti_init);
module_exit(qpti_exit);