1 // SPDX-License-Identifier: GPL-2.0-only 2 /* qlogicpti.c: Performance Technologies QlogicISP sbus card driver. 3 * 4 * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net) 5 * 6 * A lot of this driver was directly stolen from Erik H. Moe's PCI 7 * Qlogic ISP driver. Mucho kudos to him for this code. 8 * 9 * An even bigger kudos to John Grana at Performance Technologies 10 * for providing me with the hardware to write this driver, you rule 11 * John you really do. 12 * 13 * May, 2, 1997: Added support for QLGC,isp --jj 14 */ 15 16 #include <linux/kernel.h> 17 #include <linux/delay.h> 18 #include <linux/types.h> 19 #include <linux/string.h> 20 #include <linux/gfp.h> 21 #include <linux/blkdev.h> 22 #include <linux/proc_fs.h> 23 #include <linux/stat.h> 24 #include <linux/init.h> 25 #include <linux/spinlock.h> 26 #include <linux/interrupt.h> 27 #include <linux/module.h> 28 #include <linux/jiffies.h> 29 #include <linux/dma-mapping.h> 30 #include <linux/of.h> 31 #include <linux/platform_device.h> 32 #include <linux/firmware.h> 33 #include <linux/pgtable.h> 34 35 #include <asm/byteorder.h> 36 37 #include "qlogicpti.h" 38 39 #include <asm/dma.h> 40 #include <asm/ptrace.h> 41 #include <asm/oplib.h> 42 #include <asm/io.h> 43 #include <asm/irq.h> 44 45 #include <scsi/scsi.h> 46 #include <scsi/scsi_cmnd.h> 47 #include <scsi/scsi_device.h> 48 #include <scsi/scsi_eh.h> 49 #include <scsi/scsi_tcq.h> 50 #include <scsi/scsi_host.h> 51 52 #define MAX_TARGETS 16 53 #define MAX_LUNS 8 /* 32 for 1.31 F/W */ 54 55 #define DEFAULT_LOOP_COUNT 10000 56 57 static struct qlogicpti *qptichain = NULL; 58 static DEFINE_SPINLOCK(qptichain_lock); 59 60 #define PACKB(a, b) (((a)<<4)|(b)) 61 62 static const u_char mbox_param[] = { 63 PACKB(1, 1), /* MBOX_NO_OP */ 64 PACKB(5, 5), /* MBOX_LOAD_RAM */ 65 PACKB(2, 0), /* MBOX_EXEC_FIRMWARE */ 66 PACKB(5, 5), /* MBOX_DUMP_RAM */ 67 PACKB(3, 3), /* MBOX_WRITE_RAM_WORD */ 68 PACKB(2, 3), /* MBOX_READ_RAM_WORD */ 69 PACKB(6, 6), /* MBOX_MAILBOX_REG_TEST */ 70 PACKB(2, 3), /* MBOX_VERIFY_CHECKSUM */ 71 PACKB(1, 3), /* MBOX_ABOUT_FIRMWARE */ 72 PACKB(0, 0), /* 0x0009 */ 73 PACKB(0, 0), /* 0x000a */ 74 PACKB(0, 0), /* 0x000b */ 75 PACKB(0, 0), /* 0x000c */ 76 PACKB(0, 0), /* 0x000d */ 77 PACKB(1, 2), /* MBOX_CHECK_FIRMWARE */ 78 PACKB(0, 0), /* 0x000f */ 79 PACKB(5, 5), /* MBOX_INIT_REQ_QUEUE */ 80 PACKB(6, 6), /* MBOX_INIT_RES_QUEUE */ 81 PACKB(4, 4), /* MBOX_EXECUTE_IOCB */ 82 PACKB(2, 2), /* MBOX_WAKE_UP */ 83 PACKB(1, 6), /* MBOX_STOP_FIRMWARE */ 84 PACKB(4, 4), /* MBOX_ABORT */ 85 PACKB(2, 2), /* MBOX_ABORT_DEVICE */ 86 PACKB(3, 3), /* MBOX_ABORT_TARGET */ 87 PACKB(2, 2), /* MBOX_BUS_RESET */ 88 PACKB(2, 3), /* MBOX_STOP_QUEUE */ 89 PACKB(2, 3), /* MBOX_START_QUEUE */ 90 PACKB(2, 3), /* MBOX_SINGLE_STEP_QUEUE */ 91 PACKB(2, 3), /* MBOX_ABORT_QUEUE */ 92 PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_STATUS */ 93 PACKB(0, 0), /* 0x001e */ 94 PACKB(1, 3), /* MBOX_GET_FIRMWARE_STATUS */ 95 PACKB(1, 2), /* MBOX_GET_INIT_SCSI_ID */ 96 PACKB(1, 2), /* MBOX_GET_SELECT_TIMEOUT */ 97 PACKB(1, 3), /* MBOX_GET_RETRY_COUNT */ 98 PACKB(1, 2), /* MBOX_GET_TAG_AGE_LIMIT */ 99 PACKB(1, 2), /* MBOX_GET_CLOCK_RATE */ 100 PACKB(1, 2), /* MBOX_GET_ACT_NEG_STATE */ 101 PACKB(1, 2), /* MBOX_GET_ASYNC_DATA_SETUP_TIME */ 102 PACKB(1, 3), /* MBOX_GET_SBUS_PARAMS */ 103 PACKB(2, 4), /* MBOX_GET_TARGET_PARAMS */ 104 PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_PARAMS */ 105 PACKB(0, 0), /* 0x002a */ 106 PACKB(0, 0), /* 0x002b */ 107 PACKB(0, 0), /* 0x002c */ 108 PACKB(0, 0), /* 0x002d */ 109 PACKB(0, 0), /* 0x002e */ 110 PACKB(0, 0), /* 0x002f */ 111 PACKB(2, 2), /* MBOX_SET_INIT_SCSI_ID */ 112 PACKB(2, 2), /* MBOX_SET_SELECT_TIMEOUT */ 113 PACKB(3, 3), /* MBOX_SET_RETRY_COUNT */ 114 PACKB(2, 2), /* MBOX_SET_TAG_AGE_LIMIT */ 115 PACKB(2, 2), /* MBOX_SET_CLOCK_RATE */ 116 PACKB(2, 2), /* MBOX_SET_ACTIVE_NEG_STATE */ 117 PACKB(2, 2), /* MBOX_SET_ASYNC_DATA_SETUP_TIME */ 118 PACKB(3, 3), /* MBOX_SET_SBUS_CONTROL_PARAMS */ 119 PACKB(4, 4), /* MBOX_SET_TARGET_PARAMS */ 120 PACKB(4, 4), /* MBOX_SET_DEV_QUEUE_PARAMS */ 121 PACKB(0, 0), /* 0x003a */ 122 PACKB(0, 0), /* 0x003b */ 123 PACKB(0, 0), /* 0x003c */ 124 PACKB(0, 0), /* 0x003d */ 125 PACKB(0, 0), /* 0x003e */ 126 PACKB(0, 0), /* 0x003f */ 127 PACKB(0, 0), /* 0x0040 */ 128 PACKB(0, 0), /* 0x0041 */ 129 PACKB(0, 0) /* 0x0042 */ 130 }; 131 132 #define MAX_MBOX_COMMAND ARRAY_SIZE(mbox_param) 133 134 /* queue length's _must_ be power of two: */ 135 #define QUEUE_DEPTH(in, out, ql) ((in - out) & (ql)) 136 #define REQ_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, \ 137 QLOGICPTI_REQ_QUEUE_LEN) 138 #define RES_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, RES_QUEUE_LEN) 139 140 static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti) 141 { 142 sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB, 143 qpti->qregs + SBUS_CTRL); 144 } 145 146 static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti) 147 { 148 sbus_writew(0, qpti->qregs + SBUS_CTRL); 149 } 150 151 static inline void set_sbus_cfg1(struct qlogicpti *qpti) 152 { 153 u16 val; 154 u8 bursts = qpti->bursts; 155 156 #if 0 /* It appears that at least PTI cards do not support 157 * 64-byte bursts and that setting the B64 bit actually 158 * is a nop and the chip ends up using the smallest burst 159 * size. -DaveM 160 */ 161 if (sbus_can_burst64() && (bursts & DMA_BURST64)) { 162 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64); 163 } else 164 #endif 165 if (bursts & DMA_BURST32) { 166 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32); 167 } else if (bursts & DMA_BURST16) { 168 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16); 169 } else if (bursts & DMA_BURST8) { 170 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8); 171 } else { 172 val = 0; /* No sbus bursts for you... */ 173 } 174 sbus_writew(val, qpti->qregs + SBUS_CFG1); 175 } 176 177 static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force) 178 { 179 int loop_count; 180 u16 tmp; 181 182 if (mbox_param[param[0]] == 0) 183 return 1; 184 185 /* Set SBUS semaphore. */ 186 tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE); 187 tmp |= SBUS_SEMAPHORE_LCK; 188 sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE); 189 190 /* Wait for host IRQ bit to clear. */ 191 loop_count = DEFAULT_LOOP_COUNT; 192 while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) { 193 barrier(); 194 cpu_relax(); 195 } 196 if (!loop_count) 197 printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n", 198 qpti->qpti_id); 199 200 /* Write mailbox command registers. */ 201 switch (mbox_param[param[0]] >> 4) { 202 case 6: sbus_writew(param[5], qpti->qregs + MBOX5); 203 fallthrough; 204 case 5: sbus_writew(param[4], qpti->qregs + MBOX4); 205 fallthrough; 206 case 4: sbus_writew(param[3], qpti->qregs + MBOX3); 207 fallthrough; 208 case 3: sbus_writew(param[2], qpti->qregs + MBOX2); 209 fallthrough; 210 case 2: sbus_writew(param[1], qpti->qregs + MBOX1); 211 fallthrough; 212 case 1: sbus_writew(param[0], qpti->qregs + MBOX0); 213 } 214 215 /* Clear RISC interrupt. */ 216 tmp = sbus_readw(qpti->qregs + HCCTRL); 217 tmp |= HCCTRL_CRIRQ; 218 sbus_writew(tmp, qpti->qregs + HCCTRL); 219 220 /* Clear SBUS semaphore. */ 221 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); 222 223 /* Set HOST interrupt. */ 224 tmp = sbus_readw(qpti->qregs + HCCTRL); 225 tmp |= HCCTRL_SHIRQ; 226 sbus_writew(tmp, qpti->qregs + HCCTRL); 227 228 /* Wait for HOST interrupt clears. */ 229 loop_count = DEFAULT_LOOP_COUNT; 230 while (--loop_count && 231 (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ)) 232 udelay(20); 233 if (!loop_count) 234 printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n", 235 qpti->qpti_id, param[0]); 236 237 /* Wait for SBUS semaphore to get set. */ 238 loop_count = DEFAULT_LOOP_COUNT; 239 while (--loop_count && 240 !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) { 241 udelay(20); 242 243 /* Workaround for some buggy chips. */ 244 if (sbus_readw(qpti->qregs + MBOX0) & 0x4000) 245 break; 246 } 247 if (!loop_count) 248 printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n", 249 qpti->qpti_id, param[0]); 250 251 /* Wait for MBOX busy condition to go away. */ 252 loop_count = DEFAULT_LOOP_COUNT; 253 while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04)) 254 udelay(20); 255 if (!loop_count) 256 printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n", 257 qpti->qpti_id, param[0]); 258 259 /* Read back output parameters. */ 260 switch (mbox_param[param[0]] & 0xf) { 261 case 6: param[5] = sbus_readw(qpti->qregs + MBOX5); 262 fallthrough; 263 case 5: param[4] = sbus_readw(qpti->qregs + MBOX4); 264 fallthrough; 265 case 4: param[3] = sbus_readw(qpti->qregs + MBOX3); 266 fallthrough; 267 case 3: param[2] = sbus_readw(qpti->qregs + MBOX2); 268 fallthrough; 269 case 2: param[1] = sbus_readw(qpti->qregs + MBOX1); 270 fallthrough; 271 case 1: param[0] = sbus_readw(qpti->qregs + MBOX0); 272 } 273 274 /* Clear RISC interrupt. */ 275 tmp = sbus_readw(qpti->qregs + HCCTRL); 276 tmp |= HCCTRL_CRIRQ; 277 sbus_writew(tmp, qpti->qregs + HCCTRL); 278 279 /* Release SBUS semaphore. */ 280 tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE); 281 tmp &= ~(SBUS_SEMAPHORE_LCK); 282 sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE); 283 284 /* We're done. */ 285 return 0; 286 } 287 288 static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti) 289 { 290 int i; 291 292 qpti->host_param.initiator_scsi_id = qpti->scsi_id; 293 qpti->host_param.bus_reset_delay = 3; 294 qpti->host_param.retry_count = 0; 295 qpti->host_param.retry_delay = 5; 296 qpti->host_param.async_data_setup_time = 3; 297 qpti->host_param.req_ack_active_negation = 1; 298 qpti->host_param.data_line_active_negation = 1; 299 qpti->host_param.data_dma_burst_enable = 1; 300 qpti->host_param.command_dma_burst_enable = 1; 301 qpti->host_param.tag_aging = 8; 302 qpti->host_param.selection_timeout = 250; 303 qpti->host_param.max_queue_depth = 256; 304 305 for(i = 0; i < MAX_TARGETS; i++) { 306 /* 307 * disconnect, parity, arq, reneg on reset, and, oddly enough 308 * tags...the midlayer's notion of tagged support has to match 309 * our device settings, and since we base whether we enable a 310 * tag on a per-cmnd basis upon what the midlayer sez, we 311 * actually enable the capability here. 312 */ 313 qpti->dev_param[i].device_flags = 0xcd; 314 qpti->dev_param[i].execution_throttle = 16; 315 if (qpti->ultra) { 316 qpti->dev_param[i].synchronous_period = 12; 317 qpti->dev_param[i].synchronous_offset = 8; 318 } else { 319 qpti->dev_param[i].synchronous_period = 25; 320 qpti->dev_param[i].synchronous_offset = 12; 321 } 322 qpti->dev_param[i].device_enable = 1; 323 } 324 } 325 326 static int qlogicpti_reset_hardware(struct Scsi_Host *host) 327 { 328 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 329 u_short param[6]; 330 unsigned short risc_code_addr; 331 int loop_count, i; 332 unsigned long flags; 333 334 risc_code_addr = 0x1000; /* all load addresses are at 0x1000 */ 335 336 spin_lock_irqsave(host->host_lock, flags); 337 338 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); 339 340 /* Only reset the scsi bus if it is not free. */ 341 if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) { 342 sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE); 343 sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD); 344 udelay(400); 345 } 346 347 sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL); 348 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL); 349 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL); 350 351 loop_count = DEFAULT_LOOP_COUNT; 352 while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04)) 353 udelay(20); 354 if (!loop_count) 355 printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n", 356 qpti->qpti_id); 357 358 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); 359 set_sbus_cfg1(qpti); 360 qlogicpti_enable_irqs(qpti); 361 362 if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) { 363 qpti->ultra = 1; 364 sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA), 365 qpti->qregs + RISC_MTREG); 366 } else { 367 qpti->ultra = 0; 368 sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT), 369 qpti->qregs + RISC_MTREG); 370 } 371 372 /* reset adapter and per-device default values. */ 373 /* do it after finding out whether we're ultra mode capable */ 374 qlogicpti_set_hostdev_defaults(qpti); 375 376 /* Release the RISC processor. */ 377 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); 378 379 /* Get RISC to start executing the firmware code. */ 380 param[0] = MBOX_EXEC_FIRMWARE; 381 param[1] = risc_code_addr; 382 if (qlogicpti_mbox_command(qpti, param, 1)) { 383 printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n", 384 qpti->qpti_id); 385 spin_unlock_irqrestore(host->host_lock, flags); 386 return 1; 387 } 388 389 /* Set initiator scsi ID. */ 390 param[0] = MBOX_SET_INIT_SCSI_ID; 391 param[1] = qpti->host_param.initiator_scsi_id; 392 if (qlogicpti_mbox_command(qpti, param, 1) || 393 (param[0] != MBOX_COMMAND_COMPLETE)) { 394 printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n", 395 qpti->qpti_id); 396 spin_unlock_irqrestore(host->host_lock, flags); 397 return 1; 398 } 399 400 /* Initialize state of the queues, both hw and sw. */ 401 qpti->req_in_ptr = qpti->res_out_ptr = 0; 402 403 param[0] = MBOX_INIT_RES_QUEUE; 404 param[1] = RES_QUEUE_LEN + 1; 405 param[2] = (u_short) (qpti->res_dvma >> 16); 406 param[3] = (u_short) (qpti->res_dvma & 0xffff); 407 param[4] = param[5] = 0; 408 if (qlogicpti_mbox_command(qpti, param, 1)) { 409 printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n", 410 qpti->qpti_id); 411 spin_unlock_irqrestore(host->host_lock, flags); 412 return 1; 413 } 414 415 param[0] = MBOX_INIT_REQ_QUEUE; 416 param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1; 417 param[2] = (u_short) (qpti->req_dvma >> 16); 418 param[3] = (u_short) (qpti->req_dvma & 0xffff); 419 param[4] = param[5] = 0; 420 if (qlogicpti_mbox_command(qpti, param, 1)) { 421 printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n", 422 qpti->qpti_id); 423 spin_unlock_irqrestore(host->host_lock, flags); 424 return 1; 425 } 426 427 param[0] = MBOX_SET_RETRY_COUNT; 428 param[1] = qpti->host_param.retry_count; 429 param[2] = qpti->host_param.retry_delay; 430 qlogicpti_mbox_command(qpti, param, 0); 431 432 param[0] = MBOX_SET_TAG_AGE_LIMIT; 433 param[1] = qpti->host_param.tag_aging; 434 qlogicpti_mbox_command(qpti, param, 0); 435 436 for (i = 0; i < MAX_TARGETS; i++) { 437 param[0] = MBOX_GET_DEV_QUEUE_PARAMS; 438 param[1] = (i << 8); 439 qlogicpti_mbox_command(qpti, param, 0); 440 } 441 442 param[0] = MBOX_GET_FIRMWARE_STATUS; 443 qlogicpti_mbox_command(qpti, param, 0); 444 445 param[0] = MBOX_SET_SELECT_TIMEOUT; 446 param[1] = qpti->host_param.selection_timeout; 447 qlogicpti_mbox_command(qpti, param, 0); 448 449 for (i = 0; i < MAX_TARGETS; i++) { 450 param[0] = MBOX_SET_TARGET_PARAMS; 451 param[1] = (i << 8); 452 param[2] = (qpti->dev_param[i].device_flags << 8); 453 /* 454 * Since we're now loading 1.31 f/w, force narrow/async. 455 */ 456 param[2] |= 0xc0; 457 param[3] = 0; /* no offset, we do not have sync mode yet */ 458 qlogicpti_mbox_command(qpti, param, 0); 459 } 460 461 /* 462 * Always (sigh) do an initial bus reset (kicks f/w). 463 */ 464 param[0] = MBOX_BUS_RESET; 465 param[1] = qpti->host_param.bus_reset_delay; 466 qlogicpti_mbox_command(qpti, param, 0); 467 qpti->send_marker = 1; 468 469 spin_unlock_irqrestore(host->host_lock, flags); 470 return 0; 471 } 472 473 #define PTI_RESET_LIMIT 400 474 475 static int qlogicpti_load_firmware(struct qlogicpti *qpti) 476 { 477 const struct firmware *fw; 478 const char fwname[] = "qlogic/isp1000.bin"; 479 const __le16 *fw_data; 480 struct Scsi_Host *host = qpti->qhost; 481 unsigned short csum = 0; 482 unsigned short param[6]; 483 unsigned short risc_code_addr, risc_code_length; 484 int err; 485 unsigned long flags; 486 int i, timeout; 487 488 err = request_firmware(&fw, fwname, &qpti->op->dev); 489 if (err) { 490 printk(KERN_ERR "Failed to load image \"%s\" err %d\n", 491 fwname, err); 492 return err; 493 } 494 if (fw->size % 2) { 495 printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", 496 fw->size, fwname); 497 err = -EINVAL; 498 goto outfirm; 499 } 500 fw_data = (const __le16 *)&fw->data[0]; 501 risc_code_addr = 0x1000; /* all f/w modules load at 0x1000 */ 502 risc_code_length = fw->size / 2; 503 504 spin_lock_irqsave(host->host_lock, flags); 505 506 /* Verify the checksum twice, one before loading it, and once 507 * afterwards via the mailbox commands. 508 */ 509 for (i = 0; i < risc_code_length; i++) 510 csum += __le16_to_cpu(fw_data[i]); 511 if (csum) { 512 printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!", 513 qpti->qpti_id); 514 err = 1; 515 goto out; 516 } 517 sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL); 518 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL); 519 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL); 520 timeout = PTI_RESET_LIMIT; 521 while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET)) 522 udelay(20); 523 if (!timeout) { 524 printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id); 525 err = 1; 526 goto out; 527 } 528 529 sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL); 530 mdelay(1); 531 532 sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL); 533 set_sbus_cfg1(qpti); 534 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); 535 536 if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) { 537 qpti->ultra = 1; 538 sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA), 539 qpti->qregs + RISC_MTREG); 540 } else { 541 qpti->ultra = 0; 542 sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT), 543 qpti->qregs + RISC_MTREG); 544 } 545 546 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); 547 548 /* Pin lines are only stable while RISC is paused. */ 549 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); 550 if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE) 551 qpti->differential = 1; 552 else 553 qpti->differential = 0; 554 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); 555 556 /* This shouldn't be necessary- we've reset things so we should be 557 running from the ROM now.. */ 558 559 param[0] = MBOX_STOP_FIRMWARE; 560 param[1] = param[2] = param[3] = param[4] = param[5] = 0; 561 if (qlogicpti_mbox_command(qpti, param, 1)) { 562 printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n", 563 qpti->qpti_id); 564 err = 1; 565 goto out; 566 } 567 568 /* Load it up.. */ 569 for (i = 0; i < risc_code_length; i++) { 570 param[0] = MBOX_WRITE_RAM_WORD; 571 param[1] = risc_code_addr + i; 572 param[2] = __le16_to_cpu(fw_data[i]); 573 if (qlogicpti_mbox_command(qpti, param, 1) || 574 param[0] != MBOX_COMMAND_COMPLETE) { 575 printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n", 576 qpti->qpti_id); 577 err = 1; 578 goto out; 579 } 580 } 581 582 /* Reset the ISP again. */ 583 sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL); 584 mdelay(1); 585 586 qlogicpti_enable_irqs(qpti); 587 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); 588 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); 589 590 /* Ask ISP to verify the checksum of the new code. */ 591 param[0] = MBOX_VERIFY_CHECKSUM; 592 param[1] = risc_code_addr; 593 if (qlogicpti_mbox_command(qpti, param, 1) || 594 (param[0] != MBOX_COMMAND_COMPLETE)) { 595 printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n", 596 qpti->qpti_id); 597 err = 1; 598 goto out; 599 } 600 601 /* Start using newly downloaded firmware. */ 602 param[0] = MBOX_EXEC_FIRMWARE; 603 param[1] = risc_code_addr; 604 qlogicpti_mbox_command(qpti, param, 1); 605 606 param[0] = MBOX_ABOUT_FIRMWARE; 607 if (qlogicpti_mbox_command(qpti, param, 1) || 608 (param[0] != MBOX_COMMAND_COMPLETE)) { 609 printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n", 610 qpti->qpti_id); 611 err = 1; 612 goto out; 613 } 614 615 /* Snag the major and minor revisions from the result. */ 616 qpti->fware_majrev = param[1]; 617 qpti->fware_minrev = param[2]; 618 qpti->fware_micrev = param[3]; 619 620 /* Set the clock rate */ 621 param[0] = MBOX_SET_CLOCK_RATE; 622 param[1] = qpti->clock; 623 if (qlogicpti_mbox_command(qpti, param, 1) || 624 (param[0] != MBOX_COMMAND_COMPLETE)) { 625 printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n", 626 qpti->qpti_id); 627 err = 1; 628 goto out; 629 } 630 631 if (qpti->is_pti != 0) { 632 /* Load scsi initiator ID and interrupt level into sbus static ram. */ 633 param[0] = MBOX_WRITE_RAM_WORD; 634 param[1] = 0xff80; 635 param[2] = (unsigned short) qpti->scsi_id; 636 qlogicpti_mbox_command(qpti, param, 1); 637 638 param[0] = MBOX_WRITE_RAM_WORD; 639 param[1] = 0xff00; 640 param[2] = (unsigned short) 3; 641 qlogicpti_mbox_command(qpti, param, 1); 642 } 643 644 out: 645 spin_unlock_irqrestore(host->host_lock, flags); 646 outfirm: 647 release_firmware(fw); 648 return err; 649 } 650 651 static int qlogicpti_verify_tmon(struct qlogicpti *qpti) 652 { 653 int curstat = sbus_readb(qpti->sreg); 654 655 curstat &= 0xf0; 656 if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE)) 657 printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id); 658 if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER)) 659 printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id); 660 if (curstat != qpti->swsreg) { 661 int error = 0; 662 if (curstat & SREG_FUSE) { 663 error++; 664 printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id); 665 } 666 if (curstat & SREG_TPOWER) { 667 error++; 668 printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id); 669 } 670 if (qpti->differential && 671 (curstat & SREG_DSENSE) != SREG_DSENSE) { 672 error++; 673 printk("qlogicpti%d: You have a single ended device on a " 674 "differential bus! Please fix!\n", qpti->qpti_id); 675 } 676 qpti->swsreg = curstat; 677 return error; 678 } 679 return 0; 680 } 681 682 static irqreturn_t qpti_intr(int irq, void *dev_id); 683 684 static void qpti_chain_add(struct qlogicpti *qpti) 685 { 686 spin_lock_irq(&qptichain_lock); 687 if (qptichain != NULL) { 688 struct qlogicpti *qlink = qptichain; 689 690 while(qlink->next) 691 qlink = qlink->next; 692 qlink->next = qpti; 693 } else { 694 qptichain = qpti; 695 } 696 qpti->next = NULL; 697 spin_unlock_irq(&qptichain_lock); 698 } 699 700 static void qpti_chain_del(struct qlogicpti *qpti) 701 { 702 spin_lock_irq(&qptichain_lock); 703 if (qptichain == qpti) { 704 qptichain = qpti->next; 705 } else { 706 struct qlogicpti *qlink = qptichain; 707 while(qlink->next != qpti) 708 qlink = qlink->next; 709 qlink->next = qpti->next; 710 } 711 qpti->next = NULL; 712 spin_unlock_irq(&qptichain_lock); 713 } 714 715 static int qpti_map_regs(struct qlogicpti *qpti) 716 { 717 struct platform_device *op = qpti->op; 718 719 qpti->qregs = of_ioremap(&op->resource[0], 0, 720 resource_size(&op->resource[0]), 721 "PTI Qlogic/ISP"); 722 if (!qpti->qregs) { 723 printk("PTI: Qlogic/ISP registers are unmappable\n"); 724 return -ENODEV; 725 } 726 if (qpti->is_pti) { 727 qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096), 728 sizeof(unsigned char), 729 "PTI Qlogic/ISP statreg"); 730 if (!qpti->sreg) { 731 printk("PTI: Qlogic/ISP status register is unmappable\n"); 732 return -ENODEV; 733 } 734 } 735 return 0; 736 } 737 738 static int qpti_register_irq(struct qlogicpti *qpti) 739 { 740 struct platform_device *op = qpti->op; 741 742 qpti->qhost->irq = qpti->irq = op->archdata.irqs[0]; 743 744 /* We used to try various overly-clever things to 745 * reduce the interrupt processing overhead on 746 * sun4c/sun4m when multiple PTI's shared the 747 * same IRQ. It was too complex and messy to 748 * sanely maintain. 749 */ 750 if (request_irq(qpti->irq, qpti_intr, 751 IRQF_SHARED, "QlogicPTI", qpti)) 752 goto fail; 753 754 printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq); 755 756 return 0; 757 758 fail: 759 printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id); 760 return -1; 761 } 762 763 static void qpti_get_scsi_id(struct qlogicpti *qpti) 764 { 765 struct platform_device *op = qpti->op; 766 struct device_node *dp; 767 768 dp = op->dev.of_node; 769 770 qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1); 771 if (qpti->scsi_id == -1) 772 qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 773 -1); 774 if (qpti->scsi_id == -1) 775 qpti->scsi_id = 776 of_getintprop_default(dp->parent, 777 "scsi-initiator-id", 7); 778 qpti->qhost->this_id = qpti->scsi_id; 779 qpti->qhost->max_sectors = 64; 780 781 printk("SCSI ID %d ", qpti->scsi_id); 782 } 783 784 static void qpti_get_bursts(struct qlogicpti *qpti) 785 { 786 struct platform_device *op = qpti->op; 787 u8 bursts, bmask; 788 789 bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff); 790 bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff); 791 if (bmask != 0xff) 792 bursts &= bmask; 793 if (bursts == 0xff || 794 (bursts & DMA_BURST16) == 0 || 795 (bursts & DMA_BURST32) == 0) 796 bursts = (DMA_BURST32 - 1); 797 798 qpti->bursts = bursts; 799 } 800 801 static void qpti_get_clock(struct qlogicpti *qpti) 802 { 803 unsigned int cfreq; 804 805 /* Check for what the clock input to this card is. 806 * Default to 40Mhz. 807 */ 808 cfreq = prom_getintdefault(qpti->prom_node,"clock-frequency",40000000); 809 qpti->clock = (cfreq + 500000)/1000000; 810 if (qpti->clock == 0) /* bullshit */ 811 qpti->clock = 40; 812 } 813 814 /* The request and response queues must each be aligned 815 * on a page boundary. 816 */ 817 static int qpti_map_queues(struct qlogicpti *qpti) 818 { 819 struct platform_device *op = qpti->op; 820 821 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 822 qpti->res_cpu = dma_alloc_coherent(&op->dev, 823 QSIZE(RES_QUEUE_LEN), 824 &qpti->res_dvma, GFP_ATOMIC); 825 if (qpti->res_cpu == NULL || 826 qpti->res_dvma == 0) { 827 printk("QPTI: Cannot map response queue.\n"); 828 return -1; 829 } 830 831 qpti->req_cpu = dma_alloc_coherent(&op->dev, 832 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 833 &qpti->req_dvma, GFP_ATOMIC); 834 if (qpti->req_cpu == NULL || 835 qpti->req_dvma == 0) { 836 dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN), 837 qpti->res_cpu, qpti->res_dvma); 838 printk("QPTI: Cannot map request queue.\n"); 839 return -1; 840 } 841 memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN)); 842 memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN)); 843 return 0; 844 } 845 846 static const char *qlogicpti_info(struct Scsi_Host *host) 847 { 848 static char buf[80]; 849 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 850 851 sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p", 852 qpti->qhost->irq, qpti->qregs); 853 return buf; 854 } 855 856 /* I am a certified frobtronicist. */ 857 static inline void marker_frob(struct Command_Entry *cmd) 858 { 859 struct Marker_Entry *marker = (struct Marker_Entry *) cmd; 860 861 memset(marker, 0, sizeof(struct Marker_Entry)); 862 marker->hdr.entry_cnt = 1; 863 marker->hdr.entry_type = ENTRY_MARKER; 864 marker->modifier = SYNC_ALL; 865 marker->rsvd = 0; 866 } 867 868 static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd, 869 struct qlogicpti *qpti) 870 { 871 memset(cmd, 0, sizeof(struct Command_Entry)); 872 cmd->hdr.entry_cnt = 1; 873 cmd->hdr.entry_type = ENTRY_COMMAND; 874 cmd->target_id = Cmnd->device->id; 875 cmd->target_lun = Cmnd->device->lun; 876 cmd->cdb_length = Cmnd->cmd_len; 877 cmd->control_flags = 0; 878 if (Cmnd->device->tagged_supported) { 879 if (qpti->cmd_count[Cmnd->device->id] == 0) 880 qpti->tag_ages[Cmnd->device->id] = jiffies; 881 if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) { 882 cmd->control_flags = CFLAG_ORDERED_TAG; 883 qpti->tag_ages[Cmnd->device->id] = jiffies; 884 } else 885 cmd->control_flags = CFLAG_SIMPLE_TAG; 886 } 887 if ((Cmnd->cmnd[0] == WRITE_6) || 888 (Cmnd->cmnd[0] == WRITE_10) || 889 (Cmnd->cmnd[0] == WRITE_12)) 890 cmd->control_flags |= CFLAG_WRITE; 891 else 892 cmd->control_flags |= CFLAG_READ; 893 cmd->time_out = scsi_cmd_to_rq(Cmnd)->timeout / HZ; 894 memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len); 895 } 896 897 /* Do it to it baby. */ 898 static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, 899 struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr) 900 { 901 struct dataseg *ds; 902 struct scatterlist *sg, *s; 903 int i, n; 904 905 if (scsi_bufflen(Cmnd)) { 906 int sg_count; 907 908 sg = scsi_sglist(Cmnd); 909 sg_count = dma_map_sg(&qpti->op->dev, sg, 910 scsi_sg_count(Cmnd), 911 Cmnd->sc_data_direction); 912 if (!sg_count) 913 return -1; 914 ds = cmd->dataseg; 915 cmd->segment_cnt = sg_count; 916 917 /* Fill in first four sg entries: */ 918 n = sg_count; 919 if (n > 4) 920 n = 4; 921 for_each_sg(sg, s, n, i) { 922 ds[i].d_base = sg_dma_address(s); 923 ds[i].d_count = sg_dma_len(s); 924 } 925 sg_count -= 4; 926 sg = s; 927 while (sg_count > 0) { 928 struct Continuation_Entry *cont; 929 930 ++cmd->hdr.entry_cnt; 931 cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr]; 932 in_ptr = NEXT_REQ_PTR(in_ptr); 933 if (in_ptr == out_ptr) 934 return -1; 935 936 cont->hdr.entry_type = ENTRY_CONTINUATION; 937 cont->hdr.entry_cnt = 0; 938 cont->hdr.sys_def_1 = 0; 939 cont->hdr.flags = 0; 940 cont->reserved = 0; 941 ds = cont->dataseg; 942 n = sg_count; 943 if (n > 7) 944 n = 7; 945 for_each_sg(sg, s, n, i) { 946 ds[i].d_base = sg_dma_address(s); 947 ds[i].d_count = sg_dma_len(s); 948 } 949 sg_count -= n; 950 sg = s; 951 } 952 } else { 953 cmd->dataseg[0].d_base = 0; 954 cmd->dataseg[0].d_count = 0; 955 cmd->segment_cnt = 1; /* Shouldn't this be 0? */ 956 } 957 958 /* Committed, record Scsi_Cmd so we can find it later. */ 959 cmd->handle = in_ptr; 960 qpti->cmd_slots[in_ptr] = Cmnd; 961 962 qpti->cmd_count[Cmnd->device->id]++; 963 sbus_writew(in_ptr, qpti->qregs + MBOX4); 964 qpti->req_in_ptr = in_ptr; 965 966 return in_ptr; 967 } 968 969 static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr) 970 { 971 /* Temporary workaround until bug is found and fixed (one bug has been found 972 already, but fixing it makes things even worse) -jj */ 973 int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64; 974 host->can_queue = scsi_host_busy(host) + num_free; 975 host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); 976 } 977 978 static int qlogicpti_sdev_configure(struct scsi_device *sdev, 979 struct queue_limits *lim) 980 { 981 struct qlogicpti *qpti = shost_priv(sdev->host); 982 int tgt = sdev->id; 983 u_short param[6]; 984 985 /* tags handled in midlayer */ 986 /* enable sync mode? */ 987 if (sdev->sdtr) { 988 qpti->dev_param[tgt].device_flags |= 0x10; 989 } else { 990 qpti->dev_param[tgt].synchronous_offset = 0; 991 qpti->dev_param[tgt].synchronous_period = 0; 992 } 993 /* are we wide capable? */ 994 if (sdev->wdtr) 995 qpti->dev_param[tgt].device_flags |= 0x20; 996 997 param[0] = MBOX_SET_TARGET_PARAMS; 998 param[1] = (tgt << 8); 999 param[2] = (qpti->dev_param[tgt].device_flags << 8); 1000 if (qpti->dev_param[tgt].device_flags & 0x10) { 1001 param[3] = (qpti->dev_param[tgt].synchronous_offset << 8) | 1002 qpti->dev_param[tgt].synchronous_period; 1003 } else { 1004 param[3] = 0; 1005 } 1006 qlogicpti_mbox_command(qpti, param, 0); 1007 return 0; 1008 } 1009 1010 /* 1011 * The middle SCSI layer ensures that queuecommand never gets invoked 1012 * concurrently with itself or the interrupt handler (though the 1013 * interrupt handler may call this routine as part of 1014 * request-completion handling). 1015 * 1016 * "This code must fly." -davem 1017 */ 1018 static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd) 1019 { 1020 void (*done)(struct scsi_cmnd *) = scsi_done; 1021 struct Scsi_Host *host = Cmnd->device->host; 1022 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 1023 struct Command_Entry *cmd; 1024 u_int out_ptr; 1025 int in_ptr; 1026 1027 in_ptr = qpti->req_in_ptr; 1028 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr]; 1029 out_ptr = sbus_readw(qpti->qregs + MBOX4); 1030 in_ptr = NEXT_REQ_PTR(in_ptr); 1031 if (in_ptr == out_ptr) 1032 goto toss_command; 1033 1034 if (qpti->send_marker) { 1035 marker_frob(cmd); 1036 qpti->send_marker = 0; 1037 if (NEXT_REQ_PTR(in_ptr) == out_ptr) { 1038 sbus_writew(in_ptr, qpti->qregs + MBOX4); 1039 qpti->req_in_ptr = in_ptr; 1040 goto toss_command; 1041 } 1042 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr]; 1043 in_ptr = NEXT_REQ_PTR(in_ptr); 1044 } 1045 cmd_frob(cmd, Cmnd, qpti); 1046 if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1) 1047 goto toss_command; 1048 1049 update_can_queue(host, in_ptr, out_ptr); 1050 1051 return 0; 1052 1053 toss_command: 1054 printk(KERN_EMERG "qlogicpti%d: request queue overflow\n", 1055 qpti->qpti_id); 1056 1057 /* Unfortunately, unless you use the new EH code, which 1058 * we don't, the midlayer will ignore the return value, 1059 * which is insane. We pick up the pieces like this. 1060 */ 1061 Cmnd->result = DID_BUS_BUSY; 1062 done(Cmnd); 1063 return 1; 1064 } 1065 1066 static DEF_SCSI_QCMD(qlogicpti_queuecommand) 1067 1068 static int qlogicpti_return_status(struct Status_Entry *sts, int id) 1069 { 1070 int host_status = DID_ERROR; 1071 1072 switch (sts->completion_status) { 1073 case CS_COMPLETE: 1074 host_status = DID_OK; 1075 break; 1076 case CS_INCOMPLETE: 1077 if (!(sts->state_flags & SF_GOT_BUS)) 1078 host_status = DID_NO_CONNECT; 1079 else if (!(sts->state_flags & SF_GOT_TARGET)) 1080 host_status = DID_BAD_TARGET; 1081 else if (!(sts->state_flags & SF_SENT_CDB)) 1082 host_status = DID_ERROR; 1083 else if (!(sts->state_flags & SF_TRANSFERRED_DATA)) 1084 host_status = DID_ERROR; 1085 else if (!(sts->state_flags & SF_GOT_STATUS)) 1086 host_status = DID_ERROR; 1087 else if (!(sts->state_flags & SF_GOT_SENSE)) 1088 host_status = DID_ERROR; 1089 break; 1090 case CS_DMA_ERROR: 1091 case CS_TRANSPORT_ERROR: 1092 host_status = DID_ERROR; 1093 break; 1094 case CS_RESET_OCCURRED: 1095 case CS_BUS_RESET: 1096 host_status = DID_RESET; 1097 break; 1098 case CS_ABORTED: 1099 host_status = DID_ABORT; 1100 break; 1101 case CS_TIMEOUT: 1102 host_status = DID_TIME_OUT; 1103 break; 1104 case CS_DATA_OVERRUN: 1105 case CS_COMMAND_OVERRUN: 1106 case CS_STATUS_OVERRUN: 1107 case CS_BAD_MESSAGE: 1108 case CS_NO_MESSAGE_OUT: 1109 case CS_EXT_ID_FAILED: 1110 case CS_IDE_MSG_FAILED: 1111 case CS_ABORT_MSG_FAILED: 1112 case CS_NOP_MSG_FAILED: 1113 case CS_PARITY_ERROR_MSG_FAILED: 1114 case CS_DEVICE_RESET_MSG_FAILED: 1115 case CS_ID_MSG_FAILED: 1116 case CS_UNEXP_BUS_FREE: 1117 host_status = DID_ERROR; 1118 break; 1119 case CS_DATA_UNDERRUN: 1120 host_status = DID_OK; 1121 break; 1122 default: 1123 printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n", 1124 id, sts->completion_status); 1125 host_status = DID_ERROR; 1126 break; 1127 } 1128 1129 return (sts->scsi_status & STATUS_MASK) | (host_status << 16); 1130 } 1131 1132 static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti) 1133 { 1134 struct scsi_cmnd *Cmnd, *done_queue = NULL; 1135 struct Status_Entry *sts; 1136 u_int in_ptr, out_ptr; 1137 1138 if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT)) 1139 return NULL; 1140 1141 in_ptr = sbus_readw(qpti->qregs + MBOX5); 1142 sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL); 1143 if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) { 1144 switch (sbus_readw(qpti->qregs + MBOX0)) { 1145 case ASYNC_SCSI_BUS_RESET: 1146 case EXECUTION_TIMEOUT_RESET: 1147 qpti->send_marker = 1; 1148 break; 1149 case INVALID_COMMAND: 1150 case HOST_INTERFACE_ERROR: 1151 case COMMAND_ERROR: 1152 case COMMAND_PARAM_ERROR: 1153 break; 1154 }; 1155 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); 1156 } 1157 1158 /* This looks like a network driver! */ 1159 out_ptr = qpti->res_out_ptr; 1160 while (out_ptr != in_ptr) { 1161 u_int cmd_slot; 1162 1163 sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr]; 1164 out_ptr = NEXT_RES_PTR(out_ptr); 1165 1166 /* We store an index in the handle, not the pointer in 1167 * some form. This avoids problems due to the fact 1168 * that the handle provided is only 32-bits. -DaveM 1169 */ 1170 cmd_slot = sts->handle; 1171 Cmnd = qpti->cmd_slots[cmd_slot]; 1172 qpti->cmd_slots[cmd_slot] = NULL; 1173 1174 if (sts->completion_status == CS_RESET_OCCURRED || 1175 sts->completion_status == CS_ABORTED || 1176 (sts->status_flags & STF_BUS_RESET)) 1177 qpti->send_marker = 1; 1178 1179 if (sts->state_flags & SF_GOT_SENSE) 1180 memcpy(Cmnd->sense_buffer, sts->req_sense_data, 1181 SCSI_SENSE_BUFFERSIZE); 1182 1183 if (sts->hdr.entry_type == ENTRY_STATUS) 1184 Cmnd->result = 1185 qlogicpti_return_status(sts, qpti->qpti_id); 1186 else 1187 Cmnd->result = DID_ERROR << 16; 1188 1189 if (scsi_bufflen(Cmnd)) 1190 dma_unmap_sg(&qpti->op->dev, 1191 scsi_sglist(Cmnd), scsi_sg_count(Cmnd), 1192 Cmnd->sc_data_direction); 1193 1194 qpti->cmd_count[Cmnd->device->id]--; 1195 sbus_writew(out_ptr, qpti->qregs + MBOX5); 1196 Cmnd->host_scribble = (unsigned char *) done_queue; 1197 done_queue = Cmnd; 1198 } 1199 qpti->res_out_ptr = out_ptr; 1200 1201 return done_queue; 1202 } 1203 1204 static irqreturn_t qpti_intr(int irq, void *dev_id) 1205 { 1206 struct qlogicpti *qpti = dev_id; 1207 unsigned long flags; 1208 struct scsi_cmnd *dq; 1209 1210 spin_lock_irqsave(qpti->qhost->host_lock, flags); 1211 dq = qlogicpti_intr_handler(qpti); 1212 1213 if (dq != NULL) { 1214 do { 1215 struct scsi_cmnd *next; 1216 1217 next = (struct scsi_cmnd *) dq->host_scribble; 1218 scsi_done(dq); 1219 dq = next; 1220 } while (dq != NULL); 1221 } 1222 spin_unlock_irqrestore(qpti->qhost->host_lock, flags); 1223 1224 return IRQ_HANDLED; 1225 } 1226 1227 static int qlogicpti_abort(struct scsi_cmnd *Cmnd) 1228 { 1229 u_short param[6]; 1230 struct Scsi_Host *host = Cmnd->device->host; 1231 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 1232 int return_status = SUCCESS; 1233 u32 cmd_cookie; 1234 int i; 1235 1236 printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n", 1237 qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun); 1238 1239 qlogicpti_disable_irqs(qpti); 1240 1241 /* Find the 32-bit cookie we gave to the firmware for 1242 * this command. 1243 */ 1244 for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++) 1245 if (qpti->cmd_slots[i] == Cmnd) 1246 break; 1247 cmd_cookie = i; 1248 1249 param[0] = MBOX_ABORT; 1250 param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun; 1251 param[2] = cmd_cookie >> 16; 1252 param[3] = cmd_cookie & 0xffff; 1253 if (qlogicpti_mbox_command(qpti, param, 0) || 1254 (param[0] != MBOX_COMMAND_COMPLETE)) { 1255 printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n", 1256 qpti->qpti_id, param[0]); 1257 return_status = FAILED; 1258 } 1259 1260 qlogicpti_enable_irqs(qpti); 1261 1262 return return_status; 1263 } 1264 1265 static int qlogicpti_reset(struct scsi_cmnd *Cmnd) 1266 { 1267 u_short param[6]; 1268 struct Scsi_Host *host = Cmnd->device->host; 1269 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 1270 int return_status = SUCCESS; 1271 1272 printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n", 1273 qpti->qpti_id); 1274 1275 qlogicpti_disable_irqs(qpti); 1276 1277 param[0] = MBOX_BUS_RESET; 1278 param[1] = qpti->host_param.bus_reset_delay; 1279 if (qlogicpti_mbox_command(qpti, param, 0) || 1280 (param[0] != MBOX_COMMAND_COMPLETE)) { 1281 printk(KERN_EMERG "qlogicisp%d: scsi bus reset failure: %x\n", 1282 qpti->qpti_id, param[0]); 1283 return_status = FAILED; 1284 } 1285 1286 qlogicpti_enable_irqs(qpti); 1287 1288 return return_status; 1289 } 1290 1291 static const struct scsi_host_template qpti_template = { 1292 .module = THIS_MODULE, 1293 .name = "qlogicpti", 1294 .info = qlogicpti_info, 1295 .queuecommand = qlogicpti_queuecommand, 1296 .sdev_configure = qlogicpti_sdev_configure, 1297 .eh_abort_handler = qlogicpti_abort, 1298 .eh_host_reset_handler = qlogicpti_reset, 1299 .can_queue = QLOGICPTI_REQ_QUEUE_LEN, 1300 .this_id = 7, 1301 .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN), 1302 }; 1303 1304 static const struct of_device_id qpti_match[]; 1305 static int qpti_sbus_probe(struct platform_device *op) 1306 { 1307 struct device_node *dp = op->dev.of_node; 1308 struct Scsi_Host *host; 1309 struct qlogicpti *qpti; 1310 static int nqptis; 1311 const char *fcode; 1312 1313 /* Sometimes Antares cards come up not completely 1314 * setup, and we get a report of a zero IRQ. 1315 */ 1316 if (op->archdata.irqs[0] == 0) 1317 return -ENODEV; 1318 1319 host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti)); 1320 if (!host) 1321 return -ENOMEM; 1322 1323 qpti = shost_priv(host); 1324 1325 host->max_id = MAX_TARGETS; 1326 qpti->qhost = host; 1327 qpti->op = op; 1328 qpti->qpti_id = nqptis; 1329 qpti->is_pti = !of_node_name_eq(op->dev.of_node, "QLGC,isp"); 1330 1331 if (qpti_map_regs(qpti) < 0) 1332 goto fail_unlink; 1333 1334 if (qpti_register_irq(qpti) < 0) 1335 goto fail_unmap_regs; 1336 1337 qpti_get_scsi_id(qpti); 1338 qpti_get_bursts(qpti); 1339 qpti_get_clock(qpti); 1340 1341 /* Clear out scsi_cmnd array. */ 1342 memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots)); 1343 1344 if (qpti_map_queues(qpti) < 0) 1345 goto fail_free_irq; 1346 1347 /* Load the firmware. */ 1348 if (qlogicpti_load_firmware(qpti)) 1349 goto fail_unmap_queues; 1350 if (qpti->is_pti) { 1351 /* Check the PTI status reg. */ 1352 if (qlogicpti_verify_tmon(qpti)) 1353 goto fail_unmap_queues; 1354 } 1355 1356 /* Reset the ISP and init res/req queues. */ 1357 if (qlogicpti_reset_hardware(host)) 1358 goto fail_unmap_queues; 1359 1360 printk("(Firmware v%d.%d.%d)", qpti->fware_majrev, 1361 qpti->fware_minrev, qpti->fware_micrev); 1362 1363 fcode = of_get_property(dp, "isp-fcode", NULL); 1364 if (fcode && fcode[0]) 1365 printk("(FCode %s)", fcode); 1366 qpti->differential = of_property_read_bool(dp, "differential"); 1367 1368 printk("\nqlogicpti%d: [%s Wide, using %s interface]\n", 1369 qpti->qpti_id, 1370 (qpti->ultra ? "Ultra" : "Fast"), 1371 (qpti->differential ? "differential" : "single ended")); 1372 1373 if (scsi_add_host(host, &op->dev)) { 1374 printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id); 1375 goto fail_unmap_queues; 1376 } 1377 1378 dev_set_drvdata(&op->dev, qpti); 1379 1380 qpti_chain_add(qpti); 1381 1382 scsi_scan_host(host); 1383 nqptis++; 1384 1385 return 0; 1386 1387 fail_unmap_queues: 1388 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 1389 dma_free_coherent(&op->dev, 1390 QSIZE(RES_QUEUE_LEN), 1391 qpti->res_cpu, qpti->res_dvma); 1392 dma_free_coherent(&op->dev, 1393 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 1394 qpti->req_cpu, qpti->req_dvma); 1395 #undef QSIZE 1396 1397 fail_free_irq: 1398 free_irq(qpti->irq, qpti); 1399 1400 fail_unmap_regs: 1401 of_iounmap(&op->resource[0], qpti->qregs, 1402 resource_size(&op->resource[0])); 1403 if (qpti->is_pti) 1404 of_iounmap(&op->resource[0], qpti->sreg, 1405 sizeof(unsigned char)); 1406 1407 fail_unlink: 1408 scsi_host_put(host); 1409 1410 return -ENODEV; 1411 } 1412 1413 static void qpti_sbus_remove(struct platform_device *op) 1414 { 1415 struct qlogicpti *qpti = dev_get_drvdata(&op->dev); 1416 1417 qpti_chain_del(qpti); 1418 1419 scsi_remove_host(qpti->qhost); 1420 1421 /* Shut up the card. */ 1422 sbus_writew(0, qpti->qregs + SBUS_CTRL); 1423 1424 /* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */ 1425 free_irq(qpti->irq, qpti); 1426 1427 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 1428 dma_free_coherent(&op->dev, 1429 QSIZE(RES_QUEUE_LEN), 1430 qpti->res_cpu, qpti->res_dvma); 1431 dma_free_coherent(&op->dev, 1432 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 1433 qpti->req_cpu, qpti->req_dvma); 1434 #undef QSIZE 1435 1436 of_iounmap(&op->resource[0], qpti->qregs, 1437 resource_size(&op->resource[0])); 1438 if (qpti->is_pti) 1439 of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char)); 1440 1441 scsi_host_put(qpti->qhost); 1442 } 1443 1444 static const struct of_device_id qpti_match[] = { 1445 { 1446 .name = "ptisp", 1447 }, 1448 { 1449 .name = "PTI,ptisp", 1450 }, 1451 { 1452 .name = "QLGC,isp", 1453 }, 1454 { 1455 .name = "SUNW,isp", 1456 }, 1457 {}, 1458 }; 1459 MODULE_DEVICE_TABLE(of, qpti_match); 1460 1461 static struct platform_driver qpti_sbus_driver = { 1462 .driver = { 1463 .name = "qpti", 1464 .of_match_table = qpti_match, 1465 }, 1466 .probe = qpti_sbus_probe, 1467 .remove = qpti_sbus_remove, 1468 }; 1469 module_platform_driver(qpti_sbus_driver); 1470 1471 MODULE_DESCRIPTION("QlogicISP SBUS driver"); 1472 MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); 1473 MODULE_LICENSE("GPL"); 1474 MODULE_VERSION("2.1"); 1475 MODULE_FIRMWARE("qlogic/isp1000.bin"); 1476