1 /****************************************************************************** 2 * QLOGIC LINUX SOFTWARE 3 * 4 * QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver 5 * Copyright (C) 2000 Qlogic Corporation (www.qlogic.com) 6 * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc. 7 * Copyright (C) 2003-2004 Christoph Hellwig 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of the GNU General Public License as published by the 11 * Free Software Foundation; either version 2, or (at your option) any 12 * later version. 13 * 14 * This program is distributed in the hope that it will be useful, but 15 * WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * General Public License for more details. 18 * 19 ******************************************************************************/ 20 #define QLA1280_VERSION "3.25" 21 /***************************************************************************** 22 Revision History: 23 Rev 3.25.1, February 10, 2005 Christoph Hellwig 24 - use pci_map_single to map non-S/G requests 25 - remove qla1280_proc_info 26 Rev 3.25, September 28, 2004, Christoph Hellwig 27 - add support for ISP1020/1040 28 - don't include "scsi.h" anymore for 2.6.x 29 Rev 3.24.4 June 7, 2004 Christoph Hellwig 30 - restructure firmware loading, cleanup initialization code 31 - prepare support for ISP1020/1040 chips 32 Rev 3.24.3 January 19, 2004, Jes Sorensen 33 - Handle PCI DMA mask settings correctly 34 - Correct order of error handling in probe_one, free_irq should not 35 be called if request_irq failed 36 Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez 37 - Big endian fixes (James) 38 - Remove bogus IOCB content on zero data transfer commands (Andrew) 39 Rev 3.24.1 January 5, 2004, Jes Sorensen 40 - Initialize completion queue to avoid OOPS on probe 41 - Handle interrupts during mailbox testing 42 Rev 3.24 November 17, 2003, Christoph Hellwig 43 - use struct list_head for completion queue 44 - avoid old Scsi_FOO typedefs 45 - cleanup 2.4 compat glue a bit 46 - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h" 47 - make initialization for memory mapped vs port I/O more similar 48 - remove broken pci config space manipulation 49 - kill more cruft 50 - this is an almost perfect 2.6 scsi driver now! ;) 51 Rev 3.23.39 December 17, 2003, Jes Sorensen 52 - Delete completion queue from srb if mailbox command failed to 53 to avoid qla1280_done completeting qla1280_error_action's 54 obsolete context 55 - Reduce arguments for qla1280_done 56 Rev 3.23.38 October 18, 2003, Christoph Hellwig 57 - Convert to new-style hotplugable driver for 2.6 58 - Fix missing scsi_unregister/scsi_host_put on HBA removal 59 - Kill some more cruft 60 Rev 3.23.37 October 1, 2003, Jes Sorensen 61 - Make MMIO depend on CONFIG_X86_VISWS instead of yet another 62 random CONFIG option 63 - Clean up locking in probe path 64 Rev 3.23.36 October 1, 2003, Christoph Hellwig 65 - queuecommand only ever receives new commands - clear flags 66 - Reintegrate lost fixes from Linux 2.5 67 Rev 3.23.35 August 14, 2003, Jes Sorensen 68 - Build against 2.6 69 Rev 3.23.34 July 23, 2003, Jes Sorensen 70 - Remove pointless TRUE/FALSE macros 71 - Clean up vchan handling 72 Rev 3.23.33 July 3, 2003, Jes Sorensen 73 - Don't define register access macros before define determining MMIO. 74 This just happend to work out on ia64 but not elsewhere. 
75 - Don't try and read from the card while it is in reset as 76 it won't respond and causes an MCA 77 Rev 3.23.32 June 23, 2003, Jes Sorensen 78 - Basic support for boot time arguments 79 Rev 3.23.31 June 8, 2003, Jes Sorensen 80 - Reduce boot time messages 81 Rev 3.23.30 June 6, 2003, Jes Sorensen 82 - Do not enable sync/wide/ppr before it has been determined 83 that the target device actually supports it 84 - Enable DMA arbitration for multi channel controllers 85 Rev 3.23.29 June 3, 2003, Jes Sorensen 86 - Port to 2.5.69 87 Rev 3.23.28 June 3, 2003, Jes Sorensen 88 - Eliminate duplicate marker commands on bus resets 89 - Handle outstanding commands appropriately on bus/device resets 90 Rev 3.23.27 May 28, 2003, Jes Sorensen 91 - Remove bogus input queue code, let the Linux SCSI layer do the work 92 - Clean up NVRAM handling, only read it once from the card 93 - Add a number of missing default nvram parameters 94 Rev 3.23.26 Beta May 28, 2003, Jes Sorensen 95 - Use completion queue for mailbox commands instead of busy wait 96 Rev 3.23.25 Beta May 27, 2003, James Bottomley 97 - Migrate to use new error handling code 98 Rev 3.23.24 Beta May 21, 2003, James Bottomley 99 - Big endian support 100 - Cleanup data direction code 101 Rev 3.23.23 Beta May 12, 2003, Jes Sorensen 102 - Switch to using MMIO instead of PIO 103 Rev 3.23.22 Beta April 15, 2003, Jes Sorensen 104 - Fix PCI parity problem with 12160 during reset. 105 Rev 3.23.21 Beta April 14, 2003, Jes Sorensen 106 - Use pci_map_page()/pci_unmap_page() instead of map_single version. 107 Rev 3.23.20 Beta April 9, 2003, Jes Sorensen 108 - Remove < 2.4.x support 109 - Introduce HOST_LOCK to make the spin lock changes portable. 110 - Remove a bunch of idiotic and unnecessary typedef's 111 - Kill all leftovers of target-mode support which never worked anyway 112 Rev 3.23.19 Beta April 11, 2002, Linus Torvalds 113 - Do qla1280_pci_config() before calling request_irq() and 114 request_region() 115 - Use pci_dma_hi32() to handle upper word of DMA addresses instead 116 of large shifts 117 - Hand correct arguments to free_irq() in case of failure 118 Rev 3.23.18 Beta April 11, 2002, Jes Sorensen 119 - Run source through Lindent and clean up the output 120 Rev 3.23.17 Beta April 11, 2002, Jes Sorensen 121 - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32 122 Rev 3.23.16 Beta March 19, 2002, Jes Sorensen 123 - Rely on mailbox commands generating interrupts - do not 124 run qla1280_isr() from ql1280_mailbox_command() 125 - Remove device_reg_t 126 - Integrate ql12160_set_target_parameters() with 1280 version 127 - Make qla1280_setup() non static 128 - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request 129 sent to the card - this command pauses the firmare!!! 
130 Rev 3.23.15 Beta March 19, 2002, Jes Sorensen 131 - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions 132 - Remove a pile of pointless and confusing (srb_t **) and 133 (scsi_lu_t *) typecasts 134 - Explicit mark that we do not use the new error handling (for now) 135 - Remove scsi_qla_host_t and use 'struct' instead 136 - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled, 137 pci_64bit_slot flags which weren't used for anything anyway 138 - Grab host->host_lock while calling qla1280_isr() from abort() 139 - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we 140 do not need to save/restore flags in the interrupt handler 141 - Enable interrupts early (before any mailbox access) in preparation 142 for cleaning up the mailbox handling 143 Rev 3.23.14 Beta March 14, 2002, Jes Sorensen 144 - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace 145 it with proper use of dprintk(). 146 - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take 147 a debug level argument to determine if data is to be printed 148 - Add KERN_* info to printk() 149 Rev 3.23.13 Beta March 14, 2002, Jes Sorensen 150 - Significant cosmetic cleanups 151 - Change debug code to use dprintk() and remove #if mess 152 Rev 3.23.12 Beta March 13, 2002, Jes Sorensen 153 - More cosmetic cleanups, fix places treating return as function 154 - use cpu_relax() in qla1280_debounce_register() 155 Rev 3.23.11 Beta March 13, 2002, Jes Sorensen 156 - Make it compile under 2.5.5 157 Rev 3.23.10 Beta October 1, 2001, Jes Sorensen 158 - Do no typecast short * to long * in QL1280BoardTbl, this 159 broke miserably on big endian boxes 160 Rev 3.23.9 Beta September 30, 2001, Jes Sorensen 161 - Remove pre 2.2 hack for checking for reentrance in interrupt handler 162 - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32 163 unsigned int to match the types from struct scsi_cmnd 164 Rev 3.23.8 Beta September 29, 2001, Jes Sorensen 165 - Remove bogus timer_t typedef from qla1280.h 166 - Remove obsolete pre 2.2 PCI setup code, use proper #define's 167 for PCI_ values, call pci_set_master() 168 - Fix memleak of qla1280_buffer on module unload 169 - Only compile module parsing code #ifdef MODULE - should be 170 changed to use individual MODULE_PARM's later 171 - Remove dummy_buffer that was never modified nor printed 172 - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove 173 #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls 174 - Remove \r from print statements, this is Linux, not DOS 175 - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK} 176 dummy macros 177 - Remove C++ compile hack in header file as Linux driver are not 178 supposed to be compiled as C++ 179 - Kill MS_64BITS macro as it makes the code more readable 180 - Remove unnecessary flags.in_interrupts bit 181 Rev 3.23.7 Beta August 20, 2001, Jes Sorensen 182 - Dont' check for set flags on q->q_flag one by one in qla1280_next() 183 - Check whether the interrupt was generated by the QLA1280 before 184 doing any processing 185 - qla1280_status_entry(): Only zero out part of sense_buffer that 186 is not being copied into 187 - Remove more superflouous typecasts 188 - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy() 189 Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel 190 - Don't walk the entire list in qla1280_putq_t() just to directly 191 grab the pointer to the last element afterwards 192 Rev 3.23.5 Beta August 9, 2001, Jes Sorensen 193 - Don't use SA_INTERRUPT, it's use 
is deprecated for this kinda driver 194 Rev 3.23.4 Beta August 8, 2001, Jes Sorensen 195 - Set dev->max_sectors to 1024 196 Rev 3.23.3 Beta August 6, 2001, Jes Sorensen 197 - Provide compat macros for pci_enable_device(), pci_find_subsys() 198 and scsi_set_pci_device() 199 - Call scsi_set_pci_device() for all devices 200 - Reduce size of kernel version dependent device probe code 201 - Move duplicate probe/init code to separate function 202 - Handle error if qla1280_mem_alloc() fails 203 - Kill OFFSET() macro and use Linux's PCI definitions instead 204 - Kill private structure defining PCI config space (struct config_reg) 205 - Only allocate I/O port region if not in MMIO mode 206 - Remove duplicate (unused) sanity check of sife of srb_t 207 Rev 3.23.2 Beta August 6, 2001, Jes Sorensen 208 - Change home-brew memset() implementations to use memset() 209 - Remove all references to COMTRACE() - accessing a PC's COM2 serial 210 port directly is not legal under Linux. 211 Rev 3.23.1 Beta April 24, 2001, Jes Sorensen 212 - Remove pre 2.2 kernel support 213 - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat) 214 - Fix MMIO access to use readl/writel instead of directly 215 dereferencing pointers 216 - Nuke MSDOS debugging code 217 - Change true/false data types to int from uint8_t 218 - Use int for counters instead of uint8_t etc. 219 - Clean up size & byte order conversion macro usage 220 Rev 3.23 Beta January 11, 2001 BN Qlogic 221 - Added check of device_id when handling non 222 QLA12160s during detect(). 223 Rev 3.22 Beta January 5, 2001 BN Qlogic 224 - Changed queue_task() to schedule_task() 225 for kernels 2.4.0 and higher. 226 Note: 2.4.0-testxx kernels released prior to 227 the actual 2.4.0 kernel release on January 2001 228 will get compile/link errors with schedule_task(). 229 Please update your kernel to released 2.4.0 level, 230 or comment lines in this file flagged with 3.22 231 to resolve compile/link error of schedule_task(). 232 - Added -DCONFIG_SMP in addition to -D__SMP__ 233 in Makefile for 2.4.0 builds of driver as module. 234 Rev 3.21 Beta January 4, 2001 BN Qlogic 235 - Changed criteria of 64/32 Bit mode of HBA 236 operation according to BITS_PER_LONG rather 237 than HBA's NVRAM setting of >4Gig memory bit; 238 so that the HBA auto-configures without the need 239 to setup each system individually. 240 Rev 3.20 Beta December 5, 2000 BN Qlogic 241 - Added priority handling to IA-64 onboard SCSI 242 ISP12160 chip for kernels greater than 2.3.18. 243 - Added irqrestore for qla1280_intr_handler. 244 - Enabled /proc/scsi/qla1280 interface. 245 - Clear /proc/scsi/qla1280 counters in detect(). 246 Rev 3.19 Beta October 13, 2000 BN Qlogic 247 - Declare driver_template for new kernel 248 (2.4.0 and greater) scsi initialization scheme. 249 - Update /proc/scsi entry for 2.3.18 kernels and 250 above as qla1280 251 Rev 3.18 Beta October 10, 2000 BN Qlogic 252 - Changed scan order of adapters to map 253 the QLA12160 followed by the QLA1280. 254 Rev 3.17 Beta September 18, 2000 BN Qlogic 255 - Removed warnings for 32 bit 2.4.x compiles 256 - Corrected declared size for request and response 257 DMA addresses that are kept in each ha 258 Rev. 3.16 Beta August 25, 2000 BN Qlogic 259 - Corrected 64 bit addressing issue on IA-64 260 where the upper 32 bits were not properly 261 passed to the RISC engine. 262 Rev. 3.15 Beta August 22, 2000 BN Qlogic 263 - Modified qla1280_setup_chip to properly load 264 ISP firmware for greater that 4 Gig memory on IA-64 265 Rev. 
3.14 Beta August 16, 2000 BN Qlogic 266 - Added setting of dma_mask to full 64 bit 267 if flags.enable_64bit_addressing is set in NVRAM 268 Rev. 3.13 Beta August 16, 2000 BN Qlogic 269 - Use new PCI DMA mapping APIs for 2.4.x kernel 270 Rev. 3.12 July 18, 2000 Redhat & BN Qlogic 271 - Added check of pci_enable_device to detect() for 2.3.x 272 - Use pci_resource_start() instead of 273 pdev->resource[0].start in detect() for 2.3.x 274 - Updated driver version 275 Rev. 3.11 July 14, 2000 BN Qlogic 276 - Updated SCSI Firmware to following versions: 277 qla1x80: 8.13.08 278 qla1x160: 10.04.08 279 - Updated driver version to 3.11 280 Rev. 3.10 June 23, 2000 BN Qlogic 281 - Added filtering of AMI SubSys Vendor ID devices 282 Rev. 3.9 283 - DEBUG_QLA1280 undefined and new version BN Qlogic 284 Rev. 3.08b May 9, 2000 MD Dell 285 - Added logic to check against AMI subsystem vendor ID 286 Rev. 3.08 May 4, 2000 DG Qlogic 287 - Added logic to check for PCI subsystem ID. 288 Rev. 3.07 Apr 24, 2000 DG & BN Qlogic 289 - Updated SCSI Firmware to following versions: 290 qla12160: 10.01.19 291 qla1280: 8.09.00 292 Rev. 3.06 Apr 12, 2000 DG & BN Qlogic 293 - Internal revision; not released 294 Rev. 3.05 Mar 28, 2000 DG & BN Qlogic 295 - Edit correction for virt_to_bus and PROC. 296 Rev. 3.04 Mar 28, 2000 DG & BN Qlogic 297 - Merge changes from ia64 port. 298 Rev. 3.03 Mar 28, 2000 BN Qlogic 299 - Increase version to reflect new code drop with compile fix 300 of issue with inclusion of linux/spinlock for 2.3 kernels 301 Rev. 3.02 Mar 15, 2000 BN Qlogic 302 - Merge qla1280_proc_info from 2.10 code base 303 Rev. 3.01 Feb 10, 2000 BN Qlogic 304 - Corrected code to compile on a 2.2.x kernel. 305 Rev. 3.00 Jan 17, 2000 DG Qlogic 306 - Added 64-bit support. 307 Rev. 2.07 Nov 9, 1999 DG Qlogic 308 - Added new routine to set target parameters for ISP12160. 309 Rev. 2.06 Sept 10, 1999 DG Qlogic 310 - Added support for ISP12160 Ultra 3 chip. 311 Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont 312 - Modified code to remove errors generated when compiling with 313 Cygnus IA64 Compiler. 314 - Changed conversion of pointers to unsigned longs instead of integers. 315 - Changed type of I/O port variables from uint32_t to unsigned long. 316 - Modified OFFSET macro to work with 64-bit as well as 32-bit. 317 - Changed sprintf and printk format specifiers for pointers to %p. 318 - Changed some int to long type casts where needed in sprintf & printk. 319 - Added l modifiers to sprintf and printk format specifiers for longs. 320 - Removed unused local variables. 321 Rev. 1.20 June 8, 1999 DG, Qlogic 322 Changes to support RedHat release 6.0 (kernel 2.2.5). 323 - Added SCSI exclusive access lock (io_request_lock) when accessing 324 the adapter. 325 - Added changes for the new LINUX interface template. Some new error 326 handling routines have been added to the template, but for now we 327 will use the old ones. 328 - Initial Beta Release. 
329 *****************************************************************************/ 330 331 332 #include <linux/config.h> 333 #include <linux/module.h> 334 335 #include <linux/version.h> 336 #include <linux/types.h> 337 #include <linux/string.h> 338 #include <linux/errno.h> 339 #include <linux/kernel.h> 340 #include <linux/ioport.h> 341 #include <linux/delay.h> 342 #include <linux/timer.h> 343 #include <linux/sched.h> 344 #include <linux/pci.h> 345 #include <linux/proc_fs.h> 346 #include <linux/stat.h> 347 #include <linux/slab.h> 348 #include <linux/pci_ids.h> 349 #include <linux/interrupt.h> 350 #include <linux/init.h> 351 352 #include <asm/io.h> 353 #include <asm/irq.h> 354 #include <asm/byteorder.h> 355 #include <asm/processor.h> 356 #include <asm/types.h> 357 #include <asm/system.h> 358 359 #if LINUX_VERSION_CODE >= 0x020545 360 #include <scsi/scsi.h> 361 #include <scsi/scsi_cmnd.h> 362 #include <scsi/scsi_device.h> 363 #include <scsi/scsi_host.h> 364 #include <scsi/scsi_tcq.h> 365 #else 366 #include <linux/blk.h> 367 #include "scsi.h" 368 #include <scsi/scsi_host.h> 369 #include "sd.h" 370 #endif 371 372 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 373 #include <asm/sn/io.h> 374 #endif 375 376 #if LINUX_VERSION_CODE < 0x020407 377 #error "Kernels older than 2.4.7 are no longer supported" 378 #endif 379 380 381 /* 382 * Compile time Options: 383 * 0 - Disable and 1 - Enable 384 */ 385 #define DEBUG_QLA1280_INTR 0 386 #define DEBUG_PRINT_NVRAM 0 387 #define DEBUG_QLA1280 0 388 389 /* 390 * The SGI VISWS is broken and doesn't support MMIO ;-( 391 */ 392 #ifdef CONFIG_X86_VISWS 393 #define MEMORY_MAPPED_IO 0 394 #else 395 #define MEMORY_MAPPED_IO 1 396 #endif 397 398 #define UNIQUE_FW_NAME 399 #include "qla1280.h" 400 #include "ql12160_fw.h" /* ISP RISC codes */ 401 #include "ql1280_fw.h" 402 #include "ql1040_fw.h" 403 404 405 /* 406 * Missing PCI ID's 407 */ 408 #ifndef PCI_DEVICE_ID_QLOGIC_ISP1080 409 #define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080 410 #endif 411 #ifndef PCI_DEVICE_ID_QLOGIC_ISP1240 412 #define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240 413 #endif 414 #ifndef PCI_DEVICE_ID_QLOGIC_ISP1280 415 #define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280 416 #endif 417 #ifndef PCI_DEVICE_ID_QLOGIC_ISP10160 418 #define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016 419 #endif 420 #ifndef PCI_DEVICE_ID_QLOGIC_ISP12160 421 #define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216 422 #endif 423 424 #ifndef PCI_VENDOR_ID_AMI 425 #define PCI_VENDOR_ID_AMI 0x101e 426 #endif 427 428 #ifndef BITS_PER_LONG 429 #error "BITS_PER_LONG not defined!" 
#endif
#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
#define QLA_64BIT_PTR	1
#endif

#ifdef QLA_64BIT_PTR
#define pci_dma_hi32(a)			((a >> 16) >> 16)
#else
#define pci_dma_hi32(a)			0
#endif
#define pci_dma_lo32(a)			(a & 0xffffffff)

#define NVRAM_DELAY()			udelay(500)	/* 500 microseconds */

#if LINUX_VERSION_CODE < 0x020500
#define HOST_LOCK			&io_request_lock
#define irqreturn_t			void
#define IRQ_RETVAL(foo)
#define MSG_ORDERED_TAG			1

#define DMA_BIDIRECTIONAL	SCSI_DATA_UNKNOWN
#define DMA_TO_DEVICE		SCSI_DATA_WRITE
#define DMA_FROM_DEVICE		SCSI_DATA_READ
#define DMA_NONE		SCSI_DATA_NONE

#ifndef HAVE_SECTOR_T
typedef unsigned int sector_t;
#endif

static inline void
scsi_adjust_queue_depth(struct scsi_device *device, int tag, int depth)
{
	if (tag) {
		device->tagged_queue = tag;
		device->current_tag = 0;
	}
	device->queue_depth = depth;
}
static inline struct Scsi_Host *scsi_host_alloc(Scsi_Host_Template *t, size_t s)
{
	return scsi_register(t, s);
}
static inline void scsi_host_put(struct Scsi_Host *h)
{
	scsi_unregister(h);
}
#else
#define HOST_LOCK			ha->host->host_lock
#endif
#if LINUX_VERSION_CODE < 0x020600
#define DEV_SIMPLE_TAGS(device)		device->tagged_queue
/*
 * Hack around that qla1280_remove_one is called from
 * qla1280_release in 2.4
 */
#undef __devexit
#define __devexit
#else
#define DEV_SIMPLE_TAGS(device)		device->simple_tags
#endif
#if defined(__ia64__) && !defined(ia64_platform_is)
#define ia64_platform_is(foo)		(!strcmp(foo, platform_name))
#endif


#define IS_ISP1040(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
#define IS_ISP1x40(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
#define IS_ISP1x160(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)


static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
static void qla1280_remove_one(struct pci_dev *);

/*
 *  QLogic Driver Support Function Prototypes.
 */
static void qla1280_done(struct scsi_qla_host *);
#if LINUX_VERSION_CODE < 0x020545
static void qla1280_get_target_options(struct scsi_cmnd *, struct scsi_qla_host *);
#endif
static int qla1280_get_token(char *);
static int qla1280_setup(char *s) __init;

/*
 *  QLogic ISP1280 Hardware Support Function Prototypes.
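 *
 *  Most of the reset/abort helpers declared below are built on top of
 *  qla1280_mailbox_command(); actual I/O is queued to the ISP through
 *  qla1280_64bit_start_scsi() or qla1280_32bit_start_scsi(), selected
 *  at compile time by QLA_64BIT_PTR above.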
517 */ 518 static int qla1280_load_firmware(struct scsi_qla_host *); 519 static int qla1280_init_rings(struct scsi_qla_host *); 520 static int qla1280_nvram_config(struct scsi_qla_host *); 521 static int qla1280_mailbox_command(struct scsi_qla_host *, 522 uint8_t, uint16_t *); 523 static int qla1280_bus_reset(struct scsi_qla_host *, int); 524 static int qla1280_device_reset(struct scsi_qla_host *, int, int); 525 static int qla1280_abort_device(struct scsi_qla_host *, int, int, int); 526 static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); 527 static int qla1280_abort_isp(struct scsi_qla_host *); 528 #ifdef QLA_64BIT_PTR 529 static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *); 530 #else 531 static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *); 532 #endif 533 static void qla1280_nv_write(struct scsi_qla_host *, uint16_t); 534 static void qla1280_poll(struct scsi_qla_host *); 535 static void qla1280_reset_adapter(struct scsi_qla_host *); 536 static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8); 537 static void qla1280_isp_cmd(struct scsi_qla_host *); 538 static void qla1280_isr(struct scsi_qla_host *, struct list_head *); 539 static void qla1280_rst_aen(struct scsi_qla_host *); 540 static void qla1280_status_entry(struct scsi_qla_host *, struct response *, 541 struct list_head *); 542 static void qla1280_error_entry(struct scsi_qla_host *, struct response *, 543 struct list_head *); 544 static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t); 545 static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t); 546 static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *); 547 static request_t *qla1280_req_pkt(struct scsi_qla_host *); 548 static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *, 549 unsigned int); 550 static void qla1280_get_target_parameters(struct scsi_qla_host *, 551 struct scsi_device *); 552 static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int); 553 554 555 static struct qla_driver_setup driver_setup; 556 557 /* 558 * convert scsi data direction to request_t control flags 559 */ 560 static inline uint16_t 561 qla1280_data_direction(struct scsi_cmnd *cmnd) 562 { 563 switch(cmnd->sc_data_direction) { 564 case DMA_FROM_DEVICE: 565 return BIT_5; 566 case DMA_TO_DEVICE: 567 return BIT_6; 568 case DMA_BIDIRECTIONAL: 569 return BIT_5 | BIT_6; 570 /* 571 * We could BUG() on default here if one of the four cases aren't 572 * met, but then again if we receive something like that from the 573 * SCSI layer we have more serious problems. This shuts up GCC. 574 */ 575 case DMA_NONE: 576 default: 577 return 0; 578 } 579 } 580 581 #if DEBUG_QLA1280 582 static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd); 583 static void __qla1280_dump_buffer(char *, int); 584 #endif 585 586 587 /* 588 * insmod needs to find the variable and make it point to something 589 */ 590 #ifdef MODULE 591 static char *qla1280; 592 593 /* insmod qla1280 options=verbose" */ 594 module_param(qla1280, charp, 0); 595 #else 596 __setup("qla1280=", qla1280_setup); 597 #endif 598 599 600 /* 601 * We use the scsi_pointer structure that's included with each scsi_command 602 * to overlay our struct srb over it. qla1280_init() checks that a srb is not 603 * bigger than a scsi_pointer. 
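 *
 * The srb for a command is recovered from the scsi_cmnd with the
 * CMD_SP() macro defined below, e.g.:
 *
 *	struct srb *sp = (struct srb *)CMD_SP(cmd);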
604 */ 605 606 #define CMD_SP(Cmnd) &Cmnd->SCp 607 #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len 608 #define CMD_CDBP(Cmnd) Cmnd->cmnd 609 #define CMD_SNSP(Cmnd) Cmnd->sense_buffer 610 #define CMD_SNSLEN(Cmnd) sizeof(Cmnd->sense_buffer) 611 #define CMD_RESULT(Cmnd) Cmnd->result 612 #define CMD_HANDLE(Cmnd) Cmnd->host_scribble 613 #if LINUX_VERSION_CODE < 0x020545 614 #define CMD_REQUEST(Cmnd) Cmnd->request.cmd 615 #else 616 #define CMD_REQUEST(Cmnd) Cmnd->request->cmd 617 #endif 618 619 #define CMD_HOST(Cmnd) Cmnd->device->host 620 #define SCSI_BUS_32(Cmnd) Cmnd->device->channel 621 #define SCSI_TCN_32(Cmnd) Cmnd->device->id 622 #define SCSI_LUN_32(Cmnd) Cmnd->device->lun 623 624 625 /*****************************************/ 626 /* ISP Boards supported by this driver */ 627 /*****************************************/ 628 629 struct qla_boards { 630 unsigned char name[9]; /* Board ID String */ 631 int numPorts; /* Number of SCSI ports */ 632 unsigned short *fwcode; /* pointer to FW array */ 633 unsigned short *fwlen; /* number of words in array */ 634 unsigned short *fwstart; /* start address for F/W */ 635 unsigned char *fwver; /* Ptr to F/W version array */ 636 }; 637 638 /* NOTE: the last argument in each entry is used to index ql1280_board_tbl */ 639 static struct pci_device_id qla1280_pci_tbl[] = { 640 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160, 641 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 642 #ifdef CONFIG_SCSI_QLOGIC_1280_1040 643 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020, 644 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, 645 #endif 646 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080, 647 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, 648 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240, 649 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3}, 650 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280, 651 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, 652 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160, 653 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5}, 654 {0,} 655 }; 656 MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl); 657 658 static struct qla_boards ql1280_board_tbl[] = { 659 /* Name , Number of ports, FW details */ 660 {"QLA12160", 2, &fw12160i_code01[0], &fw12160i_length01, 661 &fw12160i_addr01, &fw12160i_version_str[0]}, 662 {"QLA1040", 1, &risc_code01[0], &risc_code_length01, 663 &risc_code_addr01, &firmware_version[0]}, 664 {"QLA1080", 1, &fw1280ei_code01[0], &fw1280ei_length01, 665 &fw1280ei_addr01, &fw1280ei_version_str[0]}, 666 {"QLA1240", 2, &fw1280ei_code01[0], &fw1280ei_length01, 667 &fw1280ei_addr01, &fw1280ei_version_str[0]}, 668 {"QLA1280", 2, &fw1280ei_code01[0], &fw1280ei_length01, 669 &fw1280ei_addr01, &fw1280ei_version_str[0]}, 670 {"QLA10160", 1, &fw12160i_code01[0], &fw12160i_length01, 671 &fw12160i_addr01, &fw12160i_version_str[0]}, 672 {" ", 0} 673 }; 674 675 static int qla1280_verbose = 1; 676 677 #if DEBUG_QLA1280 678 static int ql_debug_level = 1; 679 #define dprintk(level, format, a...) \ 680 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0) 681 #define qla1280_dump_buffer(level, buf, size) \ 682 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size) 683 #define qla1280_print_scsi_cmd(level, cmd) \ 684 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd) 685 #else 686 #define ql_debug_level 0 687 #define dprintk(level, format, a...) 
	do{}while(0)
#define qla1280_dump_buffer(a, b, c)	do{}while(0)
#define qla1280_print_scsi_cmd(a, b)	do{}while(0)
#endif

#define ENTER(x)		dprintk(3, "qla1280 : Entering %s()\n", x);
#define LEAVE(x)		dprintk(3, "qla1280 : Leaving %s()\n", x);
#define ENTER_INTR(x)		dprintk(4, "qla1280 : Entering %s()\n", x);
#define LEAVE_INTR(x)		dprintk(4, "qla1280 : Leaving %s()\n", x);


static int qla1280_read_nvram(struct scsi_qla_host *ha)
{
	uint16_t *wptr;
	uint8_t chksum;
	int cnt, i;
	struct nvram *nv;

	ENTER("qla1280_read_nvram");

	if (driver_setup.no_nvram)
		return 1;

	printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);

	wptr = (uint16_t *)&ha->nvram;
	nv = &ha->nvram;
	chksum = 0;
	for (cnt = 0; cnt < 3; cnt++) {
		*wptr = qla1280_get_nvram_word(ha, cnt);
		chksum += *wptr & 0xff;
		chksum += (*wptr >> 8) & 0xff;
		wptr++;
	}

	if (nv->id0 != 'I' || nv->id1 != 'S' ||
	    nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
		dprintk(2, "Invalid nvram ID or version!\n");
		chksum = 1;
	} else {
		for (; cnt < sizeof(struct nvram); cnt++) {
			*wptr = qla1280_get_nvram_word(ha, cnt);
			chksum += *wptr & 0xff;
			chksum += (*wptr >> 8) & 0xff;
			wptr++;
		}
	}

	dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
		" version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
		nv->version);


	if (chksum) {
		if (!driver_setup.no_nvram)
			printk(KERN_WARNING "scsi(%ld): Unable to identify or "
			       "validate NVRAM checksum, using default "
			       "settings\n", ha->host_no);
		ha->nvram_valid = 0;
	} else
		ha->nvram_valid = 1;

	/* The firmware interface is, um, interesting, in that the
	 * actual firmware image on the chip is little endian, thus,
	 * the process of taking that image to the CPU would end up
	 * little endian.  However, the firmware interface requires it
	 * to be read a word (two bytes) at a time.
	 *
	 * The net result of this would be that the word (and
	 * doubleword) quantities in the firmware would be correct, but
	 * the bytes would be pairwise reversed.  Since most of the
	 * firmware quantities are, in fact, bytes, we do an extra
	 * le16_to_cpu() in the firmware read routine.
	 *
	 * The upshot of all this is that the bytes in the firmware
	 * are in the correct places, but the 16 and 32 bit quantities
	 * are still in little endian format.  We fix that up below by
	 * doing extra reverses on them */
	nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
	nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
	for (i = 0; i < MAX_BUSES; i++) {
		nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
		nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
	}
	dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
	LEAVE("qla1280_read_nvram");

	return chksum;
}

/**************************************************************************
 * qla1280_info
 *   Return a string describing the driver.
 **************************************************************************/
static const char *
qla1280_info(struct Scsi_Host *host)
{
	static char qla1280_scsi_name_buffer[125];
	char *bp;
	struct scsi_qla_host *ha;
	struct qla_boards *bdp;

	bp = &qla1280_scsi_name_buffer[0];
	ha = (struct scsi_qla_host *)host->hostdata;
	bdp = &ql1280_board_tbl[ha->devnum];
	memset(bp, 0, sizeof(qla1280_scsi_name_buffer));

	sprintf(bp,
		"QLogic %s PCI to SCSI Host Adapter\n"
		"       Firmware version: %2d.%02d.%02d, Driver version %s",
		&bdp->name[0], bdp->fwver[0], bdp->fwver[1], bdp->fwver[2],
		QLA1280_VERSION);
	return bp;
}

/**************************************************************************
 * qla1280_queuecommand
 *   Queue a command to the controller.
 *
 * Note:
 * The mid-level driver tries to ensure that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (although the
 * interrupt handler may call this routine as part of request-completion
 * handling).  Unfortunately, it sometimes calls the scheduler in interrupt
 * context which is a big NO! NO!.
 **************************************************************************/
static int
qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
	struct srb *sp = (struct srb *)&cmd->SCp;
	int status;

	cmd->scsi_done = fn;
	sp->cmd = cmd;
	sp->flags = 0;

	qla1280_print_scsi_cmd(5, cmd);

#ifdef QLA_64BIT_PTR
	/*
	 * Using 64 bit commands if the PCI bridge doesn't support it is a
	 * bit wasteful, however this should really only happen if one's
	 * PCI controller is completely broken, like the BCM1250. For
	 * sane hardware this is not an issue.
	 */
	status = qla1280_64bit_start_scsi(ha, sp);
#else
	status = qla1280_32bit_start_scsi(ha, sp);
#endif
	return status;
}

enum action {
	ABORT_COMMAND,
	ABORT_DEVICE,
	DEVICE_RESET,
	BUS_RESET,
	ADAPTER_RESET,
	FAIL
};

/* timer action for error action processor */
static void qla1280_error_wait_timeout(unsigned long __data)
{
	struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
	struct srb *sp = (struct srb *)CMD_SP(cmd);

	complete(sp->wait);
}

static void qla1280_mailbox_timeout(unsigned long __data)
{
	struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
	struct device_reg __iomem *reg;
	reg = ha->iobase;

	ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
	printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
	       "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
	       RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
	complete(ha->mailbox_wait);
}

/**************************************************************************
 * qla1280_error_action
 *    The function will attempt to perform a specified error action and
 *    wait for the results (or time out).
 *
 * Input:
 *      cmd = Linux SCSI command packet of the command that caused the
 *            bus reset.
 *      action = error action to take (see enum action)
 *
 * Returns:
 *      SUCCESS or FAILED
 *
 * Note:
 *      Resetting the bus always succeeds - it has to, otherwise the
 *      kernel will panic!  Try a surgical technique - sending a BUS
 *      DEVICE RESET message - on the offending target before pulling
 *      the SCSI bus reset line.
 **************************************************************************/
static int
qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
{
	struct scsi_qla_host *ha;
	int bus, target, lun;
	struct srb *sp;
	uint16_t data;
	unsigned char *handle;
	int result, i;
	DECLARE_COMPLETION(wait);
	struct timer_list timer;

	ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);

	dprintk(4, "error_action %i, istatus 0x%04x\n", action,
		RD_REG_WORD(&ha->iobase->istatus));

	dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
		RD_REG_WORD(&ha->iobase->host_cmd),
		RD_REG_WORD(&ha->iobase->ictrl), jiffies);

	ENTER("qla1280_error_action");
	if (qla1280_verbose)
		printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
		       "Handle=0x%p, action=0x%x\n",
		       ha->host_no, cmd, CMD_HANDLE(cmd), action);

	if (cmd == NULL) {
		printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
		       "Scsi_Cmnd pointer, failing.\n");
		LEAVE("qla1280_error_action");
		return FAILED;
	}

	ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
	sp = (struct srb *)CMD_SP(cmd);
	handle = CMD_HANDLE(cmd);

	/* Check for pending interrupts. */
	data = qla1280_debounce_register(&ha->iobase->istatus);
	/*
	 * The io_request_lock is held when the reset handler is called, hence
	 * the interrupt handler cannot be running in parallel as it also
	 * grabs the lock. /Jes
	 */
	if (data & RISC_INT)
		qla1280_isr(ha, &ha->done_q);

	/*
	 * Determine the suggested action that the mid-level driver wants
	 * us to perform.
	 */
	if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) {
		if (action == ABORT_COMMAND) {
			/* we never got this command */
			printk(KERN_INFO "qla1280: Aborting a NULL handle\n");
			return SUCCESS;	/* no action - we don't have command */
		}
	} else {
		sp->wait = &wait;
	}

	bus = SCSI_BUS_32(cmd);
	target = SCSI_TCN_32(cmd);
	lun = SCSI_LUN_32(cmd);

	/* Overloading result.  Here it means the success or fail of the
	 * *issue* of the action.  When we return from the routine, it must
	 * mean the actual success or fail of the action */
	result = FAILED;
	switch (action) {
	case FAIL:
		break;

	case ABORT_COMMAND:
		if ((sp->flags & SRB_ABORT_PENDING)) {
			printk(KERN_WARNING
			       "scsi(): Command has a pending abort "
			       "message - ABORT_PENDING.\n");
			/* This should technically be impossible since we
			 * now wait for abort completion */
			break;
		}

		for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
			if (sp == ha->outstanding_cmds[i]) {
				dprintk(1, "qla1280: RISC aborting command\n");
				if (qla1280_abort_command(ha, sp, i) == 0)
					result = SUCCESS;
				else {
					/*
					 * Since we don't know what might
					 * have happened to the command, it
					 * is unsafe to remove it from the
					 * device's queue at this point.
					 * Wait and let the escalation
					 * process take care of it.
					 */
					printk(KERN_WARNING
					       "scsi(%li:%i:%i:%i): Unable"
					       " to abort command!\n",
					       ha->host_no, bus, target, lun);
				}
			}
		}
		break;

	case ABORT_DEVICE:
		if (qla1280_verbose)
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): Queueing abort device "
			       "command.\n", ha->host_no, bus, target, lun);
		if (qla1280_abort_device(ha, bus, target, lun) == 0)
			result = SUCCESS;
		break;

	case DEVICE_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): Queueing device reset "
			       "command.\n", ha->host_no, bus, target, lun);
		if (qla1280_device_reset(ha, bus, target) == 0)
			result = SUCCESS;
		break;

	case BUS_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO "qla1280(%ld:%d): Issuing BUS "
			       "DEVICE RESET\n", ha->host_no, bus);
		if (qla1280_bus_reset(ha, bus) == 0)
			result = SUCCESS;

		break;

	case ADAPTER_RESET:
	default:
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld): Issued ADAPTER RESET\n",
			       ha->host_no);
			printk(KERN_INFO "scsi(%ld): I/O processing will "
			       "continue automatically\n", ha->host_no);
		}
		ha->flags.reset_active = 1;
		/*
		 * We restarted all of the commands automatically, so the
		 * mid-level code can expect completions momentarily.
		 */
		if (qla1280_abort_isp(ha) == 0)
			result = SUCCESS;

		ha->flags.reset_active = 0;
	}

	if (!list_empty(&ha->done_q))
		qla1280_done(ha);

	/* If we didn't manage to issue the action, or we have no
	 * command to wait for, exit here */
	if (result == FAILED || handle == NULL ||
	    handle == (unsigned char *)INVALID_HANDLE) {
		/*
		 * Clear completion queue to avoid qla1280_done() trying
		 * to complete the command at a later stage after we
		 * have exited the current context
		 */
		sp->wait = NULL;
		goto leave;
	}

	/* set up a timer just in case we're really jammed */
	init_timer(&timer);
	timer.expires = jiffies + 4*HZ;
	timer.data = (unsigned long)cmd;
	timer.function = qla1280_error_wait_timeout;
	add_timer(&timer);

	/* wait for the action to complete (or the timer to expire) */
	spin_unlock_irq(HOST_LOCK);
	wait_for_completion(&wait);
	del_timer_sync(&timer);
	spin_lock_irq(HOST_LOCK);
	sp->wait = NULL;

	/* the only action we might get a fail for is abort */
	if (action == ABORT_COMMAND) {
		if (sp->flags & SRB_ABORTED)
			result = SUCCESS;
		else
			result = FAILED;
	}

 leave:
	dprintk(1, "RESET returning %d\n", result);

	LEAVE("qla1280_error_action");
	return result;
}

/**************************************************************************
 * qla1280_abort
 *    Abort the specified SCSI command(s).
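 *
 *    All four error handler entry points below (abort, device reset,
 *    bus reset, adapter reset) follow the same pattern: take the host
 *    lock and hand the command to qla1280_error_action(), which may
 *    temporarily drop the lock while it waits for the requested action
 *    to complete.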
1093 **************************************************************************/ 1094 static int 1095 qla1280_eh_abort(struct scsi_cmnd * cmd) 1096 { 1097 int rc; 1098 1099 spin_lock_irq(cmd->device->host->host_lock); 1100 rc = qla1280_error_action(cmd, ABORT_COMMAND); 1101 spin_unlock_irq(cmd->device->host->host_lock); 1102 1103 return rc; 1104 } 1105 1106 /************************************************************************** 1107 * qla1280_device_reset 1108 * Reset the specified SCSI device 1109 **************************************************************************/ 1110 static int 1111 qla1280_eh_device_reset(struct scsi_cmnd *cmd) 1112 { 1113 int rc; 1114 1115 spin_lock_irq(cmd->device->host->host_lock); 1116 rc = qla1280_error_action(cmd, DEVICE_RESET); 1117 spin_unlock_irq(cmd->device->host->host_lock); 1118 1119 return rc; 1120 } 1121 1122 /************************************************************************** 1123 * qla1280_bus_reset 1124 * Reset the specified bus. 1125 **************************************************************************/ 1126 static int 1127 qla1280_eh_bus_reset(struct scsi_cmnd *cmd) 1128 { 1129 int rc; 1130 1131 spin_lock_irq(cmd->device->host->host_lock); 1132 rc = qla1280_error_action(cmd, BUS_RESET); 1133 spin_unlock_irq(cmd->device->host->host_lock); 1134 1135 return rc; 1136 } 1137 1138 /************************************************************************** 1139 * qla1280_adapter_reset 1140 * Reset the specified adapter (both channels) 1141 **************************************************************************/ 1142 static int 1143 qla1280_eh_adapter_reset(struct scsi_cmnd *cmd) 1144 { 1145 int rc; 1146 1147 spin_lock_irq(cmd->device->host->host_lock); 1148 rc = qla1280_error_action(cmd, ADAPTER_RESET); 1149 spin_unlock_irq(cmd->device->host->host_lock); 1150 1151 return rc; 1152 } 1153 1154 static int 1155 qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev, 1156 sector_t capacity, int geom[]) 1157 { 1158 int heads, sectors, cylinders; 1159 1160 heads = 64; 1161 sectors = 32; 1162 cylinders = (unsigned long)capacity / (heads * sectors); 1163 if (cylinders > 1024) { 1164 heads = 255; 1165 sectors = 63; 1166 cylinders = (unsigned long)capacity / (heads * sectors); 1167 /* if (cylinders > 1023) 1168 cylinders = 1023; */ 1169 } 1170 1171 geom[0] = heads; 1172 geom[1] = sectors; 1173 geom[2] = cylinders; 1174 1175 return 0; 1176 } 1177 1178 #if LINUX_VERSION_CODE < 0x020600 1179 static int 1180 qla1280_detect(Scsi_Host_Template *template) 1181 { 1182 struct pci_device_id *id = &qla1280_pci_tbl[0]; 1183 struct pci_dev *pdev = NULL; 1184 int num_hosts = 0; 1185 1186 if (sizeof(struct srb) > sizeof(Scsi_Pointer)) { 1187 printk(KERN_WARNING 1188 "qla1280: struct srb too big, aborting\n"); 1189 return 0; 1190 } 1191 1192 if ((DMA_BIDIRECTIONAL != PCI_DMA_BIDIRECTIONAL) || 1193 (DMA_TO_DEVICE != PCI_DMA_TODEVICE) || 1194 (DMA_FROM_DEVICE != PCI_DMA_FROMDEVICE) || 1195 (DMA_NONE != PCI_DMA_NONE)) { 1196 printk(KERN_WARNING 1197 "qla1280: dma direction bits don't match\n"); 1198 return 0; 1199 } 1200 1201 #ifdef MODULE 1202 /* 1203 * If we are called as a module, the qla1280 pointer may not be null 1204 * and it would point to our bootup string, just like on the lilo 1205 * command line. 
 * If not NULL, then process this config string with
 * qla1280_setup
 *
 * Boot time Options
 * To add options at boot time add a line to your lilo.conf file like:
 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
 * which will result in the first four devices on the first two
 * controllers being set to a tagged queue depth of 255.
 */
	if (qla1280)
		qla1280_setup(qla1280);
#endif

	/* First Initialize QLA12160 on PCI Bus 1 Dev 2 */
	while ((pdev = pci_find_device(id->vendor, id->device, pdev))) {
		if (pdev->bus->number == 1 && PCI_SLOT(pdev->devfn) == 2) {
			if (!qla1280_probe_one(pdev, id))
				num_hosts++;
		}
	}

	pdev = NULL;
	/* Try and find each different type of adapter we support */
	for (id = &qla1280_pci_tbl[0]; id->device; id++) {
		while ((pdev = pci_find_device(id->vendor, id->device, pdev))) {
			/*
			 * skip QLA12160 already initialized on
			 * PCI Bus 1 Dev 2 since we already initialized
			 * and presented it
			 */
			if (id->device == PCI_DEVICE_ID_QLOGIC_ISP12160 &&
			    pdev->bus->number == 1 &&
			    PCI_SLOT(pdev->devfn) == 2)
				continue;

			if (!qla1280_probe_one(pdev, id))
				num_hosts++;
		}
	}

	return num_hosts;
}

/*
 * This looks a bit ugly as we could just pass down host to
 * qla1280_remove_one, but I want to keep qla1280_release purely a wrapper
 * around pci_driver::remove as used from 2.6 onwards.
 */
static int
qla1280_release(struct Scsi_Host *host)
{
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;

	qla1280_remove_one(ha->pdev);
	return 0;
}

static int
qla1280_biosparam_old(Disk * disk, kdev_t dev, int geom[])
{
	return qla1280_biosparam(disk->device, NULL, disk->capacity, geom);
}
#endif

/* disable risc and host interrupts */
static inline void
qla1280_disable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, 0);
	RD_REG_WORD(&ha->iobase->ictrl);	/* PCI Posted Write flush */
}

/* enable risc and host interrupts */
static inline void
qla1280_enable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
	RD_REG_WORD(&ha->iobase->ictrl);	/* PCI Posted Write flush */
}

/**************************************************************************
 * qla1280_intr_handler
 *   Handles the H/W interrupt
 **************************************************************************/
static irqreturn_t
qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct scsi_qla_host *ha;
	struct device_reg __iomem *reg;
	u16 data;
	int handled = 0;

	ENTER_INTR("qla1280_intr_handler");
	ha = (struct scsi_qla_host *)dev_id;

	spin_lock(HOST_LOCK);

	ha->isr_count++;
	reg = ha->iobase;

	qla1280_disable_intrs(ha);

	data = qla1280_debounce_register(&reg->istatus);
	/* Check for pending interrupts. */
	if (data & RISC_INT) {
		qla1280_isr(ha, &ha->done_q);
		handled = 1;
	}
	if (!list_empty(&ha->done_q))
		qla1280_done(ha);

	spin_unlock(HOST_LOCK);

	qla1280_enable_intrs(ha);

	LEAVE_INTR("qla1280_intr_handler");
	return IRQ_RETVAL(handled);
}


static int
qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
{
	uint8_t mr;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct nvram *nv;
	int status, lun;

	nv = &ha->nvram;

	mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;

	/* Set Target Parameters. */
	mb[0] = MBC_SET_TARGET_PARAMETERS;
	mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
	mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
	mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
	mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
	mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
	mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
	mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;

	if (IS_ISP1x160(ha)) {
		mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
		mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
		mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
			 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
		mr |= BIT_6;
	} else {
		mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
	}
	mb[3] |= nv->bus[bus].target[target].sync_period;

	status = qla1280_mailbox_command(ha, mr, mb);

	/* Set Device Queue Parameters. */
	for (lun = 0; lun < MAX_LUNS; lun++) {
		mb[0] = MBC_SET_DEVICE_QUEUE;
		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
		mb[1] |= lun;
		mb[2] = nv->bus[bus].max_queue_depth;
		mb[3] = nv->bus[bus].target[target].execution_throttle;
		status |= qla1280_mailbox_command(ha, 0x0f, mb);
	}

	if (status)
		printk(KERN_WARNING "scsi(%ld:%i:%i): "
		       "qla1280_set_target_parameters() failed\n",
		       ha->host_no, bus, target);
	return status;
}


/**************************************************************************
 * qla1280_slave_configure
 *
 * Description:
 *   Determines the queue depth for a given device.  Devices that
 *   support tagged queueing (and have it enabled in the per-bus NVRAM
 *   settings) are given the bus' queue-depth high-water mark; all
 *   other devices fall back to a small untagged default depth.
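 *
 *   The NVRAM/inquiry derived transfer settings can also be vetoed here
 *   by the boot/module options recorded in driver_setup (no_sync,
 *   no_wide, no_ppr and the corresponding bit masks); e.g. with
 *   sync_mask set to 0x000f only targets 0-3 keep synchronous
 *   transfers enabled.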
1390 **************************************************************************/ 1391 static int 1392 qla1280_slave_configure(struct scsi_device *device) 1393 { 1394 struct scsi_qla_host *ha; 1395 int default_depth = 3; 1396 int bus = device->channel; 1397 int target = device->id; 1398 int status = 0; 1399 struct nvram *nv; 1400 unsigned long flags; 1401 1402 ha = (struct scsi_qla_host *)device->host->hostdata; 1403 nv = &ha->nvram; 1404 1405 if (qla1280_check_for_dead_scsi_bus(ha, bus)) 1406 return 1; 1407 1408 if (device->tagged_supported && 1409 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) { 1410 scsi_adjust_queue_depth(device, MSG_ORDERED_TAG, 1411 ha->bus_settings[bus].hiwat); 1412 } else { 1413 scsi_adjust_queue_depth(device, 0, default_depth); 1414 } 1415 1416 #if LINUX_VERSION_CODE > 0x020500 1417 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr; 1418 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr; 1419 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr; 1420 #endif 1421 1422 if (driver_setup.no_sync || 1423 (driver_setup.sync_mask && 1424 (~driver_setup.sync_mask & (1 << target)))) 1425 nv->bus[bus].target[target].parameter.enable_sync = 0; 1426 if (driver_setup.no_wide || 1427 (driver_setup.wide_mask && 1428 (~driver_setup.wide_mask & (1 << target)))) 1429 nv->bus[bus].target[target].parameter.enable_wide = 0; 1430 if (IS_ISP1x160(ha)) { 1431 if (driver_setup.no_ppr || 1432 (driver_setup.ppr_mask && 1433 (~driver_setup.ppr_mask & (1 << target)))) 1434 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0; 1435 } 1436 1437 spin_lock_irqsave(HOST_LOCK, flags); 1438 if (nv->bus[bus].target[target].parameter.enable_sync) 1439 status = qla1280_set_target_parameters(ha, bus, target); 1440 qla1280_get_target_parameters(ha, device); 1441 spin_unlock_irqrestore(HOST_LOCK, flags); 1442 return status; 1443 } 1444 1445 #if LINUX_VERSION_CODE < 0x020545 1446 /************************************************************************** 1447 * qla1280_select_queue_depth 1448 * 1449 * Sets the queue depth for each SCSI device hanging off the input 1450 * host adapter. We use a queue depth of 2 for devices that do not 1451 * support tagged queueing. 1452 **************************************************************************/ 1453 static void 1454 qla1280_select_queue_depth(struct Scsi_Host *host, struct scsi_device *sdev_q) 1455 { 1456 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; 1457 struct scsi_device *sdev; 1458 1459 ENTER("qla1280_select_queue_depth"); 1460 for (sdev = sdev_q; sdev; sdev = sdev->next) 1461 if (sdev->host == host) 1462 qla1280_slave_configure(sdev); 1463 1464 if (sdev_q) 1465 qla1280_check_for_dead_scsi_bus(ha, sdev_q->channel); 1466 LEAVE("qla1280_select_queue_depth"); 1467 } 1468 #endif 1469 1470 /* 1471 * qla1280_done 1472 * Process completed commands. 1473 * 1474 * Input: 1475 * ha = adapter block pointer. 1476 */ 1477 static void 1478 qla1280_done(struct scsi_qla_host *ha) 1479 { 1480 struct srb *sp; 1481 struct list_head *done_q; 1482 int bus, target, lun; 1483 struct scsi_cmnd *cmd; 1484 1485 ENTER("qla1280_done"); 1486 1487 done_q = &ha->done_q; 1488 1489 while (!list_empty(done_q)) { 1490 sp = list_entry(done_q->next, struct srb, list); 1491 1492 list_del(&sp->list); 1493 1494 cmd = sp->cmd; 1495 bus = SCSI_BUS_32(cmd); 1496 target = SCSI_TCN_32(cmd); 1497 lun = SCSI_LUN_32(cmd); 1498 1499 switch ((CMD_RESULT(cmd) >> 16)) { 1500 case DID_RESET: 1501 /* Issue marker command. 
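			 * A marker IOCB (MK_SYNC_ID here) essentially tells
			 * the RISC firmware that the reset for this target
			 * has been handled, so that it may resume accepting
			 * commands for it.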
			 */
			qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
			break;
		case DID_ABORT:
			sp->flags &= ~SRB_ABORT_PENDING;
			sp->flags |= SRB_ABORTED;
			if (sp->flags & SRB_TIMEOUT)
				CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
			break;
		default:
			break;
		}

		/* Release memory used for this I/O */
		if (cmd->use_sg) {
			pci_unmap_sg(ha->pdev, cmd->request_buffer,
					cmd->use_sg, cmd->sc_data_direction);
		} else if (cmd->request_bufflen) {
			pci_unmap_single(ha->pdev, sp->saved_dma_handle,
					 cmd->request_bufflen,
					 cmd->sc_data_direction);
		}

		/* Call the mid-level driver interrupt handler */
		CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
		ha->actthreads--;

#if LINUX_VERSION_CODE < 0x020500
		if (cmd->cmnd[0] == INQUIRY)
			qla1280_get_target_options(cmd, ha);
#endif
		(*(cmd)->scsi_done)(cmd);

		if (sp->wait != NULL)
			complete(sp->wait);
	}
	LEAVE("qla1280_done");
}

/*
 * Translates an ISP error to a Linux SCSI error
 */
static int
qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
{
	int host_status = DID_ERROR;
	uint16_t comp_status = le16_to_cpu(sts->comp_status);
	uint16_t state_flags = le16_to_cpu(sts->state_flags);
	uint32_t residual_length = le32_to_cpu(sts->residual_length);
	uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
#if DEBUG_QLA1280_INTR
	static char *reason[] = {
		"DID_OK",
		"DID_NO_CONNECT",
		"DID_BUS_BUSY",
		"DID_TIME_OUT",
		"DID_BAD_TARGET",
		"DID_ABORT",
		"DID_PARITY",
		"DID_ERROR",
		"DID_RESET",
		"DID_BAD_INTR"
	};
#endif				/* DEBUG_QLA1280_INTR */

	ENTER("qla1280_return_status");

#if DEBUG_QLA1280_INTR
	/*
	dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
		comp_status);
	*/
#endif

	switch (comp_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;

	case CS_INCOMPLETE:
		if (!(state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;

	case CS_RESET:
		host_status = DID_RESET;
		break;

	case CS_ABORTED:
		host_status = DID_ABORT;
		break;

	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;

	case CS_DATA_OVERRUN:
		dprintk(2, "Data overrun 0x%x\n", residual_length);
		dprintk(2, "qla1280_return_status: response packet data\n");
		qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
		host_status = DID_ERROR;
		break;

	case CS_DATA_UNDERRUN:
		if ((cp->request_bufflen - residual_length) <
		    cp->underflow) {
			printk(KERN_WARNING
			       "scsi: Underflow detected - retrying "
			       "command.\n");
			host_status = DID_ERROR;
		} else
			host_status = DID_OK;
		break;

	default:
		host_status = DID_ERROR;
		break;
	}

#if DEBUG_QLA1280_INTR
	dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
		reason[host_status], scsi_status);
#endif

	LEAVE("qla1280_return_status");
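
	/*
	 * The midlayer expects the SCSI status byte in the low byte of the
	 * result and the host (DID_*) code in bits 16-23; e.g. a CHECK
	 * CONDITION (0x02) completed with DID_OK comes back as 0x00000002,
	 * while a selection timeout is reported as DID_TIME_OUT << 16 =
	 * 0x00030000.
	 */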
1636 1637 return (scsi_status & 0xff) | (host_status << 16); 1638 } 1639 1640 /****************************************************************************/ 1641 /* QLogic ISP1280 Hardware Support Functions. */ 1642 /****************************************************************************/ 1643 1644 /* 1645 * qla1280_initialize_adapter 1646 * Initialize board. 1647 * 1648 * Input: 1649 * ha = adapter block pointer. 1650 * 1651 * Returns: 1652 * 0 = success 1653 */ 1654 static int __devinit 1655 qla1280_initialize_adapter(struct scsi_qla_host *ha) 1656 { 1657 struct device_reg __iomem *reg; 1658 int status; 1659 int bus; 1660 #if LINUX_VERSION_CODE > 0x020500 1661 unsigned long flags; 1662 #endif 1663 1664 ENTER("qla1280_initialize_adapter"); 1665 1666 /* Clear adapter flags. */ 1667 ha->flags.online = 0; 1668 ha->flags.disable_host_adapter = 0; 1669 ha->flags.reset_active = 0; 1670 ha->flags.abort_isp_active = 0; 1671 1672 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 1673 if (ia64_platform_is("sn2")) { 1674 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA " 1675 "dual channel lockup workaround\n", ha->host_no); 1676 ha->flags.use_pci_vchannel = 1; 1677 driver_setup.no_nvram = 1; 1678 } 1679 #endif 1680 1681 /* TODO: implement support for the 1040 nvram format */ 1682 if (IS_ISP1040(ha)) 1683 driver_setup.no_nvram = 1; 1684 1685 dprintk(1, "Configure PCI space for adapter...\n"); 1686 1687 reg = ha->iobase; 1688 1689 /* Insure mailbox registers are free. */ 1690 WRT_REG_WORD(®->semaphore, 0); 1691 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 1692 WRT_REG_WORD(®->host_cmd, HC_CLR_HOST_INT); 1693 RD_REG_WORD(®->host_cmd); 1694 1695 if (qla1280_read_nvram(ha)) { 1696 dprintk(2, "qla1280_initialize_adapter: failed to read " 1697 "NVRAM\n"); 1698 } 1699 1700 #if LINUX_VERSION_CODE >= 0x020500 1701 /* 1702 * It's necessary to grab the spin here as qla1280_mailbox_command 1703 * needs to be able to drop the lock unconditionally to wait 1704 * for completion. 1705 * In 2.4 ->detect is called with the io_request_lock held. 1706 */ 1707 spin_lock_irqsave(HOST_LOCK, flags); 1708 #endif 1709 1710 status = qla1280_load_firmware(ha); 1711 if (status) { 1712 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n", 1713 ha->host_no); 1714 goto out; 1715 } 1716 1717 /* Setup adapter based on NVRAM parameters. */ 1718 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no); 1719 qla1280_nvram_config(ha); 1720 1721 if (ha->flags.disable_host_adapter) { 1722 status = 1; 1723 goto out; 1724 } 1725 1726 status = qla1280_init_rings(ha); 1727 if (status) 1728 goto out; 1729 1730 /* Issue SCSI reset, if we can't reset twice then bus is dead */ 1731 for (bus = 0; bus < ha->ports; bus++) { 1732 if (!ha->bus_settings[bus].disable_scsi_reset && 1733 qla1280_bus_reset(ha, bus) && 1734 qla1280_bus_reset(ha, bus)) 1735 ha->bus_settings[bus].scsi_bus_dead = 1; 1736 } 1737 1738 ha->flags.online = 1; 1739 out: 1740 #if LINUX_VERSION_CODE >= 0x020500 1741 spin_unlock_irqrestore(HOST_LOCK, flags); 1742 #endif 1743 if (status) 1744 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n"); 1745 1746 LEAVE("qla1280_initialize_adapter"); 1747 return status; 1748 } 1749 1750 /* 1751 * Chip diagnostics 1752 * Test chip for proper operation. 1753 * 1754 * Input: 1755 * ha = adapter block pointer. 1756 * 1757 * Returns: 1758 * 0 = success. 
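 *
 * The chip is soft reset, the RISC is reset and released with the
 * BIOS disabled, the product ID words in mailboxes 1-4 are verified,
 * and a mailbox register wrap test is run to check the host/RISC
 * mailbox path.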
1759 */ 1760 static int 1761 qla1280_chip_diag(struct scsi_qla_host *ha) 1762 { 1763 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1764 struct device_reg __iomem *reg = ha->iobase; 1765 int status = 0; 1766 int cnt; 1767 uint16_t data; 1768 dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", ®->id_l); 1769 1770 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no); 1771 1772 /* Soft reset chip and wait for it to finish. */ 1773 WRT_REG_WORD(®->ictrl, ISP_RESET); 1774 1775 /* 1776 * We can't do a traditional PCI write flush here by reading 1777 * back the register. The card will not respond once the reset 1778 * is in action and we end up with a machine check exception 1779 * instead. Nothing to do but wait and hope for the best. 1780 * A portable pci_write_flush(pdev) call would be very useful here. 1781 */ 1782 udelay(20); 1783 data = qla1280_debounce_register(®->ictrl); 1784 /* 1785 * Yet another QLogic gem ;-( 1786 */ 1787 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) { 1788 udelay(5); 1789 data = RD_REG_WORD(®->ictrl); 1790 } 1791 1792 if (!cnt) 1793 goto fail; 1794 1795 /* Reset register cleared by chip reset. */ 1796 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n"); 1797 1798 WRT_REG_WORD(®->cfg_1, 0); 1799 1800 /* Reset RISC and disable BIOS which 1801 allows RISC to execute out of RAM. */ 1802 WRT_REG_WORD(®->host_cmd, HC_RESET_RISC | 1803 HC_RELEASE_RISC | HC_DISABLE_BIOS); 1804 1805 RD_REG_WORD(®->id_l); /* Flush PCI write */ 1806 data = qla1280_debounce_register(®->mailbox0); 1807 1808 /* 1809 * I *LOVE* this code! 1810 */ 1811 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) { 1812 udelay(5); 1813 data = RD_REG_WORD(®->mailbox0); 1814 } 1815 1816 if (!cnt) 1817 goto fail; 1818 1819 /* Check product ID of chip */ 1820 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n"); 1821 1822 if (RD_REG_WORD(®->mailbox1) != PROD_ID_1 || 1823 (RD_REG_WORD(®->mailbox2) != PROD_ID_2 && 1824 RD_REG_WORD(®->mailbox2) != PROD_ID_2a) || 1825 RD_REG_WORD(®->mailbox3) != PROD_ID_3 || 1826 RD_REG_WORD(®->mailbox4) != PROD_ID_4) { 1827 printk(KERN_INFO "qla1280: Wrong product ID = " 1828 "0x%x,0x%x,0x%x,0x%x\n", 1829 RD_REG_WORD(®->mailbox1), 1830 RD_REG_WORD(®->mailbox2), 1831 RD_REG_WORD(®->mailbox3), 1832 RD_REG_WORD(®->mailbox4)); 1833 goto fail; 1834 } 1835 1836 /* 1837 * Enable ints early!!! 1838 */ 1839 qla1280_enable_intrs(ha); 1840 1841 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n"); 1842 /* Wrap Incoming Mailboxes Test. */ 1843 mb[0] = MBC_MAILBOX_REGISTER_TEST; 1844 mb[1] = 0xAAAA; 1845 mb[2] = 0x5555; 1846 mb[3] = 0xAA55; 1847 mb[4] = 0x55AA; 1848 mb[5] = 0xA5A5; 1849 mb[6] = 0x5A5A; 1850 mb[7] = 0x2525; 1851 1852 status = qla1280_mailbox_command(ha, 0xff, mb); 1853 if (status) 1854 goto fail; 1855 1856 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 || 1857 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A || 1858 mb[7] != 0x2525) { 1859 printk(KERN_INFO "qla1280: Failed mbox check\n"); 1860 goto fail; 1861 } 1862 1863 dprintk(3, "qla1280_chip_diag: exiting normally\n"); 1864 return 0; 1865 fail: 1866 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n"); 1867 return status; 1868 } 1869 1870 static int 1871 qla1280_load_firmware_pio(struct scsi_qla_host *ha) 1872 { 1873 uint16_t risc_address, *risc_code_address, risc_code_size; 1874 uint16_t mb[MAILBOX_REGISTER_COUNT], i; 1875 int err; 1876 1877 /* Load RISC code. 
*/ 1878 risc_address = *ql1280_board_tbl[ha->devnum].fwstart; 1879 risc_code_address = ql1280_board_tbl[ha->devnum].fwcode; 1880 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen; 1881 1882 for (i = 0; i < risc_code_size; i++) { 1883 mb[0] = MBC_WRITE_RAM_WORD; 1884 mb[1] = risc_address + i; 1885 mb[2] = risc_code_address[i]; 1886 1887 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb); 1888 if (err) { 1889 printk(KERN_ERR "scsi(%li): Failed to load firmware\n", 1890 ha->host_no); 1891 return err; 1892 } 1893 } 1894 1895 return 0; 1896 } 1897 1898 #define DUMP_IT_BACK 0 /* for debug of RISC loading */ 1899 static int 1900 qla1280_load_firmware_dma(struct scsi_qla_host *ha) 1901 { 1902 uint16_t risc_address, *risc_code_address, risc_code_size; 1903 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt; 1904 int err = 0, num, i; 1905 #if DUMP_IT_BACK 1906 uint8_t *sp, *tbuf; 1907 dma_addr_t p_tbuf; 1908 1909 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf); 1910 if (!tbuf) 1911 return -ENOMEM; 1912 #endif 1913 1914 /* Load RISC code. */ 1915 risc_address = *ql1280_board_tbl[ha->devnum].fwstart; 1916 risc_code_address = ql1280_board_tbl[ha->devnum].fwcode; 1917 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen; 1918 1919 dprintk(1, "%s: DMA RISC code (%i) words\n", 1920 __FUNCTION__, risc_code_size); 1921 1922 num = 0; 1923 while (risc_code_size > 0) { 1924 int warn __attribute__((unused)) = 0; 1925 1926 cnt = 2000 >> 1; 1927 1928 if (cnt > risc_code_size) 1929 cnt = risc_code_size; 1930 1931 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p)," 1932 "%d,%d(0x%x)\n", 1933 risc_code_address, cnt, num, risc_address); 1934 for(i = 0; i < cnt; i++) 1935 ((__le16 *)ha->request_ring)[i] = 1936 cpu_to_le16(risc_code_address[i]); 1937 1938 mb[0] = MBC_LOAD_RAM; 1939 mb[1] = risc_address; 1940 mb[4] = cnt; 1941 mb[3] = ha->request_dma & 0xffff; 1942 mb[2] = (ha->request_dma >> 16) & 0xffff; 1943 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1944 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1945 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n", 1946 __FUNCTION__, mb[0], 1947 (void *)(long)ha->request_dma, 1948 mb[6], mb[7], mb[2], mb[3]); 1949 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1950 BIT_1 | BIT_0, mb); 1951 if (err) { 1952 printk(KERN_ERR "scsi(%li): Failed to load partial " 1953 "segment of f\n", ha->host_no); 1954 goto out; 1955 } 1956 1957 #if DUMP_IT_BACK 1958 mb[0] = MBC_DUMP_RAM; 1959 mb[1] = risc_address; 1960 mb[4] = cnt; 1961 mb[3] = p_tbuf & 0xffff; 1962 mb[2] = (p_tbuf >> 16) & 0xffff; 1963 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff; 1964 mb[6] = pci_dma_hi32(p_tbuf) >> 16; 1965 1966 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1967 BIT_1 | BIT_0, mb); 1968 if (err) { 1969 printk(KERN_ERR 1970 "Failed to dump partial segment of f/w\n"); 1971 goto out; 1972 } 1973 sp = (uint8_t *)ha->request_ring; 1974 for (i = 0; i < (cnt << 1); i++) { 1975 if (tbuf[i] != sp[i] && warn++ < 10) { 1976 printk(KERN_ERR "%s: FW compare error @ " 1977 "byte(0x%x) loop#=%x\n", 1978 __FUNCTION__, i, num); 1979 printk(KERN_ERR "%s: FWbyte=%x " 1980 "FWfromChip=%x\n", 1981 __FUNCTION__, sp[i], tbuf[i]); 1982 /*break; */ 1983 } 1984 } 1985 #endif 1986 risc_address += cnt; 1987 risc_code_size = risc_code_size - cnt; 1988 risc_code_address = risc_code_address + cnt; 1989 num++; 1990 } 1991 1992 out: 1993 #if DUMP_IT_BACK 1994 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf); 1995 #endif 1996 return err; 1997 } 1998 1999 static int 2000 qla1280_start_firmware(struct 
scsi_qla_host *ha) 2001 { 2002 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2003 int err; 2004 2005 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n", 2006 __FUNCTION__); 2007 2008 /* Verify checksum of loaded RISC code. */ 2009 mb[0] = MBC_VERIFY_CHECKSUM; 2010 /* mb[1] = ql12_risc_code_addr01; */ 2011 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart; 2012 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2013 if (err) { 2014 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no); 2015 return err; 2016 } 2017 2018 /* Start firmware execution. */ 2019 dprintk(1, "%s: start firmware running.\n", __FUNCTION__); 2020 mb[0] = MBC_EXECUTE_FIRMWARE; 2021 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart; 2022 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2023 if (err) { 2024 printk(KERN_ERR "scsi(%li): Failed to start firmware\n", 2025 ha->host_no); 2026 } 2027 2028 return err; 2029 } 2030 2031 static int 2032 qla1280_load_firmware(struct scsi_qla_host *ha) 2033 { 2034 int err; 2035 2036 err = qla1280_chip_diag(ha); 2037 if (err) 2038 goto out; 2039 if (IS_ISP1040(ha)) 2040 err = qla1280_load_firmware_pio(ha); 2041 else 2042 err = qla1280_load_firmware_dma(ha); 2043 if (err) 2044 goto out; 2045 err = qla1280_start_firmware(ha); 2046 out: 2047 return err; 2048 } 2049 2050 /* 2051 * Initialize rings 2052 * 2053 * Input: 2054 * ha = adapter block pointer. 2055 * ha->request_ring = request ring virtual address 2056 * ha->response_ring = response ring virtual address 2057 * ha->request_dma = request ring physical address 2058 * ha->response_dma = response ring physical address 2059 * 2060 * Returns: 2061 * 0 = success. 2062 */ 2063 static int 2064 qla1280_init_rings(struct scsi_qla_host *ha) 2065 { 2066 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2067 int status = 0; 2068 2069 ENTER("qla1280_init_rings"); 2070 2071 /* Clear outstanding commands array. */ 2072 memset(ha->outstanding_cmds, 0, 2073 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS); 2074 2075 /* Initialize request queue. */ 2076 ha->request_ring_ptr = ha->request_ring; 2077 ha->req_ring_index = 0; 2078 ha->req_q_cnt = REQUEST_ENTRY_CNT; 2079 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */ 2080 mb[0] = MBC_INIT_REQUEST_QUEUE_A64; 2081 mb[1] = REQUEST_ENTRY_CNT; 2082 mb[3] = ha->request_dma & 0xffff; 2083 mb[2] = (ha->request_dma >> 16) & 0xffff; 2084 mb[4] = 0; 2085 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 2086 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 2087 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 | 2088 BIT_3 | BIT_2 | BIT_1 | BIT_0, 2089 &mb[0]))) { 2090 /* Initialize response queue. 
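 * As with the request queue above, the A64 form of the init command
 * is used: mb[2]/mb[3] carry the low 32 bits of the ring's DMA
 * address and mb[6]/mb[7] the high 32 bits.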
*/ 2091 ha->response_ring_ptr = ha->response_ring; 2092 ha->rsp_ring_index = 0; 2093 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */ 2094 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64; 2095 mb[1] = RESPONSE_ENTRY_CNT; 2096 mb[3] = ha->response_dma & 0xffff; 2097 mb[2] = (ha->response_dma >> 16) & 0xffff; 2098 mb[5] = 0; 2099 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff; 2100 mb[6] = pci_dma_hi32(ha->response_dma) >> 16; 2101 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 | 2102 BIT_3 | BIT_2 | BIT_1 | BIT_0, 2103 &mb[0]); 2104 } 2105 2106 if (status) 2107 dprintk(2, "qla1280_init_rings: **** FAILED ****\n"); 2108 2109 LEAVE("qla1280_init_rings"); 2110 return status; 2111 } 2112 2113 static void 2114 qla1280_print_settings(struct nvram *nv) 2115 { 2116 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n", 2117 nv->bus[0].config_1.initiator_id); 2118 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n", 2119 nv->bus[1].config_1.initiator_id); 2120 2121 dprintk(1, "qla1280 : bus reset delay[0]=%d\n", 2122 nv->bus[0].bus_reset_delay); 2123 dprintk(1, "qla1280 : bus reset delay[1]=%d\n", 2124 nv->bus[1].bus_reset_delay); 2125 2126 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count); 2127 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay); 2128 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count); 2129 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay); 2130 2131 dprintk(1, "qla1280 : async data setup time[0]=%d\n", 2132 nv->bus[0].config_2.async_data_setup_time); 2133 dprintk(1, "qla1280 : async data setup time[1]=%d\n", 2134 nv->bus[1].config_2.async_data_setup_time); 2135 2136 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n", 2137 nv->bus[0].config_2.req_ack_active_negation); 2138 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n", 2139 nv->bus[1].config_2.req_ack_active_negation); 2140 2141 dprintk(1, "qla1280 : data line active negation[0]=%d\n", 2142 nv->bus[0].config_2.data_line_active_negation); 2143 dprintk(1, "qla1280 : data line active negation[1]=%d\n", 2144 nv->bus[1].config_2.data_line_active_negation); 2145 2146 dprintk(1, "qla1280 : disable loading risc code=%d\n", 2147 nv->cntr_flags_1.disable_loading_risc_code); 2148 2149 dprintk(1, "qla1280 : enable 64bit addressing=%d\n", 2150 nv->cntr_flags_1.enable_64bit_addressing); 2151 2152 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n", 2153 nv->bus[0].selection_timeout); 2154 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n", 2155 nv->bus[1].selection_timeout); 2156 2157 dprintk(1, "qla1280 : max queue depth[0]=%d\n", 2158 nv->bus[0].max_queue_depth); 2159 dprintk(1, "qla1280 : max queue depth[1]=%d\n", 2160 nv->bus[1].max_queue_depth); 2161 } 2162 2163 static void 2164 qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target) 2165 { 2166 struct nvram *nv = &ha->nvram; 2167 2168 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1; 2169 nv->bus[bus].target[target].parameter.auto_request_sense = 1; 2170 nv->bus[bus].target[target].parameter.tag_queuing = 1; 2171 nv->bus[bus].target[target].parameter.enable_sync = 1; 2172 #if 1 /* Some SCSI Processors do not seem to like this */ 2173 nv->bus[bus].target[target].parameter.enable_wide = 1; 2174 #endif 2175 nv->bus[bus].target[target].execution_throttle = 2176 nv->bus[bus].max_queue_depth - 1; 2177 nv->bus[bus].target[target].parameter.parity_checking = 1; 2178 nv->bus[bus].target[target].parameter.disconnect_allowed = 1; 2179 2180 if (IS_ISP1x160(ha)) { 2181 
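		/*
		 * Ultra3 (1x160) boards default to wide PPR negotiation with
		 * the faster sync period/offset set below, while the 1x80
		 * parts fall back to plain Ultra2 wide settings.
		 */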
nv->bus[bus].target[target].flags.flags1x160.device_enable = 1; 2182 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e; 2183 nv->bus[bus].target[target].sync_period = 9; 2184 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1; 2185 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2; 2186 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1; 2187 } else { 2188 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1; 2189 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12; 2190 nv->bus[bus].target[target].sync_period = 10; 2191 } 2192 } 2193 2194 static void 2195 qla1280_set_defaults(struct scsi_qla_host *ha) 2196 { 2197 struct nvram *nv = &ha->nvram; 2198 int bus, target; 2199 2200 dprintk(1, "Using defaults for NVRAM: \n"); 2201 memset(nv, 0, sizeof(struct nvram)); 2202 2203 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */ 2204 nv->firmware_feature.f.enable_fast_posting = 1; 2205 nv->firmware_feature.f.disable_synchronous_backoff = 1; 2206 nv->termination.scsi_bus_0_control = 3; 2207 nv->termination.scsi_bus_1_control = 3; 2208 nv->termination.auto_term_support = 1; 2209 2210 /* 2211 * Set default FIFO magic - What appropriate values would be here 2212 * is unknown. This is what I have found testing with 12160s. 2213 * 2214 * Now, I would love the magic decoder ring for this one, the 2215 * header file provided by QLogic seems to be bogus or incomplete 2216 * at best. 2217 */ 2218 nv->isp_config.burst_enable = 1; 2219 if (IS_ISP1040(ha)) 2220 nv->isp_config.fifo_threshold |= 3; 2221 else 2222 nv->isp_config.fifo_threshold |= 4; 2223 2224 if (IS_ISP1x160(ha)) 2225 nv->isp_parameter = 0x01; /* fast memory enable */ 2226 2227 for (bus = 0; bus < MAX_BUSES; bus++) { 2228 nv->bus[bus].config_1.initiator_id = 7; 2229 nv->bus[bus].config_2.req_ack_active_negation = 1; 2230 nv->bus[bus].config_2.data_line_active_negation = 1; 2231 nv->bus[bus].selection_timeout = 250; 2232 nv->bus[bus].max_queue_depth = 256; 2233 2234 if (IS_ISP1040(ha)) { 2235 nv->bus[bus].bus_reset_delay = 3; 2236 nv->bus[bus].config_2.async_data_setup_time = 6; 2237 nv->bus[bus].retry_delay = 1; 2238 } else { 2239 nv->bus[bus].bus_reset_delay = 5; 2240 nv->bus[bus].config_2.async_data_setup_time = 8; 2241 } 2242 2243 for (target = 0; target < MAX_TARGETS; target++) 2244 qla1280_set_target_defaults(ha, bus, target); 2245 } 2246 } 2247 2248 static int 2249 qla1280_config_target(struct scsi_qla_host *ha, int bus, int target) 2250 { 2251 struct nvram *nv = &ha->nvram; 2252 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2253 int status, lun; 2254 uint16_t flag; 2255 2256 /* Set Target Parameters. */ 2257 mb[0] = MBC_SET_TARGET_PARAMETERS; 2258 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); 2259 2260 /* 2261 * Do not enable sync and ppr for the initial INQUIRY run. We 2262 * enable this later if we determine the target actually 2263 * supports it. 2264 */ 2265 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE 2266 | TP_WIDE | TP_PARITY | TP_DISCONNECT); 2267 2268 if (IS_ISP1x160(ha)) 2269 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8; 2270 else 2271 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8; 2272 mb[3] |= nv->bus[bus].target[target].sync_period; 2273 status = qla1280_mailbox_command(ha, 0x0f, mb); 2274 2275 /* Save Tag queuing enable flag. 
*/ 2276 flag = (BIT_0 << target) & mb[0]; 2277 if (nv->bus[bus].target[target].parameter.tag_queuing) 2278 ha->bus_settings[bus].qtag_enables |= flag; 2279 2280 /* Save Device enable flag. */ 2281 if (IS_ISP1x160(ha)) { 2282 if (nv->bus[bus].target[target].flags.flags1x160.device_enable) 2283 ha->bus_settings[bus].device_enables |= flag; 2284 ha->bus_settings[bus].lun_disables |= 0; 2285 } else { 2286 if (nv->bus[bus].target[target].flags.flags1x80.device_enable) 2287 ha->bus_settings[bus].device_enables |= flag; 2288 /* Save LUN disable flag. */ 2289 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable) 2290 ha->bus_settings[bus].lun_disables |= flag; 2291 } 2292 2293 /* Set Device Queue Parameters. */ 2294 for (lun = 0; lun < MAX_LUNS; lun++) { 2295 mb[0] = MBC_SET_DEVICE_QUEUE; 2296 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); 2297 mb[1] |= lun; 2298 mb[2] = nv->bus[bus].max_queue_depth; 2299 mb[3] = nv->bus[bus].target[target].execution_throttle; 2300 status |= qla1280_mailbox_command(ha, 0x0f, mb); 2301 } 2302 2303 return status; 2304 } 2305 2306 static int 2307 qla1280_config_bus(struct scsi_qla_host *ha, int bus) 2308 { 2309 struct nvram *nv = &ha->nvram; 2310 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2311 int target, status; 2312 2313 /* SCSI Reset Disable. */ 2314 ha->bus_settings[bus].disable_scsi_reset = 2315 nv->bus[bus].config_1.scsi_reset_disable; 2316 2317 /* Initiator ID. */ 2318 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id; 2319 mb[0] = MBC_SET_INITIATOR_ID; 2320 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 : 2321 ha->bus_settings[bus].id; 2322 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2323 2324 /* Reset Delay. */ 2325 ha->bus_settings[bus].bus_reset_delay = 2326 nv->bus[bus].bus_reset_delay; 2327 2328 /* Command queue depth per device. */ 2329 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1; 2330 2331 /* Set target parameters. */ 2332 for (target = 0; target < MAX_TARGETS; target++) 2333 status |= qla1280_config_target(ha, bus, target); 2334 2335 return status; 2336 } 2337 2338 static int 2339 qla1280_nvram_config(struct scsi_qla_host *ha) 2340 { 2341 struct device_reg __iomem *reg = ha->iobase; 2342 struct nvram *nv = &ha->nvram; 2343 int bus, target, status = 0; 2344 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2345 2346 ENTER("qla1280_nvram_config"); 2347 2348 if (ha->nvram_valid) { 2349 /* Always force AUTO sense for LINUX SCSI */ 2350 for (bus = 0; bus < MAX_BUSES; bus++) 2351 for (target = 0; target < MAX_TARGETS; target++) { 2352 nv->bus[bus].target[target].parameter. 2353 auto_request_sense = 1; 2354 } 2355 } else { 2356 qla1280_set_defaults(ha); 2357 } 2358 2359 qla1280_print_settings(nv); 2360 2361 /* Disable RISC load of firmware. */ 2362 ha->flags.disable_risc_code_load = 2363 nv->cntr_flags_1.disable_loading_risc_code; 2364 2365 if (IS_ISP1040(ha)) { 2366 uint16_t hwrev, cfg1, cdma_conf, ddma_conf; 2367 2368 hwrev = RD_REG_WORD(®->cfg_0) & ISP_CFG0_HWMSK; 2369 2370 cfg1 = RD_REG_WORD(®->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6); 2371 cdma_conf = RD_REG_WORD(®->cdma_cfg); 2372 ddma_conf = RD_REG_WORD(®->ddma_cfg); 2373 2374 /* Busted fifo, says mjacob. 
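	 * Apparently the 1040A revision should not have its FIFO threshold
	 * reprogrammed, so the NVRAM threshold is only applied to the
	 * other ISP1020/1040 revisions; burst enable is set for all of
	 * them.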
*/ 2375 if (hwrev != ISP_CFG0_1040A) 2376 cfg1 |= nv->isp_config.fifo_threshold << 4; 2377 2378 cfg1 |= nv->isp_config.burst_enable << 2; 2379 WRT_REG_WORD(®->cfg_1, cfg1); 2380 2381 WRT_REG_WORD(®->cdma_cfg, cdma_conf | CDMA_CONF_BENAB); 2382 WRT_REG_WORD(®->ddma_cfg, cdma_conf | DDMA_CONF_BENAB); 2383 } else { 2384 uint16_t cfg1, term; 2385 2386 /* Set ISP hardware DMA burst */ 2387 cfg1 = nv->isp_config.fifo_threshold << 4; 2388 cfg1 |= nv->isp_config.burst_enable << 2; 2389 /* Enable DMA arbitration on dual channel controllers */ 2390 if (ha->ports > 1) 2391 cfg1 |= BIT_13; 2392 WRT_REG_WORD(®->cfg_1, cfg1); 2393 2394 /* Set SCSI termination. */ 2395 WRT_REG_WORD(®->gpio_enable, 2396 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0); 2397 term = nv->termination.scsi_bus_1_control; 2398 term |= nv->termination.scsi_bus_0_control << 2; 2399 term |= nv->termination.auto_term_support << 7; 2400 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2401 WRT_REG_WORD(®->gpio_data, term); 2402 } 2403 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2404 2405 /* ISP parameter word. */ 2406 mb[0] = MBC_SET_SYSTEM_PARAMETER; 2407 mb[1] = nv->isp_parameter; 2408 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2409 2410 if (IS_ISP1x40(ha)) { 2411 /* clock rate - for qla1240 and older, only */ 2412 mb[0] = MBC_SET_CLOCK_RATE; 2413 mb[1] = 40; 2414 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2415 } 2416 2417 /* Firmware feature word. */ 2418 mb[0] = MBC_SET_FIRMWARE_FEATURES; 2419 mb[1] = nv->firmware_feature.f.enable_fast_posting; 2420 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1; 2421 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5; 2422 #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2) 2423 if (ia64_platform_is("sn2")) { 2424 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA " 2425 "workaround\n", ha->host_no); 2426 mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */ 2427 } 2428 #endif 2429 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2430 2431 /* Retry count and delay. */ 2432 mb[0] = MBC_SET_RETRY_COUNT; 2433 mb[1] = nv->bus[0].retry_count; 2434 mb[2] = nv->bus[0].retry_delay; 2435 mb[6] = nv->bus[1].retry_count; 2436 mb[7] = nv->bus[1].retry_delay; 2437 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 | 2438 BIT_1 | BIT_0, &mb[0]); 2439 2440 /* ASYNC data setup time. */ 2441 mb[0] = MBC_SET_ASYNC_DATA_SETUP; 2442 mb[1] = nv->bus[0].config_2.async_data_setup_time; 2443 mb[2] = nv->bus[1].config_2.async_data_setup_time; 2444 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2445 2446 /* Active negation states. 
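	 * BIT_5 selects REQ/ACK active negation and BIT_4 data line
	 * active negation, with mb[1] covering bus 0 and mb[2] bus 1,
	 * per the NVRAM config_2 settings.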
*/ 2447 mb[0] = MBC_SET_ACTIVE_NEGATION; 2448 mb[1] = 0; 2449 if (nv->bus[0].config_2.req_ack_active_negation) 2450 mb[1] |= BIT_5; 2451 if (nv->bus[0].config_2.data_line_active_negation) 2452 mb[1] |= BIT_4; 2453 mb[2] = 0; 2454 if (nv->bus[1].config_2.req_ack_active_negation) 2455 mb[2] |= BIT_5; 2456 if (nv->bus[1].config_2.data_line_active_negation) 2457 mb[2] |= BIT_4; 2458 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2459 2460 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY; 2461 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */ 2462 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2463 2464 /* thingy */ 2465 mb[0] = MBC_SET_PCI_CONTROL; 2466 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */ 2467 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */ 2468 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2469 2470 mb[0] = MBC_SET_TAG_AGE_LIMIT; 2471 mb[1] = 8; 2472 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2473 2474 /* Selection timeout. */ 2475 mb[0] = MBC_SET_SELECTION_TIMEOUT; 2476 mb[1] = nv->bus[0].selection_timeout; 2477 mb[2] = nv->bus[1].selection_timeout; 2478 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2479 2480 for (bus = 0; bus < ha->ports; bus++) 2481 status |= qla1280_config_bus(ha, bus); 2482 2483 if (status) 2484 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n"); 2485 2486 LEAVE("qla1280_nvram_config"); 2487 return status; 2488 } 2489 2490 /* 2491 * Get NVRAM data word 2492 * Calculates word position in NVRAM and calls request routine to 2493 * get the word from NVRAM. 2494 * 2495 * Input: 2496 * ha = adapter block pointer. 2497 * address = NVRAM word address. 2498 * 2499 * Returns: 2500 * data word. 2501 */ 2502 static uint16_t 2503 qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address) 2504 { 2505 uint32_t nv_cmd; 2506 uint16_t data; 2507 2508 nv_cmd = address << 16; 2509 nv_cmd |= NV_READ_OP; 2510 2511 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd)); 2512 2513 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = " 2514 "0x%x", data); 2515 2516 return data; 2517 } 2518 2519 /* 2520 * NVRAM request 2521 * Sends read command to NVRAM and gets data from NVRAM. 2522 * 2523 * Input: 2524 * ha = adapter block pointer. 2525 * nv_cmd = Bit 26 = start bit 2526 * Bit 25, 24 = opcode 2527 * Bit 23-16 = address 2528 * Bit 15-0 = write data 2529 * 2530 * Returns: 2531 * data word. 2532 */ 2533 static uint16_t 2534 qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd) 2535 { 2536 struct device_reg __iomem *reg = ha->iobase; 2537 int cnt; 2538 uint16_t data = 0; 2539 uint16_t reg_data; 2540 2541 /* Send command to NVRAM. */ 2542 2543 nv_cmd <<= 5; 2544 for (cnt = 0; cnt < 11; cnt++) { 2545 if (nv_cmd & BIT_31) 2546 qla1280_nv_write(ha, NV_DATA_OUT); 2547 else 2548 qla1280_nv_write(ha, 0); 2549 nv_cmd <<= 1; 2550 } 2551 2552 /* Read data from NVRAM. */ 2553 2554 for (cnt = 0; cnt < 16; cnt++) { 2555 WRT_REG_WORD(®->nvram, (NV_SELECT | NV_CLOCK)); 2556 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2557 NVRAM_DELAY(); 2558 data <<= 1; 2559 reg_data = RD_REG_WORD(®->nvram); 2560 if (reg_data & NV_DATA_IN) 2561 data |= BIT_0; 2562 WRT_REG_WORD(®->nvram, NV_SELECT); 2563 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2564 NVRAM_DELAY(); 2565 } 2566 2567 /* Deselect chip. 
*/ 2568 2569 WRT_REG_WORD(®->nvram, NV_DESELECT); 2570 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2571 NVRAM_DELAY(); 2572 2573 return data; 2574 } 2575 2576 static void 2577 qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data) 2578 { 2579 struct device_reg __iomem *reg = ha->iobase; 2580 2581 WRT_REG_WORD(®->nvram, data | NV_SELECT); 2582 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2583 NVRAM_DELAY(); 2584 WRT_REG_WORD(®->nvram, data | NV_SELECT | NV_CLOCK); 2585 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2586 NVRAM_DELAY(); 2587 WRT_REG_WORD(®->nvram, data | NV_SELECT); 2588 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2589 NVRAM_DELAY(); 2590 } 2591 2592 /* 2593 * Mailbox Command 2594 * Issue mailbox command and waits for completion. 2595 * 2596 * Input: 2597 * ha = adapter block pointer. 2598 * mr = mailbox registers to load. 2599 * mb = data pointer for mailbox registers. 2600 * 2601 * Output: 2602 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data. 2603 * 2604 * Returns: 2605 * 0 = success 2606 */ 2607 static int 2608 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) 2609 { 2610 struct device_reg __iomem *reg = ha->iobase; 2611 #if 0 2612 LIST_HEAD(done_q); 2613 #endif 2614 int status = 0; 2615 int cnt; 2616 uint16_t *optr, *iptr; 2617 uint16_t __iomem *mptr; 2618 uint16_t data; 2619 DECLARE_COMPLETION(wait); 2620 struct timer_list timer; 2621 2622 ENTER("qla1280_mailbox_command"); 2623 2624 if (ha->mailbox_wait) { 2625 printk(KERN_ERR "Warning mailbox wait already in use!\n"); 2626 } 2627 ha->mailbox_wait = &wait; 2628 2629 /* 2630 * We really should start out by verifying that the mailbox is 2631 * available before starting sending the command data 2632 */ 2633 /* Load mailbox registers. */ 2634 mptr = (uint16_t __iomem *) ®->mailbox0; 2635 iptr = mb; 2636 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) { 2637 if (mr & BIT_0) { 2638 WRT_REG_WORD(mptr, (*iptr)); 2639 } 2640 2641 mr >>= 1; 2642 mptr++; 2643 iptr++; 2644 } 2645 2646 /* Issue set host interrupt command. */ 2647 2648 /* set up a timer just in case we're really jammed */ 2649 init_timer(&timer); 2650 timer.expires = jiffies + 20*HZ; 2651 timer.data = (unsigned long)ha; 2652 timer.function = qla1280_mailbox_timeout; 2653 add_timer(&timer); 2654 2655 spin_unlock_irq(HOST_LOCK); 2656 WRT_REG_WORD(®->host_cmd, HC_SET_HOST_INT); 2657 data = qla1280_debounce_register(®->istatus); 2658 2659 wait_for_completion(&wait); 2660 del_timer_sync(&timer); 2661 2662 spin_lock_irq(HOST_LOCK); 2663 2664 ha->mailbox_wait = NULL; 2665 2666 /* Check for mailbox command timeout. */ 2667 if (ha->mailbox_out[0] != MBS_CMD_CMP) { 2668 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, " 2669 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = " 2670 "0x%04x\n", 2671 mb[0], ha->mailbox_out[0], RD_REG_WORD(®->istatus)); 2672 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n", 2673 RD_REG_WORD(®->mailbox0), RD_REG_WORD(®->mailbox1), 2674 RD_REG_WORD(®->mailbox2), RD_REG_WORD(®->mailbox3)); 2675 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n", 2676 RD_REG_WORD(®->mailbox4), RD_REG_WORD(®->mailbox5), 2677 RD_REG_WORD(®->mailbox6), RD_REG_WORD(®->mailbox7)); 2678 status = 1; 2679 } 2680 2681 /* Load return mailbox registers. */ 2682 optr = mb; 2683 iptr = (uint16_t *) &ha->mailbox_out[0]; 2684 mr = MAILBOX_REGISTER_COUNT; 2685 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); 2686 2687 #if 0 2688 /* Go check for any response interrupts pending. 
*/ 2689 qla1280_isr(ha, &done_q); 2690 #endif 2691 2692 if (ha->flags.reset_marker) 2693 qla1280_rst_aen(ha); 2694 2695 #if 0 2696 if (!list_empty(&done_q)) 2697 qla1280_done(ha, &done_q); 2698 #endif 2699 2700 if (status) 2701 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " 2702 "0x%x ****\n", mb[0]); 2703 2704 LEAVE("qla1280_mailbox_command"); 2705 return status; 2706 } 2707 2708 /* 2709 * qla1280_poll 2710 * Polls ISP for interrupts. 2711 * 2712 * Input: 2713 * ha = adapter block pointer. 2714 */ 2715 static void 2716 qla1280_poll(struct scsi_qla_host *ha) 2717 { 2718 struct device_reg __iomem *reg = ha->iobase; 2719 uint16_t data; 2720 LIST_HEAD(done_q); 2721 2722 /* ENTER("qla1280_poll"); */ 2723 2724 /* Check for pending interrupts. */ 2725 data = RD_REG_WORD(®->istatus); 2726 if (data & RISC_INT) 2727 qla1280_isr(ha, &done_q); 2728 2729 if (!ha->mailbox_wait) { 2730 if (ha->flags.reset_marker) 2731 qla1280_rst_aen(ha); 2732 } 2733 2734 if (!list_empty(&done_q)) 2735 qla1280_done(ha); 2736 2737 /* LEAVE("qla1280_poll"); */ 2738 } 2739 2740 /* 2741 * qla1280_bus_reset 2742 * Issue SCSI bus reset. 2743 * 2744 * Input: 2745 * ha = adapter block pointer. 2746 * bus = SCSI bus number. 2747 * 2748 * Returns: 2749 * 0 = success 2750 */ 2751 static int 2752 qla1280_bus_reset(struct scsi_qla_host *ha, int bus) 2753 { 2754 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2755 uint16_t reset_delay; 2756 int status; 2757 2758 dprintk(3, "qla1280_bus_reset: entered\n"); 2759 2760 if (qla1280_verbose) 2761 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n", 2762 ha->host_no, bus); 2763 2764 reset_delay = ha->bus_settings[bus].bus_reset_delay; 2765 mb[0] = MBC_BUS_RESET; 2766 mb[1] = reset_delay; 2767 mb[2] = (uint16_t) bus; 2768 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2769 2770 if (status) { 2771 if (ha->bus_settings[bus].failed_reset_count > 2) 2772 ha->bus_settings[bus].scsi_bus_dead = 1; 2773 ha->bus_settings[bus].failed_reset_count++; 2774 } else { 2775 spin_unlock_irq(HOST_LOCK); 2776 ssleep(reset_delay); 2777 spin_lock_irq(HOST_LOCK); 2778 2779 ha->bus_settings[bus].scsi_bus_dead = 0; 2780 ha->bus_settings[bus].failed_reset_count = 0; 2781 ha->bus_settings[bus].reset_marker = 0; 2782 /* Issue marker command. */ 2783 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL); 2784 } 2785 2786 /* 2787 * We should probably call qla1280_set_target_parameters() 2788 * here as well for all devices on the bus. 2789 */ 2790 2791 if (status) 2792 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n"); 2793 else 2794 dprintk(3, "qla1280_bus_reset: exiting normally\n"); 2795 2796 return status; 2797 } 2798 2799 /* 2800 * qla1280_device_reset 2801 * Issue bus device reset message to the target. 2802 * 2803 * Input: 2804 * ha = adapter block pointer. 2805 * bus = SCSI BUS number. 2806 * target = SCSI ID. 2807 * 2808 * Returns: 2809 * 0 = success 2810 */ 2811 static int 2812 qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target) 2813 { 2814 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2815 int status; 2816 2817 ENTER("qla1280_device_reset"); 2818 2819 mb[0] = MBC_ABORT_TARGET; 2820 mb[1] = (bus ? (target | BIT_7) : target) << 8; 2821 mb[2] = 1; 2822 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2823 2824 /* Issue marker command. 
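	 * The MK_SYNC_ID marker lets the firmware resynchronize its state
	 * for this target after the reset before new commands are queued
	 * to it.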
*/ 2825 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); 2826 2827 if (status) 2828 dprintk(2, "qla1280_device_reset: **** FAILED ****\n"); 2829 2830 LEAVE("qla1280_device_reset"); 2831 return status; 2832 } 2833 2834 /* 2835 * qla1280_abort_device 2836 * Issue an abort message to the device 2837 * 2838 * Input: 2839 * ha = adapter block pointer. 2840 * bus = SCSI BUS. 2841 * target = SCSI ID. 2842 * lun = SCSI LUN. 2843 * 2844 * Returns: 2845 * 0 = success 2846 */ 2847 static int 2848 qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun) 2849 { 2850 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2851 int status; 2852 2853 ENTER("qla1280_abort_device"); 2854 2855 mb[0] = MBC_ABORT_DEVICE; 2856 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun; 2857 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2858 2859 /* Issue marker command. */ 2860 qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN); 2861 2862 if (status) 2863 dprintk(2, "qla1280_abort_device: **** FAILED ****\n"); 2864 2865 LEAVE("qla1280_abort_device"); 2866 return status; 2867 } 2868 2869 /* 2870 * qla1280_abort_command 2871 * Abort command aborts a specified IOCB. 2872 * 2873 * Input: 2874 * ha = adapter block pointer. 2875 * sp = SB structure pointer. 2876 * 2877 * Returns: 2878 * 0 = success 2879 */ 2880 static int 2881 qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle) 2882 { 2883 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2884 unsigned int bus, target, lun; 2885 int status; 2886 2887 ENTER("qla1280_abort_command"); 2888 2889 bus = SCSI_BUS_32(sp->cmd); 2890 target = SCSI_TCN_32(sp->cmd); 2891 lun = SCSI_LUN_32(sp->cmd); 2892 2893 sp->flags |= SRB_ABORT_PENDING; 2894 2895 mb[0] = MBC_ABORT_COMMAND; 2896 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun; 2897 mb[2] = handle >> 16; 2898 mb[3] = handle & 0xffff; 2899 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]); 2900 2901 if (status) { 2902 dprintk(2, "qla1280_abort_command: **** FAILED ****\n"); 2903 sp->flags &= ~SRB_ABORT_PENDING; 2904 } 2905 2906 2907 LEAVE("qla1280_abort_command"); 2908 return status; 2909 } 2910 2911 /* 2912 * qla1280_reset_adapter 2913 * Reset adapter. 2914 * 2915 * Input: 2916 * ha = adapter block pointer. 2917 */ 2918 static void 2919 qla1280_reset_adapter(struct scsi_qla_host *ha) 2920 { 2921 struct device_reg __iomem *reg = ha->iobase; 2922 2923 ENTER("qla1280_reset_adapter"); 2924 2925 /* Disable ISP chip */ 2926 ha->flags.online = 0; 2927 WRT_REG_WORD(®->ictrl, ISP_RESET); 2928 WRT_REG_WORD(®->host_cmd, 2929 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS); 2930 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2931 2932 LEAVE("qla1280_reset_adapter"); 2933 } 2934 2935 /* 2936 * Issue marker command. 2937 * Function issues marker IOCB. 2938 * 2939 * Input: 2940 * ha = adapter block pointer. 2941 * bus = SCSI BUS number 2942 * id = SCSI ID 2943 * lun = SCSI LUN 2944 * type = marker modifier 2945 */ 2946 static void 2947 qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type) 2948 { 2949 struct mrk_entry *pkt; 2950 2951 ENTER("qla1280_marker"); 2952 2953 /* Get request packet. */ 2954 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) { 2955 pkt->entry_type = MARKER_TYPE; 2956 pkt->lun = (uint8_t) lun; 2957 pkt->target = (uint8_t) (bus ? 
(id | BIT_7) : id); 2958 pkt->modifier = type; 2959 pkt->entry_status = 0; 2960 2961 /* Issue command to ISP */ 2962 qla1280_isp_cmd(ha); 2963 } 2964 2965 LEAVE("qla1280_marker"); 2966 } 2967 2968 2969 /* 2970 * qla1280_64bit_start_scsi 2971 * The start SCSI is responsible for building request packets on 2972 * request ring and modifying ISP input pointer. 2973 * 2974 * Input: 2975 * ha = adapter block pointer. 2976 * sp = SB structure pointer. 2977 * 2978 * Returns: 2979 * 0 = success, was able to issue command. 2980 */ 2981 #ifdef QLA_64BIT_PTR 2982 static int 2983 qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) 2984 { 2985 struct device_reg __iomem *reg = ha->iobase; 2986 struct scsi_cmnd *cmd = sp->cmd; 2987 cmd_a64_entry_t *pkt; 2988 struct scatterlist *sg = NULL; 2989 __le32 *dword_ptr; 2990 dma_addr_t dma_handle; 2991 int status = 0; 2992 int cnt; 2993 int req_cnt; 2994 u16 seg_cnt; 2995 u8 dir; 2996 2997 ENTER("qla1280_64bit_start_scsi:"); 2998 2999 /* Calculate number of entries and segments required. */ 3000 req_cnt = 1; 3001 if (cmd->use_sg) { 3002 sg = (struct scatterlist *) cmd->request_buffer; 3003 seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg, 3004 cmd->sc_data_direction); 3005 3006 if (seg_cnt > 2) { 3007 req_cnt += (seg_cnt - 2) / 5; 3008 if ((seg_cnt - 2) % 5) 3009 req_cnt++; 3010 } 3011 } else if (cmd->request_bufflen) { /* If data transfer. */ 3012 seg_cnt = 1; 3013 } else { 3014 seg_cnt = 0; 3015 } 3016 3017 if ((req_cnt + 2) >= ha->req_q_cnt) { 3018 /* Calculate number of free request entries. */ 3019 cnt = RD_REG_WORD(®->mailbox4); 3020 if (ha->req_ring_index < cnt) 3021 ha->req_q_cnt = cnt - ha->req_ring_index; 3022 else 3023 ha->req_q_cnt = 3024 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 3025 } 3026 3027 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n", 3028 ha->req_q_cnt, seg_cnt); 3029 3030 /* If room for request in request ring. */ 3031 if ((req_cnt + 2) >= ha->req_q_cnt) { 3032 status = 1; 3033 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" 3034 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, 3035 req_cnt); 3036 goto out; 3037 } 3038 3039 /* Check for room in outstanding command list. */ 3040 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS && 3041 ha->outstanding_cmds[cnt] != 0; cnt++); 3042 3043 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 3044 status = 1; 3045 dprintk(2, "qla1280_start_scsi: NO ROOM IN " 3046 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); 3047 goto out; 3048 } 3049 3050 ha->outstanding_cmds[cnt] = sp; 3051 ha->req_q_cnt -= req_cnt; 3052 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1); 3053 3054 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp, 3055 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd)); 3056 dprintk(2, " bus %i, target %i, lun %i\n", 3057 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3058 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE); 3059 3060 /* 3061 * Build command packet. 3062 */ 3063 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr; 3064 3065 pkt->entry_type = COMMAND_A64_TYPE; 3066 pkt->entry_count = (uint8_t) req_cnt; 3067 pkt->sys_define = (uint8_t) ha->req_ring_index; 3068 pkt->entry_status = 0; 3069 pkt->handle = cpu_to_le32(cnt); 3070 3071 /* Zero out remaining portion of packet. */ 3072 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3073 3074 /* Set ISP command timeout. */ 3075 pkt->timeout = cpu_to_le16(30); 3076 3077 /* Set device target ID and LUN */ 3078 pkt->lun = SCSI_LUN_32(cmd); 3079 pkt->target = SCSI_BUS_32(cmd) ? 
3080 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd); 3081 3082 /* Enable simple tag queuing if device supports it. */ 3083 if (DEV_SIMPLE_TAGS(cmd->device)) 3084 pkt->control_flags |= cpu_to_le16(BIT_3); 3085 3086 /* Load SCSI command packet. */ 3087 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); 3088 memcpy(pkt->scsi_cdb, &(CMD_CDBP(cmd)), CMD_CDBLEN(cmd)); 3089 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ 3090 3091 /* Set transfer direction. */ 3092 dir = qla1280_data_direction(cmd); 3093 pkt->control_flags |= cpu_to_le16(dir); 3094 3095 /* Set total data segment count. */ 3096 pkt->dseg_count = cpu_to_le16(seg_cnt); 3097 3098 /* 3099 * Load data segments. 3100 */ 3101 if (seg_cnt) { /* If data transfer. */ 3102 /* Setup packet address segment pointer. */ 3103 dword_ptr = (u32 *)&pkt->dseg_0_address; 3104 3105 if (cmd->use_sg) { /* If scatter gather */ 3106 /* Load command entry data segments. */ 3107 for (cnt = 0; cnt < 2 && seg_cnt; cnt++, seg_cnt--) { 3108 dma_handle = sg_dma_address(sg); 3109 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 3110 if (ha->flags.use_pci_vchannel) 3111 sn_pci_set_vchan(ha->pdev, 3112 (unsigned long *)&dma_handle, 3113 SCSI_BUS_32(cmd)); 3114 #endif 3115 *dword_ptr++ = 3116 cpu_to_le32(pci_dma_lo32(dma_handle)); 3117 *dword_ptr++ = 3118 cpu_to_le32(pci_dma_hi32(dma_handle)); 3119 *dword_ptr++ = cpu_to_le32(sg_dma_len(sg)); 3120 sg++; 3121 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", 3122 cpu_to_le32(pci_dma_hi32(dma_handle)), 3123 cpu_to_le32(pci_dma_lo32(dma_handle)), 3124 cpu_to_le32(sg_dma_len(sg))); 3125 } 3126 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " 3127 "command packet data - b %i, t %i, l %i \n", 3128 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), 3129 SCSI_LUN_32(cmd)); 3130 qla1280_dump_buffer(5, (char *)pkt, 3131 REQUEST_ENTRY_SIZE); 3132 3133 /* 3134 * Build continuation packets. 3135 */ 3136 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " 3137 "remains\n", seg_cnt); 3138 3139 while (seg_cnt > 0) { 3140 /* Adjust ring index. */ 3141 ha->req_ring_index++; 3142 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3143 ha->req_ring_index = 0; 3144 ha->request_ring_ptr = 3145 ha->request_ring; 3146 } else 3147 ha->request_ring_ptr++; 3148 3149 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr; 3150 3151 /* Zero out packet. */ 3152 memset(pkt, 0, REQUEST_ENTRY_SIZE); 3153 3154 /* Load packet defaults. */ 3155 ((struct cont_a64_entry *) pkt)->entry_type = 3156 CONTINUE_A64_TYPE; 3157 ((struct cont_a64_entry *) pkt)->entry_count = 1; 3158 ((struct cont_a64_entry *) pkt)->sys_define = 3159 (uint8_t)ha->req_ring_index; 3160 /* Setup packet address segment pointer. */ 3161 dword_ptr = 3162 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; 3163 3164 /* Load continuation entry data segments. */ 3165 for (cnt = 0; cnt < 5 && seg_cnt; 3166 cnt++, seg_cnt--) { 3167 dma_handle = sg_dma_address(sg); 3168 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 3169 if (ha->flags.use_pci_vchannel) 3170 sn_pci_set_vchan(ha->pdev, 3171 (unsigned long *)&dma_handle, 3172 SCSI_BUS_32(cmd)); 3173 #endif 3174 *dword_ptr++ = 3175 cpu_to_le32(pci_dma_lo32(dma_handle)); 3176 *dword_ptr++ = 3177 cpu_to_le32(pci_dma_hi32(dma_handle)); 3178 *dword_ptr++ = 3179 cpu_to_le32(sg_dma_len(sg)); 3180 dprintk(3, "S/G Segment Cont. 
phys_addr=%x %x, len=0x%x\n", 3181 cpu_to_le32(pci_dma_hi32(dma_handle)), 3182 cpu_to_le32(pci_dma_lo32(dma_handle)), 3183 cpu_to_le32(sg_dma_len(sg))); 3184 sg++; 3185 } 3186 dprintk(5, "qla1280_64bit_start_scsi: " 3187 "continuation packet data - b %i, t " 3188 "%i, l %i \n", SCSI_BUS_32(cmd), 3189 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3190 qla1280_dump_buffer(5, (char *)pkt, 3191 REQUEST_ENTRY_SIZE); 3192 } 3193 } else { /* No scatter gather data transfer */ 3194 dma_handle = pci_map_single(ha->pdev, 3195 cmd->request_buffer, 3196 cmd->request_bufflen, 3197 cmd->sc_data_direction); 3198 3199 sp->saved_dma_handle = dma_handle; 3200 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 3201 if (ha->flags.use_pci_vchannel) 3202 sn_pci_set_vchan(ha->pdev, 3203 (unsigned long *)&dma_handle, 3204 SCSI_BUS_32(cmd)); 3205 #endif 3206 *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); 3207 *dword_ptr++ = cpu_to_le32(pci_dma_hi32(dma_handle)); 3208 *dword_ptr = cpu_to_le32(cmd->request_bufflen); 3209 3210 dprintk(5, "qla1280_64bit_start_scsi: No scatter/" 3211 "gather command packet data - b %i, t %i, " 3212 "l %i \n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), 3213 SCSI_LUN_32(cmd)); 3214 qla1280_dump_buffer(5, (char *)pkt, 3215 REQUEST_ENTRY_SIZE); 3216 } 3217 } else { /* No data transfer */ 3218 dprintk(5, "qla1280_64bit_start_scsi: No data, command " 3219 "packet data - b %i, t %i, l %i \n", 3220 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3221 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); 3222 } 3223 /* Adjust ring index. */ 3224 ha->req_ring_index++; 3225 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3226 ha->req_ring_index = 0; 3227 ha->request_ring_ptr = ha->request_ring; 3228 } else 3229 ha->request_ring_ptr++; 3230 3231 /* Set chip new ring index. */ 3232 dprintk(2, 3233 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n"); 3234 sp->flags |= SRB_SENT; 3235 ha->actthreads++; 3236 WRT_REG_WORD(®->mailbox4, ha->req_ring_index); 3237 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */ 3238 mmiowb(); 3239 3240 out: 3241 if (status) 3242 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n"); 3243 else 3244 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n"); 3245 3246 return status; 3247 } 3248 #else /* !QLA_64BIT_PTR */ 3249 3250 /* 3251 * qla1280_32bit_start_scsi 3252 * The start SCSI is responsible for building request packets on 3253 * request ring and modifying ISP input pointer. 3254 * 3255 * The Qlogic firmware interface allows every queue slot to have a SCSI 3256 * command and up to 4 scatter/gather (SG) entries. If we need more 3257 * than 4 SG entries, then continuation entries are used that can 3258 * hold another 7 entries each. The start routine determines if there 3259 * is eought empty slots then build the combination of requests to 3260 * fulfill the OS request. 3261 * 3262 * Input: 3263 * ha = adapter block pointer. 3264 * sp = SCSI Request Block structure pointer. 3265 * 3266 * Returns: 3267 * 0 = success, was able to issue command. 
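 *
 * Example: a 12 element scatter/gather list needs one command entry
 * (4 segments) plus two continuation entries (7 + 1 segments), so
 * req_cnt ends up as 3.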
3268 */ 3269 static int 3270 qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) 3271 { 3272 struct device_reg __iomem *reg = ha->iobase; 3273 struct scsi_cmnd *cmd = sp->cmd; 3274 struct cmd_entry *pkt; 3275 struct scatterlist *sg = NULL; 3276 __le32 *dword_ptr; 3277 int status = 0; 3278 int cnt; 3279 int req_cnt; 3280 uint16_t seg_cnt; 3281 dma_addr_t dma_handle; 3282 u8 dir; 3283 3284 ENTER("qla1280_32bit_start_scsi"); 3285 3286 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp, 3287 cmd->cmnd[0]); 3288 3289 /* Calculate number of entries and segments required. */ 3290 req_cnt = 1; 3291 if (cmd->use_sg) { 3292 /* 3293 * We must build an SG list in adapter format, as the kernel's 3294 * SG list cannot be used directly because of data field size 3295 * (__alpha__) differences and the kernel SG list uses virtual 3296 * addresses where we need physical addresses. 3297 */ 3298 sg = (struct scatterlist *) cmd->request_buffer; 3299 seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg, 3300 cmd->sc_data_direction); 3301 3302 /* 3303 * if greater than four sg entries then we need to allocate 3304 * continuation entries 3305 */ 3306 if (seg_cnt > 4) { 3307 req_cnt += (seg_cnt - 4) / 7; 3308 if ((seg_cnt - 4) % 7) 3309 req_cnt++; 3310 } 3311 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n", 3312 cmd, seg_cnt, req_cnt); 3313 } else if (cmd->request_bufflen) { /* If data transfer. */ 3314 dprintk(3, "No S/G transfer t=%x cmd=%p len=%x CDB=%x\n", 3315 SCSI_TCN_32(cmd), cmd, cmd->request_bufflen, 3316 cmd->cmnd[0]); 3317 seg_cnt = 1; 3318 } else { 3319 /* dprintk(1, "No data transfer \n"); */ 3320 seg_cnt = 0; 3321 } 3322 3323 if ((req_cnt + 2) >= ha->req_q_cnt) { 3324 /* Calculate number of free request entries. */ 3325 cnt = RD_REG_WORD(®->mailbox4); 3326 if (ha->req_ring_index < cnt) 3327 ha->req_q_cnt = cnt - ha->req_ring_index; 3328 else 3329 ha->req_q_cnt = 3330 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 3331 } 3332 3333 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n", 3334 ha->req_q_cnt, seg_cnt); 3335 /* If room for request in request ring. */ 3336 if ((req_cnt + 2) >= ha->req_q_cnt) { 3337 status = 1; 3338 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " 3339 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, 3340 ha->req_q_cnt, req_cnt); 3341 goto out; 3342 } 3343 3344 /* Check for empty slot in outstanding command list. */ 3345 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS && 3346 (ha->outstanding_cmds[cnt] != 0); cnt++) ; 3347 3348 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 3349 status = 1; 3350 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " 3351 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); 3352 goto out; 3353 } 3354 3355 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1); 3356 ha->outstanding_cmds[cnt] = sp; 3357 ha->req_q_cnt -= req_cnt; 3358 3359 /* 3360 * Build command packet. 3361 */ 3362 pkt = (struct cmd_entry *) ha->request_ring_ptr; 3363 3364 pkt->entry_type = COMMAND_TYPE; 3365 pkt->entry_count = (uint8_t) req_cnt; 3366 pkt->sys_define = (uint8_t) ha->req_ring_index; 3367 pkt->entry_status = 0; 3368 pkt->handle = cpu_to_le32(cnt); 3369 3370 /* Zero out remaining portion of packet. */ 3371 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3372 3373 /* Set ISP command timeout. */ 3374 pkt->timeout = cpu_to_le16(30); 3375 3376 /* Set device target ID and LUN */ 3377 pkt->lun = SCSI_LUN_32(cmd); 3378 pkt->target = SCSI_BUS_32(cmd) ? 
3379 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd); 3380 3381 /* Enable simple tag queuing if device supports it. */ 3382 if (DEV_SIMPLE_TAGS(cmd->device)) 3383 pkt->control_flags |= cpu_to_le16(BIT_3); 3384 3385 /* Load SCSI command packet. */ 3386 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); 3387 memcpy(pkt->scsi_cdb, &(CMD_CDBP(cmd)), CMD_CDBLEN(cmd)); 3388 3389 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ 3390 /* Set transfer direction. */ 3391 dir = qla1280_data_direction(cmd); 3392 pkt->control_flags |= cpu_to_le16(dir); 3393 3394 /* Set total data segment count. */ 3395 pkt->dseg_count = cpu_to_le16(seg_cnt); 3396 3397 /* 3398 * Load data segments. 3399 */ 3400 if (seg_cnt) { 3401 /* Setup packet address segment pointer. */ 3402 dword_ptr = &pkt->dseg_0_address; 3403 3404 if (cmd->use_sg) { /* If scatter gather */ 3405 dprintk(3, "Building S/G data segments..\n"); 3406 qla1280_dump_buffer(1, (char *)sg, 4 * 16); 3407 3408 /* Load command entry data segments. */ 3409 for (cnt = 0; cnt < 4 && seg_cnt; cnt++, seg_cnt--) { 3410 *dword_ptr++ = 3411 cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))); 3412 *dword_ptr++ = 3413 cpu_to_le32(sg_dma_len(sg)); 3414 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", 3415 (pci_dma_lo32(sg_dma_address(sg))), 3416 (sg_dma_len(sg))); 3417 sg++; 3418 } 3419 /* 3420 * Build continuation packets. 3421 */ 3422 dprintk(3, "S/G Building Continuation" 3423 "...seg_cnt=0x%x remains\n", seg_cnt); 3424 while (seg_cnt > 0) { 3425 /* Adjust ring index. */ 3426 ha->req_ring_index++; 3427 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3428 ha->req_ring_index = 0; 3429 ha->request_ring_ptr = 3430 ha->request_ring; 3431 } else 3432 ha->request_ring_ptr++; 3433 3434 pkt = (struct cmd_entry *)ha->request_ring_ptr; 3435 3436 /* Zero out packet. */ 3437 memset(pkt, 0, REQUEST_ENTRY_SIZE); 3438 3439 /* Load packet defaults. */ 3440 ((struct cont_entry *) pkt)-> 3441 entry_type = CONTINUE_TYPE; 3442 ((struct cont_entry *) pkt)->entry_count = 1; 3443 3444 ((struct cont_entry *) pkt)->sys_define = 3445 (uint8_t) ha->req_ring_index; 3446 3447 /* Setup packet address segment pointer. */ 3448 dword_ptr = 3449 &((struct cont_entry *) pkt)->dseg_0_address; 3450 3451 /* Load continuation entry data segments. */ 3452 for (cnt = 0; cnt < 7 && seg_cnt; 3453 cnt++, seg_cnt--) { 3454 *dword_ptr++ = 3455 cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))); 3456 *dword_ptr++ = 3457 cpu_to_le32(sg_dma_len(sg)); 3458 dprintk(1, 3459 "S/G Segment Cont. 
phys_addr=0x%x, " 3460 "len=0x%x\n", 3461 cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))), 3462 cpu_to_le32(sg_dma_len(sg))); 3463 sg++; 3464 } 3465 dprintk(5, "qla1280_32bit_start_scsi: " 3466 "continuation packet data - " 3467 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), 3468 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3469 qla1280_dump_buffer(5, (char *)pkt, 3470 REQUEST_ENTRY_SIZE); 3471 } 3472 } else { /* No S/G data transfer */ 3473 dma_handle = pci_map_single(ha->pdev, 3474 cmd->request_buffer, 3475 cmd->request_bufflen, 3476 cmd->sc_data_direction); 3477 sp->saved_dma_handle = dma_handle; 3478 3479 *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); 3480 *dword_ptr = cpu_to_le32(cmd->request_bufflen); 3481 } 3482 } else { /* No data transfer at all */ 3483 dprintk(5, "qla1280_32bit_start_scsi: No data, command " 3484 "packet data - \n"); 3485 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); 3486 } 3487 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n"); 3488 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr, 3489 REQUEST_ENTRY_SIZE); 3490 3491 /* Adjust ring index. */ 3492 ha->req_ring_index++; 3493 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3494 ha->req_ring_index = 0; 3495 ha->request_ring_ptr = ha->request_ring; 3496 } else 3497 ha->request_ring_ptr++; 3498 3499 /* Set chip new ring index. */ 3500 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC " 3501 "for pending command\n"); 3502 sp->flags |= SRB_SENT; 3503 ha->actthreads++; 3504 WRT_REG_WORD(®->mailbox4, ha->req_ring_index); 3505 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */ 3506 mmiowb(); 3507 3508 out: 3509 if (status) 3510 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n"); 3511 3512 LEAVE("qla1280_32bit_start_scsi"); 3513 3514 return status; 3515 } 3516 #endif 3517 3518 /* 3519 * qla1280_req_pkt 3520 * Function is responsible for locking ring and 3521 * getting a zeroed out request packet. 3522 * 3523 * Input: 3524 * ha = adapter block pointer. 3525 * 3526 * Returns: 3527 * 0 = failed to get slot. 3528 */ 3529 static request_t * 3530 qla1280_req_pkt(struct scsi_qla_host *ha) 3531 { 3532 struct device_reg __iomem *reg = ha->iobase; 3533 request_t *pkt = NULL; 3534 int cnt; 3535 uint32_t timer; 3536 3537 ENTER("qla1280_req_pkt"); 3538 3539 /* 3540 * This can be called from interrupt context, damn it!!! 3541 */ 3542 /* Wait for 30 seconds for slot. */ 3543 for (timer = 15000000; timer; timer--) { 3544 if (ha->req_q_cnt > 0) { 3545 /* Calculate number of free request entries. */ 3546 cnt = RD_REG_WORD(®->mailbox4); 3547 if (ha->req_ring_index < cnt) 3548 ha->req_q_cnt = cnt - ha->req_ring_index; 3549 else 3550 ha->req_q_cnt = 3551 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 3552 } 3553 3554 /* Found empty request ring slot? */ 3555 if (ha->req_q_cnt > 0) { 3556 ha->req_q_cnt--; 3557 pkt = ha->request_ring_ptr; 3558 3559 /* Zero out packet. */ 3560 memset(pkt, 0, REQUEST_ENTRY_SIZE); 3561 3562 /* 3563 * How can this be right when we have a ring 3564 * size of 512??? 3565 */ 3566 /* Set system defined field. */ 3567 pkt->sys_define = (uint8_t) ha->req_ring_index; 3568 3569 /* Set entry count. */ 3570 pkt->entry_count = 1; 3571 3572 break; 3573 } 3574 3575 udelay(2); /* 10 */ 3576 3577 /* Check for pending interrupts. 
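	 * Polling here keeps response queue completions and mailbox
	 * events serviced while we busy-wait for a free request slot,
	 * since this may be running in interrupt context as noted above.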
 */
		qla1280_poll(ha);
	}

	if (!pkt)
		dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
	else
		dprintk(3, "qla1280_req_pkt: exiting normally\n");

	return pkt;
}

/*
 * qla1280_isp_cmd
 *      Function is responsible for modifying ISP input pointer.
 *      Releases ring lock.
 *
 * Input:
 *      ha = adapter block pointer.
 */
static void
qla1280_isp_cmd(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;

	ENTER("qla1280_isp_cmd");

	dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
	qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
			    REQUEST_ENTRY_SIZE);

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/*
	 * Update request index to mailbox4 (Request Queue In).
	 * The mmiowb() ensures that this write is ordered with writes by other
	 * CPUs.  Without the mmiowb(), it is possible for the following:
	 *    CPUA posts write of index 5 to mailbox4
	 *    CPUA releases host lock
	 *    CPUB acquires host lock
	 *    CPUB posts write of index 6 to mailbox4
	 *    On PCI bus, order reverses and write of 6 posts, then index 5,
	 *       causing chip to issue full queue of stale commands
	 * The mmiowb() prevents future writes from crossing the barrier.
	 * See Documentation/DocBook/deviceiobook.tmpl for more information.
	 */
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
	mmiowb();

	LEAVE("qla1280_isp_cmd");
}

/****************************************************************************/
/*                        Interrupt Service Routine.                        */
/****************************************************************************/

/****************************************************************************
 *  qla1280_isr
 *      Calls I/O done on command completion.
 *
 * Input:
 *      ha     = adapter block pointer.
 *      done_q = done queue.
 ****************************************************************************/
static void
qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
{
	struct device_reg __iomem *reg = ha->iobase;
	struct response *pkt;
	struct srb *sp = NULL;
	uint16_t mailbox[MAILBOX_REGISTER_COUNT];
	uint16_t *wptr;
	uint32_t index;
	u16 istatus;

	ENTER("qla1280_isr");

	istatus = RD_REG_WORD(&reg->istatus);
	if (!(istatus & (RISC_INT | PCI_INT)))
		return;

	/* Save mailbox register 5 */
	mailbox[5] = RD_REG_WORD(&reg->mailbox5);

	/* Check for mailbox interrupt. */

	mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);

	if (mailbox[0] & BIT_0) {
		/* Get mailbox data. */
		/* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */

		wptr = &mailbox[0];
		*wptr++ = RD_REG_WORD(&reg->mailbox0);
		*wptr++ = RD_REG_WORD(&reg->mailbox1);
		*wptr = RD_REG_WORD(&reg->mailbox2);
		if (mailbox[0] != MBA_SCSI_COMPLETION) {
			wptr++;
			*wptr++ = RD_REG_WORD(&reg->mailbox3);
			*wptr++ = RD_REG_WORD(&reg->mailbox4);
			wptr++;
			*wptr++ = RD_REG_WORD(&reg->mailbox6);
			*wptr = RD_REG_WORD(&reg->mailbox7);
		}

		/* Release mailbox registers.
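		   Writing 0 to the semaphore register hands the mailbox registers back to the RISC; HC_CLR_RISC_INT then acknowledges this interrupt.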
*/ 3689 3690 WRT_REG_WORD(®->semaphore, 0); 3691 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 3692 3693 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x", 3694 mailbox[0]); 3695 3696 /* Handle asynchronous event */ 3697 switch (mailbox[0]) { 3698 case MBA_SCSI_COMPLETION: /* Response completion */ 3699 dprintk(5, "qla1280_isr: mailbox SCSI response " 3700 "completion\n"); 3701 3702 if (ha->flags.online) { 3703 /* Get outstanding command index. */ 3704 index = mailbox[2] << 16 | mailbox[1]; 3705 3706 /* Validate handle. */ 3707 if (index < MAX_OUTSTANDING_COMMANDS) 3708 sp = ha->outstanding_cmds[index]; 3709 else 3710 sp = NULL; 3711 3712 if (sp) { 3713 /* Free outstanding command slot. */ 3714 ha->outstanding_cmds[index] = NULL; 3715 3716 /* Save ISP completion status */ 3717 CMD_RESULT(sp->cmd) = 0; 3718 3719 /* Place block on done queue */ 3720 list_add_tail(&sp->list, done_q); 3721 } else { 3722 /* 3723 * If we get here we have a real problem! 3724 */ 3725 printk(KERN_WARNING 3726 "qla1280: ISP invalid handle"); 3727 } 3728 } 3729 break; 3730 3731 case MBA_BUS_RESET: /* SCSI Bus Reset */ 3732 ha->flags.reset_marker = 1; 3733 index = mailbox[6] & BIT_0; 3734 ha->bus_settings[index].reset_marker = 1; 3735 3736 printk(KERN_DEBUG "qla1280_isr(): index %i " 3737 "asynchronous BUS_RESET\n", index); 3738 break; 3739 3740 case MBA_SYSTEM_ERR: /* System Error */ 3741 printk(KERN_WARNING 3742 "qla1280: ISP System Error - mbx1=%xh, mbx2=" 3743 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2], 3744 mailbox[3]); 3745 break; 3746 3747 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 3748 printk(KERN_WARNING 3749 "qla1280: ISP Request Transfer Error\n"); 3750 break; 3751 3752 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 3753 printk(KERN_WARNING 3754 "qla1280: ISP Response Transfer Error\n"); 3755 break; 3756 3757 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 3758 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n"); 3759 break; 3760 3761 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */ 3762 dprintk(2, 3763 "qla1280_isr: asynchronous TIMEOUT_RESET\n"); 3764 break; 3765 3766 case MBA_DEVICE_RESET: /* Bus Device Reset */ 3767 printk(KERN_INFO "qla1280_isr(): asynchronous " 3768 "BUS_DEVICE_RESET\n"); 3769 3770 ha->flags.reset_marker = 1; 3771 index = mailbox[6] & BIT_0; 3772 ha->bus_settings[index].reset_marker = 1; 3773 break; 3774 3775 case MBA_BUS_MODE_CHANGE: 3776 dprintk(2, 3777 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n"); 3778 break; 3779 3780 default: 3781 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */ 3782 if (mailbox[0] < MBA_ASYNC_EVENT) { 3783 wptr = &mailbox[0]; 3784 memcpy((uint16_t *) ha->mailbox_out, wptr, 3785 MAILBOX_REGISTER_COUNT * 3786 sizeof(uint16_t)); 3787 3788 if(ha->mailbox_wait != NULL) 3789 complete(ha->mailbox_wait); 3790 } 3791 break; 3792 } 3793 } else { 3794 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 3795 } 3796 3797 /* 3798 * We will receive interrupts during mailbox testing prior to 3799 * the card being marked online, hence the double check. 
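 * Only go on to scan the response ring once the adapter is online and
 * no mailbox command is still waiting for its completion.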
3800 */ 3801 if (!(ha->flags.online && !ha->mailbox_wait)) { 3802 dprintk(2, "qla1280_isr: Response pointer Error\n"); 3803 goto out; 3804 } 3805 3806 if (mailbox[5] >= RESPONSE_ENTRY_CNT) 3807 goto out; 3808 3809 while (ha->rsp_ring_index != mailbox[5]) { 3810 pkt = ha->response_ring_ptr; 3811 3812 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]" 3813 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]); 3814 dprintk(5,"qla1280_isr: response packet data\n"); 3815 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE); 3816 3817 if (pkt->entry_type == STATUS_TYPE) { 3818 if ((le16_to_cpu(pkt->scsi_status) & 0xff) 3819 || pkt->comp_status || pkt->entry_status) { 3820 dprintk(2, "qla1280_isr: ha->rsp_ring_index = " 3821 "0x%x mailbox[5] = 0x%x, comp_status " 3822 "= 0x%x, scsi_status = 0x%x\n", 3823 ha->rsp_ring_index, mailbox[5], 3824 le16_to_cpu(pkt->comp_status), 3825 le16_to_cpu(pkt->scsi_status)); 3826 } 3827 } else { 3828 dprintk(2, "qla1280_isr: ha->rsp_ring_index = " 3829 "0x%x, mailbox[5] = 0x%x\n", 3830 ha->rsp_ring_index, mailbox[5]); 3831 dprintk(2, "qla1280_isr: response packet data\n"); 3832 qla1280_dump_buffer(2, (char *)pkt, 3833 RESPONSE_ENTRY_SIZE); 3834 } 3835 3836 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) { 3837 dprintk(2, "status: Cmd %p, handle %i\n", 3838 ha->outstanding_cmds[pkt->handle]->cmd, 3839 pkt->handle); 3840 if (pkt->entry_type == STATUS_TYPE) 3841 qla1280_status_entry(ha, pkt, done_q); 3842 else 3843 qla1280_error_entry(ha, pkt, done_q); 3844 /* Adjust ring index. */ 3845 ha->rsp_ring_index++; 3846 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) { 3847 ha->rsp_ring_index = 0; 3848 ha->response_ring_ptr = ha->response_ring; 3849 } else 3850 ha->response_ring_ptr++; 3851 WRT_REG_WORD(®->mailbox5, ha->rsp_ring_index); 3852 } 3853 } 3854 3855 out: 3856 LEAVE("qla1280_isr"); 3857 } 3858 3859 /* 3860 * qla1280_rst_aen 3861 * Processes asynchronous reset. 3862 * 3863 * Input: 3864 * ha = adapter block pointer. 3865 */ 3866 static void 3867 qla1280_rst_aen(struct scsi_qla_host *ha) 3868 { 3869 uint8_t bus; 3870 3871 ENTER("qla1280_rst_aen"); 3872 3873 if (ha->flags.online && !ha->flags.reset_active && 3874 !ha->flags.abort_isp_active) { 3875 ha->flags.reset_active = 1; 3876 while (ha->flags.reset_marker) { 3877 /* Issue marker command. */ 3878 ha->flags.reset_marker = 0; 3879 for (bus = 0; bus < ha->ports && 3880 !ha->flags.reset_marker; bus++) { 3881 if (ha->bus_settings[bus].reset_marker) { 3882 ha->bus_settings[bus].reset_marker = 0; 3883 qla1280_marker(ha, bus, 0, 0, 3884 MK_SYNC_ALL); 3885 } 3886 } 3887 } 3888 } 3889 3890 LEAVE("qla1280_rst_aen"); 3891 } 3892 3893 3894 #if LINUX_VERSION_CODE < 0x020500 3895 /* 3896 * 3897 */ 3898 static void 3899 qla1280_get_target_options(struct scsi_cmnd *cmd, struct scsi_qla_host *ha) 3900 { 3901 unsigned char *result; 3902 struct nvram *n; 3903 int bus, target, lun; 3904 3905 bus = SCSI_BUS_32(cmd); 3906 target = SCSI_TCN_32(cmd); 3907 lun = SCSI_LUN_32(cmd); 3908 3909 /* 3910 * Make sure to not touch anything if someone is using the 3911 * sg interface. 
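 * Also bail out unless the command completed with DID_OK and addressed
 * LUN 0, since the per-target settings below are only taken from that data.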
3912 */ 3913 if (cmd->use_sg || (CMD_RESULT(cmd) >> 16) != DID_OK || lun) 3914 return; 3915 3916 result = cmd->request_buffer; 3917 n = &ha->nvram; 3918 3919 n->bus[bus].target[target].parameter.enable_wide = 0; 3920 n->bus[bus].target[target].parameter.enable_sync = 0; 3921 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0; 3922 3923 if (result[7] & 0x60) 3924 n->bus[bus].target[target].parameter.enable_wide = 1; 3925 if (result[7] & 0x10) 3926 n->bus[bus].target[target].parameter.enable_sync = 1; 3927 if ((result[2] >= 3) && (result[4] + 5 > 56) && 3928 (result[56] & 0x4)) 3929 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1; 3930 3931 dprintk(2, "get_target_options(): wide %i, sync %i, ppr %i\n", 3932 n->bus[bus].target[target].parameter.enable_wide, 3933 n->bus[bus].target[target].parameter.enable_sync, 3934 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr); 3935 } 3936 #endif 3937 3938 /* 3939 * qla1280_status_entry 3940 * Processes received ISP status entry. 3941 * 3942 * Input: 3943 * ha = adapter block pointer. 3944 * pkt = entry pointer. 3945 * done_q = done queue. 3946 */ 3947 static void 3948 qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt, 3949 struct list_head *done_q) 3950 { 3951 unsigned int bus, target, lun; 3952 int sense_sz; 3953 struct srb *sp; 3954 struct scsi_cmnd *cmd; 3955 uint32_t handle = le32_to_cpu(pkt->handle); 3956 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status); 3957 uint16_t comp_status = le16_to_cpu(pkt->comp_status); 3958 3959 ENTER("qla1280_status_entry"); 3960 3961 /* Validate handle. */ 3962 if (handle < MAX_OUTSTANDING_COMMANDS) 3963 sp = ha->outstanding_cmds[handle]; 3964 else 3965 sp = NULL; 3966 3967 if (!sp) { 3968 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n"); 3969 goto out; 3970 } 3971 3972 /* Free outstanding command slot. */ 3973 ha->outstanding_cmds[handle] = NULL; 3974 3975 cmd = sp->cmd; 3976 3977 /* Generate LU queue on cntrl, target, LUN */ 3978 bus = SCSI_BUS_32(cmd); 3979 target = SCSI_TCN_32(cmd); 3980 lun = SCSI_LUN_32(cmd); 3981 3982 if (comp_status || scsi_status) { 3983 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = " 3984 "0x%x, handle = 0x%x\n", comp_status, 3985 scsi_status, handle); 3986 } 3987 3988 /* Target busy or queue full */ 3989 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL || 3990 (scsi_status & 0xFF) == SAM_STAT_BUSY) { 3991 CMD_RESULT(cmd) = scsi_status & 0xff; 3992 } else { 3993 3994 /* Save ISP completion status */ 3995 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd); 3996 3997 if (scsi_status & SAM_STAT_CHECK_CONDITION) { 3998 if (comp_status != CS_ARS_FAILED) { 3999 uint16_t req_sense_length = 4000 le16_to_cpu(pkt->req_sense_length); 4001 if (req_sense_length < CMD_SNSLEN(cmd)) 4002 sense_sz = req_sense_length; 4003 else 4004 /* 4005 * scsi_cmnd->sense_buffer is 4006 * 64 bytes, why only copy 63? 4007 * This looks wrong! /Jes 4008 */ 4009 sense_sz = CMD_SNSLEN(cmd) - 1; 4010 4011 memcpy(cmd->sense_buffer, 4012 &pkt->req_sense_data, sense_sz); 4013 } else 4014 sense_sz = 0; 4015 memset(cmd->sense_buffer + sense_sz, 0, 4016 sizeof(cmd->sense_buffer) - sense_sz); 4017 4018 dprintk(2, "qla1280_status_entry: Check " 4019 "condition Sense data, b %i, t %i, " 4020 "l %i\n", bus, target, lun); 4021 if (sense_sz) 4022 qla1280_dump_buffer(2, 4023 (char *)cmd->sense_buffer, 4024 sense_sz); 4025 } 4026 } 4027 4028 /* Place command on done queue. 
*/ 4029 list_add_tail(&sp->list, done_q); 4030 out: 4031 LEAVE("qla1280_status_entry"); 4032 } 4033 4034 /* 4035 * qla1280_error_entry 4036 * Processes error entry. 4037 * 4038 * Input: 4039 * ha = adapter block pointer. 4040 * pkt = entry pointer. 4041 * done_q = done queue. 4042 */ 4043 static void 4044 qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt, 4045 struct list_head *done_q) 4046 { 4047 struct srb *sp; 4048 uint32_t handle = le32_to_cpu(pkt->handle); 4049 4050 ENTER("qla1280_error_entry"); 4051 4052 if (pkt->entry_status & BIT_3) 4053 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n"); 4054 else if (pkt->entry_status & BIT_2) 4055 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n"); 4056 else if (pkt->entry_status & BIT_1) 4057 dprintk(2, "qla1280_error_entry: FULL flag error\n"); 4058 else 4059 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n"); 4060 4061 /* Validate handle. */ 4062 if (handle < MAX_OUTSTANDING_COMMANDS) 4063 sp = ha->outstanding_cmds[handle]; 4064 else 4065 sp = NULL; 4066 4067 if (sp) { 4068 /* Free outstanding command slot. */ 4069 ha->outstanding_cmds[handle] = NULL; 4070 4071 /* Bad payload or header */ 4072 if (pkt->entry_status & (BIT_3 + BIT_2)) { 4073 /* Bad payload or header, set error status. */ 4074 /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */ 4075 CMD_RESULT(sp->cmd) = DID_ERROR << 16; 4076 } else if (pkt->entry_status & BIT_1) { /* FULL flag */ 4077 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16; 4078 } else { 4079 /* Set error status. */ 4080 CMD_RESULT(sp->cmd) = DID_ERROR << 16; 4081 } 4082 4083 /* Place command on done queue. */ 4084 list_add_tail(&sp->list, done_q); 4085 } 4086 #ifdef QLA_64BIT_PTR 4087 else if (pkt->entry_type == COMMAND_A64_TYPE) { 4088 printk(KERN_WARNING "!qla1280: Error Entry invalid handle"); 4089 } 4090 #endif 4091 4092 LEAVE("qla1280_error_entry"); 4093 } 4094 4095 /* 4096 * qla1280_abort_isp 4097 * Resets ISP and aborts all outstanding commands. 4098 * 4099 * Input: 4100 * ha = adapter block pointer. 4101 * 4102 * Returns: 4103 * 0 = success 4104 */ 4105 static int 4106 qla1280_abort_isp(struct scsi_qla_host *ha) 4107 { 4108 struct device_reg __iomem *reg = ha->iobase; 4109 struct srb *sp; 4110 int status = 0; 4111 int cnt; 4112 int bus; 4113 4114 ENTER("qla1280_abort_isp"); 4115 4116 if (ha->flags.abort_isp_active || !ha->flags.online) 4117 goto out; 4118 4119 ha->flags.abort_isp_active = 1; 4120 4121 /* Disable ISP interrupts. */ 4122 qla1280_disable_intrs(ha); 4123 WRT_REG_WORD(®->host_cmd, HC_PAUSE_RISC); 4124 RD_REG_WORD(®->id_l); 4125 4126 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n", 4127 ha->host_no); 4128 /* Dequeue all commands in outstanding command list. */ 4129 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 4130 struct scsi_cmnd *cmd; 4131 sp = ha->outstanding_cmds[cnt]; 4132 if (sp) { 4133 4134 cmd = sp->cmd; 4135 CMD_RESULT(cmd) = DID_RESET << 16; 4136 4137 sp->cmd = NULL; 4138 ha->outstanding_cmds[cnt] = NULL; 4139 4140 (*cmd->scsi_done)(cmd); 4141 4142 sp->flags = 0; 4143 } 4144 } 4145 4146 status = qla1280_load_firmware(ha); 4147 if (status) 4148 goto out; 4149 4150 /* Setup adapter based on NVRAM parameters. */ 4151 qla1280_nvram_config (ha); 4152 4153 status = qla1280_init_rings(ha); 4154 if (status) 4155 goto out; 4156 4157 /* Issue SCSI reset. 
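   Reset each SCSI bus on the adapter now that the firmware, NVRAM settings and rings have been reinitialized.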
*/ 4158 for (bus = 0; bus < ha->ports; bus++) 4159 qla1280_bus_reset(ha, bus); 4160 4161 ha->flags.abort_isp_active = 0; 4162 out: 4163 if (status) { 4164 printk(KERN_WARNING 4165 "qla1280: ISP error recovery failed, board disabled"); 4166 qla1280_reset_adapter(ha); 4167 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n"); 4168 } 4169 4170 LEAVE("qla1280_abort_isp"); 4171 return status; 4172 } 4173 4174 4175 /* 4176 * qla1280_debounce_register 4177 * Debounce register. 4178 * 4179 * Input: 4180 * port = register address. 4181 * 4182 * Returns: 4183 * register value. 4184 */ 4185 static u16 4186 qla1280_debounce_register(volatile u16 __iomem * addr) 4187 { 4188 volatile u16 ret; 4189 volatile u16 ret2; 4190 4191 ret = RD_REG_WORD(addr); 4192 ret2 = RD_REG_WORD(addr); 4193 4194 if (ret == ret2) 4195 return ret; 4196 4197 do { 4198 cpu_relax(); 4199 ret = RD_REG_WORD(addr); 4200 ret2 = RD_REG_WORD(addr); 4201 } while (ret != ret2); 4202 4203 return ret; 4204 } 4205 4206 4207 /************************************************************************ 4208 * qla1280_check_for_dead_scsi_bus * 4209 * * 4210 * This routine checks for a dead SCSI bus * 4211 ************************************************************************/ 4212 #define SET_SXP_BANK 0x0100 4213 #define SCSI_PHASE_INVALID 0x87FF 4214 static int 4215 qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus) 4216 { 4217 uint16_t config_reg, scsi_control; 4218 struct device_reg __iomem *reg = ha->iobase; 4219 4220 if (ha->bus_settings[bus].scsi_bus_dead) { 4221 WRT_REG_WORD(®->host_cmd, HC_PAUSE_RISC); 4222 config_reg = RD_REG_WORD(®->cfg_1); 4223 WRT_REG_WORD(®->cfg_1, SET_SXP_BANK); 4224 scsi_control = RD_REG_WORD(®->scsiControlPins); 4225 WRT_REG_WORD(®->cfg_1, config_reg); 4226 WRT_REG_WORD(®->host_cmd, HC_RELEASE_RISC); 4227 4228 if (scsi_control == SCSI_PHASE_INVALID) { 4229 ha->bus_settings[bus].scsi_bus_dead = 1; 4230 #if 0 4231 CMD_RESULT(cp) = DID_NO_CONNECT << 16; 4232 CMD_HANDLE(cp) = INVALID_HANDLE; 4233 /* ha->actthreads--; */ 4234 4235 (*(cp)->scsi_done)(cp); 4236 #endif 4237 return 1; /* bus is dead */ 4238 } else { 4239 ha->bus_settings[bus].scsi_bus_dead = 0; 4240 ha->bus_settings[bus].failed_reset_count = 0; 4241 } 4242 } 4243 return 0; /* bus is not dead */ 4244 } 4245 4246 static void 4247 qla1280_get_target_parameters(struct scsi_qla_host *ha, 4248 struct scsi_device *device) 4249 { 4250 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4251 int bus, target, lun; 4252 4253 bus = device->channel; 4254 target = device->id; 4255 lun = device->lun; 4256 4257 4258 mb[0] = MBC_GET_TARGET_PARAMETERS; 4259 mb[1] = (uint16_t) (bus ? 
target | BIT_7 : target); 4260 mb[1] <<= 8; 4261 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0, 4262 &mb[0]); 4263 4264 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun); 4265 4266 if (mb[3] != 0) { 4267 printk(" Sync: period %d, offset %d", 4268 (mb[3] & 0xff), (mb[3] >> 8)); 4269 if (mb[2] & BIT_13) 4270 printk(", Wide"); 4271 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2) 4272 printk(", DT"); 4273 } else 4274 printk(" Async"); 4275 4276 if (DEV_SIMPLE_TAGS(device)) 4277 printk(", Tagged queuing: depth %d", device->queue_depth); 4278 printk("\n"); 4279 } 4280 4281 4282 #if DEBUG_QLA1280 4283 static void 4284 __qla1280_dump_buffer(char *b, int size) 4285 { 4286 int cnt; 4287 u8 c; 4288 4289 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah " 4290 "Bh Ch Dh Eh Fh\n"); 4291 printk(KERN_DEBUG "---------------------------------------------" 4292 "------------------\n"); 4293 4294 for (cnt = 0; cnt < size;) { 4295 c = *b++; 4296 4297 printk("0x%02x", c); 4298 cnt++; 4299 if (!(cnt % 16)) 4300 printk("\n"); 4301 else 4302 printk(" "); 4303 } 4304 if (cnt % 16) 4305 printk("\n"); 4306 } 4307 4308 /************************************************************************** 4309 * ql1280_print_scsi_cmd 4310 * 4311 **************************************************************************/ 4312 static void 4313 __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd) 4314 { 4315 struct scsi_qla_host *ha; 4316 struct Scsi_Host *host = CMD_HOST(cmd); 4317 struct srb *sp; 4318 /* struct scatterlist *sg; */ 4319 4320 int i; 4321 ha = (struct scsi_qla_host *)host->hostdata; 4322 4323 sp = (struct srb *)CMD_SP(cmd); 4324 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd)); 4325 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n", 4326 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd), 4327 CMD_CDBLEN(cmd)); 4328 printk(" CDB = "); 4329 for (i = 0; i < cmd->cmd_len; i++) { 4330 printk("0x%02x ", cmd->cmnd[i]); 4331 } 4332 printk(" seg_cnt =%d\n", cmd->use_sg); 4333 printk(" request buffer=0x%p, request buffer len=0x%x\n", 4334 cmd->request_buffer, cmd->request_bufflen); 4335 /* if (cmd->use_sg) 4336 { 4337 sg = (struct scatterlist *) cmd->request_buffer; 4338 printk(" SG buffer: \n"); 4339 qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist))); 4340 } */ 4341 printk(" tag=%d, transfersize=0x%x \n", 4342 cmd->tag, cmd->transfersize); 4343 printk(" Pid=%li, SP=0x%p\n", cmd->pid, CMD_SP(cmd)); 4344 printk(" underflow size = 0x%x, direction=0x%x\n", 4345 cmd->underflow, cmd->sc_data_direction); 4346 } 4347 4348 /************************************************************************** 4349 * ql1280_dump_device 4350 * 4351 **************************************************************************/ 4352 static void 4353 ql1280_dump_device(struct scsi_qla_host *ha) 4354 { 4355 4356 struct scsi_cmnd *cp; 4357 struct srb *sp; 4358 int i; 4359 4360 printk(KERN_DEBUG "Outstanding Commands on controller:\n"); 4361 4362 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { 4363 if ((sp = ha->outstanding_cmds[i]) == NULL) 4364 continue; 4365 if ((cp = sp->cmd) == NULL) 4366 continue; 4367 qla1280_print_scsi_cmd(1, cp); 4368 } 4369 } 4370 #endif 4371 4372 4373 enum tokens { 4374 TOKEN_NVRAM, 4375 TOKEN_SYNC, 4376 TOKEN_WIDE, 4377 TOKEN_PPR, 4378 TOKEN_VERBOSE, 4379 TOKEN_DEBUG, 4380 }; 4381 4382 struct setup_tokens { 4383 char *token; 4384 int val; 4385 }; 4386 4387 static struct setup_tokens setup_token[] __initdata = 4388 { 4389 { 
"nvram", TOKEN_NVRAM }, 4390 { "sync", TOKEN_SYNC }, 4391 { "wide", TOKEN_WIDE }, 4392 { "ppr", TOKEN_PPR }, 4393 { "verbose", TOKEN_VERBOSE }, 4394 { "debug", TOKEN_DEBUG }, 4395 }; 4396 4397 4398 /************************************************************************** 4399 * qla1280_setup 4400 * 4401 * Handle boot parameters. This really needs to be changed so one 4402 * can specify per adapter parameters. 4403 **************************************************************************/ 4404 static int __init 4405 qla1280_setup(char *s) 4406 { 4407 char *cp, *ptr; 4408 unsigned long val; 4409 int toke; 4410 4411 cp = s; 4412 4413 while (cp && (ptr = strchr(cp, ':'))) { 4414 ptr++; 4415 if (!strcmp(ptr, "yes")) { 4416 val = 0x10000; 4417 ptr += 3; 4418 } else if (!strcmp(ptr, "no")) { 4419 val = 0; 4420 ptr += 2; 4421 } else 4422 val = simple_strtoul(ptr, &ptr, 0); 4423 4424 switch ((toke = qla1280_get_token(cp))) { 4425 case TOKEN_NVRAM: 4426 if (!val) 4427 driver_setup.no_nvram = 1; 4428 break; 4429 case TOKEN_SYNC: 4430 if (!val) 4431 driver_setup.no_sync = 1; 4432 else if (val != 0x10000) 4433 driver_setup.sync_mask = val; 4434 break; 4435 case TOKEN_WIDE: 4436 if (!val) 4437 driver_setup.no_wide = 1; 4438 else if (val != 0x10000) 4439 driver_setup.wide_mask = val; 4440 break; 4441 case TOKEN_PPR: 4442 if (!val) 4443 driver_setup.no_ppr = 1; 4444 else if (val != 0x10000) 4445 driver_setup.ppr_mask = val; 4446 break; 4447 case TOKEN_VERBOSE: 4448 qla1280_verbose = val; 4449 break; 4450 default: 4451 printk(KERN_INFO "qla1280: unknown boot option %s\n", 4452 cp); 4453 } 4454 4455 cp = strchr(ptr, ';'); 4456 if (cp) 4457 cp++; 4458 else { 4459 break; 4460 } 4461 } 4462 return 1; 4463 } 4464 4465 4466 static int 4467 qla1280_get_token(char *str) 4468 { 4469 char *sep; 4470 long ret = -1; 4471 int i, len; 4472 4473 len = sizeof(setup_token)/sizeof(struct setup_tokens); 4474 4475 sep = strchr(str, ':'); 4476 4477 if (sep) { 4478 for (i = 0; i < len; i++){ 4479 4480 if (!strncmp(setup_token[i].token, str, (sep - str))) { 4481 ret = setup_token[i].val; 4482 break; 4483 } 4484 } 4485 } 4486 4487 return ret; 4488 } 4489 4490 #if LINUX_VERSION_CODE >= 0x020600 4491 static struct scsi_host_template qla1280_driver_template = { 4492 .module = THIS_MODULE, 4493 .proc_name = "qla1280", 4494 .name = "Qlogic ISP 1280/12160", 4495 .info = qla1280_info, 4496 .slave_configure = qla1280_slave_configure, 4497 .queuecommand = qla1280_queuecommand, 4498 .eh_abort_handler = qla1280_eh_abort, 4499 .eh_device_reset_handler= qla1280_eh_device_reset, 4500 .eh_bus_reset_handler = qla1280_eh_bus_reset, 4501 .eh_host_reset_handler = qla1280_eh_adapter_reset, 4502 .bios_param = qla1280_biosparam, 4503 .can_queue = 0xfffff, 4504 .this_id = -1, 4505 .sg_tablesize = SG_ALL, 4506 .cmd_per_lun = 1, 4507 .use_clustering = ENABLE_CLUSTERING, 4508 }; 4509 #else 4510 static Scsi_Host_Template qla1280_driver_template = { 4511 .proc_name = "qla1280", 4512 .name = "Qlogic ISP 1280/12160", 4513 .detect = qla1280_detect, 4514 .release = qla1280_release, 4515 .info = qla1280_info, 4516 .queuecommand = qla1280_queuecommand, 4517 .eh_abort_handler = qla1280_eh_abort, 4518 .eh_device_reset_handler= qla1280_eh_device_reset, 4519 .eh_bus_reset_handler = qla1280_eh_bus_reset, 4520 .eh_host_reset_handler = qla1280_eh_adapter_reset, 4521 .bios_param = qla1280_biosparam_old, 4522 .can_queue = 0xfffff, 4523 .this_id = -1, 4524 .sg_tablesize = SG_ALL, 4525 .cmd_per_lun = 1, 4526 .use_clustering = ENABLE_CLUSTERING, 4527 .use_new_eh_code = 1, 
4528 }; 4529 #endif 4530 4531 static int __devinit 4532 qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 4533 { 4534 int devnum = id->driver_data; 4535 struct qla_boards *bdp = &ql1280_board_tbl[devnum]; 4536 struct Scsi_Host *host; 4537 struct scsi_qla_host *ha; 4538 int error = -ENODEV; 4539 4540 /* Bypass all AMI SUBSYS VENDOR IDs */ 4541 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) { 4542 printk(KERN_INFO 4543 "qla1280: Skipping AMI SubSys Vendor ID Chip\n"); 4544 goto error; 4545 } 4546 4547 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n", 4548 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn)); 4549 4550 if (pci_enable_device(pdev)) { 4551 printk(KERN_WARNING 4552 "qla1280: Failed to enabled pci device, aborting.\n"); 4553 goto error; 4554 } 4555 4556 pci_set_master(pdev); 4557 4558 error = -ENOMEM; 4559 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha)); 4560 if (!host) { 4561 printk(KERN_WARNING 4562 "qla1280: Failed to register host, aborting.\n"); 4563 goto error_disable_device; 4564 } 4565 4566 ha = (struct scsi_qla_host *)host->hostdata; 4567 memset(ha, 0, sizeof(struct scsi_qla_host)); 4568 4569 ha->pdev = pdev; 4570 ha->devnum = devnum; /* specifies microcode load address */ 4571 4572 #ifdef QLA_64BIT_PTR 4573 if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) { 4574 if (pci_set_dma_mask(ha->pdev, 0xffffffff)) { 4575 printk(KERN_WARNING "scsi(%li): Unable to set a " 4576 "suitable DMA mask - aborting\n", ha->host_no); 4577 error = -ENODEV; 4578 goto error_free_irq; 4579 } 4580 } else 4581 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n", 4582 ha->host_no); 4583 #else 4584 if (pci_set_dma_mask(ha->pdev, 0xffffffff)) { 4585 printk(KERN_WARNING "scsi(%li): Unable to set a " 4586 "suitable DMA mask - aborting\n", ha->host_no); 4587 error = -ENODEV; 4588 goto error_free_irq; 4589 } 4590 #endif 4591 4592 ha->request_ring = pci_alloc_consistent(ha->pdev, 4593 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)), 4594 &ha->request_dma); 4595 if (!ha->request_ring) { 4596 printk(KERN_INFO "qla1280: Failed to get request memory\n"); 4597 goto error_put_host; 4598 } 4599 4600 ha->response_ring = pci_alloc_consistent(ha->pdev, 4601 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)), 4602 &ha->response_dma); 4603 if (!ha->response_ring) { 4604 printk(KERN_INFO "qla1280: Failed to get response memory\n"); 4605 goto error_free_request_ring; 4606 } 4607 4608 ha->ports = bdp->numPorts; 4609 4610 ha->host = host; 4611 ha->host_no = host->host_no; 4612 4613 host->irq = pdev->irq; 4614 host->max_channel = bdp->numPorts - 1; 4615 host->max_lun = MAX_LUNS - 1; 4616 host->max_id = MAX_TARGETS; 4617 host->max_sectors = 1024; 4618 host->unique_id = host->host_no; 4619 4620 #if LINUX_VERSION_CODE < 0x020545 4621 host->select_queue_depths = qla1280_select_queue_depth; 4622 #endif 4623 4624 error = -ENODEV; 4625 4626 #if MEMORY_MAPPED_IO 4627 ha->mmpbase = ioremap(pci_resource_start(ha->pdev, 1), 4628 pci_resource_len(ha->pdev, 1)); 4629 if (!ha->mmpbase) { 4630 printk(KERN_INFO "qla1280: Unable to map I/O memory\n"); 4631 goto error_free_response_ring; 4632 } 4633 4634 host->base = (unsigned long)ha->mmpbase; 4635 ha->iobase = (struct device_reg __iomem *)ha->mmpbase; 4636 #else 4637 host->io_port = pci_resource_start(ha->pdev, 0); 4638 if (!request_region(host->io_port, 0xff, "qla1280")) { 4639 printk(KERN_INFO "qla1280: Failed to reserve i/o region " 4640 "0x%04lx-0x%04lx - already in use\n", 4641 host->io_port, host->io_port + 0xff); 4642 goto 
error_free_response_ring; 4643 } 4644 4645 ha->iobase = (struct device_reg *)host->io_port; 4646 #endif 4647 4648 INIT_LIST_HEAD(&ha->done_q); 4649 4650 /* Disable ISP interrupts. */ 4651 qla1280_disable_intrs(ha); 4652 4653 if (request_irq(pdev->irq, qla1280_intr_handler, SA_SHIRQ, 4654 "qla1280", ha)) { 4655 printk("qla1280 : Failed to reserve interrupt %d already " 4656 "in use\n", pdev->irq); 4657 goto error_release_region; 4658 } 4659 4660 /* load the F/W, read paramaters, and init the H/W */ 4661 if (qla1280_initialize_adapter(ha)) { 4662 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n"); 4663 goto error_free_irq; 4664 } 4665 4666 /* set our host ID (need to do something about our two IDs) */ 4667 host->this_id = ha->bus_settings[0].id; 4668 4669 pci_set_drvdata(pdev, host); 4670 4671 #if LINUX_VERSION_CODE >= 0x020600 4672 error = scsi_add_host(host, &pdev->dev); 4673 if (error) 4674 goto error_disable_adapter; 4675 scsi_scan_host(host); 4676 #else 4677 scsi_set_pci_device(host, pdev); 4678 #endif 4679 4680 return 0; 4681 4682 #if LINUX_VERSION_CODE >= 0x020600 4683 error_disable_adapter: 4684 qla1280_disable_intrs(ha); 4685 #endif 4686 error_free_irq: 4687 free_irq(pdev->irq, ha); 4688 error_release_region: 4689 #if MEMORY_MAPPED_IO 4690 iounmap(ha->mmpbase); 4691 #else 4692 release_region(host->io_port, 0xff); 4693 #endif 4694 error_free_response_ring: 4695 pci_free_consistent(ha->pdev, 4696 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)), 4697 ha->response_ring, ha->response_dma); 4698 error_free_request_ring: 4699 pci_free_consistent(ha->pdev, 4700 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)), 4701 ha->request_ring, ha->request_dma); 4702 error_put_host: 4703 scsi_host_put(host); 4704 error_disable_device: 4705 pci_disable_device(pdev); 4706 error: 4707 return error; 4708 } 4709 4710 4711 static void __devexit 4712 qla1280_remove_one(struct pci_dev *pdev) 4713 { 4714 struct Scsi_Host *host = pci_get_drvdata(pdev); 4715 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; 4716 4717 #if LINUX_VERSION_CODE >= 0x020600 4718 scsi_remove_host(host); 4719 #endif 4720 4721 qla1280_disable_intrs(ha); 4722 4723 free_irq(pdev->irq, ha); 4724 4725 #if MEMORY_MAPPED_IO 4726 iounmap(ha->mmpbase); 4727 #else 4728 release_region(host->io_port, 0xff); 4729 #endif 4730 4731 pci_free_consistent(ha->pdev, 4732 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))), 4733 ha->request_ring, ha->request_dma); 4734 pci_free_consistent(ha->pdev, 4735 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))), 4736 ha->response_ring, ha->response_dma); 4737 4738 pci_disable_device(pdev); 4739 4740 scsi_host_put(host); 4741 } 4742 4743 #if LINUX_VERSION_CODE >= 0x020600 4744 static struct pci_driver qla1280_pci_driver = { 4745 .name = "qla1280", 4746 .id_table = qla1280_pci_tbl, 4747 .probe = qla1280_probe_one, 4748 .remove = __devexit_p(qla1280_remove_one), 4749 }; 4750 4751 static int __init 4752 qla1280_init(void) 4753 { 4754 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) { 4755 printk(KERN_WARNING 4756 "qla1280: struct srb too big, aborting\n"); 4757 return -EINVAL; 4758 } 4759 4760 #ifdef MODULE 4761 /* 4762 * If we are called as a module, the qla1280 pointer may not be null 4763 * and it would point to our bootup string, just like on the lilo 4764 * command line. 
 If not NULL, process this config string with
	 * qla1280_setup().
	 *
	 * Boot time Options
	 * To add options at boot time add a line to your lilo.conf file like:
	 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
	 * which will result in the first four devices on the first two
	 * controllers being set to a tagged queue depth of 255.
	 */
	if (qla1280)
		qla1280_setup(qla1280);
#endif

	return pci_module_init(&qla1280_pci_driver);
}

static void __exit
qla1280_exit(void)
{
	pci_unregister_driver(&qla1280_pci_driver);
}

module_init(qla1280_init);
module_exit(qla1280_exit);

#else
# define driver_template qla1280_driver_template
# include "scsi_module.c"
#endif

MODULE_AUTHOR("Qlogic & Jes Sorensen");
MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA1280_VERSION);

/*
 * Overrides for Emacs so that we almost follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */