/*
   3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.

   Written By: Adam Radford <linuxraid@lsi.com>
   Modifications By: Tom Couch <linuxraid@lsi.com>

   Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
   Copyright (C) 2010 LSI Corporation.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   NO WARRANTY
   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
   solely responsible for determining the appropriateness of using and
   distributing the Program and assumes all risks associated with its
   exercise of rights under this Agreement, including but not limited to
   the risks and costs of program errors, damage to or loss of data,
   programs or equipment, and unavailability or interruption of operations.

   DISCLAIMER OF LIABILITY
   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

   Bugs/Comments/Suggestions should be mailed to:
   linuxraid@lsi.com

   For more information, goto:
   http://www.lsi.com

   Note: This version of the driver does not contain a bundled firmware
         image.

   History
   -------
   2.26.02.000 - Driver cleanup for kernel submission.
   2.26.02.001 - Replace schedule_timeout() calls with msleep().
   2.26.02.002 - Add support for PAE mode.
                 Add lun support.
                 Fix twa_remove() to free irq handler/unregister_chrdev()
                 before shutting down card.
                 Change to new 'change_queue_depth' api.
                 Fix 'handled=1' ISR usage, remove bogus IRQ check.
                 Remove un-needed eh_abort handler.
                 Add support for embedded firmware error strings.
   2.26.02.003 - Correctly handle single sgl's with use_sg=1.
   2.26.02.004 - Add support for 9550SX controllers.
   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
   2.26.02.006 - Fix 9550SX pchip reset timeout.
                 Add big endian support.
   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
   2.26.02.008 - Free irq handler in __twa_shutdown().
                 Serialize reset code.
                 Add support for 9650SE controllers.
   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
   2.26.02.010 - Add support for 9690SA controllers.
   2.26.02.011 - Increase max AENs drained to 256.
                 Add MSI support and "use_msi" module parameter.
                 Fix bug in twa_get_param() on 4GB+.
                 Use pci_resource_len() for ioremap().
   2.26.02.012 - Add power management support.
   2.26.02.013 - Fix bug in twa_load_sgl().
   2.26.02.014 - Force 60 second timeout default.
*/

#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include "3w-9xxx.h"

/* Globals */
#define TW_DRIVER_VERSION "2.26.02.014"
static DEFINE_MUTEX(twa_chrdev_mutex);
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;

/* Module parameters */
MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");

/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);

/* Functions */

/* Show some statistics about the card */
static ssize_t twa_show_stats(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
	unsigned long flags = 0;
	ssize_t len;

	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
		       "Current commands posted: %4d\n"
		       "Max commands posted: %4d\n"
		       "Current pending commands: %4d\n"
		       "Max pending commands: %4d\n"
		       "Last sgl length: %4d\n"
		       "Max sgl length: %4d\n"
		       "Last sector count: %4d\n"
		       "Max sector count: %4d\n"
		       "SCSI Host Resets: %4d\n"
		       "AEN's: %4d\n",
		       TW_DRIVER_VERSION,
		       tw_dev->posted_request_count,
		       tw_dev->max_posted_request_count,
		       tw_dev->pending_request_count,
		       tw_dev->max_pending_request_count,
		       tw_dev->sgl_entries,
		       tw_dev->max_sgl_entries,
		       tw_dev->sector_count,
		       tw_dev->max_sector_count,
		       tw_dev->num_resets,
		       tw_dev->aen_count);
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
	return len;
} /* End twa_show_stats() */

/* Create sysfs 'stats' entry */
static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name = "stats",
		.mode = S_IRUGO,
	},
	.show = twa_show_stats
};

/* Host attributes initializer */
static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};

/* File operations struct for character device */
static const struct file_operations twa_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = twa_chrdev_ioctl,
	.open = twa_chrdev_open,
	.release = NULL,
	.llseek = noop_llseek,
};

/* This function will complete an aen request from the isr */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int retval = 1;

	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
	tw_dev->posted_request_count--;
	aen = le16_to_cpu(header->status_block.error);
	full_command_packet = tw_dev->command_packet_virt[request_id];
	command_packet = &full_command_packet->command.oldcommand;

	/* First check for internal completion of set param for time sync */
	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
		/* Keep reading the queue in case there are more aen's */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}

	switch (aen) {
	case TW_AEN_QUEUE_EMPTY:
		/* Quit reading the queue if this is the last one */
		break;
	case TW_AEN_SYNC_TIME_WITH_HOST:
		twa_aen_sync_time(tw_dev, request_id);
		retval = 0;
		goto out;
	default:
		twa_aen_queue_event(tw_dev, header);

		/* If there are more aen's, keep reading the queue */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}
	retval = 0;
out2:
	tw_dev->state[request_id] = TW_S_COMPLETED;
	twa_free_request_id(tw_dev, request_id);
	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
out:
	return retval;
} /* End twa_aen_complete() */
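
/*
 * AEN retrieval note: the controller has no dedicated AEN mailbox; events are
 * fetched by posting internal REQUEST_SENSE commands whose single sg entry
 * points at the per-request generic buffer.  The drain loop below repeats this
 * until the firmware reports TW_AEN_QUEUE_EMPTY or TW_MAX_AEN_DRAIN events
 * have been read, and when no_check_reset is set the first TW_AEN_SOFT_RESET
 * is treated as the expected reset notification rather than queued.
 */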
/* This function will drain aen queue */
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
	int request_id = 0;
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	int finished = 0, count = 0;
	TW_Command_Full *full_command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int first_reset = 0, queue = 0, retval = 1;

	if (no_check_reset)
		first_reset = 0;
	else
		first_reset = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
		goto out;
	}

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	do {
		/* Send command to the board */
		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
			goto out;
		}

		/* Now poll for completion */
		if (twa_poll_response(tw_dev, request_id, 30)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
			tw_dev->posted_request_count--;
			goto out;
		}

		tw_dev->posted_request_count--;
		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
		aen = le16_to_cpu(header->status_block.error);
		queue = 0;
		count++;

		switch (aen) {
		case TW_AEN_QUEUE_EMPTY:
			if (first_reset != 1)
				goto out;
			else
				finished = 1;
			break;
		case TW_AEN_SOFT_RESET:
			if (first_reset == 0)
				first_reset = 1;
			else
				queue = 1;
			break;
		case TW_AEN_SYNC_TIME_WITH_HOST:
			break;
		default:
			queue = 1;
		}

		/* Now queue an event info */
		if (queue)
			twa_aen_queue_event(tw_dev, header);
	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));

	if (count == TW_MAX_AEN_DRAIN)
		goto out;

	retval = 0;
out:
	tw_dev->state[request_id] = TW_S_INITIAL;
	return retval;
} /* End twa_aen_drain_queue() */

/* This function will queue an event */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
	u32 local_time;
	struct timeval time;
	TW_Event *event;
	unsigned short aen;
	char host[16];
	char *error_str;

	tw_dev->aen_count++;

	/* Fill out event info */
	event = tw_dev->event_queue[tw_dev->error_index];

	/* Check for clobber */
	host[0] = '\0';
	if (tw_dev->host) {
		sprintf(host, " scsi%d:", tw_dev->host->host_no);
		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
			tw_dev->aen_clobber = 1;
	}

	aen = le16_to_cpu(header->status_block.error);
	memset(event, 0, sizeof(TW_Event));

	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
	do_gettimeofday(&time);
	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
	event->time_stamp_sec = local_time;
	event->aen_code = aen;
	event->retrieved = TW_AEN_NOT_RETRIEVED;
	event->sequence_id = tw_dev->error_sequence_id;
	tw_dev->error_sequence_id++;
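	/*
	 * Note: header->err_specific_desc can hold two consecutive
	 * NUL-terminated strings: the event's parameter data first, optionally
	 * followed by an embedded error string supplied by the firmware.
	 * error_str below points just past the first terminator so that both
	 * pieces can be copied into the event and used when printing the AEN.
	 */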
	/* Check for embedded error string */
	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);

	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
	event->parameter_len = strlen(header->err_specific_desc);
	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
	if (event->severity != TW_AEN_SEVERITY_DEBUG)
		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
		       host,
		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
		       header->err_specific_desc);
	else
		tw_dev->aen_count--;

	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
		tw_dev->event_queue_wrapped = 1;
	tw_dev->error_index = (tw_dev->error_index + 1) % TW_Q_LENGTH;
} /* End twa_aen_queue_event() */

/* This function will read the aen queue from the isr */
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	TW_Command_Full *full_command_packet;
	int retval = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command packet */
	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_aen_read_queue() */

/* This function will look up an AEN severity string */
static char *twa_aen_severity_lookup(unsigned char severity_code)
{
	char *retval = NULL;

	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
		goto out;

	retval = twa_aen_severity_table[severity_code];
out:
	return retval;
} /* End twa_aen_severity_lookup() */

/* This function will sync firmware time with the host time */
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
	u32 schedulertime;
	struct timeval utc;
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	u32 local_time;

	/* Fill out the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;
	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
	command_packet->request_id = request_id;
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);

	/* Setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
	param->parameter_size_bytes = cpu_to_le16(4);

	/* Convert system time in UTC to local time seconds since last
	   Sunday 12:00AM */
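	/*
	 * Note: 86400 is seconds per day and 604800 is seconds per week; the
	 * Unix epoch fell on a Thursday, so subtracting three days before
	 * taking the modulo yields seconds elapsed since the previous Sunday
	 * 12:00AM, which is the format the firmware's SchedulerTime expects.
	 */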
	do_gettimeofday(&utc);
	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
	schedulertime = local_time - (3 * 86400);
	schedulertime = cpu_to_le32(schedulertime % 604800);

	memcpy(param->data, &schedulertime, sizeof(u32));

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command */
	twa_post_command_packet(tw_dev, request_id, 1);
} /* End twa_aen_sync_time() */

/* This function will allocate memory and check if it is correctly aligned */
static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
{
	int i;
	dma_addr_t dma_handle;
	unsigned long *cpu_addr;
	int retval = 1;

	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
	if (!cpu_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
		goto out;
	}

	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
		goto out;
	}

	memset(cpu_addr, 0, size*TW_Q_LENGTH);

	for (i = 0; i < TW_Q_LENGTH; i++) {
		switch(which) {
		case 0:
			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
			break;
		case 1:
			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
			break;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_allocate_memory() */

/* This function will check the status register for unexpected bits */
static int twa_check_bits(u32 status_reg_value)
{
	int retval = 1;

	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
		goto out;
	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_check_bits() */
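
/*
 * Compatibility note: twa_check_srl() first offers the driver's own
 * SRL/branch/build values via an extended initconnection.  If the firmware
 * does not report TW_CTLR_FW_COMPATIBLE, it retries with the older "base"
 * values (TW_BASE_FW_*); only if that also fails is the load rejected with an
 * upgrade message.  The negotiated working_srl is consulted later, e.g. to
 * decide whether the firmware supports luns in twa_scsi_queue_lck().
 */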
/* This function will check the srl and decide if we are compatible */
static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
{
	int retval = 1;
	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
	u32 init_connect_result = 0;

	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
			       &fw_on_ctlr_build, &init_connect_result)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
		goto out;
	}

	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;

	/* Try base mode compatibility */
	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
				       TW_EXTENDED_INIT_CONNECT,
				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
				       &init_connect_result)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
			goto out;
		}
		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
			} else {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
			}
			goto out;
		}
		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
	}

	/* Load rest of compatibility struct */
	strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;

	retval = 0;
out:
	return retval;
} /* End twa_check_srl() */

/* This function handles ioctl for the character device */
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	long timeout;
	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
	dma_addr_t dma_handle;
	int request_id = 0;
	unsigned int sequence_id = 0;
	unsigned char event_index, start_index;
	TW_Ioctl_Driver_Command driver_command;
	TW_Ioctl_Buf_Apache *tw_ioctl;
	TW_Lock *tw_lock;
	TW_Command_Full *full_command_packet;
	TW_Compatibility_Info *tw_compat_info;
	TW_Event *event;
	struct timeval current_time;
	u32 current_time_ms;
	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
	int retval = TW_IOCTL_ERROR_OS_EFAULT;
	void __user *argp = (void __user *)arg;

	mutex_lock(&twa_chrdev_mutex);

	/* Only let one of these through at a time */
	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
		retval = TW_IOCTL_ERROR_OS_EINTR;
		goto out;
	}

	/* First copy down the driver command */
	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
		goto out2;

	/* Check data buffer size */
	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
		retval = TW_IOCTL_ERROR_OS_EINVAL;
		goto out2;
	}

	/* Hardware can only do multiple of 512 byte transfers */
	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
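	/*
	 * Note: the ioctl buffer is one coherent allocation holding the
	 * TW_Ioctl_Buf_Apache header immediately followed by the data area;
	 * data_buffer appears to be a one-byte placeholder at the end of the
	 * structure (see 3w-9xxx.h), which is why
	 * "sizeof(TW_Ioctl_Buf_Apache) - 1" shows up in the size calculations
	 * here and in twa_load_sgl().
	 */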
	/* Now allocate ioctl buf memory */
	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
				      data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		retval = TW_IOCTL_ERROR_OS_ENOMEM;
		goto out2;
	}

	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;

	/* Now copy down the entire ioctl */
	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
		goto out3;

	/* See which ioctl we are doing */
	switch (cmd) {
	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		twa_get_request_id(tw_dev, &request_id);

		/* Flag internal command */
		tw_dev->srb[request_id] = NULL;

		/* Flag chrdev ioctl */
		tw_dev->chrdev_request_id = request_id;

		full_command_packet = &tw_ioctl->firmware_command;

		/* Load request id and sglist for both command types */
		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);

		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));

		/* Now post the command packet to the controller */
		twa_post_command_packet(tw_dev, request_id, 1);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;

		/* Now wait for command to complete */
		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);

		/* We timed out, and didn't get an interrupt */
		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
			/* Now we need to reset the board */
			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
			       tw_dev->host->host_no, TW_DRIVER, 0x37,
			       cmd);
			retval = TW_IOCTL_ERROR_OS_EIO;
			twa_reset_device_extension(tw_dev);
			goto out3;
		}

		/* Now copy in the command packet response */
		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));

		/* Now complete the io */
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		tw_dev->posted_request_count--;
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
		break;
	case TW_IOCTL_GET_COMPATIBILITY_INFO:
		tw_ioctl->driver_command.status = 0;
		/* Copy compatibility struct into ioctl data buffer */
		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
		break;
	case TW_IOCTL_GET_LAST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
		}
		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_FIRST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
			event_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
			event_index = 0;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
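	/*
	 * Event retrieval note: AENs live in a circular queue of TW_Q_LENGTH
	 * TW_Event entries.  error_index points at the slot that will be
	 * overwritten next, event_queue_wrapped is set once the queue has
	 * cycled, and aen_clobber records that an unretrieved event was
	 * overwritten so the caller can be told events were lost.  The
	 * GET_NEXT/GET_PREVIOUS ioctls below walk the queue by sequence_id.
	 */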
	case TW_IOCTL_GET_NEXT_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_PREVIOUS_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
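	/*
	 * Lock note: GET_LOCK/RELEASE_LOCK implement a cooperative,
	 * time-limited lock for userspace management tools.  The lock is not
	 * enforced by the driver; it simply records an expiry time
	 * (ioctl_msec), and a later GET_LOCK succeeds once that time has
	 * passed or when force_flag is set, so a crashed holder cannot wedge
	 * other clients forever.
	 */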
	case TW_IOCTL_GET_LOCK:
		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
		do_gettimeofday(&current_time);
		current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);

		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
			tw_dev->ioctl_sem_lock = 1;
			tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
			tw_ioctl->driver_command.status = 0;
			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
			tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
		}
		break;
	case TW_IOCTL_RELEASE_LOCK:
		if (tw_dev->ioctl_sem_lock == 1) {
			tw_dev->ioctl_sem_lock = 0;
			tw_ioctl->driver_command.status = 0;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
		}
		break;
	default:
		retval = TW_IOCTL_ERROR_OS_ENOTTY;
		goto out3;
	}

	/* Now copy the entire response to userspace */
	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
		retval = 0;
out3:
	/* Now free ioctl buf memory */
	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
out2:
	mutex_unlock(&tw_dev->ioctl_lock);
out:
	mutex_unlock(&twa_chrdev_mutex);
	return retval;
} /* End twa_chrdev_ioctl() */

/* This function handles open for the character device */
/* NOTE that this function will race with remove. */
static int twa_chrdev_open(struct inode *inode, struct file *file)
{
	unsigned int minor_number;
	int retval = TW_IOCTL_ERROR_OS_ENODEV;

	minor_number = iminor(inode);
	if (minor_number >= twa_device_extension_count)
		goto out;
	retval = 0;
out:
	return retval;
} /* End twa_chrdev_open() */

/* This function will print readable messages from status register errors */
static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
{
	int retval = 1;

	/* Check for various error conditions and handle them appropriately */
	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_PCI_ABORT) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
	}

	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
		if (tw_dev->reset_print == 0) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
			tw_dev->reset_print = 1;
		}
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_decode_bits() */

/* This function will empty the response queue */
static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
{
	u32 status_reg_value, response_que_value;
	int count = 0, retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
		response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
		count++;
	}
	if (count == TW_MAX_RESPONSE_DRAIN)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue() */
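
/*
 * Note: on boards that do not use the original 9000-series PCI id (i.e. the
 * 9550SX and later controllers supported by this driver), the p-chip is
 * drained through the large response queue register until
 * TW_9550SX_DRAIN_COMPLETED is observed, then given 500 ms to settle; older
 * boards skip this step entirely.
 */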
/* This function will clear the pchip/response queue on 9550SX */
static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
{
	u32 response_que_value = 0;
	unsigned long before;
	int retval = 1;

	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
		before = jiffies;
		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
			msleep(1);
			if (time_after(jiffies, before + HZ * 30))
				goto out;
		}
		/* P-chip settle time */
		msleep(500);
		retval = 0;
	} else
		retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue_large() */

/* This function passes sense keys from firmware to scsi layer */
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
{
	TW_Command_Full *full_command_packet;
	unsigned short error;
	int retval = 1;
	char *error_str;

	full_command_packet = tw_dev->command_packet_virt[request_id];

	/* Check for embedded error string */
	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);

	/* Don't print error for Logical unit not supported during rollcall */
	error = le16_to_cpu(full_command_packet->header.status_block.error);
	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
		if (print_host)
			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       tw_dev->host->host_no,
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
			       full_command_packet->header.status_block.error,
			       error_str[0] == '\0' ?
			       twa_string_lookup(twa_error_table,
						 full_command_packet->header.status_block.error) : error_str,
			       full_command_packet->header.err_specific_desc);
		else
			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
			       full_command_packet->header.status_block.error,
			       error_str[0] == '\0' ?
			       twa_string_lookup(twa_error_table,
						 full_command_packet->header.status_block.error) : error_str,
			       full_command_packet->header.err_specific_desc);
	}

	if (copy_sense) {
		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
		retval = TW_ISR_DONT_RESULT;
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_fill_sense() */

/* This function will free up device extension resources */
static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
	if (tw_dev->command_packet_virt[0])
		pci_free_consistent(tw_dev->tw_pci_dev,
				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
				    tw_dev->command_packet_virt[0],
				    tw_dev->command_packet_phys[0]);

	if (tw_dev->generic_buffer_virt[0])
		pci_free_consistent(tw_dev->tw_pci_dev,
				    TW_SECTOR_SIZE*TW_Q_LENGTH,
				    tw_dev->generic_buffer_virt[0],
				    tw_dev->generic_buffer_phys[0]);

	kfree(tw_dev->event_queue[0]);
} /* End twa_free_device_extension() */

/* This function will free a request id */
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
{
	tw_dev->free_queue[tw_dev->free_tail] = request_id;
	tw_dev->state[request_id] = TW_S_FINISHED;
	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
} /* End twa_free_request_id() */

/* This function will get parameter table entries from the firmware */
static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	void *retval = NULL;

	/* Setup the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;

	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->request_id = request_id;
	command_packet->byte6_offset.block_count = cpu_to_le16(1);

	/* Now setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(table_id | 0x8000);
	param->parameter_id = cpu_to_le16(parameter_id);
	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);

	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);

	/* Post the command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30))
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
	else
		retval = (void *)&(param->data[0]);

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_get_param() */
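
/*
 * Request id bookkeeping: free_queue[] is a circular list of request ids
 * indexed by free_head/free_tail.  twa_get_request_id() hands out the id at
 * free_head and marks it TW_S_STARTED; posting moves it to TW_S_POSTED (or
 * TW_S_PENDING when the controller queue is full), completion marks it
 * TW_S_COMPLETED, and twa_free_request_id() returns it to the tail as
 * TW_S_FINISHED.
 */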
/* This function will assign an available request id */
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
{
	*request_id = tw_dev->free_queue[tw_dev->free_head];
	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
	tw_dev->state[*request_id] = TW_S_STARTED;
} /* End twa_get_request_id() */

/* This function will send an initconnection command to controller */
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result)
{
	TW_Command_Full *full_command_packet;
	TW_Initconnect *tw_initconnect;
	int request_id = 0, retval = 1;

	/* Initialize InitConnection command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	full_command_packet->header.header_desc.size_header = 128;

	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
	tw_initconnect->request_id = request_id;
	tw_initconnect->message_credits = cpu_to_le16(message_credits);
	tw_initconnect->features = set_features;

	/* Turn on 64-bit sgl support if we need to */
	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;

	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);

	if (set_features & TW_EXTENDED_INIT_CONNECT) {
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
	} else
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;

	/* Send command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
	} else {
		if (set_features & TW_EXTENDED_INIT_CONNECT) {
			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
			*init_connect_result = le32_to_cpu(tw_initconnect->result);
		}
		retval = 0;
	}

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_initconnection() */

/* This function will initialize the fields of a device extension */
static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
{
	int i, retval = 1;

	/* Initialize command packet buffers */
	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
		goto out;
	}

	/* Initialize generic buffer */
	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
		goto out;
	}
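	/*
	 * Note: only event_queue[0] is a real allocation; the loop further
	 * down points the remaining event_queue[] entries into the same
	 * kcalloc'd block, which is why twa_free_device_extension() frees
	 * just event_queue[0].
	 */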
	/* Allocate event info space */
	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
	if (!tw_dev->event_queue[0]) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
		goto out;
	}

	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}

	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->error_sequence_id = 1;
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	mutex_init(&tw_dev->ioctl_lock);
	init_waitqueue_head(&tw_dev->ioctl_wqueue);

	retval = 0;
out:
	return retval;
} /* End twa_initialize_device_extension() */

/* This function is the interrupt service routine */
static irqreturn_t twa_interrupt(int irq, void *dev_instance)
{
	int request_id, error = 0;
	u32 status_reg_value;
	TW_Response_Queue response_que;
	TW_Command_Full *full_command_packet;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
	int handled = 0;

	/* Get the per adapter lock */
	spin_lock(tw_dev->host->host_lock);

	/* Read the registers */
	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	/* Check if this is our interrupt, otherwise bail */
	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
		goto twa_interrupt_bail;

	handled = 1;

	/* If we are resetting, bail */
	if (test_bit(TW_IN_RESET, &tw_dev->flags))
		goto twa_interrupt_bail;

	/* Check controller for errors */
	if (twa_check_bits(status_reg_value)) {
		if (twa_decode_bits(tw_dev, status_reg_value)) {
			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
			goto twa_interrupt_bail;
		}
	}

	/* Handle host interrupt */
	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
		TW_CLEAR_HOST_INTERRUPT(tw_dev);

	/* Handle attention interrupt */
	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
			twa_get_request_id(tw_dev, &request_id);

			error = twa_aen_read_queue(tw_dev, request_id);
			if (error) {
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
			}
		}
	}

	/* Handle command interrupt */
	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
		TW_MASK_COMMAND_INTERRUPT(tw_dev);
		/* Drain as many pending commands as we can */
		while (tw_dev->pending_request_count > 0) {
			request_id = tw_dev->pending_queue[tw_dev->pending_head];
			if (tw_dev->state[request_id] != TW_S_PENDING) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
				goto twa_interrupt_bail;
			}
			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
				tw_dev->pending_request_count--;
			} else {
				/* If we get here, we will continue re-posting on the next command interrupt */
				break;
			}
		}
	}
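	/*
	 * Response handling note: each value read from the response queue
	 * register encodes the request id of a finished command.  A NULL
	 * srb[] entry means it was an internal command: either AEN work
	 * (handed to twa_aen_complete()) or a character-device ioctl
	 * (matched against chrdev_request_id and woken up); anything else
	 * completes a normal SCSI command.
	 */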
	/* Handle response interrupt */
	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {

		/* Drain the response queue from the board */
		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
			/* Complete the response */
			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
			request_id = TW_RESID_OUT(response_que.response_id);
			full_command_packet = tw_dev->command_packet_virt[request_id];
			error = 0;
			/* Check for command packet errors */
			if (full_command_packet->command.newcommand.status != 0) {
				if (tw_dev->srb[request_id] != NULL) {
					error = twa_fill_sense(tw_dev, request_id, 1, 1);
				} else {
					/* Skip ioctl error prints */
					if (request_id != tw_dev->chrdev_request_id) {
						error = twa_fill_sense(tw_dev, request_id, 0, 1);
					}
				}
			}

			/* Check for correct state */
			if (tw_dev->state[request_id] != TW_S_POSTED) {
				if (tw_dev->srb[request_id] != NULL) {
					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}

			/* Check for internal command completion */
			if (tw_dev->srb[request_id] == NULL) {
				if (request_id != tw_dev->chrdev_request_id) {
					if (twa_aen_complete(tw_dev, request_id))
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
				} else {
					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
					wake_up(&tw_dev->ioctl_wqueue);
				}
			} else {
				struct scsi_cmnd *cmd;

				cmd = tw_dev->srb[request_id];

				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
				/* If no error command was a success */
				if (error == 0) {
					cmd->result = (DID_OK << 16);
				}

				/* If error, command failed */
				if (error == 1) {
					/* Ask for a host reset */
					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
				}

				/* Report residual bytes for single sgl */
				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
				}

				/* Now complete the io */
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				tw_dev->posted_request_count--;
				tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
				twa_unmap_scsi_data(tw_dev, request_id);
			}

			/* Check for valid status after each drain */
			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
			if (twa_check_bits(status_reg_value)) {
				if (twa_decode_bits(tw_dev, status_reg_value)) {
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}
		}
	}

twa_interrupt_bail:
	spin_unlock(tw_dev->host->host_lock);
	return IRQ_RETVAL(handled);
} /* End twa_interrupt() */
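
/*
 * Note for twa_load_sgl(): pae is set when running a 32-bit kernel with a
 * 64-bit dma_addr_t.  In that configuration the code below shifts the
 * computed sgl location (on 9690SA) and grows oldcommand->size by one 32-bit
 * word before filling in the ioctl data buffer's address and length.
 */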
/* This function will load the request id and various sgls for ioctls */
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
	TW_Command *oldcommand;
	TW_Command_Apache *newcommand;
	TW_SG_Entry *sgl;
	unsigned int pae = 0;

	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
		pae = 1;

	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
		newcommand = &full_command_packet->command.newcommand;
		newcommand->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
		if (length) {
			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
			newcommand->sg_list[0].length = cpu_to_le32(length);
		}
		newcommand->sgl_entries__lunh =
			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
	} else {
		oldcommand = &full_command_packet->command.oldcommand;
		oldcommand->request_id = request_id;

		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
			/* Load the sg list */
			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
			else
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
			sgl->length = cpu_to_le32(length);

			oldcommand->size += pae;
		}
	}
} /* End twa_load_sgl() */

/* This function will perform a pci-dma mapping for a scatter gather list */
static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
{
	int use_sg;
	struct scsi_cmnd *cmd = tw_dev->srb[request_id];

	use_sg = scsi_dma_map(cmd);
	if (!use_sg)
		return 0;
	else if (use_sg < 0) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
		return 0;
	}

	cmd->SCp.phase = TW_PHASE_SGLIST;
	cmd->SCp.have_data_in = use_sg;

	return use_sg;
} /* End twa_map_scsi_sg_data() */

/* This function will poll for a response interrupt of a request */
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
{
	int retval = 1, found = 0, response_request_id;
	TW_Response_Queue response_queue;
	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];

	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		response_request_id = TW_RESID_OUT(response_queue.response_id);
		if (request_id != response_request_id) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
			goto out;
		}
		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
			if (full_command_packet->command.newcommand.status != 0) {
				/* bad response */
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		} else {
			if (full_command_packet->command.oldcommand.status != 0) {
				/* bad response */
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		}
	}

	if (found)
		retval = 0;
out:
	return retval;
} /* End twa_poll_response() */

/* This function will poll the status register for a flag */
static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
{
	u32 status_reg_value;
	unsigned long before;
	int retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
	before = jiffies;

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);
	while ((status_reg_value & flag) != flag) {
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

		if (twa_check_bits(status_reg_value))
			twa_decode_bits(tw_dev, status_reg_value);

		if (time_after(jiffies, before + HZ * seconds))
			goto out;

		msleep(50);
	}
	retval = 0;
out:
	return retval;
} /* End twa_poll_status() */

/* This function will poll the status register for disappearance of a flag */
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
{
	u32 status_reg_value;
	unsigned long before;
	int retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
	before = jiffies;

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	while ((status_reg_value & flag) != 0) {
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
		if (twa_check_bits(status_reg_value))
			twa_decode_bits(tw_dev, status_reg_value);

		if (time_after(jiffies, before + HZ * seconds))
			goto out;

		msleep(50);
	}
	retval = 0;
out:
	return retval;
} /* End twa_poll_status_gone() */

/* This function will attempt to post a command packet to the board */
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
{
	u32 status_reg_value;
	dma_addr_t command_que_value;
	int retval = 1;

	command_que_value = tw_dev->command_packet_phys[request_id];

	/* For 9650SE write low 4 bytes first */
	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
		command_que_value += TW_COMMAND_OFFSET;
		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
	}

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {

		/* Only pend internal driver commands */
		if (!internal) {
			retval = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		/* Couldn't post the command packet, so we do it later */
		if (tw_dev->state[request_id] != TW_S_PENDING) {
			tw_dev->state[request_id] = TW_S_PENDING;
			tw_dev->pending_request_count++;
			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
			}
			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
		}
		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
		goto out;
	} else {
		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
			/* Now write upper 4 bytes */
			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
		} else {
			if (sizeof(dma_addr_t) > 4) {
				command_que_value += TW_COMMAND_OFFSET;
				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
			} else {
				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
			}
		}
		tw_dev->state[request_id] = TW_S_POSTED;
		tw_dev->posted_request_count++;
		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_post_command_packet() */

/* This function will reset a device extension */
static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
{
	int i = 0;
	int retval = 1;
	unsigned long flags = 0;

	set_bit(TW_IN_RESET, &tw_dev->flags);
	TW_DISABLE_INTERRUPTS(tw_dev);
	TW_MASK_COMMAND_INTERRUPT(tw_dev);
	spin_lock_irqsave(tw_dev->host->host_lock, flags);

	/* Abort all requests that are in progress */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		if ((tw_dev->state[i] != TW_S_FINISHED) &&
		    (tw_dev->state[i] != TW_S_INITIAL) &&
		    (tw_dev->state[i] != TW_S_COMPLETED)) {
			if (tw_dev->srb[i]) {
				tw_dev->srb[i]->result = (DID_RESET << 16);
				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
				twa_unmap_scsi_data(tw_dev, i);
			}
		}
	}

	/* Reset queues and counts */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->posted_request_count = 0;
	tw_dev->pending_request_count = 0;
	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->reset_print = 0;

	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

	if (twa_reset_sequence(tw_dev, 1))
		goto out;

	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
	clear_bit(TW_IN_RESET, &tw_dev->flags);
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	retval = 0;
out:
	return retval;
} /* End twa_reset_device_extension() */

/* This function will reset a controller */
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
{
	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;

	while (tries < TW_MAX_RESET_TRIES) {
		if (do_soft_reset) {
			TW_SOFT_RESET(tw_dev);
			/* Clear pchip/response queue on 9550SX */
			if (twa_empty_response_queue_large(tw_dev)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
				do_soft_reset = 1;
				tries++;
				continue;
			}
		}

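		/*
		 * Every failure path below forces a soft reset on the next
		 * pass and bumps 'tries'; the loop gives up after
		 * TW_MAX_RESET_TRIES attempts.  When a soft reset was issued,
		 * the status poll below also waits for the attention
		 * interrupt bit, which the controller appears to raise once
		 * its microcontroller comes back up.
		 */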
		/* Make sure controller is in a good state */
		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* Empty response queue */
		if (twa_empty_response_queue(tw_dev)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		flashed = 0;

		/* Check for compatibility/flash */
		if (twa_check_srl(tw_dev, &flashed)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		} else {
			if (flashed) {
				tries++;
				continue;
			}
		}

		/* Drain the AEN queue */
		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* If we got here, controller is in a good state */
		retval = 0;
		goto out;
	}
out:
	return retval;
} /* End twa_reset_sequence() */

/* This function returns unit geometry in cylinders/heads/sectors */
static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads, sectors, cylinders;
	TW_Device_Extension *tw_dev;

	tw_dev = (TW_Device_Extension *)sdev->host->hostdata;

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		cylinders = sector_div(capacity, heads * sectors);
	} else {
		heads = 64;
		sectors = 32;
		cylinders = sector_div(capacity, heads * sectors);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
} /* End twa_scsi_biosparam() */

/* This is the new scsi eh reset function */
static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
{
	TW_Device_Extension *tw_dev = NULL;
	int retval = FAILED;

	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	tw_dev->num_resets++;

	sdev_printk(KERN_WARNING, SCpnt->device,
		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);

	/* Make sure we are not issuing an ioctl or resetting from ioctl */
	mutex_lock(&tw_dev->ioctl_lock);

	/* Now reset the card and some of the device extension data */
	if (twa_reset_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
		goto out;
	}

	retval = SUCCESS;
out:
	mutex_unlock(&tw_dev->ioctl_lock);
	return retval;
} /* End twa_scsi_eh_reset() */

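/*
 * Serialization note: twa_scsi_eh_reset() holds ioctl_lock across the
 * reset, so a host reset never races an in-flight character-device ioctl,
 * while twa_scsi_queue_lck() below simply returns SCSI_MLQUEUE_HOST_BUSY
 * for as long as TW_IN_RESET is set.
 */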
/* This is the main scsi queue function to handle scsi opcodes */
static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	int request_id, retval;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	/* If we are resetting due to timed out ioctl, report as busy */
	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* Check if this FW supports luns */
	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
		SCpnt->result = (DID_BAD_TARGET << 16);
		done(SCpnt);
		retval = 0;
		goto out;
	}

	/* Save done function into scsi_cmnd struct */
	SCpnt->scsi_done = done;

	/* Get a free request id */
	twa_get_request_id(tw_dev, &request_id);

	/* Save the scsi command for use by the ISR */
	tw_dev->srb[request_id] = SCpnt;

	/* Initialize phase to zero */
	SCpnt->SCp.phase = TW_PHASE_INITIAL;

	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
	switch (retval) {
	case SCSI_MLQUEUE_HOST_BUSY:
		twa_free_request_id(tw_dev, request_id);
		twa_unmap_scsi_data(tw_dev, request_id);
		break;
	case 1:
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		twa_unmap_scsi_data(tw_dev, request_id);
		SCpnt->result = (DID_ERROR << 16);
		done(SCpnt);
		retval = 0;
	}
out:
	return retval;
} /* End twa_scsi_queue() */

static DEF_SCSI_QCMD(twa_scsi_queue)

/* This function hands scsi cdb's to the firmware */
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
{
	TW_Command_Full *full_command_packet;
	TW_Command_Apache *command_packet;
	u32 num_sectors = 0x0;
	int i, sg_count;
	struct scsi_cmnd *srb = NULL;
	struct scatterlist *sglist = NULL, *sg;
	int retval = 1;

	if (tw_dev->srb[request_id]) {
		srb = tw_dev->srb[request_id];
		if (scsi_sglist(srb))
			sglist = scsi_sglist(srb);
	}

	/* Initialize command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	full_command_packet->header.header_desc.size_header = 128;
	full_command_packet->header.status_block.error = 0;
	full_command_packet->header.status_block.severity__reserved = 0;

	command_packet = &full_command_packet->command.newcommand;
	command_packet->status = 0;
	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);

	/* We forced 16 byte cdb use earlier */
	if (!cdb)
		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
	else
		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);

	if (srb) {
		command_packet->unit = srb->device->id;
		command_packet->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
	} else {
		command_packet->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
		command_packet->unit = 0;
	}

	command_packet->sgl_offset = 16;

	if (!sglistarg) {
		/* Map sglist from scsi layer to cmd packet */

		if (scsi_sg_count(srb)) {
			if ((scsi_sg_count(srb) == 1) &&
			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
				if (srb->sc_data_direction == DMA_TO_DEVICE ||
				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
					scsi_sg_copy_to_buffer(srb,
						tw_dev->generic_buffer_virt[request_id],
						TW_SECTOR_SIZE);
				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
			} else {
				sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
				if (sg_count == 0)
					goto out;

				scsi_for_each_sg(srb, sg, sg_count, i) {
					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
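						/*
						 * SG addresses are expected to be
						 * TW_ALIGNMENT_9000_SGL aligned; fail the
						 * command rather than post a misaligned
						 * entry to the firmware.
						 */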
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
						goto out;
					}
				}
			}
			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
		}
	} else {
		/* Internal cdb post */
		for (i = 0; i < use_sg; i++) {
			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
				goto out;
			}
		}
		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
	}

	if (srb) {
		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
			num_sectors = (u32)srb->cmnd[4];

		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
	}

	/* Update sector statistic */
	tw_dev->sector_count = num_sectors;
	if (tw_dev->sector_count > tw_dev->max_sector_count)
		tw_dev->max_sector_count = tw_dev->sector_count;

	/* Update SG statistics */
	if (srb) {
		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
	}

	/* Now post the command to the board */
	if (srb) {
		retval = twa_post_command_packet(tw_dev, request_id, 0);
	} else {
		twa_post_command_packet(tw_dev, request_id, 1);
		retval = 0;
	}
out:
	return retval;
} /* End twa_scsiop_execute_scsi() */

/* This function completes an execute scsi operation */
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
{
	struct scsi_cmnd *cmd = tw_dev->srb[request_id];

	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
		if (scsi_sg_count(cmd) == 1) {
			void *buf = tw_dev->generic_buffer_virt[request_id];

			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
		}
	}
} /* End twa_scsiop_execute_scsi_complete() */

/* This function tells the controller to shut down */
static void __twa_shutdown(TW_Device_Extension *tw_dev)
{
	/* Disable interrupts */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Free up the IRQ */
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
	} else {
		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
	}

	/* Clear all interrupts just before exit */
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
} /* End __twa_shutdown() */

/* Wrapper for __twa_shutdown */
static void twa_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	__twa_shutdown(tw_dev);
} /* End twa_shutdown() */

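/*
 * Both the PCI .shutdown hook above and twa_remove() funnel through
 * __twa_shutdown(); the suspend path issues the same "connection is
 * closing" init-connection message inline rather than reusing it.
 */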
/* This function will look up a string */
static char *twa_string_lookup(twa_message_type *table, unsigned int code)
{
	int index;

	for (index = 0; ((code != table[index].code) &&
		(table[index].text != (char *)0)); index++);
	return(table[index].text);
} /* End twa_string_lookup() */

/* This function will perform a pci-dma unmap */
static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
{
	struct scsi_cmnd *cmd = tw_dev->srb[request_id];

	if (cmd->SCp.phase == TW_PHASE_SGLIST)
		scsi_dma_unmap(cmd);
} /* End twa_unmap_scsi_data() */

/* This function gets called when a disk is coming on-line */
static int twa_slave_configure(struct scsi_device *sdev)
{
	/* Force 60 second timeout */
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

	return 0;
} /* End twa_slave_configure() */

/* scsi_host_template initializer */
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "3ware 9000 Storage Controller",
	.queuecommand		= twa_scsi_queue,
	.eh_host_reset_handler	= twa_scsi_eh_reset,
	.bios_param		= twa_scsi_biosparam,
	.change_queue_depth	= scsi_change_queue_depth,
	.can_queue		= TW_Q_LENGTH-2,
	.slave_configure	= twa_slave_configure,
	.this_id		= -1,
	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
	.max_sectors		= TW_MAX_SECTORS,
	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= twa_host_attrs,
	.emulated		= 1,
	.no_write_same		= 1,
};

/* This function will probe and initialize a card */
static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host = NULL;
	TW_Device_Extension *tw_dev;
	unsigned long mem_addr, mem_len;
	int retval = -ENODEV;

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
			retval = -ENODEV;
			goto out_disable_device;
		}

	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
	if (!host) {
		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
		retval = -ENOMEM;
		goto out_disable_device;
	}
	tw_dev = (TW_Device_Extension *)host->hostdata;

	/* Save values to device extension */
	tw_dev->host = host;
	tw_dev->tw_pci_dev = pdev;

	if (twa_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
		goto out_free_device_extension;
	}

	/* Request IO regions */
	retval = pci_request_regions(pdev, "3w-9xxx");
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
		goto out_free_device_extension;
	}

	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
		mem_addr = pci_resource_start(pdev, 1);
		mem_len = pci_resource_len(pdev, 1);
	} else {
		mem_addr = pci_resource_start(pdev, 2);
		mem_len = pci_resource_len(pdev, 2);
	}

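	/*
	 * The original 9000-series boards expose their register window in
	 * BAR1, while the 9550SX/9650SE/9690SA parts listed in twa_pci_tbl
	 * use BAR2; mem_addr/mem_len now describe whichever window applies.
	 */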
	/* Save base address */
	tw_dev->base_addr = ioremap(mem_addr, mem_len);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
		goto out_release_mem_region;
	}

	/* Disable interrupts on the card */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0))
		goto out_iounmap;

	/* Set host specific parameters */
	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
		host->max_id = TW_MAX_UNITS_9650SE;
	else
		host->max_id = TW_MAX_UNITS;

	host->max_cmd_len = TW_MAX_CDB_LEN;

	/* Channels aren't supported by adapter */
	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
	host->max_channel = 0;

	/* Register the card with the kernel SCSI layer */
	retval = scsi_add_host(host, &pdev->dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
		goto out_iounmap;
	}

	pci_set_drvdata(pdev, host);

	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
	       host->host_no, mem_addr, pdev->irq);
	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
	       host->host_no,
	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));

	/* Try to enable MSI */
	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
	    !pci_enable_msi(pdev))
		set_bit(TW_USING_MSI, &tw_dev->flags);

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
		goto out_remove_host;
	}

	twa_device_extension_list[twa_device_extension_count] = tw_dev;
	twa_device_extension_count++;

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	/* Finally, scan the host */
	scsi_scan_host(host);

	if (twa_major == -1) {
		if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
	}
	return 0;

out_remove_host:
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);
	scsi_remove_host(host);
out_iounmap:
	iounmap(tw_dev->base_addr);
out_release_mem_region:
	pci_release_regions(pdev);
out_free_device_extension:
	twa_free_device_extension(tw_dev);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return retval;
} /* End twa_probe() */

/* This function is called to remove a device */
static void twa_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	scsi_remove_host(tw_dev->host);

	/* Unregister character device */
	if (twa_major >= 0) {
		unregister_chrdev(twa_major, "twa");
		twa_major = -1;
	}

	/* Shutdown the card */
	__twa_shutdown(tw_dev);

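	/*
	 * __twa_shutdown() has already freed the IRQ and told the firmware
	 * that the connection is closing; everything below only releases
	 * host-side resources.
	 */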
	/* Disable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Free IO remapping */
	iounmap(tw_dev->base_addr);

	/* Free up the mem region */
	pci_release_regions(pdev);

	/* Free up device extension resources */
	twa_free_device_extension(tw_dev);

	scsi_host_put(tw_dev->host);
	pci_disable_device(pdev);
	twa_device_extension_count--;
} /* End twa_remove() */

#ifdef CONFIG_PM
/* This function is called on PCI suspend */
static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);

	TW_DISABLE_INTERRUPTS(tw_dev);
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
	} else {
		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
	}
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
} /* End twa_suspend() */

/* This function is called on PCI resume */
static int twa_resume(struct pci_dev *pdev)
{
	int retval = 0;
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
		return retval;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
			retval = -ENODEV;
			goto out_disable_device;
		}

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0)) {
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now enable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_enable_msi(pdev);

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
	return 0;

out_disable_device:
	scsi_remove_host(host);
	pci_disable_device(pdev);

	return retval;
} /* End twa_resume() */
#endif

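/*
 * Usage note: MSI can be requested at load time with
 * "modprobe 3w-9xxx use_msi=1"; twa_probe() only honors the request on
 * controllers other than the original PCI_DEVICE_ID_3WARE_9000 parts.
 */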
/* PCI Devices supported by this driver */
static struct pci_device_id twa_pci_tbl[] = {
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ }
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);

/* pci_driver initializer */
static struct pci_driver twa_driver = {
	.name		= "3w-9xxx",
	.id_table	= twa_pci_tbl,
	.probe		= twa_probe,
	.remove		= twa_remove,
#ifdef CONFIG_PM
	.suspend	= twa_suspend,
	.resume		= twa_resume,
#endif
	.shutdown	= twa_shutdown
};

/* This function is called on driver initialization */
static int __init twa_init(void)
{
	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);

	return pci_register_driver(&twa_driver);
} /* End twa_init() */

/* This function is called on driver exit */
static void __exit twa_exit(void)
{
	pci_unregister_driver(&twa_driver);
} /* End twa_exit() */

module_init(twa_init);
module_exit(twa_exit);