// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2002  LSI Logic Corporation.
 *
 * Copyright (c) 2002  Red Hat, Inc. All rights reserved.
 *	- fixes
 *	- speed-ups (list handling fixes, issued_list, optimizations.)
 *	- lots of cleanups.
 *
 * Copyright (c) 2003  Christoph Hellwig <hch@lst.de>
 *	- new-style, hotplug-aware pci probing and scsi registration
 *
 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
 *					<Seokmann.Ju@lsil.com>
 *
 * Description: Linux device driver for LSI Logic MegaRAID controller
 *
 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
 *			  518, 520, 531, 532
 *
 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
 * and others. Please send updates to the mailing list
 * linux-scsi@vger.kernel.org .
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsicam.h>

#include "scsi.h"
#include <scsi/scsi_host.h>

#include "megaraid.h"

#define MEGARAID_MODULE_VERSION "2.00.4"

MODULE_AUTHOR ("sju@lsil.com");
MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
MODULE_LICENSE ("GPL");
MODULE_VERSION(MEGARAID_MODULE_VERSION);

static DEFINE_MUTEX(megadev_mutex);
static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
module_param(max_cmd_per_lun, uint, 0);
MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");

static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
module_param(max_sectors_per_io, ushort, 0);
MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");


static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
module_param(max_mbox_busy_wait, ushort, 0);
MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");

#define RDINDOOR(adapter)	readl((adapter)->mmio_base + 0x20)
#define RDOUTDOOR(adapter)	readl((adapter)->mmio_base + 0x2C)
#define WRINDOOR(adapter,value)	 writel(value, (adapter)->mmio_base + 0x20)
#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)

/*
 * Global variables
 */

static int hba_count;
static adapter_t *hba_soft_state[MAX_CONTROLLERS];
static struct proc_dir_entry *mega_proc_dir_entry;

/* For controller re-ordering */
static struct mega_hbas mega_hbas[MAX_CONTROLLERS];

static long
megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);

/*
 * The File Operations structure for the serial/ioctl interface of the driver
 */
static const struct file_operations megadev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= megadev_unlocked_ioctl,
	.open		= megadev_open,
	.llseek		= noop_llseek,
};

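/*
 * Usage note (editor's illustration, not part of the original source): the
 * three module parameters above use permission 0, so they are load-time
 * only and are set on the modprobe/insmod command line rather than through
 * sysfs.  Assuming this legacy driver builds as "megaraid", a hypothetical
 * invocation would be:
 *
 *	modprobe megaraid max_cmd_per_lun=16 max_sectors_per_io=64 \
 *		max_mbox_busy_wait=20
 *
 * The defaults come from DEF_CMD_PER_LUN, MAX_SECTORS_PER_IO and
 * MBOX_BUSY_WAIT in megaraid.h.
 */
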
/*
 * Array of structures for storing the information about the controllers.
 * This information is sent to the user level applications, when they do an
 * ioctl for this information.
 */
static struct mcontroller mcontroller[MAX_CONTROLLERS];

/* The current driver version */
static u32 driver_ver = 0x02000000;

/* major number used by the device for character interface */
static int major;

#define IS_RAID_CH(hba, ch)	(((hba)->mega_ch_class >> (ch)) & 0x01)


/*
 * Debug variable to print some diagnostic messages
 */
static int trace_level;

/**
 * mega_setup_mailbox()
 * @adapter: pointer to our soft state
 *
 * Allocates 8-byte aligned memory for the handshake mailbox.
 */
static int
mega_setup_mailbox(adapter_t *adapter)
{
	unsigned long	align;

	adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
						 sizeof(mbox64_t),
						 &adapter->una_mbox64_dma,
						 GFP_KERNEL);

	if( !adapter->una_mbox64 ) return -1;

	adapter->mbox = &adapter->una_mbox64->mbox;

	adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
			(~0UL ^ 0xFUL));

	adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);

	align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);

	adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;

	/*
	 * Register the mailbox if the controller is an io-mapped controller
	 */
	if( adapter->flag & BOARD_IOMAP ) {

		outb(adapter->mbox_dma & 0xFF,
				adapter->host->io_port + MBOX_PORT0);

		outb((adapter->mbox_dma >> 8) & 0xFF,
				adapter->host->io_port + MBOX_PORT1);

		outb((adapter->mbox_dma >> 16) & 0xFF,
				adapter->host->io_port + MBOX_PORT2);

		outb((adapter->mbox_dma >> 24) & 0xFF,
				adapter->host->io_port + MBOX_PORT3);

		outb(ENABLE_MBOX_BYTE,
				adapter->host->io_port + ENABLE_MBOX_REGION);

		irq_ack(adapter);

		irq_enable(adapter);
	}

	return 0;
}


/*
 * mega_query_adapter()
 * @adapter - pointer to our soft state
 *
 * Issue the adapter inquiry commands to the controller and find out
 * information and parameters about the attached devices.
 */
static int
mega_query_adapter(adapter_t *adapter)
{
	dma_addr_t	prod_info_dma_handle;
	mega_inquiry3	*inquiry3;
	u8	raw_mbox[sizeof(struct mbox_out)];
	mbox_t	*mbox;
	int	retval;

	/* Initialize adapter inquiry mailbox */

	mbox = (mbox_t *)raw_mbox;

	memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
	memset(&mbox->m_out, 0, sizeof(raw_mbox));

	/*
	 * Try to issue the Inquiry3 command; if it fails, issue the
	 * MEGA_MBOXCMD_ADAPTERINQ command and update the enquiry3 structure.
	 */
	mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;

	inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;

	raw_mbox[0] = FC_NEW_CONFIG;		/* i.e. mbox->cmd=0xA1 */
	raw_mbox[2] = NC_SUBOP_ENQUIRY3;	/* i.e. 0x0F */
	raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;	/* i.e. 0x02 */

	/* Issue a blocking command to the card */
	if ((retval = issue_scb_block(adapter, raw_mbox))) {
		/* the adapter does not support 40ld */

		mraid_ext_inquiry	*ext_inq;
		mraid_inquiry		*inq;
		dma_addr_t		dma_handle;

		ext_inq = dma_alloc_coherent(&adapter->dev->dev,
					     sizeof(mraid_ext_inquiry),
					     &dma_handle, GFP_KERNEL);

		if( ext_inq == NULL ) return -1;

		inq = &ext_inq->raid_inq;

		mbox->m_out.xferaddr = (u32)dma_handle;

		/* issue old 0x04 command to adapter */
		mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;

		issue_scb_block(adapter, raw_mbox);

		/*
		 * update Enquiry3 and ProductInfo structures with
		 * mraid_inquiry structure
		 */
		mega_8_to_40ld(inq, inquiry3,
				(mega_product_info *)&adapter->product_info);

		dma_free_coherent(&adapter->dev->dev,
				  sizeof(mraid_ext_inquiry), ext_inq,
				  dma_handle);

	} else {		/* adapter supports 40ld */
		adapter->flag |= BOARD_40LD;

		/*
		 * get product_info, which is static information and will be
		 * unchanged
		 */
		prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
					(void *)&adapter->product_info,
					sizeof(mega_product_info),
					DMA_FROM_DEVICE);

		mbox->m_out.xferaddr = prod_info_dma_handle;

		raw_mbox[0] = FC_NEW_CONFIG;		/* i.e. mbox->cmd=0xA1 */
		raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;	/* i.e. 0x0E */

		if ((retval = issue_scb_block(adapter, raw_mbox)))
			dev_warn(&adapter->dev->dev,
				"Product_info cmd failed with error: %d\n",
				retval);

		dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
				 sizeof(mega_product_info), DMA_FROM_DEVICE);
	}


	/*
	 * kernel scans the channels from 0 to <= max_channel
	 */
	adapter->host->max_channel =
		adapter->product_info.nchannels + NVIRT_CHAN -1;

	adapter->host->max_id = 16;	/* max targets per channel */

	adapter->host->max_lun = 7;	/* Up to 7 luns for non disk devices */

	adapter->host->cmd_per_lun = max_cmd_per_lun;

	adapter->numldrv = inquiry3->num_ldrv;

	adapter->max_cmds = adapter->product_info.max_commands;

	if(adapter->max_cmds > MAX_COMMANDS)
		adapter->max_cmds = MAX_COMMANDS;

	adapter->host->can_queue = adapter->max_cmds - 1;

	/*
	 * Get the maximum number of scatter-gather elements supported by this
	 * firmware
	 */
	mega_get_max_sgl(adapter);

	adapter->host->sg_tablesize = adapter->sglen;

	/* use HP firmware and bios version encoding
	   Note: fw_version[0|1] and bios_version[0|1] were originally shifted
	   right 8 bits making them zero. This 0 value was hardcoded to fix
	   sparse warnings. */
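	/*
	 * Editor's note (illustrative, the byte values are hypothetical):
	 * with the HP encoding below, fw_version[] bytes of { 0x03, 0x01,
	 * 'H' } would be rendered by the "%c%d%d.%d%d" format as "H01.03" --
	 * the letter from byte 2, a hardcoded 0, then the low nibbles of
	 * bytes 1 and 0.  Non-HP boards simply copy the first four bytes of
	 * the firmware/BIOS version strings verbatim.
	 */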
	if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
		snprintf(adapter->fw_version, sizeof(adapter->fw_version),
			 "%c%d%d.%d%d",
			 adapter->product_info.fw_version[2],
			 0,
			 adapter->product_info.fw_version[1] & 0x0f,
			 0,
			 adapter->product_info.fw_version[0] & 0x0f);
		snprintf(adapter->bios_version, sizeof(adapter->fw_version),
			 "%c%d%d.%d%d",
			 adapter->product_info.bios_version[2],
			 0,
			 adapter->product_info.bios_version[1] & 0x0f,
			 0,
			 adapter->product_info.bios_version[0] & 0x0f);
	} else {
		memcpy(adapter->fw_version,
		       (char *)adapter->product_info.fw_version, 4);
		adapter->fw_version[4] = 0;

		memcpy(adapter->bios_version,
		       (char *)adapter->product_info.bios_version, 4);

		adapter->bios_version[4] = 0;
	}

	dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
		   adapter->fw_version, adapter->bios_version,
		   adapter->numldrv);

	/*
	 * Do we support extended (>10 bytes) cdbs
	 */
	adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
	if (adapter->support_ext_cdb)
		dev_notice(&adapter->dev->dev, "supports extended CDBs\n");


	return 0;
}

/**
 * mega_runpendq()
 * @adapter: pointer to our soft state
 *
 * Runs through the list of pending requests.
 */
static inline void
mega_runpendq(adapter_t *adapter)
{
	if(!list_empty(&adapter->pending_list))
		__mega_runpendq(adapter);
}

/*
 * megaraid_queue()
 * @scmd - Issue this scsi command
 * @done - the callback hook into the scsi mid-layer
 *
 * The command queuing entry point for the mid-layer.
 */
static int
megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
{
	adapter_t	*adapter;
	scb_t	*scb;
	int	busy=0;
	unsigned long flags;

	adapter = (adapter_t *)scmd->device->host->hostdata;

	scmd->scsi_done = done;


	/*
	 * Allocate and build a SCB request.
	 * The busy flag will be set if mega_build_cmd() could not allocate
	 * a scb; we return a non-zero status in that case.
	 * NOTE: scb can be NULL even though certain commands completed
	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; we return 0
	 * in that case.
	 */

	spin_lock_irqsave(&adapter->lock, flags);
	scb = mega_build_cmd(adapter, scmd, &busy);
	if (!scb)
		goto out;

	scb->state |= SCB_PENDQ;
	list_add_tail(&scb->list, &adapter->pending_list);

	/*
	 * Check if the HBA is in quiescent state, e.g., during a
	 * delete logical drive operation. If it is, don't run
	 * the pending_list.
	 */
	if (atomic_read(&adapter->quiescent) == 0)
		mega_runpendq(adapter);

	busy = 0;
 out:
	spin_unlock_irqrestore(&adapter->lock, flags);
	return busy;
}

static DEF_SCSI_QCMD(megaraid_queue)

/**
 * mega_allocate_scb()
 * @adapter: pointer to our soft state
 * @cmd: scsi command from the mid-layer
 *
 * Allocate a SCB structure. This is the central structure for controller
 * commands.
 */
static inline scb_t *
mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
{
	struct list_head *head = &adapter->free_list;
	scb_t	*scb;

	/* Unlink command from Free List */
	if( !list_empty(head) ) {

		scb = list_entry(head->next, scb_t, list);

		list_del_init(head->next);

		scb->state = SCB_ACTIVE;
		scb->cmd = cmd;
		scb->dma_type = MEGA_DMA_TYPE_NONE;

		return scb;
	}

	return NULL;
}

/**
 * mega_get_ldrv_num()
 * @adapter: pointer to our soft state
 * @cmd: scsi mid layer command
 * @channel: channel on the controller
 *
 * Calculate the logical drive number based on the information in the scsi
 * command and the channel number.
 */
static inline int
mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
{
	int	tgt;
	int	ldrv_num;

	tgt = cmd->device->id;

	if ( tgt > adapter->this_id )
		tgt--;	/* we do not get inquiries for initiator id */

	ldrv_num = (channel * 15) + tgt;


	/*
	 * If we have a logical drive with boot enabled, present it first
	 */
	if( adapter->boot_ldrv_enabled ) {
		if( ldrv_num == 0 ) {
			ldrv_num = adapter->boot_ldrv;
		}
		else {
			if( ldrv_num <= adapter->boot_ldrv ) {
				ldrv_num--;
			}
		}
	}

	/*
	 * If the "delete logical drive" feature is enabled on this
	 * controller, do this only if at least one delete logical drive
	 * operation was done.
	 *
	 * Also, after logical drive deletion, instead of the logical drive
	 * number, the value returned should be 0x80+logical drive id.
	 *
	 * This is valid only for IO commands.
	 */

	if (adapter->support_random_del && adapter->read_ldidmap )
		switch (cmd->cmnd[0]) {
		case READ_6:
		case WRITE_6:
		case READ_10:
		case WRITE_10:
			ldrv_num += 0x80;
		}

	return ldrv_num;
}

/**
 * mega_build_cmd()
 * @adapter: pointer to our soft state
 * @cmd: Prepare using this scsi command
 * @busy: busy flag if no resources
 *
 * Prepares a command and scatter gather list for the controller. This routine
 * also finds out if the command is intended for a logical drive or a
 * physical device and prepares the controller command accordingly.
 *
 * We also re-order the logical drives and physical devices based on their
 * boot settings.
 */
static scb_t *
mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
{
	mega_passthru	*pthru;
	scb_t	*scb;
	mbox_t	*mbox;
	u32	seg;
	char	islogical;
	int	max_ldrv_num;
	int	channel = 0;
	int	target = 0;
	int	ldrv_num = 0;	/* logical drive number */

	/*
	 * We know what channels our logical drives are on - mega_find_card()
	 */
	islogical = adapter->logdrv_chan[cmd->device->channel];

	/*
	 * The theory: If a physical drive is chosen for boot, all the
	 * physical devices are exported before the logical drives; otherwise
	 * physical devices are pushed after logical drives, in which case
	 * the kernel sees the physical devices on a virtual channel which is
	 * then converted to the actual channel on the HBA.
	 */
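	/*
	 * Editor's illustration (hypothetical numbers, not from the driver):
	 * on a controller with product_info.nchannels == 2 and boot from a
	 * physical drive enabled, mid-layer channels 0 and 1 are the real
	 * SCSI channels, so a command on mid-layer channel 2 addresses the
	 * logical drives (channel = 2 - nchannels = 0 below), and target 0
	 * is swapped with boot_pdrv_tgt so the boot disk is probed first.
	 * Without boot-from-physical, logical drives keep the low channels
	 * and the physical channels start after the NVIRT_CHAN virtual ones.
	 */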
	if( adapter->boot_pdrv_enabled ) {
		if( islogical ) {
			/* logical channel */
			channel = cmd->device->channel -
				adapter->product_info.nchannels;
		}
		else {
			/* this is physical channel */
			channel = cmd->device->channel;
			target = cmd->device->id;

			/*
			 * To boot from a physical disk, that disk needs to
			 * be exposed first. If both channels are SCSI,
			 * booting from the second channel is not allowed.
			 */
			if( target == 0 ) {
				target = adapter->boot_pdrv_tgt;
			}
			else if( target == adapter->boot_pdrv_tgt ) {
				target = 0;
			}
		}
	}
	else {
		if( islogical ) {
			/* this is the logical channel */
			channel = cmd->device->channel;
		}
		else {
			/* physical channel */
			channel = cmd->device->channel - NVIRT_CHAN;
			target = cmd->device->id;
		}
	}


	if(islogical) {

		/* have just LUN 0 for each target on virtual channels */
		if (cmd->device->lun) {
			cmd->result = (DID_BAD_TARGET << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}

		ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);


		max_ldrv_num = (adapter->flag & BOARD_40LD) ?
			MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;

		/*
		 * max_ldrv_num increases by 0x80 if some logical drive was
		 * deleted.
		 */
		if(adapter->read_ldidmap)
			max_ldrv_num += 0x80;

		if(ldrv_num > max_ldrv_num ) {
			cmd->result = (DID_BAD_TARGET << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}

	}
	else {
		if( cmd->device->lun > 7) {
			/*
			 * Do not support lun >7 for physically accessed
			 * devices
			 */
			cmd->result = (DID_BAD_TARGET << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}
	}

	/*
	 *
	 * Logical drive commands
	 *
	 */
	if(islogical) {
		switch (cmd->cmnd[0]) {
		case TEST_UNIT_READY:
#if MEGA_HAVE_CLUSTERING
			/*
			 * Do we support clustering and is the support
			 * enabled? If not, return success always.
			 */
			if( !adapter->has_cluster ) {
				cmd->result = (DID_OK << 16);
				cmd->scsi_done(cmd);
				return NULL;
			}

			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}

			scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
			scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
			scb->raw_mbox[3] = ldrv_num;

			scb->dma_direction = DMA_NONE;

			return scb;
#else
			cmd->result = (DID_OK << 16);
			cmd->scsi_done(cmd);
			return NULL;
#endif

		case MODE_SENSE: {
			char *buf;
			struct scatterlist *sg;

			sg = scsi_sglist(cmd);
			buf = kmap_atomic(sg_page(sg)) + sg->offset;

			memset(buf, 0, cmd->cmnd[4]);
			kunmap_atomic(buf - sg->offset);

			cmd->result = (DID_OK << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}

		case READ_CAPACITY:
		case INQUIRY:

			if(!(adapter->flag & (1L << cmd->device->channel))) {

				dev_notice(&adapter->dev->dev,
					"scsi%d: scanning scsi channel %d "
					"for logical drives\n",
					adapter->host->host_no,
					cmd->device->channel);

				adapter->flag |= (1L << cmd->device->channel);
			}

			/* Allocate a SCB and initialize passthru */
			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}
			pthru = scb->pthru;

			mbox = (mbox_t *)scb->raw_mbox;
			memset(mbox, 0, sizeof(scb->raw_mbox));
			memset(pthru, 0, sizeof(mega_passthru));

			pthru->timeout = 0;
			pthru->ars = 1;
			pthru->reqsenselen = 14;
			pthru->islogical = 1;
			pthru->logdrv = ldrv_num;
			pthru->cdblen = cmd->cmd_len;
			memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);

			if( adapter->has_64bit_addr ) {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
			}
			else {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
			}

			scb->dma_direction = DMA_FROM_DEVICE;

			pthru->numsgelements = mega_build_sglist(adapter, scb,
				&pthru->dataxferaddr, &pthru->dataxferlen);

			mbox->m_out.xferaddr = scb->pthru_dma_addr;

			return scb;

		case READ_6:
		case WRITE_6:
		case READ_10:
		case WRITE_10:
		case READ_12:
		case WRITE_12:

			/* Allocate a SCB and initialize mailbox */
			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}
			mbox = (mbox_t *)scb->raw_mbox;

			memset(mbox, 0, sizeof(scb->raw_mbox));
			mbox->m_out.logdrv = ldrv_num;

			/*
			 * A little hack: 2nd bit is zero for all scsi read
			 * commands and is set for all scsi write commands
			 */
			if( adapter->has_64bit_addr ) {
				mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
					MEGA_MBOXCMD_LWRITE64 :
					MEGA_MBOXCMD_LREAD64 ;
			}
			else {
				mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
					MEGA_MBOXCMD_LWRITE :
					MEGA_MBOXCMD_LREAD ;
			}

			/*
			 * 6-byte READ(0x08) or WRITE(0x0A) cdb
			 */
			if( cmd->cmd_len == 6 ) {
				mbox->m_out.numsectors = (u32) cmd->cmnd[4];
				mbox->m_out.lba =
					((u32)cmd->cmnd[1] << 16) |
					((u32)cmd->cmnd[2] << 8) |
					(u32)cmd->cmnd[3];

				mbox->m_out.lba &= 0x1FFFFF;

#if MEGA_HAVE_STATS
				/*
				 * Take modulo 0x80, since the logical drive
				 * number increases by 0x80 when a logical
				 * drive was deleted
				 */
				if (*cmd->cmnd == READ_6) {
					adapter->nreads[ldrv_num%0x80]++;
					adapter->nreadblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				} else {
					adapter->nwrites[ldrv_num%0x80]++;
					adapter->nwriteblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				}
#endif
			}

			/*
			 * 10-byte READ(0x28) or WRITE(0x2A) cdb
			 */
			if( cmd->cmd_len == 10 ) {
				mbox->m_out.numsectors =
					(u32)cmd->cmnd[8] |
					((u32)cmd->cmnd[7] << 8);
				mbox->m_out.lba =
					((u32)cmd->cmnd[2] << 24) |
					((u32)cmd->cmnd[3] << 16) |
					((u32)cmd->cmnd[4] << 8) |
					(u32)cmd->cmnd[5];

#if MEGA_HAVE_STATS
				if (*cmd->cmnd == READ_10) {
					adapter->nreads[ldrv_num%0x80]++;
					adapter->nreadblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				} else {
					adapter->nwrites[ldrv_num%0x80]++;
					adapter->nwriteblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				}
#endif
			}

			/*
			 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
			 */
			if( cmd->cmd_len == 12 ) {
				mbox->m_out.lba =
					((u32)cmd->cmnd[2] << 24) |
					((u32)cmd->cmnd[3] << 16) |
					((u32)cmd->cmnd[4] << 8) |
					(u32)cmd->cmnd[5];

				mbox->m_out.numsectors =
					((u32)cmd->cmnd[6] << 24) |
					((u32)cmd->cmnd[7] << 16) |
					((u32)cmd->cmnd[8] << 8) |
					(u32)cmd->cmnd[9];

#if MEGA_HAVE_STATS
				if (*cmd->cmnd == READ_12) {
					adapter->nreads[ldrv_num%0x80]++;
					adapter->nreadblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				} else {
					adapter->nwrites[ldrv_num%0x80]++;
					adapter->nwriteblocks[ldrv_num%0x80] +=
						mbox->m_out.numsectors;
				}
#endif
			}

			/*
			 * If it is a read command
			 */
			if( (*cmd->cmnd & 0x0F) == 0x08 ) {
				scb->dma_direction = DMA_FROM_DEVICE;
			}
			else {
				scb->dma_direction = DMA_TO_DEVICE;
			}
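			/*
			 * Editor's illustration of the CDB decoding above
			 * (hypothetical bytes, not from the driver): a
			 * 10-byte READ with cmnd[] = { 0x28, 0x00, 0x00,
			 * 0x12, 0x34, 0x56, 0x00, 0x00, 0x08, 0x00 } yields
			 * lba = 0x00123456 and numsectors = 8; the 6-byte
			 * form carries only a 21-bit LBA, hence the
			 * "& 0x1FFFFF" mask.
			 */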
			/* Calculate Scatter-Gather info */
			mbox->m_out.numsgelements = mega_build_sglist(adapter,
				scb, (u32 *)&mbox->m_out.xferaddr, &seg);

			return scb;

#if MEGA_HAVE_CLUSTERING
		case RESERVE:
		case RELEASE:

			/*
			 * Do we support clustering and is the support enabled
			 */
			if( ! adapter->has_cluster ) {

				cmd->result = (DID_BAD_TARGET << 16);
				cmd->scsi_done(cmd);
				return NULL;
			}

			/* Allocate a SCB and initialize mailbox */
			if(!(scb = mega_allocate_scb(adapter, cmd))) {
				*busy = 1;
				return NULL;
			}

			scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
			scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
				MEGA_RESERVE_LD : MEGA_RELEASE_LD;

			scb->raw_mbox[3] = ldrv_num;

			scb->dma_direction = DMA_NONE;

			return scb;
#endif

		default:
			cmd->result = (DID_BAD_TARGET << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}
	}

	/*
	 * Passthru drive commands
	 */
	else {
		/* Allocate a SCB and initialize passthru */
		if(!(scb = mega_allocate_scb(adapter, cmd))) {
			*busy = 1;
			return NULL;
		}

		mbox = (mbox_t *)scb->raw_mbox;
		memset(mbox, 0, sizeof(scb->raw_mbox));

		if( adapter->support_ext_cdb ) {

			mega_prepare_extpassthru(adapter, scb, cmd,
					channel, target);

			mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;

			mbox->m_out.xferaddr = scb->epthru_dma_addr;

		}
		else {

			pthru = mega_prepare_passthru(adapter, scb, cmd,
					channel, target);

			/* Initialize mailbox */
			if( adapter->has_64bit_addr ) {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
			}
			else {
				mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
			}

			mbox->m_out.xferaddr = scb->pthru_dma_addr;

		}
		return scb;
	}
	return NULL;
}


/**
 * mega_prepare_passthru()
 * @adapter: pointer to our soft state
 * @scb: our scsi control block
 * @cmd: scsi command from the mid-layer
 * @channel: actual channel on the controller
 * @target: actual id on the controller.
 *
 * prepare a command for the scsi physical devices.
 */
static mega_passthru *
mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
		      int channel, int target)
{
	mega_passthru *pthru;

	pthru = scb->pthru;
	memset(pthru, 0, sizeof (mega_passthru));

	/* 0=6sec/1=60sec/2=10min/3=3hrs */
	pthru->timeout = 2;

	pthru->ars = 1;
	pthru->reqsenselen = 14;
	pthru->islogical = 0;

	pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;

	pthru->target = (adapter->flag & BOARD_40LD) ?
		(channel << 4) | target : target;

	pthru->cdblen = cmd->cmd_len;
	pthru->logdrv = cmd->device->lun;

	memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);

	/* Not sure about the direction */
	scb->dma_direction = DMA_BIDIRECTIONAL;

	/* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
	switch (cmd->cmnd[0]) {
	case INQUIRY:
	case READ_CAPACITY:
		if(!(adapter->flag & (1L << cmd->device->channel))) {

			dev_notice(&adapter->dev->dev,
				"scsi%d: scanning scsi channel %d [P%d] "
				"for physical devices\n",
				adapter->host->host_no,
				cmd->device->channel, channel);

			adapter->flag |= (1L << cmd->device->channel);
		}
		fallthrough;
	default:
		pthru->numsgelements = mega_build_sglist(adapter, scb,
				&pthru->dataxferaddr, &pthru->dataxferlen);
		break;
	}
	return pthru;
}


/**
 * mega_prepare_extpassthru()
 * @adapter: pointer to our soft state
 * @scb: our scsi control block
 * @cmd: scsi command from the mid-layer
 * @channel: actual channel on the controller
 * @target: actual id on the controller.
 *
 * prepare a command for the scsi physical devices. This routine prepares
 * commands for devices which can take extended CDBs (>10 bytes)
 */
static mega_ext_passthru *
mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
			 struct scsi_cmnd *cmd,
			 int channel, int target)
{
	mega_ext_passthru *epthru;

	epthru = scb->epthru;
	memset(epthru, 0, sizeof(mega_ext_passthru));

	/* 0=6sec/1=60sec/2=10min/3=3hrs */
	epthru->timeout = 2;

	epthru->ars = 1;
	epthru->reqsenselen = 14;
	epthru->islogical = 0;

	epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
	epthru->target = (adapter->flag & BOARD_40LD) ?
		(channel << 4) | target : target;

	epthru->cdblen = cmd->cmd_len;
	epthru->logdrv = cmd->device->lun;

	memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);

	/* Not sure about the direction */
	scb->dma_direction = DMA_BIDIRECTIONAL;

	switch(cmd->cmnd[0]) {
	case INQUIRY:
	case READ_CAPACITY:
		if(!(adapter->flag & (1L << cmd->device->channel))) {

			dev_notice(&adapter->dev->dev,
				"scsi%d: scanning scsi channel %d [P%d] "
				"for physical devices\n",
				adapter->host->host_no,
				cmd->device->channel, channel);

			adapter->flag |= (1L << cmd->device->channel);
		}
		fallthrough;
	default:
		epthru->numsgelements = mega_build_sglist(adapter, scb,
				&epthru->dataxferaddr, &epthru->dataxferlen);
		break;
	}

	return epthru;
}

static void
__mega_runpendq(adapter_t *adapter)
{
	scb_t *scb;
	struct list_head *pos, *next;

	/* Issue any pending commands to the card */
	list_for_each_safe(pos, next, &adapter->pending_list) {

		scb = list_entry(pos, scb_t, list);

		if( !(scb->state & SCB_ISSUED) ) {

			if( issue_scb(adapter, scb) != 0 )
				return;
		}
	}

	return;
}

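/*
 * Editor's note on the mailbox handoff used by issue_scb() and
 * issue_scb_block() below: for the 64-bit capable commands (LREAD64,
 * LWRITE64, PASSTHRU64, EXTPTHRU) the real data address travels in
 * mbox64->xfer_segment_lo/hi and the legacy 32-bit xferaddr field is set to
 * the 0xFFFFFFFF sentinel, which apparently tells the firmware to use the
 * 64-bit fields instead.  This reading is inferred from the code that
 * follows, not from firmware documentation.
 */
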
/**
 * issue_scb()
 * @adapter: pointer to our soft state
 * @scb: scsi control block
 *
 * Post a command to the card if the mailbox is available, otherwise return
 * busy. We also take the scb from the pending list if the mailbox is
 * available.
 */
static int
issue_scb(adapter_t *adapter, scb_t *scb)
{
	volatile mbox64_t	*mbox64 = adapter->mbox64;
	volatile mbox_t		*mbox = adapter->mbox;
	unsigned int	i = 0;

	if(unlikely(mbox->m_in.busy)) {
		do {
			udelay(1);
			i++;
		} while( mbox->m_in.busy && (i < max_mbox_busy_wait) );

		if(mbox->m_in.busy) return -1;
	}

	/* Copy mailbox data into host structure */
	memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
			sizeof(struct mbox_out));

	mbox->m_out.cmdid = scb->idx;	/* Set cmdid */
	mbox->m_in.busy = 1;		/* Set busy */


	/*
	 * Increment the pending queue counter
	 */
	atomic_inc(&adapter->pend_cmds);

	switch (mbox->m_out.cmd) {
	case MEGA_MBOXCMD_LREAD64:
	case MEGA_MBOXCMD_LWRITE64:
	case MEGA_MBOXCMD_PASSTHRU64:
	case MEGA_MBOXCMD_EXTPTHRU:
		mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
		mbox64->xfer_segment_hi = 0;
		mbox->m_out.xferaddr = 0xFFFFFFFF;
		break;
	default:
		mbox64->xfer_segment_lo = 0;
		mbox64->xfer_segment_hi = 0;
	}

	/*
	 * post the command
	 */
	scb->state |= SCB_ISSUED;

	if( likely(adapter->flag & BOARD_MEMMAP) ) {
		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0;
		WRINDOOR(adapter, adapter->mbox_dma | 0x1);
	}
	else {
		irq_enable(adapter);
		issue_command(adapter);
	}

	return 0;
}

/*
 * Wait until the controller's mailbox is available
 */
static inline int
mega_busywait_mbox (adapter_t *adapter)
{
	if (adapter->mbox->m_in.busy)
		return __mega_busywait_mbox(adapter);
	return 0;
}

/**
 * issue_scb_block()
 * @adapter: pointer to our soft state
 * @raw_mbox: the mailbox
 *
 * Issue a scb in synchronous and non-interrupt mode
 */
static int
issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
{
	volatile mbox64_t *mbox64 = adapter->mbox64;
	volatile mbox_t *mbox = adapter->mbox;
	u8	byte;

	/* Wait until mailbox is free */
	if(mega_busywait_mbox (adapter))
		goto bug_blocked_mailbox;

	/* Copy mailbox data into host structure */
	memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
	mbox->m_out.cmdid = 0xFE;
	mbox->m_in.busy = 1;

	switch (raw_mbox[0]) {
	case MEGA_MBOXCMD_LREAD64:
	case MEGA_MBOXCMD_LWRITE64:
	case MEGA_MBOXCMD_PASSTHRU64:
	case MEGA_MBOXCMD_EXTPTHRU:
		mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
		mbox64->xfer_segment_hi = 0;
		mbox->m_out.xferaddr = 0xFFFFFFFF;
		break;
	default:
		mbox64->xfer_segment_lo = 0;
		mbox64->xfer_segment_hi = 0;
	}

	if( likely(adapter->flag & BOARD_MEMMAP) ) {
		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0;
		mbox->m_in.numstatus = 0xFF;
		mbox->m_in.status = 0xFF;
		WRINDOOR(adapter, adapter->mbox_dma | 0x1);

		while((volatile u8)mbox->m_in.numstatus == 0xFF)
			cpu_relax();

		mbox->m_in.numstatus = 0xFF;

		while( (volatile u8)mbox->m_in.poll != 0x77 )
			cpu_relax();

		mbox->m_in.poll = 0;
		mbox->m_in.ack = 0x77;

		WRINDOOR(adapter, adapter->mbox_dma | 0x2);

		while(RDINDOOR(adapter) & 0x2)
			cpu_relax();
	}
	else {
		irq_disable(adapter);
		issue_command(adapter);

		while (!((byte = irq_state(adapter)) & INTR_VALID))
			cpu_relax();

		set_irq_state(adapter, byte);
		irq_enable(adapter);
		irq_ack(adapter);
	}

	return mbox->m_in.status;

bug_blocked_mailbox:
	dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
	udelay (1000);
	return -1;
}


/**
 * megaraid_isr_iomapped()
 * @irq: irq
 * @devp: pointer to our soft state
 *
 * Interrupt service routine for io-mapped controllers.
 * Find out if our device is interrupting. If yes, acknowledge the interrupt
 * and service the completed commands.
 */
static irqreturn_t
megaraid_isr_iomapped(int irq, void *devp)
{
	adapter_t	*adapter = devp;
	unsigned long	flags;
	u8	status;
	u8	nstatus;
	u8	completed[MAX_FIRMWARE_STATUS];
	u8	byte;
	int	handled = 0;


	/*
	 * Loop while the F/W has more commands for us to complete.
	 */
	spin_lock_irqsave(&adapter->lock, flags);

	do {
		/* Check if a valid interrupt is pending */
		byte = irq_state(adapter);
		if( (byte & VALID_INTR_BYTE) == 0 ) {
			/*
			 * No more pending commands
			 */
			goto out_unlock;
		}
		set_irq_state(adapter, byte);

		while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
				== 0xFF)
			cpu_relax();
		adapter->mbox->m_in.numstatus = 0xFF;

		status = adapter->mbox->m_in.status;

		/*
		 * decrement the pending queue counter
		 */
		atomic_sub(nstatus, &adapter->pend_cmds);

		memcpy(completed, (void *)adapter->mbox->m_in.completed,
				nstatus);

		/* Acknowledge interrupt */
		irq_ack(adapter);

		mega_cmd_done(adapter, completed, nstatus, status);

		mega_rundoneq(adapter);

		handled = 1;

		/* Loop through any pending requests */
		if(atomic_read(&adapter->quiescent) == 0) {
			mega_runpendq(adapter);
		}

	} while(1);

out_unlock:

	spin_unlock_irqrestore(&adapter->lock, flags);

	return IRQ_RETVAL(handled);
}


/**
 * megaraid_isr_memmapped()
 * @irq: irq
 * @devp: pointer to our soft state
 *
 * Interrupt service routine for memory-mapped controllers.
 * Find out if our device is interrupting. If yes, acknowledge the interrupt
 * and service the completed commands.
 */
static irqreturn_t
megaraid_isr_memmapped(int irq, void *devp)
{
	adapter_t	*adapter = devp;
	unsigned long	flags;
	u8	status;
	u32	dword = 0;
	u8	nstatus;
	u8	completed[MAX_FIRMWARE_STATUS];
	int	handled = 0;


	/*
	 * Loop while the F/W has more commands for us to complete.
	 */
	spin_lock_irqsave(&adapter->lock, flags);

	do {
		/* Check if a valid interrupt is pending */
		dword = RDOUTDOOR(adapter);
		if(dword != 0x10001234) {
			/*
			 * No more pending commands
			 */
			goto out_unlock;
		}
		WROUTDOOR(adapter, 0x10001234);

		while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
				== 0xFF) {
			cpu_relax();
		}
		adapter->mbox->m_in.numstatus = 0xFF;

		status = adapter->mbox->m_in.status;

		/*
		 * decrement the pending queue counter
		 */
		atomic_sub(nstatus, &adapter->pend_cmds);

		memcpy(completed, (void *)adapter->mbox->m_in.completed,
				nstatus);

		/* Acknowledge interrupt */
		WRINDOOR(adapter, 0x2);

		handled = 1;

		while( RDINDOOR(adapter) & 0x02 )
			cpu_relax();

		mega_cmd_done(adapter, completed, nstatus, status);

		mega_rundoneq(adapter);

		/* Loop through any pending requests */
		if(atomic_read(&adapter->quiescent) == 0) {
			mega_runpendq(adapter);
		}

	} while(1);

out_unlock:

	spin_unlock_irqrestore(&adapter->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 * mega_cmd_done()
 * @adapter: pointer to our soft state
 * @completed: array of ids of completed commands
 * @nstatus: number of completed commands
 * @status: status of the last command completed
 *
 * Complete the commands and call the scsi mid-layer callback hooks.
 */
static void
mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
{
	mega_ext_passthru	*epthru = NULL;
	struct scatterlist	*sgl;
	struct scsi_cmnd	*cmd = NULL;
	mega_passthru		*pthru = NULL;
	mbox_t			*mbox = NULL;
	u8	c;
	scb_t	*scb;
	int	islogical;
	int	cmdid;
	int	i;

	/*
	 * for all the commands completed, call the mid-layer callback routine
	 * and free the scb.
	 */
	for( i = 0; i < nstatus; i++ ) {

		cmdid = completed[i];

		/*
		 * Only free SCBs for the commands coming down from the
		 * mid-layer, not for those issued internally.
		 *
		 * For an internal command, restore the status returned by
		 * the firmware so that the user can interpret it.
		 */
		if (cmdid == CMDID_INT_CMDS) {
			scb = &adapter->int_scb;

			list_del_init(&scb->list);
			scb->state = SCB_FREE;

			adapter->int_status = status;
			complete(&adapter->int_waitq);
		} else {
			scb = &adapter->scb_list[cmdid];

			/*
			 * Make sure f/w has completed a valid command
			 */
			if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
				dev_crit(&adapter->dev->dev, "invalid command "
					"Id %d, scb->state:%x, scsi cmd:%p\n",
					cmdid, scb->state, scb->cmd);

				continue;
			}

			/*
			 * Was an abort issued for this command?
			 */
			if( scb->state & SCB_ABORT ) {

				dev_warn(&adapter->dev->dev,
					"aborted cmd [%x] complete\n",
					scb->idx);

				scb->cmd->result = (DID_ABORT << 16);

				list_add_tail(SCSI_LIST(scb->cmd),
						&adapter->completed_list);

				mega_free_scb(adapter, scb);

				continue;
			}

			/*
			 * Was a reset issued for this command?
			 */
			if( scb->state & SCB_RESET ) {

				dev_warn(&adapter->dev->dev,
					"reset cmd [%x] complete\n",
					scb->idx);

				scb->cmd->result = (DID_RESET << 16);

				list_add_tail(SCSI_LIST(scb->cmd),
						&adapter->completed_list);

				mega_free_scb (adapter, scb);

				continue;
			}

			cmd = scb->cmd;
			pthru = scb->pthru;
			epthru = scb->epthru;
			mbox = (mbox_t *)scb->raw_mbox;

#if MEGA_HAVE_STATS
			{

				int	logdrv = mbox->m_out.logdrv;

				islogical = adapter->logdrv_chan[cmd->channel];
				/*
				 * Maintain an error counter for the logical
				 * drive. Some applications, like an SNMP
				 * agent, need such statistics.
				 */
				if( status && islogical && (cmd->cmnd[0] == READ_6 ||
						cmd->cmnd[0] == READ_10 ||
						cmd->cmnd[0] == READ_12)) {
					/*
					 * Logical drive number increases by
					 * 0x80 when a logical drive is
					 * deleted
					 */
					adapter->rd_errors[logdrv%0x80]++;
				}

				if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
						cmd->cmnd[0] == WRITE_10 ||
						cmd->cmnd[0] == WRITE_12)) {
					/*
					 * Logical drive number increases by
					 * 0x80 when a logical drive is
					 * deleted
					 */
					adapter->wr_errors[logdrv%0x80]++;
				}

			}
#endif
		}

		/*
		 * Do not expose a hard disk sitting behind a RAID channel:
		 * if an INQUIRY was sent to a non-logical device and the
		 * returned device type is a (removable) hard disk, the
		 * request should return failure. - PJ
		 */
		islogical = adapter->logdrv_chan[cmd->device->channel];
		if( cmd->cmnd[0] == INQUIRY && !islogical ) {

			sgl = scsi_sglist(cmd);
			if( sg_page(sgl) ) {
				c = *(unsigned char *) sg_virt(&sgl[0]);
			} else {
				dev_warn(&adapter->dev->dev, "invalid sg\n");
				c = 0;
			}

			if(IS_RAID_CH(adapter, cmd->device->channel) &&
					((c & 0x1F ) == TYPE_DISK)) {
				status = 0xF0;
			}
		}

		/* clear result; otherwise, success returns corrupt value */
		cmd->result = 0;

		/* Convert MegaRAID status to Linux error code */
		switch (status) {
		case 0x00:	/* SUCCESS, i.e. SCSI_STATUS_GOOD */
			cmd->result |= (DID_OK << 16);
			break;

		case 0x02:	/* ERROR_ABORTED, i.e.
				   SCSI_STATUS_CHECK_CONDITION */

			/* set sense_buffer and result fields */
			if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
				mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {

				memcpy(cmd->sense_buffer, pthru->reqsensearea,
						14);

				cmd->result = (DRIVER_SENSE << 24) |
					(DID_OK << 16) |
					(CHECK_CONDITION << 1);
			}
			else {
				if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {

					memcpy(cmd->sense_buffer,
						epthru->reqsensearea, 14);

					cmd->result = (DRIVER_SENSE << 24) |
						(DID_OK << 16) |
						(CHECK_CONDITION << 1);
				} else {
					cmd->sense_buffer[0] = 0x70;
					cmd->sense_buffer[2] = ABORTED_COMMAND;
					cmd->result |= (CHECK_CONDITION << 1);
				}
			}
			break;

		case 0x08:	/* ERR_DEST_DRIVE_FAILED, i.e.
				   SCSI_STATUS_BUSY */
			cmd->result |= (DID_BUS_BUSY << 16) | status;
			break;

		default:
#if MEGA_HAVE_CLUSTERING
			/*
			 * If TEST_UNIT_READY fails, we know
			 * MEGA_RESERVATION_STATUS failed
			 */
			if( cmd->cmnd[0] == TEST_UNIT_READY ) {
				cmd->result |= (DID_ERROR << 16) |
					(RESERVATION_CONFLICT << 1);
			}
			else
			/*
			 * Error code returned is 1 if Reserve or Release
			 * failed or the input parameter is invalid
			 */
			if( status == 1 &&
				(cmd->cmnd[0] == RESERVE ||
					cmd->cmnd[0] == RELEASE) ) {

				cmd->result |= (DID_ERROR << 16) |
					(RESERVATION_CONFLICT << 1);
			}
			else
#endif
				cmd->result |= (DID_BAD_TARGET << 16)|status;
		}

		mega_free_scb(adapter, scb);

		/* Add Scsi_Command to end of completed queue */
		list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
	}
}


/*
 * mega_rundoneq()
 *
 * Run through the list of completed requests and finish them.
 */
static void
mega_rundoneq (adapter_t *adapter)
{
	struct scsi_cmnd *cmd;
	struct list_head *pos;

	list_for_each(pos, &adapter->completed_list) {

		struct scsi_pointer* spos = (struct scsi_pointer *)pos;

		cmd = list_entry(spos, struct scsi_cmnd, SCp);
		cmd->scsi_done(cmd);
	}

	INIT_LIST_HEAD(&adapter->completed_list);
}

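/*
 * Editor's note (inferred from the code above; SCSI_LIST() itself is
 * defined in megaraid.h, outside this excerpt): completed commands are not
 * linked through the scb but through a list_head carried in each command's
 * scsi_pointer (SCp) area, so mega_rundoneq() casts the list cursor back to
 * a struct scsi_pointer and uses list_entry() (container_of) to recover the
 * owning scsi_cmnd before invoking its scsi_done callback.
 */
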
/*
 * Free a SCB structure
 * Note: We assume the scsi commands associated with this scb have not yet
 * been freed.
 */
static void
mega_free_scb(adapter_t *adapter, scb_t *scb)
{
	switch( scb->dma_type ) {

	case MEGA_DMA_TYPE_NONE:
		break;

	case MEGA_SGLIST:
		scsi_dma_unmap(scb->cmd);
		break;
	default:
		break;
	}

	/*
	 * Remove from the pending list
	 */
	list_del_init(&scb->list);

	/* Link the scb back into free list */
	scb->state = SCB_FREE;
	scb->cmd = NULL;

	list_add(&scb->list, &adapter->free_list);
}


static int
__mega_busywait_mbox (adapter_t *adapter)
{
	volatile mbox_t *mbox = adapter->mbox;
	long counter;

	for (counter = 0; counter < 10000; counter++) {
		if (!mbox->m_in.busy)
			return 0;
		udelay(100);
		cond_resched();
	}
	return -1;		/* give up after 1 second */
}

/*
 * Copies data to SGLIST
 * Note: For 64 bit cards, we need a minimum of one SG element for read/write
 */
static int
mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
{
	struct scatterlist *sg;
	struct scsi_cmnd	*cmd;
	int	sgcnt;
	int	idx;

	cmd = scb->cmd;

	/*
	 * Copy Scatter-Gather list info into controller structure.
	 *
	 * The number of sg elements returned must not exceed our limit
	 */
	sgcnt = scsi_dma_map(cmd);

	scb->dma_type = MEGA_SGLIST;

	BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);

	*len = 0;

	if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
		sg = scsi_sglist(cmd);
		scb->dma_h_bulkdata = sg_dma_address(sg);
		*buf = (u32)scb->dma_h_bulkdata;
		*len = sg_dma_len(sg);
		return 0;
	}

	scsi_for_each_sg(cmd, sg, sgcnt, idx) {
		if (adapter->has_64bit_addr) {
			scb->sgl64[idx].address = sg_dma_address(sg);
			*len += scb->sgl64[idx].length = sg_dma_len(sg);
		} else {
			scb->sgl[idx].address = sg_dma_address(sg);
			*len += scb->sgl[idx].length = sg_dma_len(sg);
		}
	}

	/* Reset pointer and length fields */
	*buf = scb->sgl_dma_addr;

	/* Return count of SG requests */
	return sgcnt;
}


/*
 * mega_8_to_40ld()
 *
 * takes all info in AdapterInquiry structure and puts it into ProductInfo and
 * Enquiry3 structures for later use
 */
static void
mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
		mega_product_info *product_info)
{
	int i;

	product_info->max_commands = inquiry->adapter_info.max_commands;
	enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
	product_info->nchannels = inquiry->adapter_info.nchannels;

	for (i = 0; i < 4; i++) {
		product_info->fw_version[i] =
			inquiry->adapter_info.fw_version[i];

		product_info->bios_version[i] =
			inquiry->adapter_info.bios_version[i];
	}
	enquiry3->cache_flush_interval =
		inquiry->adapter_info.cache_flush_interval;

	product_info->dram_size = inquiry->adapter_info.dram_size;

	enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;

	for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
		enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
		enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
		enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
	}

	for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
		enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
}

static inline void
mega_free_sgl(adapter_t *adapter)
{
	scb_t	*scb;
	int	i;

	for(i = 0; i < adapter->max_cmds; i++) {

		scb = &adapter->scb_list[i];

		if( scb->sgl64 ) {
			dma_free_coherent(&adapter->dev->dev,
					  sizeof(mega_sgl64) * adapter->sglen,
					  scb->sgl64, scb->sgl_dma_addr);

			scb->sgl64 = NULL;
		}

		if( scb->pthru ) {
			dma_free_coherent(&adapter->dev->dev,
					  sizeof(mega_passthru), scb->pthru,
					  scb->pthru_dma_addr);

			scb->pthru = NULL;
		}

		if( scb->epthru ) {
			dma_free_coherent(&adapter->dev->dev,
					  sizeof(mega_ext_passthru),
					  scb->epthru, scb->epthru_dma_addr);

			scb->epthru = NULL;
		}

	}
}


/*
 * Get information about the card/driver
 */
const char *
megaraid_info(struct Scsi_Host *host)
{
	static char buffer[512];
	adapter_t	*adapter;

	adapter = (adapter_t *)host->hostdata;

	sprintf (buffer,
		"LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
		adapter->fw_version, adapter->product_info.max_commands,
		adapter->host->max_id, adapter->host->max_channel,
		(u32)adapter->host->max_lun);
	return buffer;
}

/*
 * Abort a previous SCSI request. Only commands on the pending list can be
 * aborted. All the commands issued to the F/W must complete.
 */
static int
megaraid_abort(struct scsi_cmnd *cmd)
{
	adapter_t	*adapter;
	int		rval;

	adapter = (adapter_t *)cmd->device->host->hostdata;

	rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);

	/*
	 * This is required here to complete any completed requests
	 * to be communicated over to the mid layer.
	 */
	mega_rundoneq(adapter);

	return rval;
}


static int
megaraid_reset(struct scsi_cmnd *cmd)
{
	adapter_t	*adapter;
	megacmd_t	mc;
	int		rval;

	adapter = (adapter_t *)cmd->device->host->hostdata;

#if MEGA_HAVE_CLUSTERING
	mc.cmd = MEGA_CLUSTER_CMD;
	mc.opcode = MEGA_RESET_RESERVATIONS;

	if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
		dev_warn(&adapter->dev->dev, "reservation reset failed\n");
	}
	else {
		dev_info(&adapter->dev->dev, "reservation reset\n");
	}
#endif

	spin_lock_irq(&adapter->lock);

	rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET);

	/*
	 * This is required here to complete any completed requests
	 * to be communicated over to the mid layer.
	 */
	mega_rundoneq(adapter);
	spin_unlock_irq(&adapter->lock);

	return rval;
}

/**
 * megaraid_abort_and_reset()
 * @adapter: megaraid soft state
 * @cmd: scsi command to be aborted or reset
 * @aor: abort or reset flag
 *
 * Try to locate the scsi command in the pending queue. If found and not yet
 * issued to the controller, abort/reset it. Otherwise return failure.
 */
static int
megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
{
	struct list_head	*pos, *next;
	scb_t			*scb;

	dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
		(aor == SCB_ABORT)? "ABORTING":"RESET",
		cmd->cmnd[0], cmd->device->channel,
		cmd->device->id, (u32)cmd->device->lun);

	if(list_empty(&adapter->pending_list))
		return FAILED;

	list_for_each_safe(pos, next, &adapter->pending_list) {

		scb = list_entry(pos, scb_t, list);

		if (scb->cmd == cmd) { /* Found command */

			scb->state |= aor;

			/*
			 * Check if this command has firmware ownership. If
			 * yes, we cannot reset this command. Whenever f/w
			 * completes this command, we will return appropriate
			 * status from ISR.
			 */
			if( scb->state & SCB_ISSUED ) {

				dev_warn(&adapter->dev->dev,
					"%s[%x], fw owner\n",
					(aor==SCB_ABORT) ? "ABORTING":"RESET",
					scb->idx);

				return FAILED;
			}
			else {

				/*
				 * Not yet issued! Remove from the pending
				 * list
				 */
				dev_warn(&adapter->dev->dev,
					"%s-[%x], driver owner\n",
					(aor==SCB_ABORT) ? "ABORTING":"RESET",
					scb->idx);

				mega_free_scb(adapter, scb);

				if( aor == SCB_ABORT ) {
					cmd->result = (DID_ABORT << 16);
				}
				else {
					cmd->result = (DID_RESET << 16);
				}

				list_add_tail(SCSI_LIST(cmd),
						&adapter->completed_list);

				return SUCCESS;
			}
		}
	}

	return FAILED;
}

static inline int
make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
{
	*pdev = pci_alloc_dev(NULL);

	if( *pdev == NULL ) return -1;

	memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));

	if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
		kfree(*pdev);
		return -1;
	}

	return 0;
}

static inline void
free_local_pdev(struct pci_dev *pdev)
{
	kfree(pdev);
}

/**
 * mega_allocate_inquiry()
 * @dma_handle: handle returned for dma address
 * @pdev: handle to pci device
 *
 * allocates memory for inquiry structure
 */
static inline void *
mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
{
	return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
				  dma_handle, GFP_KERNEL);
}


static inline void
mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
{
	dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
			  dma_handle);
}


#ifdef CONFIG_PROC_FS
/* Following code handles /proc fs */

/**
 * proc_show_config()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display configuration information about the controller.
 */
static int
proc_show_config(struct seq_file *m, void *v)
{

	adapter_t *adapter = m->private;

	seq_puts(m, MEGARAID_VERSION);
	if(adapter->product_info.product_name[0])
		seq_printf(m, "%s\n", adapter->product_info.product_name);

	seq_puts(m, "Controller Type: ");

	if( adapter->flag & BOARD_MEMMAP )
		seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
	else
		seq_puts(m, "418/428/434\n");

	if(adapter->flag & BOARD_40LD)
		seq_puts(m, "Controller Supports 40 Logical Drives\n");

	if(adapter->flag & BOARD_64BIT)
		seq_puts(m, "Controller capable of 64-bit memory addressing\n");
	if( adapter->has_64bit_addr )
		seq_puts(m, "Controller using 64-bit memory addressing\n");
	else
		seq_puts(m, "Controller is not using 64-bit memory addressing\n");

	seq_printf(m, "Base = %08lx, Irq = %d, ",
		   adapter->base, adapter->host->irq);

	seq_printf(m, "Logical Drives = %d, Channels = %d\n",
		   adapter->numldrv, adapter->product_info.nchannels);

	seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
		   adapter->fw_version, adapter->bios_version,
		   adapter->product_info.dram_size);

	seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
		   adapter->product_info.max_commands, adapter->max_cmds);

	seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
	seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
	seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
	seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
	seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
	seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
	seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
	seq_printf(m, "quiescent = %d\n",
		   atomic_read(&adapter->quiescent));
	seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);

	seq_puts(m, "\nModule Parameters:\n");
	seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
	seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
	return 0;
}

/**
 * proc_show_stat()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display statistical information about the I/O activity.
 */
static int
proc_show_stat(struct seq_file *m, void *v)
{
	adapter_t *adapter = m->private;
#if MEGA_HAVE_STATS
	int	i;
#endif

	seq_puts(m, "Statistical Information for this controller\n");
	seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
#if MEGA_HAVE_STATS
	for(i = 0; i < adapter->numldrv; i++) {
		seq_printf(m, "Logical Drive %d:\n", i);
		seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
			   adapter->nreads[i], adapter->nwrites[i]);
		seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
			   adapter->nreadblocks[i], adapter->nwriteblocks[i]);
		seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
			   adapter->rd_errors[i], adapter->wr_errors[i]);
	}
#else
	seq_puts(m, "IO and error counters not compiled in driver.\n");
#endif
	return 0;
}


/**
 * proc_show_mbox()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display mailbox information for the last command issued. This information
 * is good for debugging.
 */
static int
proc_show_mbox(struct seq_file *m, void *v)
{
	adapter_t	*adapter = m->private;
	volatile mbox_t	*mbox = adapter->mbox;

	seq_puts(m, "Contents of Mail Box Structure\n");
	seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
	seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
	seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
	seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
	seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
	seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
	seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
	seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
	seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
	return 0;
}


/**
 * proc_show_rebuild_rate()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display current rebuild rate
 */
static int
proc_show_rebuild_rate(struct seq_file *m, void *v)
{
	adapter_t	*adapter = m->private;
	dma_addr_t	dma_handle;
	caddr_t		inquiry;
	struct pci_dev	*pdev;

	if( make_local_pdev(adapter, &pdev) != 0 )
		return 0;

	if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
		goto free_pdev;

	if( mega_adapinq(adapter, dma_handle) != 0 ) {
		seq_puts(m, "Adapter inquiry failed.\n");
		dev_warn(&adapter->dev->dev, "inquiry failed\n");
		goto free_inquiry;
	}

	if( adapter->flag & BOARD_40LD )
		seq_printf(m, "Rebuild Rate: [%d%%]\n",
			   ((mega_inquiry3 *)inquiry)->rebuild_rate);
	else
		seq_printf(m, "Rebuild Rate: [%d%%]\n",
			((mraid_ext_inquiry *)
			 inquiry)->raid_inq.adapter_info.rebuild_rate);

free_inquiry:
	mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
	free_local_pdev(pdev);
	return 0;
}


/**
 * proc_show_battery()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display information about the battery module on the controller.
2227 */ 2228 static int 2229 proc_show_battery(struct seq_file *m, void *v) 2230 { 2231 adapter_t *adapter = m->private; 2232 dma_addr_t dma_handle; 2233 caddr_t inquiry; 2234 struct pci_dev *pdev; 2235 u8 battery_status; 2236 2237 if( make_local_pdev(adapter, &pdev) != 0 ) 2238 return 0; 2239 2240 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2241 goto free_pdev; 2242 2243 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2244 seq_puts(m, "Adapter inquiry failed.\n"); 2245 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2246 goto free_inquiry; 2247 } 2248 2249 if( adapter->flag & BOARD_40LD ) { 2250 battery_status = ((mega_inquiry3 *)inquiry)->battery_status; 2251 } 2252 else { 2253 battery_status = ((mraid_ext_inquiry *)inquiry)-> 2254 raid_inq.adapter_info.battery_status; 2255 } 2256 2257 /* 2258 * Decode the battery status 2259 */ 2260 seq_printf(m, "Battery Status:[%d]", battery_status); 2261 2262 if(battery_status == MEGA_BATT_CHARGE_DONE) 2263 seq_puts(m, " Charge Done"); 2264 2265 if(battery_status & MEGA_BATT_MODULE_MISSING) 2266 seq_puts(m, " Module Missing"); 2267 2268 if(battery_status & MEGA_BATT_LOW_VOLTAGE) 2269 seq_puts(m, " Low Voltage"); 2270 2271 if(battery_status & MEGA_BATT_TEMP_HIGH) 2272 seq_puts(m, " Temperature High"); 2273 2274 if(battery_status & MEGA_BATT_PACK_MISSING) 2275 seq_puts(m, " Pack Missing"); 2276 2277 if(battery_status & MEGA_BATT_CHARGE_INPROG) 2278 seq_puts(m, " Charge In-progress"); 2279 2280 if(battery_status & MEGA_BATT_CHARGE_FAIL) 2281 seq_puts(m, " Charge Fail"); 2282 2283 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) 2284 seq_puts(m, " Cycles Exceeded"); 2285 2286 seq_putc(m, '\n'); 2287 2288 free_inquiry: 2289 mega_free_inquiry(inquiry, dma_handle, pdev); 2290 free_pdev: 2291 free_local_pdev(pdev); 2292 return 0; 2293 } 2294 2295 2296 /* 2297 * Display scsi inquiry 2298 */ 2299 static void 2300 mega_print_inquiry(struct seq_file *m, char *scsi_inq) 2301 { 2302 int i; 2303 2304 seq_puts(m, " Vendor: "); 2305 seq_write(m, scsi_inq + 8, 8); 2306 seq_puts(m, " Model: "); 2307 seq_write(m, scsi_inq + 16, 16); 2308 seq_puts(m, " Rev: "); 2309 seq_write(m, scsi_inq + 32, 4); 2310 seq_putc(m, '\n'); 2311 2312 i = scsi_inq[0] & 0x1f; 2313 seq_printf(m, " Type: %s ", scsi_device_type(i)); 2314 2315 seq_printf(m, " ANSI SCSI revision: %02x", 2316 scsi_inq[2] & 0x07); 2317 2318 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) 2319 seq_puts(m, " CCS\n"); 2320 else 2321 seq_putc(m, '\n'); 2322 } 2323 2324 /** 2325 * proc_show_pdrv() 2326 * @m: Synthetic file construction data 2327 * @adapter: pointer to our soft state 2328 * @channel: channel 2329 * 2330 * Display information about the physical drives. 
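 * The per-target state comes from the pdrv_state array returned by the
 * adapter inquiry, indexed by (channel * 16 + target). A SCSI INQUIRY line
 * is appended only for devices reporting TYPE_DISK; logical drives and
 * non-disk devices are left to /proc/scsi/scsi.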
2331 */ 2332 static int 2333 proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel) 2334 { 2335 dma_addr_t dma_handle; 2336 char *scsi_inq; 2337 dma_addr_t scsi_inq_dma_handle; 2338 caddr_t inquiry; 2339 struct pci_dev *pdev; 2340 u8 *pdrv_state; 2341 u8 state; 2342 int tgt; 2343 int max_channels; 2344 int i; 2345 2346 if( make_local_pdev(adapter, &pdev) != 0 ) 2347 return 0; 2348 2349 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2350 goto free_pdev; 2351 2352 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2353 seq_puts(m, "Adapter inquiry failed.\n"); 2354 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2355 goto free_inquiry; 2356 } 2357 2358 2359 scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle, 2360 GFP_KERNEL); 2361 if( scsi_inq == NULL ) { 2362 seq_puts(m, "memory not available for scsi inq.\n"); 2363 goto free_inquiry; 2364 } 2365 2366 if( adapter->flag & BOARD_40LD ) { 2367 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state; 2368 } 2369 else { 2370 pdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2371 raid_inq.pdrv_info.pdrv_state; 2372 } 2373 2374 max_channels = adapter->product_info.nchannels; 2375 2376 if( channel >= max_channels ) { 2377 goto free_pci; 2378 } 2379 2380 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) { 2381 2382 i = channel*16 + tgt; 2383 2384 state = *(pdrv_state + i); 2385 switch( state & 0x0F ) { 2386 case PDRV_ONLINE: 2387 seq_printf(m, "Channel:%2d Id:%2d State: Online", 2388 channel, tgt); 2389 break; 2390 2391 case PDRV_FAILED: 2392 seq_printf(m, "Channel:%2d Id:%2d State: Failed", 2393 channel, tgt); 2394 break; 2395 2396 case PDRV_RBLD: 2397 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild", 2398 channel, tgt); 2399 break; 2400 2401 case PDRV_HOTSPARE: 2402 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare", 2403 channel, tgt); 2404 break; 2405 2406 default: 2407 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured", 2408 channel, tgt); 2409 break; 2410 } 2411 2412 /* 2413 * This interface displays inquiries for disk drives 2414 * only. Inquries for logical drives and non-disk 2415 * devices are available through /proc/scsi/scsi 2416 */ 2417 memset(scsi_inq, 0, 256); 2418 if( mega_internal_dev_inquiry(adapter, channel, tgt, 2419 scsi_inq_dma_handle) || 2420 (scsi_inq[0] & 0x1F) != TYPE_DISK ) { 2421 continue; 2422 } 2423 2424 /* 2425 * Check for overflow. We print less than 240 2426 * characters for inquiry 2427 */ 2428 seq_puts(m, ".\n"); 2429 mega_print_inquiry(m, scsi_inq); 2430 } 2431 2432 free_pci: 2433 dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle); 2434 free_inquiry: 2435 mega_free_inquiry(inquiry, dma_handle, pdev); 2436 free_pdev: 2437 free_local_pdev(pdev); 2438 return 0; 2439 } 2440 2441 /** 2442 * proc_show_pdrv_ch0() 2443 * @m: Synthetic file construction data 2444 * @v: File iterator 2445 * 2446 * Display information about the physical drives on physical channel 0. 2447 */ 2448 static int 2449 proc_show_pdrv_ch0(struct seq_file *m, void *v) 2450 { 2451 return proc_show_pdrv(m, m->private, 0); 2452 } 2453 2454 2455 /** 2456 * proc_show_pdrv_ch1() 2457 * @m: Synthetic file construction data 2458 * @v: File iterator 2459 * 2460 * Display information about the physical drives on physical channel 1. 
2461 */ 2462 static int 2463 proc_show_pdrv_ch1(struct seq_file *m, void *v) 2464 { 2465 return proc_show_pdrv(m, m->private, 1); 2466 } 2467 2468 2469 /** 2470 * proc_show_pdrv_ch2() 2471 * @m: Synthetic file construction data 2472 * @v: File iterator 2473 * 2474 * Display information about the physical drives on physical channel 2. 2475 */ 2476 static int 2477 proc_show_pdrv_ch2(struct seq_file *m, void *v) 2478 { 2479 return proc_show_pdrv(m, m->private, 2); 2480 } 2481 2482 2483 /** 2484 * proc_show_pdrv_ch3() 2485 * @m: Synthetic file construction data 2486 * @v: File iterator 2487 * 2488 * Display information about the physical drives on physical channel 3. 2489 */ 2490 static int 2491 proc_show_pdrv_ch3(struct seq_file *m, void *v) 2492 { 2493 return proc_show_pdrv(m, m->private, 3); 2494 } 2495 2496 2497 /** 2498 * proc_show_rdrv() 2499 * @m: Synthetic file construction data 2500 * @adapter: pointer to our soft state 2501 * @start: starting logical drive to display 2502 * @end: ending logical drive to display 2503 * 2504 * We do not print the inquiry information since its already available through 2505 * /proc/scsi/scsi interface 2506 */ 2507 static int 2508 proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end ) 2509 { 2510 dma_addr_t dma_handle; 2511 logdrv_param *lparam; 2512 megacmd_t mc; 2513 char *disk_array; 2514 dma_addr_t disk_array_dma_handle; 2515 caddr_t inquiry; 2516 struct pci_dev *pdev; 2517 u8 *rdrv_state; 2518 int num_ldrv; 2519 u32 array_sz; 2520 int i; 2521 2522 if( make_local_pdev(adapter, &pdev) != 0 ) 2523 return 0; 2524 2525 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2526 goto free_pdev; 2527 2528 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2529 seq_puts(m, "Adapter inquiry failed.\n"); 2530 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2531 goto free_inquiry; 2532 } 2533 2534 memset(&mc, 0, sizeof(megacmd_t)); 2535 2536 if( adapter->flag & BOARD_40LD ) { 2537 array_sz = sizeof(disk_array_40ld); 2538 2539 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state; 2540 2541 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv; 2542 } 2543 else { 2544 array_sz = sizeof(disk_array_8ld); 2545 2546 rdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2547 raid_inq.logdrv_info.ldrv_state; 2548 2549 num_ldrv = ((mraid_ext_inquiry *)inquiry)-> 2550 raid_inq.logdrv_info.num_ldrv; 2551 } 2552 2553 disk_array = dma_alloc_coherent(&pdev->dev, array_sz, 2554 &disk_array_dma_handle, GFP_KERNEL); 2555 2556 if( disk_array == NULL ) { 2557 seq_puts(m, "memory not available.\n"); 2558 goto free_inquiry; 2559 } 2560 2561 mc.xferaddr = (u32)disk_array_dma_handle; 2562 2563 if( adapter->flag & BOARD_40LD ) { 2564 mc.cmd = FC_NEW_CONFIG; 2565 mc.opcode = OP_DCMD_READ_CONFIG; 2566 2567 if( mega_internal_command(adapter, &mc, NULL) ) { 2568 seq_puts(m, "40LD read config failed.\n"); 2569 goto free_pci; 2570 } 2571 2572 } 2573 else { 2574 mc.cmd = NEW_READ_CONFIG_8LD; 2575 2576 if( mega_internal_command(adapter, &mc, NULL) ) { 2577 mc.cmd = READ_CONFIG_8LD; 2578 if( mega_internal_command(adapter, &mc, NULL) ) { 2579 seq_puts(m, "8LD read config failed.\n"); 2580 goto free_pci; 2581 } 2582 } 2583 } 2584 2585 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) { 2586 2587 if( adapter->flag & BOARD_40LD ) { 2588 lparam = 2589 &((disk_array_40ld *)disk_array)->ldrv[i].lparam; 2590 } 2591 else { 2592 lparam = 2593 &((disk_array_8ld *)disk_array)->ldrv[i].lparam; 2594 } 2595 2596 /* 2597 * Check for overflow. 
We print less than 240 characters for
2598 		 * information about each logical drive.
2599 		 */
2600 		seq_printf(m, "Logical drive:%2d:, ", i);
2601 
2602 		switch( rdrv_state[i] & 0x0F ) {
2603 		case RDRV_OFFLINE:
2604 			seq_puts(m, "state: offline");
2605 			break;
2606 		case RDRV_DEGRADED:
2607 			seq_puts(m, "state: degraded");
2608 			break;
2609 		case RDRV_OPTIMAL:
2610 			seq_puts(m, "state: optimal");
2611 			break;
2612 		case RDRV_DELETED:
2613 			seq_puts(m, "state: deleted");
2614 			break;
2615 		default:
2616 			seq_puts(m, "state: unknown");
2617 			break;
2618 		}
2619 
2620 		/*
2621 		 * Check if check-consistency or initialization is going on
2622 		 * for this logical drive.
2623 		 */
2624 		if( (rdrv_state[i] & 0xF0) == 0x20 )
2625 			seq_puts(m, ", check-consistency in progress");
2626 		else if( (rdrv_state[i] & 0xF0) == 0x10 )
2627 			seq_puts(m, ", initialization in progress");
2628 
2629 		seq_putc(m, '\n');
2630 
2631 		seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
2632 		seq_printf(m, "RAID level:%3d, ", lparam->level);
2633 		seq_printf(m, "Stripe size:%3d, ",
2634 			lparam->stripe_sz ? lparam->stripe_sz/2: 128);
2635 		seq_printf(m, "Row size:%3d\n", lparam->row_size);
2636 
2637 		seq_puts(m, "Read Policy: ");
2638 		switch(lparam->read_ahead) {
2639 		case NO_READ_AHEAD:
2640 			seq_puts(m, "No read ahead, ");
2641 			break;
2642 		case READ_AHEAD:
2643 			seq_puts(m, "Read ahead, ");
2644 			break;
2645 		case ADAP_READ_AHEAD:
2646 			seq_puts(m, "Adaptive, ");
2647 			break;
2648 
2649 		}
2650 
2651 		seq_puts(m, "Write Policy: ");
2652 		switch(lparam->write_mode) {
2653 		case WRMODE_WRITE_THRU:
2654 			seq_puts(m, "Write thru, ");
2655 			break;
2656 		case WRMODE_WRITE_BACK:
2657 			seq_puts(m, "Write back, ");
2658 			break;
2659 		}
2660 
2661 		seq_puts(m, "Cache Policy: ");
2662 		switch(lparam->direct_io) {
2663 		case CACHED_IO:
2664 			seq_puts(m, "Cached IO\n\n");
2665 			break;
2666 		case DIRECT_IO:
2667 			seq_puts(m, "Direct IO\n\n");
2668 			break;
2669 		}
2670 	}
2671 
2672 free_pci:
2673 	dma_free_coherent(&pdev->dev, array_sz, disk_array,
2674 			disk_array_dma_handle);
2675 free_inquiry:
2676 	mega_free_inquiry(inquiry, dma_handle, pdev);
2677 free_pdev:
2678 	free_local_pdev(pdev);
2679 	return 0;
2680 }
2681 
2682 /**
2683  * proc_show_rdrv_10()
2684  * @m: Synthetic file construction data
2685  * @v: File iterator
2686  *
2687  * Display real time information about the logical drives 0 through 9.
2688  */
2689 static int
2690 proc_show_rdrv_10(struct seq_file *m, void *v)
2691 {
2692 	return proc_show_rdrv(m, m->private, 0, 9);
2693 }
2694 
2695 
2696 /**
2697  * proc_show_rdrv_20()
2698  * @m: Synthetic file construction data
2699  * @v: File iterator
2700  *
2701  * Display real time information about the logical drives 10 through 19.
2702  */
2703 static int
2704 proc_show_rdrv_20(struct seq_file *m, void *v)
2705 {
2706 	return proc_show_rdrv(m, m->private, 10, 19);
2707 }
2708 
2709 
2710 /**
2711  * proc_show_rdrv_30()
2712  * @m: Synthetic file construction data
2713  * @v: File iterator
2714  *
2715  * Display real time information about the logical drives 20 through 29.
2716  */
2717 static int
2718 proc_show_rdrv_30(struct seq_file *m, void *v)
2719 {
2720 	return proc_show_rdrv(m, m->private, 20, 29);
2721 }
2722 
2723 
2724 /**
2725  * proc_show_rdrv_40()
2726  * @m: Synthetic file construction data
2727  * @v: File iterator
2728  *
2729  * Display real time information about the logical drives 30 through 39.
2730 */ 2731 static int 2732 proc_show_rdrv_40(struct seq_file *m, void *v) 2733 { 2734 return proc_show_rdrv(m, m->private, 30, 39); 2735 } 2736 2737 /** 2738 * mega_create_proc_entry() 2739 * @index: index in soft state array 2740 * @parent: parent node for this /proc entry 2741 * 2742 * Creates /proc entries for our controllers. 2743 */ 2744 static void 2745 mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2746 { 2747 adapter_t *adapter = hba_soft_state[index]; 2748 struct proc_dir_entry *dir; 2749 u8 string[16]; 2750 2751 sprintf(string, "hba%d", adapter->host->host_no); 2752 dir = proc_mkdir_data(string, 0, parent, adapter); 2753 if (!dir) { 2754 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n"); 2755 return; 2756 } 2757 2758 proc_create_single_data("config", S_IRUSR, dir, 2759 proc_show_config, adapter); 2760 proc_create_single_data("stat", S_IRUSR, dir, 2761 proc_show_stat, adapter); 2762 proc_create_single_data("mailbox", S_IRUSR, dir, 2763 proc_show_mbox, adapter); 2764 #if MEGA_HAVE_ENH_PROC 2765 proc_create_single_data("rebuild-rate", S_IRUSR, dir, 2766 proc_show_rebuild_rate, adapter); 2767 proc_create_single_data("battery-status", S_IRUSR, dir, 2768 proc_show_battery, adapter); 2769 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir, 2770 proc_show_pdrv_ch0, adapter); 2771 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir, 2772 proc_show_pdrv_ch1, adapter); 2773 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir, 2774 proc_show_pdrv_ch2, adapter); 2775 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir, 2776 proc_show_pdrv_ch3, adapter); 2777 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir, 2778 proc_show_rdrv_10, adapter); 2779 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir, 2780 proc_show_rdrv_20, adapter); 2781 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir, 2782 proc_show_rdrv_30, adapter); 2783 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir, 2784 proc_show_rdrv_40, adapter); 2785 #endif 2786 } 2787 2788 #else 2789 static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2790 { 2791 } 2792 #endif 2793 2794 2795 /* 2796 * megaraid_biosparam() 2797 * 2798 * Return the disk geometry for a particular disk 2799 */ 2800 static int 2801 megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev, 2802 sector_t capacity, int geom[]) 2803 { 2804 adapter_t *adapter; 2805 int heads; 2806 int sectors; 2807 int cylinders; 2808 2809 /* Get pointer to host config structure */ 2810 adapter = (adapter_t *)sdev->host->hostdata; 2811 2812 if (IS_RAID_CH(adapter, sdev->channel)) { 2813 /* Default heads (64) & sectors (32) */ 2814 heads = 64; 2815 sectors = 32; 2816 cylinders = (ulong)capacity / (heads * sectors); 2817 2818 /* 2819 * Handle extended translation size for logical drives 2820 * > 1Gb 2821 */ 2822 if ((ulong)capacity >= 0x200000) { 2823 heads = 255; 2824 sectors = 63; 2825 cylinders = (ulong)capacity / (heads * sectors); 2826 } 2827 2828 /* return result */ 2829 geom[0] = heads; 2830 geom[1] = sectors; 2831 geom[2] = cylinders; 2832 } 2833 else { 2834 if (scsi_partsize(bdev, capacity, geom)) 2835 return 0; 2836 2837 dev_info(&adapter->dev->dev, 2838 "invalid partition on this disk on channel %d\n", 2839 sdev->channel); 2840 2841 /* Default heads (64) & sectors (32) */ 2842 heads = 64; 2843 sectors = 32; 2844 cylinders = (ulong)capacity / (heads * sectors); 2845 2846 /* Handle extended translation size for logical drives > 1Gb */ 2847 if ((ulong)capacity >= 0x200000) { 2848 
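			/*
			 * Worked example: an 8 GB logical drive has
			 * 16777216 512-byte sectors, so the extended
			 * 255-head/63-sector translation reports
			 * 16777216 / (255 * 63) = 1044 cylinders.
			 */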
heads = 255; 2849 sectors = 63; 2850 cylinders = (ulong)capacity / (heads * sectors); 2851 } 2852 2853 /* return result */ 2854 geom[0] = heads; 2855 geom[1] = sectors; 2856 geom[2] = cylinders; 2857 } 2858 2859 return 0; 2860 } 2861 2862 /** 2863 * mega_init_scb() 2864 * @adapter: pointer to our soft state 2865 * 2866 * Allocate memory for the various pointers in the scb structures: 2867 * scatter-gather list pointer, passthru and extended passthru structure 2868 * pointers. 2869 */ 2870 static int 2871 mega_init_scb(adapter_t *adapter) 2872 { 2873 scb_t *scb; 2874 int i; 2875 2876 for( i = 0; i < adapter->max_cmds; i++ ) { 2877 2878 scb = &adapter->scb_list[i]; 2879 2880 scb->sgl64 = NULL; 2881 scb->sgl = NULL; 2882 scb->pthru = NULL; 2883 scb->epthru = NULL; 2884 } 2885 2886 for( i = 0; i < adapter->max_cmds; i++ ) { 2887 2888 scb = &adapter->scb_list[i]; 2889 2890 scb->idx = i; 2891 2892 scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev, 2893 sizeof(mega_sgl64) * adapter->sglen, 2894 &scb->sgl_dma_addr, GFP_KERNEL); 2895 2896 scb->sgl = (mega_sglist *)scb->sgl64; 2897 2898 if( !scb->sgl ) { 2899 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n"); 2900 mega_free_sgl(adapter); 2901 return -1; 2902 } 2903 2904 scb->pthru = dma_alloc_coherent(&adapter->dev->dev, 2905 sizeof(mega_passthru), 2906 &scb->pthru_dma_addr, GFP_KERNEL); 2907 2908 if( !scb->pthru ) { 2909 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n"); 2910 mega_free_sgl(adapter); 2911 return -1; 2912 } 2913 2914 scb->epthru = dma_alloc_coherent(&adapter->dev->dev, 2915 sizeof(mega_ext_passthru), 2916 &scb->epthru_dma_addr, GFP_KERNEL); 2917 2918 if( !scb->epthru ) { 2919 dev_warn(&adapter->dev->dev, 2920 "Can't allocate extended passthru\n"); 2921 mega_free_sgl(adapter); 2922 return -1; 2923 } 2924 2925 2926 scb->dma_type = MEGA_DMA_TYPE_NONE; 2927 2928 /* 2929 * Link to free list 2930 * lock not required since we are loading the driver, so no 2931 * commands possible right now. 2932 */ 2933 scb->state = SCB_FREE; 2934 scb->cmd = NULL; 2935 list_add(&scb->list, &adapter->free_list); 2936 } 2937 2938 return 0; 2939 } 2940 2941 2942 /** 2943 * megadev_open() 2944 * @inode: unused 2945 * @filep: unused 2946 * 2947 * Routines for the character/ioctl interface to the driver. Find out if this 2948 * is a valid open. 2949 */ 2950 static int 2951 megadev_open (struct inode *inode, struct file *filep) 2952 { 2953 /* 2954 * Only allow superuser to access private ioctl interface 2955 */ 2956 if( !capable(CAP_SYS_ADMIN) ) return -EACCES; 2957 2958 return 0; 2959 } 2960 2961 2962 /** 2963 * megadev_ioctl() 2964 * @filep: Our device file 2965 * @cmd: ioctl command 2966 * @arg: user buffer 2967 * 2968 * ioctl entry point for our private ioctl interface. We move the data in from 2969 * the user space, prepare the command (if necessary, convert the old MIMD 2970 * ioctl to new ioctl command), and issue a synchronous command to the 2971 * controller. 
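 * MBOX_CMD requests are inspected further below: logical drive deletion is
 * special-cased, extended and 64-bit passthru commands are rejected, and
 * both regular passthru and DCMD commands have their kernel buffers
 * allocated below 4GB (via make_local_pdev()) because the mailbox carries
 * only 32-bit addresses.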
2972  */
2973 static int
2974 megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2975 {
2976 	adapter_t	*adapter;
2977 	nitioctl_t	uioc;
2978 	int		adapno;
2979 	int		rval;
2980 	mega_passthru	__user *upthru;	/* user address for passthru */
2981 	mega_passthru	*pthru;		/* copy user passthru here */
2982 	dma_addr_t	pthru_dma_hndl;
2983 	void		*data = NULL;	/* data to be transferred */
2984 	dma_addr_t	data_dma_hndl;	/* dma handle for data xfer area */
2985 	megacmd_t	mc;
2986 #if MEGA_HAVE_STATS
2987 	megastat_t	__user *ustats = NULL;
2988 	int		num_ldrv = 0;
2989 #endif
2990 	u32		uxferaddr = 0;
2991 	struct pci_dev	*pdev;
2992 
2993 	/*
2994 	 * Make sure only USCSICMD commands are issued through this interface.
2995 	 * A MIMD application might still fire a different command.
2996 	 */
2997 	if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
2998 		return -EINVAL;
2999 	}
3000 
3001 	/*
3002 	 * Check and convert a possible MIMD command to a NIT command.
3003 	 * mega_m_to_n() copies the data from the user space, so we do not
3004 	 * have to do it here.
3005 	 * NOTE: We will need some user address to copy out the data, therefore
3006 	 * the interface layer will also provide us with the required user
3007 	 * addresses.
3008 	 */
3009 	memset(&uioc, 0, sizeof(nitioctl_t));
3010 	if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
3011 		return rval;
3012 
3013 
3014 	switch( uioc.opcode ) {
3015 
3016 	case GET_DRIVER_VER:
3017 		if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
3018 			return (-EFAULT);
3019 
3020 		break;
3021 
3022 	case GET_N_ADAP:
3023 		if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
3024 			return (-EFAULT);
3025 
3026 		/*
3027 		 * Shucks. The MIMD interface returns a positive value for the
3028 		 * number of adapters. TODO: Change it to return 0 when there
3029 		 * is no application using the mimd interface.
3030 */ 3031 return hba_count; 3032 3033 case GET_ADAP_INFO: 3034 3035 /* 3036 * Which adapter 3037 */ 3038 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3039 return (-ENODEV); 3040 3041 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno, 3042 sizeof(struct mcontroller)) ) 3043 return (-EFAULT); 3044 break; 3045 3046 #if MEGA_HAVE_STATS 3047 3048 case GET_STATS: 3049 /* 3050 * Which adapter 3051 */ 3052 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3053 return (-ENODEV); 3054 3055 adapter = hba_soft_state[adapno]; 3056 3057 ustats = uioc.uioc_uaddr; 3058 3059 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) ) 3060 return (-EFAULT); 3061 3062 /* 3063 * Check for the validity of the logical drive number 3064 */ 3065 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL; 3066 3067 if( copy_to_user(ustats->nreads, adapter->nreads, 3068 num_ldrv*sizeof(u32)) ) 3069 return -EFAULT; 3070 3071 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks, 3072 num_ldrv*sizeof(u32)) ) 3073 return -EFAULT; 3074 3075 if( copy_to_user(ustats->nwrites, adapter->nwrites, 3076 num_ldrv*sizeof(u32)) ) 3077 return -EFAULT; 3078 3079 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks, 3080 num_ldrv*sizeof(u32)) ) 3081 return -EFAULT; 3082 3083 if( copy_to_user(ustats->rd_errors, adapter->rd_errors, 3084 num_ldrv*sizeof(u32)) ) 3085 return -EFAULT; 3086 3087 if( copy_to_user(ustats->wr_errors, adapter->wr_errors, 3088 num_ldrv*sizeof(u32)) ) 3089 return -EFAULT; 3090 3091 return 0; 3092 3093 #endif 3094 case MBOX_CMD: 3095 3096 /* 3097 * Which adapter 3098 */ 3099 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3100 return (-ENODEV); 3101 3102 adapter = hba_soft_state[adapno]; 3103 3104 /* 3105 * Deletion of logical drive is a special case. The adapter 3106 * should be quiescent before this command is issued. 3107 */ 3108 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV && 3109 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) { 3110 3111 /* 3112 * Do we support this feature 3113 */ 3114 if( !adapter->support_random_del ) { 3115 dev_warn(&adapter->dev->dev, "logdrv " 3116 "delete on non-supporting F/W\n"); 3117 3118 return (-EINVAL); 3119 } 3120 3121 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] ); 3122 3123 if( rval == 0 ) { 3124 memset(&mc, 0, sizeof(megacmd_t)); 3125 3126 mc.status = rval; 3127 3128 rval = mega_n_to_m((void __user *)arg, &mc); 3129 } 3130 3131 return rval; 3132 } 3133 /* 3134 * This interface only support the regular passthru commands. 3135 * Reject extended passthru and 64-bit passthru 3136 */ 3137 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || 3138 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { 3139 3140 dev_warn(&adapter->dev->dev, "rejected passthru\n"); 3141 3142 return (-EINVAL); 3143 } 3144 3145 /* 3146 * For all internal commands, the buffer must be allocated in 3147 * <4GB address range 3148 */ 3149 if( make_local_pdev(adapter, &pdev) != 0 ) 3150 return -EIO; 3151 3152 /* Is it a passthru command or a DCMD */ 3153 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) { 3154 /* Passthru commands */ 3155 3156 pthru = dma_alloc_coherent(&pdev->dev, 3157 sizeof(mega_passthru), 3158 &pthru_dma_hndl, GFP_KERNEL); 3159 3160 if( pthru == NULL ) { 3161 free_local_pdev(pdev); 3162 return (-ENOMEM); 3163 } 3164 3165 /* 3166 * The user passthru structure 3167 */ 3168 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr; 3169 3170 /* 3171 * Copy in the user passthru here. 
3172 */ 3173 if( copy_from_user(pthru, upthru, 3174 sizeof(mega_passthru)) ) { 3175 3176 dma_free_coherent(&pdev->dev, 3177 sizeof(mega_passthru), 3178 pthru, pthru_dma_hndl); 3179 3180 free_local_pdev(pdev); 3181 3182 return (-EFAULT); 3183 } 3184 3185 /* 3186 * Is there a data transfer 3187 */ 3188 if( pthru->dataxferlen ) { 3189 data = dma_alloc_coherent(&pdev->dev, 3190 pthru->dataxferlen, 3191 &data_dma_hndl, 3192 GFP_KERNEL); 3193 3194 if( data == NULL ) { 3195 dma_free_coherent(&pdev->dev, 3196 sizeof(mega_passthru), 3197 pthru, 3198 pthru_dma_hndl); 3199 3200 free_local_pdev(pdev); 3201 3202 return (-ENOMEM); 3203 } 3204 3205 /* 3206 * Save the user address and point the kernel 3207 * address at just allocated memory 3208 */ 3209 uxferaddr = pthru->dataxferaddr; 3210 pthru->dataxferaddr = data_dma_hndl; 3211 } 3212 3213 3214 /* 3215 * Is data coming down-stream 3216 */ 3217 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) { 3218 /* 3219 * Get the user data 3220 */ 3221 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3222 pthru->dataxferlen) ) { 3223 rval = (-EFAULT); 3224 goto freemem_and_return; 3225 } 3226 } 3227 3228 memset(&mc, 0, sizeof(megacmd_t)); 3229 3230 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 3231 mc.xferaddr = (u32)pthru_dma_hndl; 3232 3233 /* 3234 * Issue the command 3235 */ 3236 mega_internal_command(adapter, &mc, pthru); 3237 3238 rval = mega_n_to_m((void __user *)arg, &mc); 3239 3240 if( rval ) goto freemem_and_return; 3241 3242 3243 /* 3244 * Is data going up-stream 3245 */ 3246 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { 3247 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3248 pthru->dataxferlen) ) { 3249 rval = (-EFAULT); 3250 } 3251 } 3252 3253 /* 3254 * Send the request sense data also, irrespective of 3255 * whether the user has asked for it or not. 
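			 * Only the 14-byte sense area is copied back here;
			 * the command status itself is returned separately
			 * through mega_n_to_m().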
3256 */ 3257 if (copy_to_user(upthru->reqsensearea, 3258 pthru->reqsensearea, 14)) 3259 rval = -EFAULT; 3260 3261 freemem_and_return: 3262 if( pthru->dataxferlen ) { 3263 dma_free_coherent(&pdev->dev, 3264 pthru->dataxferlen, data, 3265 data_dma_hndl); 3266 } 3267 3268 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), 3269 pthru, pthru_dma_hndl); 3270 3271 free_local_pdev(pdev); 3272 3273 return rval; 3274 } 3275 else { 3276 /* DCMD commands */ 3277 3278 /* 3279 * Is there a data transfer 3280 */ 3281 if( uioc.xferlen ) { 3282 data = dma_alloc_coherent(&pdev->dev, 3283 uioc.xferlen, 3284 &data_dma_hndl, 3285 GFP_KERNEL); 3286 3287 if( data == NULL ) { 3288 free_local_pdev(pdev); 3289 return (-ENOMEM); 3290 } 3291 3292 uxferaddr = MBOX(uioc)->xferaddr; 3293 } 3294 3295 /* 3296 * Is data coming down-stream 3297 */ 3298 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) { 3299 /* 3300 * Get the user data 3301 */ 3302 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3303 uioc.xferlen) ) { 3304 3305 dma_free_coherent(&pdev->dev, 3306 uioc.xferlen, data, 3307 data_dma_hndl); 3308 3309 free_local_pdev(pdev); 3310 3311 return (-EFAULT); 3312 } 3313 } 3314 3315 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t)); 3316 3317 mc.xferaddr = (u32)data_dma_hndl; 3318 3319 /* 3320 * Issue the command 3321 */ 3322 mega_internal_command(adapter, &mc, NULL); 3323 3324 rval = mega_n_to_m((void __user *)arg, &mc); 3325 3326 if( rval ) { 3327 if( uioc.xferlen ) { 3328 dma_free_coherent(&pdev->dev, 3329 uioc.xferlen, data, 3330 data_dma_hndl); 3331 } 3332 3333 free_local_pdev(pdev); 3334 3335 return rval; 3336 } 3337 3338 /* 3339 * Is data going up-stream 3340 */ 3341 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { 3342 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3343 uioc.xferlen) ) { 3344 3345 rval = (-EFAULT); 3346 } 3347 } 3348 3349 if( uioc.xferlen ) { 3350 dma_free_coherent(&pdev->dev, uioc.xferlen, 3351 data, data_dma_hndl); 3352 } 3353 3354 free_local_pdev(pdev); 3355 3356 return rval; 3357 } 3358 3359 default: 3360 return (-EINVAL); 3361 } 3362 3363 return 0; 3364 } 3365 3366 static long 3367 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 3368 { 3369 int ret; 3370 3371 mutex_lock(&megadev_mutex); 3372 ret = megadev_ioctl(filep, cmd, arg); 3373 mutex_unlock(&megadev_mutex); 3374 3375 return ret; 3376 } 3377 3378 /** 3379 * mega_m_to_n() 3380 * @arg: user address 3381 * @uioc: new ioctl structure 3382 * 3383 * A thin layer to convert older mimd interface ioctl structure to NIT ioctl 3384 * structure 3385 * 3386 * Converts the older mimd ioctl structure to newer NIT structure 3387 */ 3388 static int 3389 mega_m_to_n(void __user *arg, nitioctl_t *uioc) 3390 { 3391 struct uioctl_t uioc_mimd; 3392 char signature[8] = {0}; 3393 u8 opcode; 3394 u8 subopcode; 3395 3396 3397 /* 3398 * check is the application conforms to NIT. We do not have to do much 3399 * in that case. 3400 * We exploit the fact that the signature is stored in the very 3401 * beginning of the structure. 3402 */ 3403 3404 if( copy_from_user(signature, arg, 7) ) 3405 return (-EFAULT); 3406 3407 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3408 3409 /* 3410 * NOTE NOTE: The nit ioctl is still under flux because of 3411 * change of mailbox definition, in HPE. No applications yet 3412 * use this interface and let's not have applications use this 3413 * interface till the new specifitions are in place. 
3414 */ 3415 return -EINVAL; 3416 #if 0 3417 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) ) 3418 return (-EFAULT); 3419 return 0; 3420 #endif 3421 } 3422 3423 /* 3424 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t 3425 * 3426 * Get the user ioctl structure 3427 */ 3428 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) ) 3429 return (-EFAULT); 3430 3431 3432 /* 3433 * Get the opcode and subopcode for the commands 3434 */ 3435 opcode = uioc_mimd.ui.fcs.opcode; 3436 subopcode = uioc_mimd.ui.fcs.subopcode; 3437 3438 switch (opcode) { 3439 case 0x82: 3440 3441 switch (subopcode) { 3442 3443 case MEGAIOC_QDRVRVER: /* Query driver version */ 3444 uioc->opcode = GET_DRIVER_VER; 3445 uioc->uioc_uaddr = uioc_mimd.data; 3446 break; 3447 3448 case MEGAIOC_QNADAP: /* Get # of adapters */ 3449 uioc->opcode = GET_N_ADAP; 3450 uioc->uioc_uaddr = uioc_mimd.data; 3451 break; 3452 3453 case MEGAIOC_QADAPINFO: /* Get adapter information */ 3454 uioc->opcode = GET_ADAP_INFO; 3455 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3456 uioc->uioc_uaddr = uioc_mimd.data; 3457 break; 3458 3459 default: 3460 return(-EINVAL); 3461 } 3462 3463 break; 3464 3465 3466 case 0x81: 3467 3468 uioc->opcode = MBOX_CMD; 3469 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3470 3471 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3472 3473 uioc->xferlen = uioc_mimd.ui.fcs.length; 3474 3475 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3476 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3477 3478 break; 3479 3480 case 0x80: 3481 3482 uioc->opcode = MBOX_CMD; 3483 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3484 3485 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3486 3487 /* 3488 * Choose the xferlen bigger of input and output data 3489 */ 3490 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ? 3491 uioc_mimd.outlen : uioc_mimd.inlen; 3492 3493 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3494 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3495 3496 break; 3497 3498 default: 3499 return (-EINVAL); 3500 3501 } 3502 3503 return 0; 3504 } 3505 3506 /* 3507 * mega_n_to_m() 3508 * @arg: user address 3509 * @mc: mailbox command 3510 * 3511 * Updates the status information to the application, depending on application 3512 * conforms to older mimd ioctl interface or newer NIT ioctl interface 3513 */ 3514 static int 3515 mega_n_to_m(void __user *arg, megacmd_t *mc) 3516 { 3517 nitioctl_t __user *uiocp; 3518 megacmd_t __user *umc; 3519 mega_passthru __user *upthru; 3520 struct uioctl_t __user *uioc_mimd; 3521 char signature[8] = {0}; 3522 3523 /* 3524 * check is the application conforms to NIT. 
3525 */ 3526 if( copy_from_user(signature, arg, 7) ) 3527 return -EFAULT; 3528 3529 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3530 3531 uiocp = arg; 3532 3533 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) ) 3534 return (-EFAULT); 3535 3536 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3537 3538 umc = MBOX_P(uiocp); 3539 3540 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3541 return -EFAULT; 3542 3543 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus)) 3544 return (-EFAULT); 3545 } 3546 } 3547 else { 3548 uioc_mimd = arg; 3549 3550 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) ) 3551 return (-EFAULT); 3552 3553 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3554 3555 umc = (megacmd_t __user *)uioc_mimd->mbox; 3556 3557 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3558 return (-EFAULT); 3559 3560 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) ) 3561 return (-EFAULT); 3562 } 3563 } 3564 3565 return 0; 3566 } 3567 3568 3569 /* 3570 * MEGARAID 'FW' commands. 3571 */ 3572 3573 /** 3574 * mega_is_bios_enabled() 3575 * @adapter: pointer to our soft state 3576 * 3577 * issue command to find out if the BIOS is enabled for this controller 3578 */ 3579 static int 3580 mega_is_bios_enabled(adapter_t *adapter) 3581 { 3582 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3583 mbox_t *mbox; 3584 3585 mbox = (mbox_t *)raw_mbox; 3586 3587 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3588 3589 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3590 3591 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3592 3593 raw_mbox[0] = IS_BIOS_ENABLED; 3594 raw_mbox[2] = GET_BIOS; 3595 3596 issue_scb_block(adapter, raw_mbox); 3597 3598 return *(char *)adapter->mega_buffer; 3599 } 3600 3601 3602 /** 3603 * mega_enum_raid_scsi() 3604 * @adapter: pointer to our soft state 3605 * 3606 * Find out what channels are RAID/SCSI. This information is used to 3607 * differentiate the virtual channels and physical channels and to support 3608 * ROMB feature and non-disk devices. 3609 */ 3610 static void 3611 mega_enum_raid_scsi(adapter_t *adapter) 3612 { 3613 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3614 mbox_t *mbox; 3615 int i; 3616 3617 mbox = (mbox_t *)raw_mbox; 3618 3619 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3620 3621 /* 3622 * issue command to find out what channels are raid/scsi 3623 */ 3624 raw_mbox[0] = CHNL_CLASS; 3625 raw_mbox[2] = GET_CHNL_CLASS; 3626 3627 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3628 3629 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3630 3631 /* 3632 * Non-ROMB firmware fail this command, so all channels 3633 * must be shown RAID 3634 */ 3635 adapter->mega_ch_class = 0xFF; 3636 3637 if(!issue_scb_block(adapter, raw_mbox)) { 3638 adapter->mega_ch_class = *((char *)adapter->mega_buffer); 3639 3640 } 3641 3642 for( i = 0; i < adapter->product_info.nchannels; i++ ) { 3643 if( (adapter->mega_ch_class >> i) & 0x01 ) { 3644 dev_info(&adapter->dev->dev, "channel[%d] is raid\n", 3645 i); 3646 } 3647 else { 3648 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n", 3649 i); 3650 } 3651 } 3652 3653 return; 3654 } 3655 3656 3657 /** 3658 * mega_get_boot_drv() 3659 * @adapter: pointer to our soft state 3660 * 3661 * Find out which device is the boot device. Note, any logical drive or any 3662 * phyical device (e.g., a CDROM) can be designated as a boot device. 
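 * The BIOS private data block fetched below is trusted only if its checksum
 * verifies: the stored cksum must be the 16-bit negation of the sum of the
 * first 14 bytes, i.e. the two values add up to zero modulo 2^16.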
3663 */ 3664 static void 3665 mega_get_boot_drv(adapter_t *adapter) 3666 { 3667 struct private_bios_data *prv_bios_data; 3668 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3669 mbox_t *mbox; 3670 u16 cksum = 0; 3671 u8 *cksum_p; 3672 u8 boot_pdrv; 3673 int i; 3674 3675 mbox = (mbox_t *)raw_mbox; 3676 3677 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3678 3679 raw_mbox[0] = BIOS_PVT_DATA; 3680 raw_mbox[2] = GET_BIOS_PVT_DATA; 3681 3682 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3683 3684 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3685 3686 adapter->boot_ldrv_enabled = 0; 3687 adapter->boot_ldrv = 0; 3688 3689 adapter->boot_pdrv_enabled = 0; 3690 adapter->boot_pdrv_ch = 0; 3691 adapter->boot_pdrv_tgt = 0; 3692 3693 if(issue_scb_block(adapter, raw_mbox) == 0) { 3694 prv_bios_data = 3695 (struct private_bios_data *)adapter->mega_buffer; 3696 3697 cksum = 0; 3698 cksum_p = (char *)prv_bios_data; 3699 for (i = 0; i < 14; i++ ) { 3700 cksum += (u16)(*cksum_p++); 3701 } 3702 3703 if (prv_bios_data->cksum == (u16)(0-cksum) ) { 3704 3705 /* 3706 * If MSB is set, a physical drive is set as boot 3707 * device 3708 */ 3709 if( prv_bios_data->boot_drv & 0x80 ) { 3710 adapter->boot_pdrv_enabled = 1; 3711 boot_pdrv = prv_bios_data->boot_drv & 0x7F; 3712 adapter->boot_pdrv_ch = boot_pdrv / 16; 3713 adapter->boot_pdrv_tgt = boot_pdrv % 16; 3714 } 3715 else { 3716 adapter->boot_ldrv_enabled = 1; 3717 adapter->boot_ldrv = prv_bios_data->boot_drv; 3718 } 3719 } 3720 } 3721 3722 } 3723 3724 /** 3725 * mega_support_random_del() 3726 * @adapter: pointer to our soft state 3727 * 3728 * Find out if this controller supports random deletion and addition of 3729 * logical drives 3730 */ 3731 static int 3732 mega_support_random_del(adapter_t *adapter) 3733 { 3734 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3735 mbox_t *mbox; 3736 int rval; 3737 3738 mbox = (mbox_t *)raw_mbox; 3739 3740 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3741 3742 /* 3743 * issue command 3744 */ 3745 raw_mbox[0] = FC_DEL_LOGDRV; 3746 raw_mbox[2] = OP_SUP_DEL_LOGDRV; 3747 3748 rval = issue_scb_block(adapter, raw_mbox); 3749 3750 return !rval; 3751 } 3752 3753 3754 /** 3755 * mega_support_ext_cdb() 3756 * @adapter: pointer to our soft state 3757 * 3758 * Find out if this firmware support cdblen > 10 3759 */ 3760 static int 3761 mega_support_ext_cdb(adapter_t *adapter) 3762 { 3763 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3764 mbox_t *mbox; 3765 int rval; 3766 3767 mbox = (mbox_t *)raw_mbox; 3768 3769 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 3770 /* 3771 * issue command to find out if controller supports extended CDBs. 3772 */ 3773 raw_mbox[0] = 0xA4; 3774 raw_mbox[2] = 0x16; 3775 3776 rval = issue_scb_block(adapter, raw_mbox); 3777 3778 return !rval; 3779 } 3780 3781 3782 /** 3783 * mega_del_logdrv() 3784 * @adapter: pointer to our soft state 3785 * @logdrv: logical drive to be deleted 3786 * 3787 * Delete the specified logical drive. It is the responsibility of the user 3788 * app to let the OS know about this operation. 3789 */ 3790 static int 3791 mega_del_logdrv(adapter_t *adapter, int logdrv) 3792 { 3793 unsigned long flags; 3794 scb_t *scb; 3795 int rval; 3796 3797 /* 3798 * Stop sending commands to the controller, queue them internally. 3799 * When deletion is complete, ISR will flush the queue. 
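	 * While adapter->quiescent is set, newly submitted commands only
	 * collect on the pending_list; mega_runpendq() is called again after
	 * the flag is cleared below, so normal I/O resumes once the delete
	 * has finished.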
3800 */ 3801 atomic_set(&adapter->quiescent, 1); 3802 3803 /* 3804 * Wait till all the issued commands are complete and there are no 3805 * commands in the pending queue 3806 */ 3807 while (atomic_read(&adapter->pend_cmds) > 0 || 3808 !list_empty(&adapter->pending_list)) 3809 msleep(1000); /* sleep for 1s */ 3810 3811 rval = mega_do_del_logdrv(adapter, logdrv); 3812 3813 spin_lock_irqsave(&adapter->lock, flags); 3814 3815 /* 3816 * If delete operation was successful, add 0x80 to the logical drive 3817 * ids for commands in the pending queue. 3818 */ 3819 if (adapter->read_ldidmap) { 3820 struct list_head *pos; 3821 list_for_each(pos, &adapter->pending_list) { 3822 scb = list_entry(pos, scb_t, list); 3823 if (scb->pthru->logdrv < 0x80 ) 3824 scb->pthru->logdrv += 0x80; 3825 } 3826 } 3827 3828 atomic_set(&adapter->quiescent, 0); 3829 3830 mega_runpendq(adapter); 3831 3832 spin_unlock_irqrestore(&adapter->lock, flags); 3833 3834 return rval; 3835 } 3836 3837 3838 static int 3839 mega_do_del_logdrv(adapter_t *adapter, int logdrv) 3840 { 3841 megacmd_t mc; 3842 int rval; 3843 3844 memset( &mc, 0, sizeof(megacmd_t)); 3845 3846 mc.cmd = FC_DEL_LOGDRV; 3847 mc.opcode = OP_DEL_LOGDRV; 3848 mc.subopcode = logdrv; 3849 3850 rval = mega_internal_command(adapter, &mc, NULL); 3851 3852 /* log this event */ 3853 if(rval) { 3854 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv); 3855 return rval; 3856 } 3857 3858 /* 3859 * After deleting first logical drive, the logical drives must be 3860 * addressed by adding 0x80 to the logical drive id. 3861 */ 3862 adapter->read_ldidmap = 1; 3863 3864 return rval; 3865 } 3866 3867 3868 /** 3869 * mega_get_max_sgl() 3870 * @adapter: pointer to our soft state 3871 * 3872 * Find out the maximum number of scatter-gather elements supported by this 3873 * version of the firmware 3874 */ 3875 static void 3876 mega_get_max_sgl(adapter_t *adapter) 3877 { 3878 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3879 mbox_t *mbox; 3880 3881 mbox = (mbox_t *)raw_mbox; 3882 3883 memset(mbox, 0, sizeof(raw_mbox)); 3884 3885 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3886 3887 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3888 3889 raw_mbox[0] = MAIN_MISC_OPCODE; 3890 raw_mbox[2] = GET_MAX_SG_SUPPORT; 3891 3892 3893 if( issue_scb_block(adapter, raw_mbox) ) { 3894 /* 3895 * f/w does not support this command. Choose the default value 3896 */ 3897 adapter->sglen = MIN_SGLIST; 3898 } 3899 else { 3900 adapter->sglen = *((char *)adapter->mega_buffer); 3901 3902 /* 3903 * Make sure this is not more than the resources we are 3904 * planning to allocate 3905 */ 3906 if ( adapter->sglen > MAX_SGLIST ) 3907 adapter->sglen = MAX_SGLIST; 3908 } 3909 3910 return; 3911 } 3912 3913 3914 /** 3915 * mega_support_cluster() 3916 * @adapter: pointer to our soft state 3917 * 3918 * Find out if this firmware support cluster calls. 3919 */ 3920 static int 3921 mega_support_cluster(adapter_t *adapter) 3922 { 3923 unsigned char raw_mbox[sizeof(struct mbox_out)]; 3924 mbox_t *mbox; 3925 3926 mbox = (mbox_t *)raw_mbox; 3927 3928 memset(mbox, 0, sizeof(raw_mbox)); 3929 3930 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3931 3932 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle; 3933 3934 /* 3935 * Try to get the initiator id. This command will succeed iff the 3936 * clustering is available on this HBA. 3937 */ 3938 raw_mbox[0] = MEGA_GET_TARGET_ID; 3939 3940 if( issue_scb_block(adapter, raw_mbox) == 0 ) { 3941 3942 /* 3943 * Cluster support available. 
Get the initiator target id. 3944 * Tell our id to mid-layer too. 3945 */ 3946 adapter->this_id = *(u32 *)adapter->mega_buffer; 3947 adapter->host->this_id = adapter->this_id; 3948 3949 return 1; 3950 } 3951 3952 return 0; 3953 } 3954 3955 #ifdef CONFIG_PROC_FS 3956 /** 3957 * mega_adapinq() 3958 * @adapter: pointer to our soft state 3959 * @dma_handle: DMA address of the buffer 3960 * 3961 * Issue internal commands while interrupts are available. 3962 * We only issue direct mailbox commands from within the driver. ioctl() 3963 * interface using these routines can issue passthru commands. 3964 */ 3965 static int 3966 mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle) 3967 { 3968 megacmd_t mc; 3969 3970 memset(&mc, 0, sizeof(megacmd_t)); 3971 3972 if( adapter->flag & BOARD_40LD ) { 3973 mc.cmd = FC_NEW_CONFIG; 3974 mc.opcode = NC_SUBOP_ENQUIRY3; 3975 mc.subopcode = ENQ3_GET_SOLICITED_FULL; 3976 } 3977 else { 3978 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ; 3979 } 3980 3981 mc.xferaddr = (u32)dma_handle; 3982 3983 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) { 3984 return -1; 3985 } 3986 3987 return 0; 3988 } 3989 3990 3991 /** 3992 * mega_internal_dev_inquiry() 3993 * @adapter: pointer to our soft state 3994 * @ch: channel for this device 3995 * @tgt: ID of this device 3996 * @buf_dma_handle: DMA address of the buffer 3997 * 3998 * Issue the scsi inquiry for the specified device. 3999 */ 4000 static int 4001 mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, 4002 dma_addr_t buf_dma_handle) 4003 { 4004 mega_passthru *pthru; 4005 dma_addr_t pthru_dma_handle; 4006 megacmd_t mc; 4007 int rval; 4008 struct pci_dev *pdev; 4009 4010 4011 /* 4012 * For all internal commands, the buffer must be allocated in <4GB 4013 * address range 4014 */ 4015 if( make_local_pdev(adapter, &pdev) != 0 ) return -1; 4016 4017 pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru), 4018 &pthru_dma_handle, GFP_KERNEL); 4019 4020 if( pthru == NULL ) { 4021 free_local_pdev(pdev); 4022 return -1; 4023 } 4024 4025 pthru->timeout = 2; 4026 pthru->ars = 1; 4027 pthru->reqsenselen = 14; 4028 pthru->islogical = 0; 4029 4030 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch; 4031 4032 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt; 4033 4034 pthru->cdblen = 6; 4035 4036 pthru->cdb[0] = INQUIRY; 4037 pthru->cdb[1] = 0; 4038 pthru->cdb[2] = 0; 4039 pthru->cdb[3] = 0; 4040 pthru->cdb[4] = 255; 4041 pthru->cdb[5] = 0; 4042 4043 4044 pthru->dataxferaddr = (u32)buf_dma_handle; 4045 pthru->dataxferlen = 256; 4046 4047 memset(&mc, 0, sizeof(megacmd_t)); 4048 4049 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 4050 mc.xferaddr = (u32)pthru_dma_handle; 4051 4052 rval = mega_internal_command(adapter, &mc, pthru); 4053 4054 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru, 4055 pthru_dma_handle); 4056 4057 free_local_pdev(pdev); 4058 4059 return rval; 4060 } 4061 #endif 4062 4063 /** 4064 * mega_internal_command() 4065 * @adapter: pointer to our soft state 4066 * @mc: the mailbox command 4067 * @pthru: Passthru structure for DCDB commands 4068 * 4069 * Issue the internal commands in interrupt mode. 4070 * The last argument is the address of the passthru structure if the command 4071 * to be fired is a passthru command 4072 * 4073 * Note: parameter 'pthru' is null for non-passthru commands. 
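 * All internal commands share the reserved id CMDID_INT_CMDS and the single
 * int_scb, so they are serialized under int_mtx; the caller sleeps on
 * int_waitq and picks up the result from adapter->int_status once the
 * command is completed from the interrupt path.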
4074 */ 4075 static int 4076 mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) 4077 { 4078 unsigned long flags; 4079 scb_t *scb; 4080 int rval; 4081 4082 /* 4083 * The internal commands share one command id and hence are 4084 * serialized. This is so because we want to reserve maximum number of 4085 * available command ids for the I/O commands. 4086 */ 4087 mutex_lock(&adapter->int_mtx); 4088 4089 scb = &adapter->int_scb; 4090 memset(scb, 0, sizeof(scb_t)); 4091 4092 scb->idx = CMDID_INT_CMDS; 4093 scb->state |= SCB_ACTIVE | SCB_PENDQ; 4094 4095 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t)); 4096 4097 /* 4098 * Is it a passthru command 4099 */ 4100 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU) 4101 scb->pthru = pthru; 4102 4103 spin_lock_irqsave(&adapter->lock, flags); 4104 list_add_tail(&scb->list, &adapter->pending_list); 4105 /* 4106 * Check if the HBA is in quiescent state, e.g., during a 4107 * delete logical drive opertion. If it is, don't run 4108 * the pending_list. 4109 */ 4110 if (atomic_read(&adapter->quiescent) == 0) 4111 mega_runpendq(adapter); 4112 spin_unlock_irqrestore(&adapter->lock, flags); 4113 4114 wait_for_completion(&adapter->int_waitq); 4115 4116 mc->status = rval = adapter->int_status; 4117 4118 /* 4119 * Print a debug message for all failed commands. Applications can use 4120 * this information. 4121 */ 4122 if (rval && trace_level) { 4123 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n", 4124 mc->cmd, mc->opcode, mc->subopcode, rval); 4125 } 4126 4127 mutex_unlock(&adapter->int_mtx); 4128 return rval; 4129 } 4130 4131 static struct scsi_host_template megaraid_template = { 4132 .module = THIS_MODULE, 4133 .name = "MegaRAID", 4134 .proc_name = "megaraid_legacy", 4135 .info = megaraid_info, 4136 .queuecommand = megaraid_queue, 4137 .bios_param = megaraid_biosparam, 4138 .max_sectors = MAX_SECTORS_PER_IO, 4139 .can_queue = MAX_COMMANDS, 4140 .this_id = DEFAULT_INITIATOR_ID, 4141 .sg_tablesize = MAX_SGLIST, 4142 .cmd_per_lun = DEF_CMD_PER_LUN, 4143 .eh_abort_handler = megaraid_abort, 4144 .eh_device_reset_handler = megaraid_reset, 4145 .eh_bus_reset_handler = megaraid_reset, 4146 .eh_host_reset_handler = megaraid_reset, 4147 .no_write_same = 1, 4148 }; 4149 4150 static int 4151 megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 4152 { 4153 struct Scsi_Host *host; 4154 adapter_t *adapter; 4155 unsigned long mega_baseport, tbase, flag = 0; 4156 u16 subsysid, subsysvid; 4157 u8 pci_bus, pci_dev_func; 4158 int irq, i, j; 4159 int error = -ENODEV; 4160 4161 if (hba_count >= MAX_CONTROLLERS) 4162 goto out; 4163 4164 if (pci_enable_device(pdev)) 4165 goto out; 4166 pci_set_master(pdev); 4167 4168 pci_bus = pdev->bus->number; 4169 pci_dev_func = pdev->devfn; 4170 4171 /* 4172 * The megaraid3 stuff reports the ID of the Intel part which is not 4173 * remotely specific to the megaraid 4174 */ 4175 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 4176 u16 magic; 4177 /* 4178 * Don't fall over the Compaq management cards using the same 4179 * PCI identifier 4180 */ 4181 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && 4182 pdev->subsystem_device == 0xC000) 4183 goto out_disable_device; 4184 /* Now check the magic signature byte */ 4185 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); 4186 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) 4187 goto out_disable_device; 4188 /* Ok it is probably a megaraid */ 4189 } 4190 4191 /* 4192 * For these vendor and device ids, signature offsets are not 4193 * valid and 64 bit 
is implicit 4194 */ 4195 if (id->driver_data & BOARD_64BIT) 4196 flag |= BOARD_64BIT; 4197 else { 4198 u32 magic64; 4199 4200 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64); 4201 if (magic64 == HBA_SIGNATURE_64BIT) 4202 flag |= BOARD_64BIT; 4203 } 4204 4205 subsysvid = pdev->subsystem_vendor; 4206 subsysid = pdev->subsystem_device; 4207 4208 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n", 4209 id->vendor, id->device); 4210 4211 /* Read the base port and IRQ from PCI */ 4212 mega_baseport = pci_resource_start(pdev, 0); 4213 irq = pdev->irq; 4214 4215 tbase = mega_baseport; 4216 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) { 4217 flag |= BOARD_MEMMAP; 4218 4219 if (!request_mem_region(mega_baseport, 128, "megaraid")) { 4220 dev_warn(&pdev->dev, "mem region busy!\n"); 4221 goto out_disable_device; 4222 } 4223 4224 mega_baseport = (unsigned long)ioremap(mega_baseport, 128); 4225 if (!mega_baseport) { 4226 dev_warn(&pdev->dev, "could not map hba memory\n"); 4227 goto out_release_region; 4228 } 4229 } else { 4230 flag |= BOARD_IOMAP; 4231 mega_baseport += 0x10; 4232 4233 if (!request_region(mega_baseport, 16, "megaraid")) 4234 goto out_disable_device; 4235 } 4236 4237 /* Initialize SCSI Host structure */ 4238 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t)); 4239 if (!host) 4240 goto out_iounmap; 4241 4242 adapter = (adapter_t *)host->hostdata; 4243 memset(adapter, 0, sizeof(adapter_t)); 4244 4245 dev_notice(&pdev->dev, 4246 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", 4247 host->host_no, mega_baseport, irq); 4248 4249 adapter->base = mega_baseport; 4250 if (flag & BOARD_MEMMAP) 4251 adapter->mmio_base = (void __iomem *) mega_baseport; 4252 4253 INIT_LIST_HEAD(&adapter->free_list); 4254 INIT_LIST_HEAD(&adapter->pending_list); 4255 INIT_LIST_HEAD(&adapter->completed_list); 4256 4257 adapter->flag = flag; 4258 spin_lock_init(&adapter->lock); 4259 4260 host->cmd_per_lun = max_cmd_per_lun; 4261 host->max_sectors = max_sectors_per_io; 4262 4263 adapter->dev = pdev; 4264 adapter->host = host; 4265 4266 adapter->host->irq = irq; 4267 4268 if (flag & BOARD_MEMMAP) 4269 adapter->host->base = tbase; 4270 else { 4271 adapter->host->io_port = tbase; 4272 adapter->host->n_io_port = 16; 4273 } 4274 4275 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func; 4276 4277 /* 4278 * Allocate buffer to issue internal commands. 4279 */ 4280 adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev, 4281 MEGA_BUFFER_SIZE, 4282 &adapter->buf_dma_handle, 4283 GFP_KERNEL); 4284 if (!adapter->mega_buffer) { 4285 dev_warn(&pdev->dev, "out of RAM\n"); 4286 goto out_host_put; 4287 } 4288 4289 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t), 4290 GFP_KERNEL); 4291 if (!adapter->scb_list) { 4292 dev_warn(&pdev->dev, "out of RAM\n"); 4293 goto out_free_cmd_buffer; 4294 } 4295 4296 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ? 
4297 megaraid_isr_memmapped : megaraid_isr_iomapped, 4298 IRQF_SHARED, "megaraid", adapter)) { 4299 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq); 4300 goto out_free_scb_list; 4301 } 4302 4303 if (mega_setup_mailbox(adapter)) 4304 goto out_free_irq; 4305 4306 if (mega_query_adapter(adapter)) 4307 goto out_free_mbox; 4308 4309 /* 4310 * Have checks for some buggy f/w 4311 */ 4312 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) { 4313 /* 4314 * Which firmware 4315 */ 4316 if (!strcmp(adapter->fw_version, "3.00") || 4317 !strcmp(adapter->fw_version, "3.01")) { 4318 4319 dev_warn(&pdev->dev, 4320 "Your card is a Dell PERC " 4321 "2/SC RAID controller with " 4322 "firmware\nmegaraid: 3.00 or 3.01. " 4323 "This driver is known to have " 4324 "corruption issues\nmegaraid: with " 4325 "those firmware versions on this " 4326 "specific card. In order\nmegaraid: " 4327 "to protect your data, please upgrade " 4328 "your firmware to version\nmegaraid: " 4329 "3.10 or later, available from the " 4330 "Dell Technical Support web\n" 4331 "megaraid: site at\nhttp://support." 4332 "dell.com/us/en/filelib/download/" 4333 "index.asp?fileid=2940\n" 4334 ); 4335 } 4336 } 4337 4338 /* 4339 * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with 4340 * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit 4341 * support, since this firmware cannot handle 64 bit 4342 * addressing 4343 */ 4344 if ((subsysvid == PCI_VENDOR_ID_HP) && 4345 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) { 4346 /* 4347 * which firmware 4348 */ 4349 if (!strcmp(adapter->fw_version, "H01.07") || 4350 !strcmp(adapter->fw_version, "H01.08") || 4351 !strcmp(adapter->fw_version, "H01.09") ) { 4352 dev_warn(&pdev->dev, 4353 "Firmware H.01.07, " 4354 "H.01.08, and H.01.09 on 1M/2M " 4355 "controllers\n" 4356 "do not support 64 bit " 4357 "addressing.\nDISABLING " 4358 "64 bit support.\n"); 4359 adapter->flag &= ~BOARD_64BIT; 4360 } 4361 } 4362 4363 if (mega_is_bios_enabled(adapter)) 4364 mega_hbas[hba_count].is_bios_enabled = 1; 4365 mega_hbas[hba_count].hostdata_addr = adapter; 4366 4367 /* 4368 * Find out which channel is raid and which is scsi. This is 4369 * for ROMB support. 4370 */ 4371 mega_enum_raid_scsi(adapter); 4372 4373 /* 4374 * Find out if a logical drive is set as the boot drive. If 4375 * there is one, will make that as the first logical drive. 4376 * ROMB: Do we have to boot from a physical drive. Then all 4377 * the physical drives would appear before the logical disks. 4378 * Else, all the physical drives would be exported to the mid 4379 * layer after logical drives. 
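	 * The ordering is expressed through adapter->logdrv_chan[]: entries
	 * set to 1 mark the NVIRT_CHAN virtual (logical drive) channels and
	 * entries set to 0 mark physical channels, so the virtual channels
	 * are placed either after the physical ones (physical boot drive) or
	 * in front of them (the default).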
4380 */ 4381 mega_get_boot_drv(adapter); 4382 4383 if (adapter->boot_pdrv_enabled) { 4384 j = adapter->product_info.nchannels; 4385 for( i = 0; i < j; i++ ) 4386 adapter->logdrv_chan[i] = 0; 4387 for( i = j; i < NVIRT_CHAN + j; i++ ) 4388 adapter->logdrv_chan[i] = 1; 4389 } else { 4390 for (i = 0; i < NVIRT_CHAN; i++) 4391 adapter->logdrv_chan[i] = 1; 4392 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++) 4393 adapter->logdrv_chan[i] = 0; 4394 adapter->mega_ch_class <<= NVIRT_CHAN; 4395 } 4396 4397 /* 4398 * Do we support random deletion and addition of logical 4399 * drives 4400 */ 4401 adapter->read_ldidmap = 0; /* set it after first logdrv 4402 delete cmd */ 4403 adapter->support_random_del = mega_support_random_del(adapter); 4404 4405 /* Initialize SCBs */ 4406 if (mega_init_scb(adapter)) 4407 goto out_free_mbox; 4408 4409 /* 4410 * Reset the pending commands counter 4411 */ 4412 atomic_set(&adapter->pend_cmds, 0); 4413 4414 /* 4415 * Reset the adapter quiescent flag 4416 */ 4417 atomic_set(&adapter->quiescent, 0); 4418 4419 hba_soft_state[hba_count] = adapter; 4420 4421 /* 4422 * Fill in the structure which needs to be passed back to the 4423 * application when it does an ioctl() for controller related 4424 * information. 4425 */ 4426 i = hba_count; 4427 4428 mcontroller[i].base = mega_baseport; 4429 mcontroller[i].irq = irq; 4430 mcontroller[i].numldrv = adapter->numldrv; 4431 mcontroller[i].pcibus = pci_bus; 4432 mcontroller[i].pcidev = id->device; 4433 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func); 4434 mcontroller[i].pciid = -1; 4435 mcontroller[i].pcivendor = id->vendor; 4436 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func); 4437 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func; 4438 4439 4440 /* Set the Mode of addressing to 64 bit if we can */ 4441 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { 4442 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 4443 adapter->has_64bit_addr = 1; 4444 } else { 4445 dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4446 adapter->has_64bit_addr = 0; 4447 } 4448 4449 mutex_init(&adapter->int_mtx); 4450 init_completion(&adapter->int_waitq); 4451 4452 adapter->this_id = DEFAULT_INITIATOR_ID; 4453 adapter->host->this_id = DEFAULT_INITIATOR_ID; 4454 4455 #if MEGA_HAVE_CLUSTERING 4456 /* 4457 * Is cluster support enabled on this controller 4458 * Note: In a cluster the HBAs ( the initiators ) will have 4459 * different target IDs and we cannot assume it to be 7. 
	mutex_init(&adapter->int_mtx);
	init_completion(&adapter->int_waitq);

	adapter->this_id = DEFAULT_INITIATOR_ID;
	adapter->host->this_id = DEFAULT_INITIATOR_ID;

#if MEGA_HAVE_CLUSTERING
	/*
	 * Find out whether cluster support is enabled on this
	 * controller.
	 * Note: in a cluster the HBAs (the initiators) have different
	 * target IDs and we cannot assume the ID to be 7. The call to
	 * mega_support_cluster() also fetches the target id when
	 * cluster support is available.
	 */
	adapter->has_cluster = mega_support_cluster(adapter);
	if (adapter->has_cluster) {
		dev_notice(&pdev->dev,
			"Cluster driver, initiator id:%d\n",
			adapter->this_id);
	}
#endif

	pci_set_drvdata(pdev, host);

	mega_create_proc_entry(hba_count, mega_proc_dir_entry);

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto out_free_mbox;

	scsi_scan_host(host);
	hba_count++;
	return 0;

out_free_mbox:
	dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
			adapter->una_mbox64, adapter->una_mbox64_dma);
out_free_irq:
	free_irq(adapter->host->irq, adapter);
out_free_scb_list:
	kfree(adapter->scb_list);
out_free_cmd_buffer:
	dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
			adapter->mega_buffer, adapter->buf_dma_handle);
out_host_put:
	scsi_host_put(host);
out_iounmap:
	if (flag & BOARD_MEMMAP)
		iounmap((void *)mega_baseport);
out_release_region:
	if (flag & BOARD_MEMMAP)
		release_mem_region(tbase, 128);
	else
		release_region(mega_baseport, 16);
out_disable_device:
	pci_disable_device(pdev);
out:
	return error;
}

static void
__megaraid_shutdown(adapter_t *adapter)
{
	u_char	raw_mbox[sizeof(struct mbox_out)];
	mbox_t	*mbox = (mbox_t *)raw_mbox;
	int	i;

	/* Flush adapter cache */
	memset(&mbox->m_out, 0, sizeof(raw_mbox));
	raw_mbox[0] = FLUSH_ADAPTER;

	free_irq(adapter->host->irq, adapter);

	/* Issue a blocking (interrupts disabled) command to the card */
	issue_scb_block(adapter, raw_mbox);

	/* Flush disks cache */
	memset(&mbox->m_out, 0, sizeof(raw_mbox));
	raw_mbox[0] = FLUSH_SYSTEM;

	/* Issue a blocking (interrupts disabled) command to the card */
	issue_scb_block(adapter, raw_mbox);

	if (atomic_read(&adapter->pend_cmds) > 0)
		dev_warn(&adapter->dev->dev, "pending commands!!\n");

	/*
	 * Have a deliberate delay to make sure all the caches are
	 * actually flushed.
	 */
	for (i = 0; i <= 10; i++)
		mdelay(1000);
}

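/*
 * __megaraid_shutdown() above is shared by the PCI .remove and
 * .shutdown paths: it flushes the adapter cache and then the disk
 * caches with blocking mailbox commands before the controller goes
 * away. megaraid_remove_one() below additionally releases, in roughly
 * the reverse order of megaraid_probe_one(), everything the probe
 * acquired.
 */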
static void
megaraid_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	adapter_t *adapter = (adapter_t *)host->hostdata;
	char buf[12] = { 0 };

	scsi_remove_host(host);

	__megaraid_shutdown(adapter);

	/* Free our resources */
	if (adapter->flag & BOARD_MEMMAP) {
		iounmap((void *)adapter->base);
		release_mem_region(adapter->host->base, 128);
	} else
		release_region(adapter->base, 16);

	mega_free_sgl(adapter);

	sprintf(buf, "hba%d", adapter->host->host_no);
	remove_proc_subtree(buf, mega_proc_dir_entry);

	dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
			adapter->mega_buffer, adapter->buf_dma_handle);
	kfree(adapter->scb_list);
	dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
			adapter->una_mbox64, adapter->una_mbox64_dma);

	scsi_host_put(host);
	pci_disable_device(pdev);

	hba_count--;
}

static void
megaraid_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	adapter_t *adapter = (adapter_t *)host->hostdata;

	__megaraid_shutdown(adapter);
}

static struct pci_device_id megaraid_pci_tbl[] = {
	{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0,}
};
MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);

static struct pci_driver megaraid_pci_driver = {
	.name		= "megaraid_legacy",
	.id_table	= megaraid_pci_tbl,
	.probe		= megaraid_probe_one,
	.remove		= megaraid_remove_one,
	.shutdown	= megaraid_shutdown,
};

static int __init megaraid_init(void)
{
	int error;

	if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
		max_cmd_per_lun = MAX_CMD_PER_LUN;
	if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
		max_mbox_busy_wait = MBOX_BUSY_WAIT;

#ifdef CONFIG_PROC_FS
	mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
	if (!mega_proc_dir_entry) {
		printk(KERN_WARNING
				"megaraid: failed to create megaraid root\n");
	}
#endif
	error = pci_register_driver(&megaraid_pci_driver);
	if (error) {
#ifdef CONFIG_PROC_FS
		remove_proc_entry("megaraid", NULL);
#endif
		return error;
	}

	/*
	 * Register the driver as a character device, for applications
	 * to access it for ioctls. Passing 0 as the first argument to
	 * register_chrdev() requests a dynamically allocated major
	 * number, which is returned on success; a negative value
	 * indicates failure.
	 */
	major = register_chrdev(0, "megadev_legacy", &megadev_fops);
	if (major < 0) {
		printk(KERN_WARNING
				"megaraid: failed to register char device\n");
	}

	return 0;
}

static void __exit megaraid_exit(void)
{
	/*
	 * Unregister the character device interface to the driver.
	 */
	unregister_chrdev(major, "megadev_legacy");

	pci_unregister_driver(&megaraid_pci_driver);

#ifdef CONFIG_PROC_FS
	remove_proc_entry("megaraid", NULL);
#endif
}

module_init(megaraid_init);
module_exit(megaraid_exit);

/* vi: set ts=8 sw=8 tw=78: */
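/*
 * Usage note (illustrative only, not part of the driver): a minimal
 * userspace sketch of how a management application could reach the
 * character-device interface registered in megaraid_init() above.
 * Only the "megadev_legacy" name and the dynamically assigned major
 * number come from this file; the device-node path below and the need
 * to run as root are assumptions, and the actual ioctl requests are
 * whatever megadev_unlocked_ioctl() accepts.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/devices", "r");
 *		char line[128];
 *		int maj = -1, fd;
 *
 *		// Find the dynamically allocated major number.
 *		while (f && fgets(line, sizeof(line), f)) {
 *			if (strstr(line, "megadev_legacy")) {
 *				maj = atoi(line);
 *				break;
 *			}
 *		}
 *		if (f)
 *			fclose(f);
 *		if (maj < 0)
 *			return 1;
 *
 *		// Create the node (path is illustrative) and open it.
 *		mknod("/dev/megadev_legacy", S_IFCHR | 0600, makedev(maj, 0));
 *		fd = open("/dev/megadev_legacy", O_RDWR);
 *		if (fd < 0)
 *			return 1;
 *
 *		// ioctl(fd, ...) requests issued here are dispatched to
 *		// megadev_unlocked_ioctl() in this driver.
 *		close(fd);
 *		return 0;
 *	}
 */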