1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * 4 * Linux MegaRAID device driver 5 * 6 * Copyright (c) 2002 LSI Logic Corporation. 7 * 8 * Copyright (c) 2002 Red Hat, Inc. All rights reserved. 9 * - fixes 10 * - speed-ups (list handling fixes, issued_list, optimizations.) 11 * - lots of cleanups. 12 * 13 * Copyright (c) 2003 Christoph Hellwig <hch@lst.de> 14 * - new-style, hotplug-aware pci probing and scsi registration 15 * 16 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju 17 * <Seokmann.Ju@lsil.com> 18 * 19 * Description: Linux device driver for LSI Logic MegaRAID controller 20 * 21 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493 22 * 518, 520, 531, 532 23 * 24 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell, 25 * and others. Please send updates to the mailing list 26 * linux-scsi@vger.kernel.org . 27 */ 28 29 #include <linux/mm.h> 30 #include <linux/fs.h> 31 #include <linux/blkdev.h> 32 #include <linux/uaccess.h> 33 #include <asm/io.h> 34 #include <linux/completion.h> 35 #include <linux/delay.h> 36 #include <linux/proc_fs.h> 37 #include <linux/seq_file.h> 38 #include <linux/reboot.h> 39 #include <linux/module.h> 40 #include <linux/list.h> 41 #include <linux/interrupt.h> 42 #include <linux/pci.h> 43 #include <linux/init.h> 44 #include <linux/dma-mapping.h> 45 #include <linux/mutex.h> 46 #include <linux/slab.h> 47 #include <scsi/scsicam.h> 48 49 #include "scsi.h" 50 #include <scsi/scsi_host.h> 51 52 #include "megaraid.h" 53 54 #define MEGARAID_MODULE_VERSION "2.00.4" 55 56 MODULE_AUTHOR ("sju@lsil.com"); 57 MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver"); 58 MODULE_LICENSE ("GPL"); 59 MODULE_VERSION(MEGARAID_MODULE_VERSION); 60 61 static DEFINE_MUTEX(megadev_mutex); 62 static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN; 63 module_param(max_cmd_per_lun, uint, 0); 64 MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)"); 65 66 static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO; 67 module_param(max_sectors_per_io, ushort, 0); 68 MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)"); 69 70 71 static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT; 72 module_param(max_mbox_busy_wait, ushort, 0); 73 MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)"); 74 75 #define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20) 76 #define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C) 77 #define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20) 78 #define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C) 79 80 /* 81 * Global variables 82 */ 83 84 static int hba_count; 85 static adapter_t *hba_soft_state[MAX_CONTROLLERS]; 86 static struct proc_dir_entry *mega_proc_dir_entry; 87 88 /* For controller re-ordering */ 89 static struct mega_hbas mega_hbas[MAX_CONTROLLERS]; 90 91 static long 92 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); 93 94 /* 95 * The File Operations structure for the serial/ioctl interface of the driver 96 */ 97 static const struct file_operations megadev_fops = { 98 .owner = THIS_MODULE, 99 .unlocked_ioctl = megadev_unlocked_ioctl, 100 .open = megadev_open, 101 .llseek = noop_llseek, 102 }; 103 104 /* 105 * Array to structures for storing the information about the controllers. 
This 106 * information is sent to the user level applications, when they do an ioctl 107 * for this information. 108 */ 109 static struct mcontroller mcontroller[MAX_CONTROLLERS]; 110 111 /* The current driver version */ 112 static u32 driver_ver = 0x02000000; 113 114 /* major number used by the device for character interface */ 115 static int major; 116 117 #define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01) 118 119 120 /* 121 * Debug variable to print some diagnostic messages 122 */ 123 static int trace_level; 124 125 /** 126 * mega_setup_mailbox() 127 * @adapter: pointer to our soft state 128 * 129 * Allocates a 8 byte aligned memory for the handshake mailbox. 130 */ 131 static int 132 mega_setup_mailbox(adapter_t *adapter) 133 { 134 unsigned long align; 135 136 adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev, 137 sizeof(mbox64_t), 138 &adapter->una_mbox64_dma, 139 GFP_KERNEL); 140 141 if( !adapter->una_mbox64 ) return -1; 142 143 adapter->mbox = &adapter->una_mbox64->mbox; 144 145 adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) & 146 (~0UL ^ 0xFUL)); 147 148 adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8); 149 150 align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox); 151 152 adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align; 153 154 /* 155 * Register the mailbox if the controller is an io-mapped controller 156 */ 157 if( adapter->flag & BOARD_IOMAP ) { 158 159 outb(adapter->mbox_dma & 0xFF, 160 adapter->host->io_port + MBOX_PORT0); 161 162 outb((adapter->mbox_dma >> 8) & 0xFF, 163 adapter->host->io_port + MBOX_PORT1); 164 165 outb((adapter->mbox_dma >> 16) & 0xFF, 166 adapter->host->io_port + MBOX_PORT2); 167 168 outb((adapter->mbox_dma >> 24) & 0xFF, 169 adapter->host->io_port + MBOX_PORT3); 170 171 outb(ENABLE_MBOX_BYTE, 172 adapter->host->io_port + ENABLE_MBOX_REGION); 173 174 irq_ack(adapter); 175 176 irq_enable(adapter); 177 } 178 179 return 0; 180 } 181 182 183 /* 184 * mega_query_adapter() 185 * @adapter - pointer to our soft state 186 * 187 * Issue the adapter inquiry commands to the controller and find out 188 * information and parameter about the devices attached 189 */ 190 static int 191 mega_query_adapter(adapter_t *adapter) 192 { 193 dma_addr_t prod_info_dma_handle; 194 mega_inquiry3 *inquiry3; 195 struct mbox_out mbox; 196 u8 *raw_mbox = (u8 *)&mbox; 197 int retval; 198 199 /* Initialize adapter inquiry mailbox */ 200 201 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 202 memset(&mbox, 0, sizeof(mbox)); 203 204 /* 205 * Try to issue Inquiry3 command 206 * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and 207 * update enquiry3 structure 208 */ 209 mbox.xferaddr = (u32)adapter->buf_dma_handle; 210 211 inquiry3 = (mega_inquiry3 *)adapter->mega_buffer; 212 213 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ 214 raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */ 215 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 
0x02 */ 216 217 /* Issue a blocking command to the card */ 218 if ((retval = issue_scb_block(adapter, raw_mbox))) { 219 /* the adapter does not support 40ld */ 220 221 mraid_ext_inquiry *ext_inq; 222 mraid_inquiry *inq; 223 dma_addr_t dma_handle; 224 225 ext_inq = dma_alloc_coherent(&adapter->dev->dev, 226 sizeof(mraid_ext_inquiry), 227 &dma_handle, GFP_KERNEL); 228 229 if( ext_inq == NULL ) return -1; 230 231 inq = &ext_inq->raid_inq; 232 233 mbox.xferaddr = (u32)dma_handle; 234 235 /*issue old 0x04 command to adapter */ 236 mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ; 237 238 issue_scb_block(adapter, raw_mbox); 239 240 /* 241 * update Enquiry3 and ProductInfo structures with 242 * mraid_inquiry structure 243 */ 244 mega_8_to_40ld(inq, inquiry3, 245 (mega_product_info *)&adapter->product_info); 246 247 dma_free_coherent(&adapter->dev->dev, 248 sizeof(mraid_ext_inquiry), ext_inq, 249 dma_handle); 250 251 } else { /*adapter supports 40ld */ 252 adapter->flag |= BOARD_40LD; 253 254 /* 255 * get product_info, which is static information and will be 256 * unchanged 257 */ 258 prod_info_dma_handle = dma_map_single(&adapter->dev->dev, 259 (void *)&adapter->product_info, 260 sizeof(mega_product_info), 261 DMA_FROM_DEVICE); 262 263 mbox.xferaddr = prod_info_dma_handle; 264 265 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */ 266 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */ 267 268 if ((retval = issue_scb_block(adapter, raw_mbox))) 269 dev_warn(&adapter->dev->dev, 270 "Product_info cmd failed with error: %d\n", 271 retval); 272 273 dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle, 274 sizeof(mega_product_info), DMA_FROM_DEVICE); 275 } 276 277 278 /* 279 * kernel scans the channels from 0 to <= max_channel 280 */ 281 adapter->host->max_channel = 282 adapter->product_info.nchannels + NVIRT_CHAN -1; 283 284 adapter->host->max_id = 16; /* max targets per channel */ 285 286 adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */ 287 288 adapter->host->cmd_per_lun = max_cmd_per_lun; 289 290 adapter->numldrv = inquiry3->num_ldrv; 291 292 adapter->max_cmds = adapter->product_info.max_commands; 293 294 if(adapter->max_cmds > MAX_COMMANDS) 295 adapter->max_cmds = MAX_COMMANDS; 296 297 adapter->host->can_queue = adapter->max_cmds - 1; 298 299 /* 300 * Get the maximum number of scatter-gather elements supported by this 301 * firmware 302 */ 303 mega_get_max_sgl(adapter); 304 305 adapter->host->sg_tablesize = adapter->sglen; 306 307 /* use HP firmware and bios version encoding 308 Note: fw_version[0|1] and bios_version[0|1] were originally shifted 309 right 8 bits making them zero. This 0 value was hardcoded to fix 310 sparse warnings. 
*/ 311 if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) { 312 snprintf(adapter->fw_version, sizeof(adapter->fw_version), 313 "%c%d%d.%d%d", 314 adapter->product_info.fw_version[2], 315 0, 316 adapter->product_info.fw_version[1] & 0x0f, 317 0, 318 adapter->product_info.fw_version[0] & 0x0f); 319 snprintf(adapter->bios_version, sizeof(adapter->fw_version), 320 "%c%d%d.%d%d", 321 adapter->product_info.bios_version[2], 322 0, 323 adapter->product_info.bios_version[1] & 0x0f, 324 0, 325 adapter->product_info.bios_version[0] & 0x0f); 326 } else { 327 memcpy(adapter->fw_version, 328 (char *)adapter->product_info.fw_version, 4); 329 adapter->fw_version[4] = 0; 330 331 memcpy(adapter->bios_version, 332 (char *)adapter->product_info.bios_version, 4); 333 334 adapter->bios_version[4] = 0; 335 } 336 337 dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n", 338 adapter->fw_version, adapter->bios_version, adapter->numldrv); 339 340 /* 341 * Do we support extended (>10 bytes) cdbs 342 */ 343 adapter->support_ext_cdb = mega_support_ext_cdb(adapter); 344 if (adapter->support_ext_cdb) 345 dev_notice(&adapter->dev->dev, "supports extended CDBs\n"); 346 347 348 return 0; 349 } 350 351 /** 352 * mega_runpendq() 353 * @adapter: pointer to our soft state 354 * 355 * Runs through the list of pending requests. 356 */ 357 static inline void 358 mega_runpendq(adapter_t *adapter) 359 { 360 if(!list_empty(&adapter->pending_list)) 361 __mega_runpendq(adapter); 362 } 363 364 /* 365 * megaraid_queue() 366 * @scmd - Issue this scsi command 367 * @done - the callback hook into the scsi mid-layer 368 * 369 * The command queuing entry point for the mid-layer. 370 */ 371 static int megaraid_queue_lck(struct scsi_cmnd *scmd) 372 { 373 adapter_t *adapter; 374 scb_t *scb; 375 int busy=0; 376 unsigned long flags; 377 378 adapter = (adapter_t *)scmd->device->host->hostdata; 379 380 /* 381 * Allocate and build a SCB request 382 * busy flag will be set if mega_build_cmd() command could not 383 * allocate scb. We will return non-zero status in that case. 384 * NOTE: scb can be null even though certain commands completed 385 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, we would 386 * return 0 in that case. 387 */ 388 389 spin_lock_irqsave(&adapter->lock, flags); 390 scb = mega_build_cmd(adapter, scmd, &busy); 391 if (!scb) 392 goto out; 393 394 scb->state |= SCB_PENDQ; 395 list_add_tail(&scb->list, &adapter->pending_list); 396 397 /* 398 * Check if the HBA is in quiescent state, e.g., during a 399 * delete logical drive opertion. If it is, don't run 400 * the pending_list. 401 */ 402 if (atomic_read(&adapter->quiescent) == 0) 403 mega_runpendq(adapter); 404 405 busy = 0; 406 out: 407 spin_unlock_irqrestore(&adapter->lock, flags); 408 return busy; 409 } 410 411 static DEF_SCSI_QCMD(megaraid_queue) 412 413 /** 414 * mega_allocate_scb() 415 * @adapter: pointer to our soft state 416 * @cmd: scsi command from the mid-layer 417 * 418 * Allocate a SCB structure. This is the central structure for controller 419 * commands. 
420 */ 421 static inline scb_t * 422 mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd) 423 { 424 struct list_head *head = &adapter->free_list; 425 scb_t *scb; 426 427 /* Unlink command from Free List */ 428 if( !list_empty(head) ) { 429 430 scb = list_entry(head->next, scb_t, list); 431 432 list_del_init(head->next); 433 434 scb->state = SCB_ACTIVE; 435 scb->cmd = cmd; 436 scb->dma_type = MEGA_DMA_TYPE_NONE; 437 438 return scb; 439 } 440 441 return NULL; 442 } 443 444 /** 445 * mega_get_ldrv_num() 446 * @adapter: pointer to our soft state 447 * @cmd: scsi mid layer command 448 * @channel: channel on the controller 449 * 450 * Calculate the logical drive number based on the information in scsi command 451 * and the channel number. 452 */ 453 static inline int 454 mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel) 455 { 456 int tgt; 457 int ldrv_num; 458 459 tgt = cmd->device->id; 460 461 if ( tgt > adapter->this_id ) 462 tgt--; /* we do not get inquires for initiator id */ 463 464 ldrv_num = (channel * 15) + tgt; 465 466 467 /* 468 * If we have a logical drive with boot enabled, project it first 469 */ 470 if( adapter->boot_ldrv_enabled ) { 471 if( ldrv_num == 0 ) { 472 ldrv_num = adapter->boot_ldrv; 473 } 474 else { 475 if( ldrv_num <= adapter->boot_ldrv ) { 476 ldrv_num--; 477 } 478 } 479 } 480 481 /* 482 * If "delete logical drive" feature is enabled on this controller. 483 * Do only if at least one delete logical drive operation was done. 484 * 485 * Also, after logical drive deletion, instead of logical drive number, 486 * the value returned should be 0x80+logical drive id. 487 * 488 * These is valid only for IO commands. 489 */ 490 491 if (adapter->support_random_del && adapter->read_ldidmap ) 492 switch (cmd->cmnd[0]) { 493 case READ_6: 494 case WRITE_6: 495 case READ_10: 496 case WRITE_10: 497 ldrv_num += 0x80; 498 } 499 500 return ldrv_num; 501 } 502 503 /** 504 * mega_build_cmd() 505 * @adapter: pointer to our soft state 506 * @cmd: Prepare using this scsi command 507 * @busy: busy flag if no resources 508 * 509 * Prepares a command and scatter gather list for the controller. This routine 510 * also finds out if the commands is intended for a logical drive or a 511 * physical device and prepares the controller command accordingly. 512 * 513 * We also re-order the logical drives and physical devices based on their 514 * boot settings. 515 */ 516 static scb_t * 517 mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy) 518 { 519 mega_passthru *pthru; 520 scb_t *scb; 521 mbox_t *mbox; 522 u32 seg; 523 char islogical; 524 int max_ldrv_num; 525 int channel = 0; 526 int target = 0; 527 int ldrv_num = 0; /* logical drive number */ 528 529 /* 530 * We know what channels our logical drives are on - mega_find_card() 531 */ 532 islogical = adapter->logdrv_chan[cmd->device->channel]; 533 534 /* 535 * The theory: If physical drive is chosen for boot, all the physical 536 * devices are exported before the logical drives, otherwise physical 537 * devices are pushed after logical drives, in which case - Kernel sees 538 * the physical devices on virtual channel which is obviously converted 539 * to actual channel on the HBA. 
540 */ 541 if( adapter->boot_pdrv_enabled ) { 542 if( islogical ) { 543 /* logical channel */ 544 channel = cmd->device->channel - 545 adapter->product_info.nchannels; 546 } 547 else { 548 /* this is physical channel */ 549 channel = cmd->device->channel; 550 target = cmd->device->id; 551 552 /* 553 * boot from a physical disk, that disk needs to be 554 * exposed first IF both the channels are SCSI, then 555 * booting from the second channel is not allowed. 556 */ 557 if( target == 0 ) { 558 target = adapter->boot_pdrv_tgt; 559 } 560 else if( target == adapter->boot_pdrv_tgt ) { 561 target = 0; 562 } 563 } 564 } 565 else { 566 if( islogical ) { 567 /* this is the logical channel */ 568 channel = cmd->device->channel; 569 } 570 else { 571 /* physical channel */ 572 channel = cmd->device->channel - NVIRT_CHAN; 573 target = cmd->device->id; 574 } 575 } 576 577 578 if(islogical) { 579 580 /* have just LUN 0 for each target on virtual channels */ 581 if (cmd->device->lun) { 582 cmd->result = (DID_BAD_TARGET << 16); 583 scsi_done(cmd); 584 return NULL; 585 } 586 587 ldrv_num = mega_get_ldrv_num(adapter, cmd, channel); 588 589 590 max_ldrv_num = (adapter->flag & BOARD_40LD) ? 591 MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD; 592 593 /* 594 * max_ldrv_num increases by 0x80 if some logical drive was 595 * deleted. 596 */ 597 if(adapter->read_ldidmap) 598 max_ldrv_num += 0x80; 599 600 if(ldrv_num > max_ldrv_num ) { 601 cmd->result = (DID_BAD_TARGET << 16); 602 scsi_done(cmd); 603 return NULL; 604 } 605 606 } 607 else { 608 if( cmd->device->lun > 7) { 609 /* 610 * Do not support lun >7 for physically accessed 611 * devices 612 */ 613 cmd->result = (DID_BAD_TARGET << 16); 614 scsi_done(cmd); 615 return NULL; 616 } 617 } 618 619 /* 620 * 621 * Logical drive commands 622 * 623 */ 624 if(islogical) { 625 switch (cmd->cmnd[0]) { 626 case TEST_UNIT_READY: 627 #if MEGA_HAVE_CLUSTERING 628 /* 629 * Do we support clustering and is the support enabled 630 * If no, return success always 631 */ 632 if( !adapter->has_cluster ) { 633 cmd->result = (DID_OK << 16); 634 scsi_done(cmd); 635 return NULL; 636 } 637 638 if(!(scb = mega_allocate_scb(adapter, cmd))) { 639 *busy = 1; 640 return NULL; 641 } 642 643 scb->raw_mbox[0] = MEGA_CLUSTER_CMD; 644 scb->raw_mbox[2] = MEGA_RESERVATION_STATUS; 645 scb->raw_mbox[3] = ldrv_num; 646 647 scb->dma_direction = DMA_NONE; 648 649 return scb; 650 #else 651 cmd->result = (DID_OK << 16); 652 scsi_done(cmd); 653 return NULL; 654 #endif 655 656 case MODE_SENSE: { 657 char *buf; 658 struct scatterlist *sg; 659 660 sg = scsi_sglist(cmd); 661 buf = kmap_atomic(sg_page(sg)) + sg->offset; 662 663 memset(buf, 0, cmd->cmnd[4]); 664 kunmap_atomic(buf - sg->offset); 665 666 cmd->result = (DID_OK << 16); 667 scsi_done(cmd); 668 return NULL; 669 } 670 671 case READ_CAPACITY: 672 case INQUIRY: 673 674 if(!(adapter->flag & (1L << cmd->device->channel))) { 675 676 dev_notice(&adapter->dev->dev, 677 "scsi%d: scanning scsi channel %d " 678 "for logical drives\n", 679 adapter->host->host_no, 680 cmd->device->channel); 681 682 adapter->flag |= (1L << cmd->device->channel); 683 } 684 685 /* Allocate a SCB and initialize passthru */ 686 if(!(scb = mega_allocate_scb(adapter, cmd))) { 687 *busy = 1; 688 return NULL; 689 } 690 pthru = scb->pthru; 691 692 mbox = (mbox_t *)scb->raw_mbox; 693 memset(mbox, 0, sizeof(scb->raw_mbox)); 694 memset(pthru, 0, sizeof(mega_passthru)); 695 696 pthru->timeout = 0; 697 pthru->ars = 1; 698 pthru->reqsenselen = 14; 699 pthru->islogical = 1; 700 pthru->logdrv = 
ldrv_num; 701 pthru->cdblen = cmd->cmd_len; 702 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); 703 704 if( adapter->has_64bit_addr ) { 705 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64; 706 } 707 else { 708 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU; 709 } 710 711 scb->dma_direction = DMA_FROM_DEVICE; 712 713 pthru->numsgelements = mega_build_sglist(adapter, scb, 714 &pthru->dataxferaddr, &pthru->dataxferlen); 715 716 mbox->m_out.xferaddr = scb->pthru_dma_addr; 717 718 return scb; 719 720 case READ_6: 721 case WRITE_6: 722 case READ_10: 723 case WRITE_10: 724 case READ_12: 725 case WRITE_12: 726 727 /* Allocate a SCB and initialize mailbox */ 728 if(!(scb = mega_allocate_scb(adapter, cmd))) { 729 *busy = 1; 730 return NULL; 731 } 732 mbox = (mbox_t *)scb->raw_mbox; 733 734 memset(mbox, 0, sizeof(scb->raw_mbox)); 735 mbox->m_out.logdrv = ldrv_num; 736 737 /* 738 * A little hack: 2nd bit is zero for all scsi read 739 * commands and is set for all scsi write commands 740 */ 741 if( adapter->has_64bit_addr ) { 742 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ? 743 MEGA_MBOXCMD_LWRITE64: 744 MEGA_MBOXCMD_LREAD64 ; 745 } 746 else { 747 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ? 748 MEGA_MBOXCMD_LWRITE: 749 MEGA_MBOXCMD_LREAD ; 750 } 751 752 /* 753 * 6-byte READ(0x08) or WRITE(0x0A) cdb 754 */ 755 if( cmd->cmd_len == 6 ) { 756 mbox->m_out.numsectors = (u32) cmd->cmnd[4]; 757 mbox->m_out.lba = 758 ((u32)cmd->cmnd[1] << 16) | 759 ((u32)cmd->cmnd[2] << 8) | 760 (u32)cmd->cmnd[3]; 761 762 mbox->m_out.lba &= 0x1FFFFF; 763 764 #if MEGA_HAVE_STATS 765 /* 766 * Take modulo 0x80, since the logical drive 767 * number increases by 0x80 when a logical 768 * drive was deleted 769 */ 770 if (*cmd->cmnd == READ_6) { 771 adapter->nreads[ldrv_num%0x80]++; 772 adapter->nreadblocks[ldrv_num%0x80] += 773 mbox->m_out.numsectors; 774 } else { 775 adapter->nwrites[ldrv_num%0x80]++; 776 adapter->nwriteblocks[ldrv_num%0x80] += 777 mbox->m_out.numsectors; 778 } 779 #endif 780 } 781 782 /* 783 * 10-byte READ(0x28) or WRITE(0x2A) cdb 784 */ 785 if( cmd->cmd_len == 10 ) { 786 mbox->m_out.numsectors = 787 (u32)cmd->cmnd[8] | 788 ((u32)cmd->cmnd[7] << 8); 789 mbox->m_out.lba = 790 ((u32)cmd->cmnd[2] << 24) | 791 ((u32)cmd->cmnd[3] << 16) | 792 ((u32)cmd->cmnd[4] << 8) | 793 (u32)cmd->cmnd[5]; 794 795 #if MEGA_HAVE_STATS 796 if (*cmd->cmnd == READ_10) { 797 adapter->nreads[ldrv_num%0x80]++; 798 adapter->nreadblocks[ldrv_num%0x80] += 799 mbox->m_out.numsectors; 800 } else { 801 adapter->nwrites[ldrv_num%0x80]++; 802 adapter->nwriteblocks[ldrv_num%0x80] += 803 mbox->m_out.numsectors; 804 } 805 #endif 806 } 807 808 /* 809 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 810 */ 811 if( cmd->cmd_len == 12 ) { 812 mbox->m_out.lba = 813 ((u32)cmd->cmnd[2] << 24) | 814 ((u32)cmd->cmnd[3] << 16) | 815 ((u32)cmd->cmnd[4] << 8) | 816 (u32)cmd->cmnd[5]; 817 818 mbox->m_out.numsectors = 819 ((u32)cmd->cmnd[6] << 24) | 820 ((u32)cmd->cmnd[7] << 16) | 821 ((u32)cmd->cmnd[8] << 8) | 822 (u32)cmd->cmnd[9]; 823 824 #if MEGA_HAVE_STATS 825 if (*cmd->cmnd == READ_12) { 826 adapter->nreads[ldrv_num%0x80]++; 827 adapter->nreadblocks[ldrv_num%0x80] += 828 mbox->m_out.numsectors; 829 } else { 830 adapter->nwrites[ldrv_num%0x80]++; 831 adapter->nwriteblocks[ldrv_num%0x80] += 832 mbox->m_out.numsectors; 833 } 834 #endif 835 } 836 837 /* 838 * If it is a read command 839 */ 840 if( (*cmd->cmnd & 0x0F) == 0x08 ) { 841 scb->dma_direction = DMA_FROM_DEVICE; 842 } 843 else { 844 scb->dma_direction = DMA_TO_DEVICE; 845 } 846 847 /* Calculate Scatter-Gather info */ 848 
mbox->m_out.numsgelements = mega_build_sglist(adapter, scb, 849 (u32 *)&mbox->m_out.xferaddr, &seg); 850 851 return scb; 852 853 #if MEGA_HAVE_CLUSTERING 854 case RESERVE: 855 case RELEASE: 856 857 /* 858 * Do we support clustering and is the support enabled 859 */ 860 if( ! adapter->has_cluster ) { 861 862 cmd->result = (DID_BAD_TARGET << 16); 863 scsi_done(cmd); 864 return NULL; 865 } 866 867 /* Allocate a SCB and initialize mailbox */ 868 if(!(scb = mega_allocate_scb(adapter, cmd))) { 869 *busy = 1; 870 return NULL; 871 } 872 873 scb->raw_mbox[0] = MEGA_CLUSTER_CMD; 874 scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ? 875 MEGA_RESERVE_LD : MEGA_RELEASE_LD; 876 877 scb->raw_mbox[3] = ldrv_num; 878 879 scb->dma_direction = DMA_NONE; 880 881 return scb; 882 #endif 883 884 default: 885 cmd->result = (DID_BAD_TARGET << 16); 886 scsi_done(cmd); 887 return NULL; 888 } 889 } 890 891 /* 892 * Passthru drive commands 893 */ 894 else { 895 /* Allocate a SCB and initialize passthru */ 896 if(!(scb = mega_allocate_scb(adapter, cmd))) { 897 *busy = 1; 898 return NULL; 899 } 900 901 mbox = (mbox_t *)scb->raw_mbox; 902 memset(mbox, 0, sizeof(scb->raw_mbox)); 903 904 if( adapter->support_ext_cdb ) { 905 906 mega_prepare_extpassthru(adapter, scb, cmd, 907 channel, target); 908 909 mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU; 910 911 mbox->m_out.xferaddr = scb->epthru_dma_addr; 912 913 } 914 else { 915 916 pthru = mega_prepare_passthru(adapter, scb, cmd, 917 channel, target); 918 919 /* Initialize mailbox */ 920 if( adapter->has_64bit_addr ) { 921 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64; 922 } 923 else { 924 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU; 925 } 926 927 mbox->m_out.xferaddr = scb->pthru_dma_addr; 928 929 } 930 return scb; 931 } 932 return NULL; 933 } 934 935 936 /** 937 * mega_prepare_passthru() 938 * @adapter: pointer to our soft state 939 * @scb: our scsi control block 940 * @cmd: scsi command from the mid-layer 941 * @channel: actual channel on the controller 942 * @target: actual id on the controller. 943 * 944 * prepare a command for the scsi physical devices. 945 */ 946 static mega_passthru * 947 mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd, 948 int channel, int target) 949 { 950 mega_passthru *pthru; 951 952 pthru = scb->pthru; 953 memset(pthru, 0, sizeof (mega_passthru)); 954 955 /* 0=6sec/1=60sec/2=10min/3=3hrs */ 956 pthru->timeout = 2; 957 958 pthru->ars = 1; 959 pthru->reqsenselen = 14; 960 pthru->islogical = 0; 961 962 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; 963 964 pthru->target = (adapter->flag & BOARD_40LD) ? 
965 (channel << 4) | target : target; 966 967 pthru->cdblen = cmd->cmd_len; 968 pthru->logdrv = cmd->device->lun; 969 970 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len); 971 972 /* Not sure about the direction */ 973 scb->dma_direction = DMA_BIDIRECTIONAL; 974 975 /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */ 976 switch (cmd->cmnd[0]) { 977 case INQUIRY: 978 case READ_CAPACITY: 979 if(!(adapter->flag & (1L << cmd->device->channel))) { 980 981 dev_notice(&adapter->dev->dev, 982 "scsi%d: scanning scsi channel %d [P%d] " 983 "for physical devices\n", 984 adapter->host->host_no, 985 cmd->device->channel, channel); 986 987 adapter->flag |= (1L << cmd->device->channel); 988 } 989 fallthrough; 990 default: 991 pthru->numsgelements = mega_build_sglist(adapter, scb, 992 &pthru->dataxferaddr, &pthru->dataxferlen); 993 break; 994 } 995 return pthru; 996 } 997 998 999 /** 1000 * mega_prepare_extpassthru() 1001 * @adapter: pointer to our soft state 1002 * @scb: our scsi control block 1003 * @cmd: scsi command from the mid-layer 1004 * @channel: actual channel on the controller 1005 * @target: actual id on the controller. 1006 * 1007 * prepare a command for the scsi physical devices. This rountine prepares 1008 * commands for devices which can take extended CDBs (>10 bytes) 1009 */ 1010 static mega_ext_passthru * 1011 mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, 1012 struct scsi_cmnd *cmd, 1013 int channel, int target) 1014 { 1015 mega_ext_passthru *epthru; 1016 1017 epthru = scb->epthru; 1018 memset(epthru, 0, sizeof(mega_ext_passthru)); 1019 1020 /* 0=6sec/1=60sec/2=10min/3=3hrs */ 1021 epthru->timeout = 2; 1022 1023 epthru->ars = 1; 1024 epthru->reqsenselen = 14; 1025 epthru->islogical = 0; 1026 1027 epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel; 1028 epthru->target = (adapter->flag & BOARD_40LD) ? 1029 (channel << 4) | target : target; 1030 1031 epthru->cdblen = cmd->cmd_len; 1032 epthru->logdrv = cmd->device->lun; 1033 1034 memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len); 1035 1036 /* Not sure about the direction */ 1037 scb->dma_direction = DMA_BIDIRECTIONAL; 1038 1039 switch(cmd->cmnd[0]) { 1040 case INQUIRY: 1041 case READ_CAPACITY: 1042 if(!(adapter->flag & (1L << cmd->device->channel))) { 1043 1044 dev_notice(&adapter->dev->dev, 1045 "scsi%d: scanning scsi channel %d [P%d] " 1046 "for physical devices\n", 1047 adapter->host->host_no, 1048 cmd->device->channel, channel); 1049 1050 adapter->flag |= (1L << cmd->device->channel); 1051 } 1052 fallthrough; 1053 default: 1054 epthru->numsgelements = mega_build_sglist(adapter, scb, 1055 &epthru->dataxferaddr, &epthru->dataxferlen); 1056 break; 1057 } 1058 1059 return epthru; 1060 } 1061 1062 static void 1063 __mega_runpendq(adapter_t *adapter) 1064 { 1065 scb_t *scb; 1066 struct list_head *pos, *next; 1067 1068 /* Issue any pending commands to the card */ 1069 list_for_each_safe(pos, next, &adapter->pending_list) { 1070 1071 scb = list_entry(pos, scb_t, list); 1072 1073 if( !(scb->state & SCB_ISSUED) ) { 1074 1075 if( issue_scb(adapter, scb) != 0 ) 1076 return; 1077 } 1078 } 1079 1080 return; 1081 } 1082 1083 1084 /** 1085 * issue_scb() 1086 * @adapter: pointer to our soft state 1087 * @scb: scsi control block 1088 * 1089 * Post a command to the card if the mailbox is available, otherwise return 1090 * busy. We also take the scb from the pending list if the mailbox is 1091 * available. 
1092 */ 1093 static int 1094 issue_scb(adapter_t *adapter, scb_t *scb) 1095 { 1096 volatile mbox64_t *mbox64 = adapter->mbox64; 1097 volatile mbox_t *mbox = adapter->mbox; 1098 unsigned int i = 0; 1099 1100 if(unlikely(mbox->m_in.busy)) { 1101 do { 1102 udelay(1); 1103 i++; 1104 } while( mbox->m_in.busy && (i < max_mbox_busy_wait) ); 1105 1106 if(mbox->m_in.busy) return -1; 1107 } 1108 1109 /* Copy mailbox data into host structure */ 1110 memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox, 1111 sizeof(struct mbox_out)); 1112 1113 mbox->m_out.cmdid = scb->idx; /* Set cmdid */ 1114 mbox->m_in.busy = 1; /* Set busy */ 1115 1116 1117 /* 1118 * Increment the pending queue counter 1119 */ 1120 atomic_inc(&adapter->pend_cmds); 1121 1122 switch (mbox->m_out.cmd) { 1123 case MEGA_MBOXCMD_LREAD64: 1124 case MEGA_MBOXCMD_LWRITE64: 1125 case MEGA_MBOXCMD_PASSTHRU64: 1126 case MEGA_MBOXCMD_EXTPTHRU: 1127 mbox64->xfer_segment_lo = mbox->m_out.xferaddr; 1128 mbox64->xfer_segment_hi = 0; 1129 mbox->m_out.xferaddr = 0xFFFFFFFF; 1130 break; 1131 default: 1132 mbox64->xfer_segment_lo = 0; 1133 mbox64->xfer_segment_hi = 0; 1134 } 1135 1136 /* 1137 * post the command 1138 */ 1139 scb->state |= SCB_ISSUED; 1140 1141 if( likely(adapter->flag & BOARD_MEMMAP) ) { 1142 mbox->m_in.poll = 0; 1143 mbox->m_in.ack = 0; 1144 WRINDOOR(adapter, adapter->mbox_dma | 0x1); 1145 } 1146 else { 1147 irq_enable(adapter); 1148 issue_command(adapter); 1149 } 1150 1151 return 0; 1152 } 1153 1154 /* 1155 * Wait until the controller's mailbox is available 1156 */ 1157 static inline int 1158 mega_busywait_mbox (adapter_t *adapter) 1159 { 1160 if (adapter->mbox->m_in.busy) 1161 return __mega_busywait_mbox(adapter); 1162 return 0; 1163 } 1164 1165 /** 1166 * issue_scb_block() 1167 * @adapter: pointer to our soft state 1168 * @raw_mbox: the mailbox 1169 * 1170 * Issue a scb in synchronous and non-interrupt mode 1171 */ 1172 static int 1173 issue_scb_block(adapter_t *adapter, u_char *raw_mbox) 1174 { 1175 volatile mbox64_t *mbox64 = adapter->mbox64; 1176 volatile mbox_t *mbox = adapter->mbox; 1177 u8 byte; 1178 1179 /* Wait until mailbox is free */ 1180 if(mega_busywait_mbox (adapter)) 1181 goto bug_blocked_mailbox; 1182 1183 /* Copy mailbox data into host structure */ 1184 memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out)); 1185 mbox->m_out.cmdid = 0xFE; 1186 mbox->m_in.busy = 1; 1187 1188 switch (raw_mbox[0]) { 1189 case MEGA_MBOXCMD_LREAD64: 1190 case MEGA_MBOXCMD_LWRITE64: 1191 case MEGA_MBOXCMD_PASSTHRU64: 1192 case MEGA_MBOXCMD_EXTPTHRU: 1193 mbox64->xfer_segment_lo = mbox->m_out.xferaddr; 1194 mbox64->xfer_segment_hi = 0; 1195 mbox->m_out.xferaddr = 0xFFFFFFFF; 1196 break; 1197 default: 1198 mbox64->xfer_segment_lo = 0; 1199 mbox64->xfer_segment_hi = 0; 1200 } 1201 1202 if( likely(adapter->flag & BOARD_MEMMAP) ) { 1203 mbox->m_in.poll = 0; 1204 mbox->m_in.ack = 0; 1205 mbox->m_in.numstatus = 0xFF; 1206 mbox->m_in.status = 0xFF; 1207 WRINDOOR(adapter, adapter->mbox_dma | 0x1); 1208 1209 while((volatile u8)mbox->m_in.numstatus == 0xFF) 1210 cpu_relax(); 1211 1212 mbox->m_in.numstatus = 0xFF; 1213 1214 while( (volatile u8)mbox->m_in.poll != 0x77 ) 1215 cpu_relax(); 1216 1217 mbox->m_in.poll = 0; 1218 mbox->m_in.ack = 0x77; 1219 1220 WRINDOOR(adapter, adapter->mbox_dma | 0x2); 1221 1222 while(RDINDOOR(adapter) & 0x2) 1223 cpu_relax(); 1224 } 1225 else { 1226 irq_disable(adapter); 1227 issue_command(adapter); 1228 1229 while (!((byte = irq_state(adapter)) & INTR_VALID)) 1230 cpu_relax(); 1231 1232 set_irq_state(adapter, byte); 1233 
irq_enable(adapter); 1234 irq_ack(adapter); 1235 } 1236 1237 return mbox->m_in.status; 1238 1239 bug_blocked_mailbox: 1240 dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n"); 1241 udelay (1000); 1242 return -1; 1243 } 1244 1245 1246 /** 1247 * megaraid_isr_iomapped() 1248 * @irq: irq 1249 * @devp: pointer to our soft state 1250 * 1251 * Interrupt service routine for io-mapped controllers. 1252 * Find out if our device is interrupting. If yes, acknowledge the interrupt 1253 * and service the completed commands. 1254 */ 1255 static irqreturn_t 1256 megaraid_isr_iomapped(int irq, void *devp) 1257 { 1258 adapter_t *adapter = devp; 1259 unsigned long flags; 1260 u8 status; 1261 u8 nstatus; 1262 u8 completed[MAX_FIRMWARE_STATUS]; 1263 u8 byte; 1264 int handled = 0; 1265 1266 1267 /* 1268 * loop till F/W has more commands for us to complete. 1269 */ 1270 spin_lock_irqsave(&adapter->lock, flags); 1271 1272 do { 1273 /* Check if a valid interrupt is pending */ 1274 byte = irq_state(adapter); 1275 if( (byte & VALID_INTR_BYTE) == 0 ) { 1276 /* 1277 * No more pending commands 1278 */ 1279 goto out_unlock; 1280 } 1281 set_irq_state(adapter, byte); 1282 1283 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus) 1284 == 0xFF) 1285 cpu_relax(); 1286 adapter->mbox->m_in.numstatus = 0xFF; 1287 1288 status = adapter->mbox->m_in.status; 1289 1290 /* 1291 * decrement the pending queue counter 1292 */ 1293 atomic_sub(nstatus, &adapter->pend_cmds); 1294 1295 memcpy(completed, (void *)adapter->mbox->m_in.completed, 1296 nstatus); 1297 1298 /* Acknowledge interrupt */ 1299 irq_ack(adapter); 1300 1301 mega_cmd_done(adapter, completed, nstatus, status); 1302 1303 mega_rundoneq(adapter); 1304 1305 handled = 1; 1306 1307 /* Loop through any pending requests */ 1308 if(atomic_read(&adapter->quiescent) == 0) { 1309 mega_runpendq(adapter); 1310 } 1311 1312 } while(1); 1313 1314 out_unlock: 1315 1316 spin_unlock_irqrestore(&adapter->lock, flags); 1317 1318 return IRQ_RETVAL(handled); 1319 } 1320 1321 1322 /** 1323 * megaraid_isr_memmapped() 1324 * @irq: irq 1325 * @devp: pointer to our soft state 1326 * 1327 * Interrupt service routine for memory-mapped controllers. 1328 * Find out if our device is interrupting. If yes, acknowledge the interrupt 1329 * and service the completed commands. 1330 */ 1331 static irqreturn_t 1332 megaraid_isr_memmapped(int irq, void *devp) 1333 { 1334 adapter_t *adapter = devp; 1335 unsigned long flags; 1336 u8 status; 1337 u32 dword = 0; 1338 u8 nstatus; 1339 u8 completed[MAX_FIRMWARE_STATUS]; 1340 int handled = 0; 1341 1342 1343 /* 1344 * loop till F/W has more commands for us to complete. 
1345 */ 1346 spin_lock_irqsave(&adapter->lock, flags); 1347 1348 do { 1349 /* Check if a valid interrupt is pending */ 1350 dword = RDOUTDOOR(adapter); 1351 if(dword != 0x10001234) { 1352 /* 1353 * No more pending commands 1354 */ 1355 goto out_unlock; 1356 } 1357 WROUTDOOR(adapter, 0x10001234); 1358 1359 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus) 1360 == 0xFF) { 1361 cpu_relax(); 1362 } 1363 adapter->mbox->m_in.numstatus = 0xFF; 1364 1365 status = adapter->mbox->m_in.status; 1366 1367 /* 1368 * decrement the pending queue counter 1369 */ 1370 atomic_sub(nstatus, &adapter->pend_cmds); 1371 1372 memcpy(completed, (void *)adapter->mbox->m_in.completed, 1373 nstatus); 1374 1375 /* Acknowledge interrupt */ 1376 WRINDOOR(adapter, 0x2); 1377 1378 handled = 1; 1379 1380 while( RDINDOOR(adapter) & 0x02 ) 1381 cpu_relax(); 1382 1383 mega_cmd_done(adapter, completed, nstatus, status); 1384 1385 mega_rundoneq(adapter); 1386 1387 /* Loop through any pending requests */ 1388 if(atomic_read(&adapter->quiescent) == 0) { 1389 mega_runpendq(adapter); 1390 } 1391 1392 } while(1); 1393 1394 out_unlock: 1395 1396 spin_unlock_irqrestore(&adapter->lock, flags); 1397 1398 return IRQ_RETVAL(handled); 1399 } 1400 /** 1401 * mega_cmd_done() 1402 * @adapter: pointer to our soft state 1403 * @completed: array of ids of completed commands 1404 * @nstatus: number of completed commands 1405 * @status: status of the last command completed 1406 * 1407 * Complete the commands and call the scsi mid-layer callback hooks. 1408 */ 1409 static void 1410 mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) 1411 { 1412 mega_ext_passthru *epthru = NULL; 1413 struct scatterlist *sgl; 1414 struct scsi_cmnd *cmd = NULL; 1415 mega_passthru *pthru = NULL; 1416 mbox_t *mbox = NULL; 1417 u8 c; 1418 scb_t *scb; 1419 int islogical; 1420 int cmdid; 1421 int i; 1422 1423 /* 1424 * for all the commands completed, call the mid-layer callback routine 1425 * and free the scb. 1426 */ 1427 for( i = 0; i < nstatus; i++ ) { 1428 1429 cmdid = completed[i]; 1430 1431 /* 1432 * Only free SCBs for the commands coming down from the 1433 * mid-layer, not for which were issued internally 1434 * 1435 * For internal command, restore the status returned by the 1436 * firmware so that user can interpret it. 
1437 */ 1438 if (cmdid == CMDID_INT_CMDS) { 1439 scb = &adapter->int_scb; 1440 1441 list_del_init(&scb->list); 1442 scb->state = SCB_FREE; 1443 1444 adapter->int_status = status; 1445 complete(&adapter->int_waitq); 1446 } else { 1447 scb = &adapter->scb_list[cmdid]; 1448 1449 /* 1450 * Make sure f/w has completed a valid command 1451 */ 1452 if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) { 1453 dev_crit(&adapter->dev->dev, "invalid command " 1454 "Id %d, scb->state:%x, scsi cmd:%p\n", 1455 cmdid, scb->state, scb->cmd); 1456 1457 continue; 1458 } 1459 1460 /* 1461 * Was a abort issued for this command 1462 */ 1463 if( scb->state & SCB_ABORT ) { 1464 1465 dev_warn(&adapter->dev->dev, 1466 "aborted cmd [%x] complete\n", 1467 scb->idx); 1468 1469 scb->cmd->result = (DID_ABORT << 16); 1470 1471 list_add_tail(SCSI_LIST(scb->cmd), 1472 &adapter->completed_list); 1473 1474 mega_free_scb(adapter, scb); 1475 1476 continue; 1477 } 1478 1479 /* 1480 * Was a reset issued for this command 1481 */ 1482 if( scb->state & SCB_RESET ) { 1483 1484 dev_warn(&adapter->dev->dev, 1485 "reset cmd [%x] complete\n", 1486 scb->idx); 1487 1488 scb->cmd->result = (DID_RESET << 16); 1489 1490 list_add_tail(SCSI_LIST(scb->cmd), 1491 &adapter->completed_list); 1492 1493 mega_free_scb (adapter, scb); 1494 1495 continue; 1496 } 1497 1498 cmd = scb->cmd; 1499 pthru = scb->pthru; 1500 epthru = scb->epthru; 1501 mbox = (mbox_t *)scb->raw_mbox; 1502 1503 #if MEGA_HAVE_STATS 1504 { 1505 1506 int logdrv = mbox->m_out.logdrv; 1507 1508 islogical = adapter->logdrv_chan[cmd->channel]; 1509 /* 1510 * Maintain an error counter for the logical drive. 1511 * Some application like SNMP agent need such 1512 * statistics 1513 */ 1514 if( status && islogical && (cmd->cmnd[0] == READ_6 || 1515 cmd->cmnd[0] == READ_10 || 1516 cmd->cmnd[0] == READ_12)) { 1517 /* 1518 * Logical drive number increases by 0x80 when 1519 * a logical drive is deleted 1520 */ 1521 adapter->rd_errors[logdrv%0x80]++; 1522 } 1523 1524 if( status && islogical && (cmd->cmnd[0] == WRITE_6 || 1525 cmd->cmnd[0] == WRITE_10 || 1526 cmd->cmnd[0] == WRITE_12)) { 1527 /* 1528 * Logical drive number increases by 0x80 when 1529 * a logical drive is deleted 1530 */ 1531 adapter->wr_errors[logdrv%0x80]++; 1532 } 1533 1534 } 1535 #endif 1536 } 1537 1538 /* 1539 * Do not return the presence of hard disk on the channel so, 1540 * inquiry sent, and returned data==hard disk or removable 1541 * hard disk and not logical, request should return failure! - 1542 * PJ 1543 */ 1544 islogical = adapter->logdrv_chan[cmd->device->channel]; 1545 if( cmd->cmnd[0] == INQUIRY && !islogical ) { 1546 1547 sgl = scsi_sglist(cmd); 1548 if( sg_page(sgl) ) { 1549 c = *(unsigned char *) sg_virt(&sgl[0]); 1550 } else { 1551 dev_warn(&adapter->dev->dev, "invalid sg\n"); 1552 c = 0; 1553 } 1554 1555 if(IS_RAID_CH(adapter, cmd->device->channel) && 1556 ((c & 0x1F ) == TYPE_DISK)) { 1557 status = 0xF0; 1558 } 1559 } 1560 1561 /* clear result; otherwise, success returns corrupt value */ 1562 cmd->result = 0; 1563 1564 /* Convert MegaRAID status to Linux error code */ 1565 switch (status) { 1566 case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */ 1567 cmd->result |= (DID_OK << 16); 1568 break; 1569 1570 case 0x02: /* ERROR_ABORTED, i.e. 
1571 SCSI_STATUS_CHECK_CONDITION */ 1572 1573 /* set sense_buffer and result fields */ 1574 if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU || 1575 mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) { 1576 1577 memcpy(cmd->sense_buffer, pthru->reqsensearea, 1578 14); 1579 1580 cmd->result = SAM_STAT_CHECK_CONDITION; 1581 } 1582 else { 1583 if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) { 1584 1585 memcpy(cmd->sense_buffer, 1586 epthru->reqsensearea, 14); 1587 1588 cmd->result = SAM_STAT_CHECK_CONDITION; 1589 } else 1590 scsi_build_sense(cmd, 0, 1591 ABORTED_COMMAND, 0, 0); 1592 } 1593 break; 1594 1595 case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e. 1596 SCSI_STATUS_BUSY */ 1597 cmd->result |= (DID_BUS_BUSY << 16) | status; 1598 break; 1599 1600 default: 1601 #if MEGA_HAVE_CLUSTERING 1602 /* 1603 * If TEST_UNIT_READY fails, we know 1604 * MEGA_RESERVATION_STATUS failed 1605 */ 1606 if( cmd->cmnd[0] == TEST_UNIT_READY ) { 1607 cmd->result |= (DID_ERROR << 16) | 1608 SAM_STAT_RESERVATION_CONFLICT; 1609 } 1610 else 1611 /* 1612 * Error code returned is 1 if Reserve or Release 1613 * failed or the input parameter is invalid 1614 */ 1615 if( status == 1 && 1616 (cmd->cmnd[0] == RESERVE || 1617 cmd->cmnd[0] == RELEASE) ) { 1618 1619 cmd->result |= (DID_ERROR << 16) | 1620 SAM_STAT_RESERVATION_CONFLICT; 1621 } 1622 else 1623 #endif 1624 cmd->result |= (DID_BAD_TARGET << 16)|status; 1625 } 1626 1627 mega_free_scb(adapter, scb); 1628 1629 /* Add Scsi_Command to end of completed queue */ 1630 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); 1631 } 1632 } 1633 1634 1635 /* 1636 * mega_runpendq() 1637 * 1638 * Run through the list of completed requests and finish it 1639 */ 1640 static void 1641 mega_rundoneq (adapter_t *adapter) 1642 { 1643 struct scsi_cmnd *cmd; 1644 struct list_head *pos; 1645 1646 list_for_each(pos, &adapter->completed_list) { 1647 1648 struct scsi_pointer* spos = (struct scsi_pointer *)pos; 1649 1650 cmd = list_entry(spos, struct scsi_cmnd, SCp); 1651 scsi_done(cmd); 1652 } 1653 1654 INIT_LIST_HEAD(&adapter->completed_list); 1655 } 1656 1657 1658 /* 1659 * Free a SCB structure 1660 * Note: We assume the scsi commands associated with this scb is not free yet. 1661 */ 1662 static void 1663 mega_free_scb(adapter_t *adapter, scb_t *scb) 1664 { 1665 switch( scb->dma_type ) { 1666 1667 case MEGA_DMA_TYPE_NONE: 1668 break; 1669 1670 case MEGA_SGLIST: 1671 scsi_dma_unmap(scb->cmd); 1672 break; 1673 default: 1674 break; 1675 } 1676 1677 /* 1678 * Remove from the pending list 1679 */ 1680 list_del_init(&scb->list); 1681 1682 /* Link the scb back into free list */ 1683 scb->state = SCB_FREE; 1684 scb->cmd = NULL; 1685 1686 list_add(&scb->list, &adapter->free_list); 1687 } 1688 1689 1690 static int 1691 __mega_busywait_mbox (adapter_t *adapter) 1692 { 1693 volatile mbox_t *mbox = adapter->mbox; 1694 long counter; 1695 1696 for (counter = 0; counter < 10000; counter++) { 1697 if (!mbox->m_in.busy) 1698 return 0; 1699 udelay(100); 1700 cond_resched(); 1701 } 1702 return -1; /* give up after 1 second */ 1703 } 1704 1705 /* 1706 * Copies data to SGLIST 1707 * Note: For 64 bit cards, we need a minimum of one SG element for read/write 1708 */ 1709 static int 1710 mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) 1711 { 1712 struct scatterlist *sg; 1713 struct scsi_cmnd *cmd; 1714 int sgcnt; 1715 int idx; 1716 1717 cmd = scb->cmd; 1718 1719 /* 1720 * Copy Scatter-Gather list info into controller structure. 
1721 * 1722 * The number of sg elements returned must not exceed our limit 1723 */ 1724 sgcnt = scsi_dma_map(cmd); 1725 1726 scb->dma_type = MEGA_SGLIST; 1727 1728 BUG_ON(sgcnt > adapter->sglen || sgcnt < 0); 1729 1730 *len = 0; 1731 1732 if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) { 1733 sg = scsi_sglist(cmd); 1734 scb->dma_h_bulkdata = sg_dma_address(sg); 1735 *buf = (u32)scb->dma_h_bulkdata; 1736 *len = sg_dma_len(sg); 1737 return 0; 1738 } 1739 1740 scsi_for_each_sg(cmd, sg, sgcnt, idx) { 1741 if (adapter->has_64bit_addr) { 1742 scb->sgl64[idx].address = sg_dma_address(sg); 1743 *len += scb->sgl64[idx].length = sg_dma_len(sg); 1744 } else { 1745 scb->sgl[idx].address = sg_dma_address(sg); 1746 *len += scb->sgl[idx].length = sg_dma_len(sg); 1747 } 1748 } 1749 1750 /* Reset pointer and length fields */ 1751 *buf = scb->sgl_dma_addr; 1752 1753 /* Return count of SG requests */ 1754 return sgcnt; 1755 } 1756 1757 1758 /* 1759 * mega_8_to_40ld() 1760 * 1761 * takes all info in AdapterInquiry structure and puts it into ProductInfo and 1762 * Enquiry3 structures for later use 1763 */ 1764 static void 1765 mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3, 1766 mega_product_info *product_info) 1767 { 1768 int i; 1769 1770 product_info->max_commands = inquiry->adapter_info.max_commands; 1771 enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate; 1772 product_info->nchannels = inquiry->adapter_info.nchannels; 1773 1774 for (i = 0; i < 4; i++) { 1775 product_info->fw_version[i] = 1776 inquiry->adapter_info.fw_version[i]; 1777 1778 product_info->bios_version[i] = 1779 inquiry->adapter_info.bios_version[i]; 1780 } 1781 enquiry3->cache_flush_interval = 1782 inquiry->adapter_info.cache_flush_interval; 1783 1784 product_info->dram_size = inquiry->adapter_info.dram_size; 1785 1786 enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv; 1787 1788 for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) { 1789 enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i]; 1790 enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i]; 1791 enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i]; 1792 } 1793 1794 for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++) 1795 enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i]; 1796 } 1797 1798 static inline void 1799 mega_free_sgl(adapter_t *adapter) 1800 { 1801 scb_t *scb; 1802 int i; 1803 1804 for(i = 0; i < adapter->max_cmds; i++) { 1805 1806 scb = &adapter->scb_list[i]; 1807 1808 if( scb->sgl64 ) { 1809 dma_free_coherent(&adapter->dev->dev, 1810 sizeof(mega_sgl64) * adapter->sglen, 1811 scb->sgl64, scb->sgl_dma_addr); 1812 1813 scb->sgl64 = NULL; 1814 } 1815 1816 if( scb->pthru ) { 1817 dma_free_coherent(&adapter->dev->dev, 1818 sizeof(mega_passthru), scb->pthru, 1819 scb->pthru_dma_addr); 1820 1821 scb->pthru = NULL; 1822 } 1823 1824 if( scb->epthru ) { 1825 dma_free_coherent(&adapter->dev->dev, 1826 sizeof(mega_ext_passthru), 1827 scb->epthru, scb->epthru_dma_addr); 1828 1829 scb->epthru = NULL; 1830 } 1831 1832 } 1833 } 1834 1835 1836 /* 1837 * Get information about the card/driver 1838 */ 1839 const char * 1840 megaraid_info(struct Scsi_Host *host) 1841 { 1842 static char buffer[512]; 1843 adapter_t *adapter; 1844 1845 adapter = (adapter_t *)host->hostdata; 1846 1847 sprintf (buffer, 1848 "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns", 1849 adapter->fw_version, adapter->product_info.max_commands, 1850 adapter->host->max_id, adapter->host->max_channel, 1851 (u32)adapter->host->max_lun); 1852 return buffer; 1853 } 1854 
1855 /* 1856 * Abort a previous SCSI request. Only commands on the pending list can be 1857 * aborted. All the commands issued to the F/W must complete. 1858 */ 1859 static int 1860 megaraid_abort(struct scsi_cmnd *cmd) 1861 { 1862 adapter_t *adapter; 1863 int rval; 1864 1865 adapter = (adapter_t *)cmd->device->host->hostdata; 1866 1867 rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT); 1868 1869 /* 1870 * This is required here to complete any completed requests 1871 * to be communicated over to the mid layer. 1872 */ 1873 mega_rundoneq(adapter); 1874 1875 return rval; 1876 } 1877 1878 1879 static int 1880 megaraid_reset(struct scsi_cmnd *cmd) 1881 { 1882 adapter_t *adapter; 1883 megacmd_t mc; 1884 int rval; 1885 1886 adapter = (adapter_t *)cmd->device->host->hostdata; 1887 1888 #if MEGA_HAVE_CLUSTERING 1889 mc.cmd = MEGA_CLUSTER_CMD; 1890 mc.opcode = MEGA_RESET_RESERVATIONS; 1891 1892 if( mega_internal_command(adapter, &mc, NULL) != 0 ) { 1893 dev_warn(&adapter->dev->dev, "reservation reset failed\n"); 1894 } 1895 else { 1896 dev_info(&adapter->dev->dev, "reservation reset\n"); 1897 } 1898 #endif 1899 1900 spin_lock_irq(&adapter->lock); 1901 1902 rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET); 1903 1904 /* 1905 * This is required here to complete any completed requests 1906 * to be communicated over to the mid layer. 1907 */ 1908 mega_rundoneq(adapter); 1909 spin_unlock_irq(&adapter->lock); 1910 1911 return rval; 1912 } 1913 1914 /** 1915 * megaraid_abort_and_reset() 1916 * @adapter: megaraid soft state 1917 * @cmd: scsi command to be aborted or reset 1918 * @aor: abort or reset flag 1919 * 1920 * Try to locate the scsi command in the pending queue. If found and is not 1921 * issued to the controller, abort/reset it. Otherwise return failure 1922 */ 1923 static int 1924 megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor) 1925 { 1926 struct list_head *pos, *next; 1927 scb_t *scb; 1928 1929 dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n", 1930 (aor == SCB_ABORT)? "ABORTING":"RESET", 1931 cmd->cmnd[0], cmd->device->channel, 1932 cmd->device->id, (u32)cmd->device->lun); 1933 1934 if(list_empty(&adapter->pending_list)) 1935 return FAILED; 1936 1937 list_for_each_safe(pos, next, &adapter->pending_list) { 1938 1939 scb = list_entry(pos, scb_t, list); 1940 1941 if (scb->cmd == cmd) { /* Found command */ 1942 1943 scb->state |= aor; 1944 1945 /* 1946 * Check if this command has firmware ownership. If 1947 * yes, we cannot reset this command. Whenever f/w 1948 * completes this command, we will return appropriate 1949 * status from ISR. 1950 */ 1951 if( scb->state & SCB_ISSUED ) { 1952 1953 dev_warn(&adapter->dev->dev, 1954 "%s[%x], fw owner\n", 1955 (aor==SCB_ABORT) ? "ABORTING":"RESET", 1956 scb->idx); 1957 1958 return FAILED; 1959 } 1960 else { 1961 1962 /* 1963 * Not yet issued! Remove from the pending 1964 * list 1965 */ 1966 dev_warn(&adapter->dev->dev, 1967 "%s-[%x], driver owner\n", 1968 (aor==SCB_ABORT) ? 
"ABORTING":"RESET", 1969 scb->idx); 1970 1971 mega_free_scb(adapter, scb); 1972 1973 if( aor == SCB_ABORT ) { 1974 cmd->result = (DID_ABORT << 16); 1975 } 1976 else { 1977 cmd->result = (DID_RESET << 16); 1978 } 1979 1980 list_add_tail(SCSI_LIST(cmd), 1981 &adapter->completed_list); 1982 1983 return SUCCESS; 1984 } 1985 } 1986 } 1987 1988 return FAILED; 1989 } 1990 1991 static inline int 1992 make_local_pdev(adapter_t *adapter, struct pci_dev **pdev) 1993 { 1994 *pdev = pci_alloc_dev(NULL); 1995 1996 if( *pdev == NULL ) return -1; 1997 1998 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev)); 1999 2000 if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) { 2001 kfree(*pdev); 2002 return -1; 2003 } 2004 2005 return 0; 2006 } 2007 2008 static inline void 2009 free_local_pdev(struct pci_dev *pdev) 2010 { 2011 kfree(pdev); 2012 } 2013 2014 /** 2015 * mega_allocate_inquiry() 2016 * @dma_handle: handle returned for dma address 2017 * @pdev: handle to pci device 2018 * 2019 * allocates memory for inquiry structure 2020 */ 2021 static inline void * 2022 mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev) 2023 { 2024 return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3), 2025 dma_handle, GFP_KERNEL); 2026 } 2027 2028 2029 static inline void 2030 mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev) 2031 { 2032 dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry, 2033 dma_handle); 2034 } 2035 2036 2037 #ifdef CONFIG_PROC_FS 2038 /* Following code handles /proc fs */ 2039 2040 /** 2041 * proc_show_config() 2042 * @m: Synthetic file construction data 2043 * @v: File iterator 2044 * 2045 * Display configuration information about the controller. 2046 */ 2047 static int 2048 proc_show_config(struct seq_file *m, void *v) 2049 { 2050 2051 adapter_t *adapter = m->private; 2052 2053 seq_puts(m, MEGARAID_VERSION); 2054 if(adapter->product_info.product_name[0]) 2055 seq_printf(m, "%s\n", adapter->product_info.product_name); 2056 2057 seq_puts(m, "Controller Type: "); 2058 2059 if( adapter->flag & BOARD_MEMMAP ) 2060 seq_puts(m, "438/466/467/471/493/518/520/531/532\n"); 2061 else 2062 seq_puts(m, "418/428/434\n"); 2063 2064 if(adapter->flag & BOARD_40LD) 2065 seq_puts(m, "Controller Supports 40 Logical Drives\n"); 2066 2067 if(adapter->flag & BOARD_64BIT) 2068 seq_puts(m, "Controller capable of 64-bit memory addressing\n"); 2069 if( adapter->has_64bit_addr ) 2070 seq_puts(m, "Controller using 64-bit memory addressing\n"); 2071 else 2072 seq_puts(m, "Controller is not using 64-bit memory addressing\n"); 2073 2074 seq_printf(m, "Base = %08lx, Irq = %d, ", 2075 adapter->base, adapter->host->irq); 2076 2077 seq_printf(m, "Logical Drives = %d, Channels = %d\n", 2078 adapter->numldrv, adapter->product_info.nchannels); 2079 2080 seq_printf(m, "Version =%s:%s, DRAM = %dMb\n", 2081 adapter->fw_version, adapter->bios_version, 2082 adapter->product_info.dram_size); 2083 2084 seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n", 2085 adapter->product_info.max_commands, adapter->max_cmds); 2086 2087 seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb); 2088 seq_printf(m, "support_random_del = %d\n", adapter->support_random_del); 2089 seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled); 2090 seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv); 2091 seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled); 2092 seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch); 2093 seq_printf(m, "boot_pdrv_tgt = 
%d\n", adapter->boot_pdrv_tgt); 2094 seq_printf(m, "quiescent = %d\n", 2095 atomic_read(&adapter->quiescent)); 2096 seq_printf(m, "has_cluster = %d\n", adapter->has_cluster); 2097 2098 seq_puts(m, "\nModule Parameters:\n"); 2099 seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun); 2100 seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io); 2101 return 0; 2102 } 2103 2104 /** 2105 * proc_show_stat() 2106 * @m: Synthetic file construction data 2107 * @v: File iterator 2108 * 2109 * Display statistical information about the I/O activity. 2110 */ 2111 static int 2112 proc_show_stat(struct seq_file *m, void *v) 2113 { 2114 adapter_t *adapter = m->private; 2115 #if MEGA_HAVE_STATS 2116 int i; 2117 #endif 2118 2119 seq_puts(m, "Statistical Information for this controller\n"); 2120 seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds)); 2121 #if MEGA_HAVE_STATS 2122 for(i = 0; i < adapter->numldrv; i++) { 2123 seq_printf(m, "Logical Drive %d:\n", i); 2124 seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n", 2125 adapter->nreads[i], adapter->nwrites[i]); 2126 seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n", 2127 adapter->nreadblocks[i], adapter->nwriteblocks[i]); 2128 seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n", 2129 adapter->rd_errors[i], adapter->wr_errors[i]); 2130 } 2131 #else 2132 seq_puts(m, "IO and error counters not compiled in driver.\n"); 2133 #endif 2134 return 0; 2135 } 2136 2137 2138 /** 2139 * proc_show_mbox() 2140 * @m: Synthetic file construction data 2141 * @v: File iterator 2142 * 2143 * Display mailbox information for the last command issued. This information 2144 * is good for debugging. 2145 */ 2146 static int 2147 proc_show_mbox(struct seq_file *m, void *v) 2148 { 2149 adapter_t *adapter = m->private; 2150 volatile mbox_t *mbox = adapter->mbox; 2151 2152 seq_puts(m, "Contents of Mail Box Structure\n"); 2153 seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd); 2154 seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid); 2155 seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors); 2156 seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba); 2157 seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr); 2158 seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv); 2159 seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements); 2160 seq_printf(m, " Busy = %01x\n", mbox->m_in.busy); 2161 seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status); 2162 return 0; 2163 } 2164 2165 2166 /** 2167 * proc_show_rebuild_rate() 2168 * @m: Synthetic file construction data 2169 * @v: File iterator 2170 * 2171 * Display current rebuild rate 2172 */ 2173 static int 2174 proc_show_rebuild_rate(struct seq_file *m, void *v) 2175 { 2176 adapter_t *adapter = m->private; 2177 dma_addr_t dma_handle; 2178 caddr_t inquiry; 2179 struct pci_dev *pdev; 2180 2181 if( make_local_pdev(adapter, &pdev) != 0 ) 2182 return 0; 2183 2184 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2185 goto free_pdev; 2186 2187 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2188 seq_puts(m, "Adapter inquiry failed.\n"); 2189 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2190 goto free_inquiry; 2191 } 2192 2193 if( adapter->flag & BOARD_40LD ) 2194 seq_printf(m, "Rebuild Rate: [%d%%]\n", 2195 ((mega_inquiry3 *)inquiry)->rebuild_rate); 2196 else 2197 seq_printf(m, "Rebuild Rate: [%d%%]\n", 2198 ((mraid_ext_inquiry *) 2199 inquiry)->raid_inq.adapter_info.rebuild_rate); 2200 2201 free_inquiry: 2202 
mega_free_inquiry(inquiry, dma_handle, pdev); 2203 free_pdev: 2204 free_local_pdev(pdev); 2205 return 0; 2206 } 2207 2208 2209 /** 2210 * proc_show_battery() 2211 * @m: Synthetic file construction data 2212 * @v: File iterator 2213 * 2214 * Display information about the battery module on the controller. 2215 */ 2216 static int 2217 proc_show_battery(struct seq_file *m, void *v) 2218 { 2219 adapter_t *adapter = m->private; 2220 dma_addr_t dma_handle; 2221 caddr_t inquiry; 2222 struct pci_dev *pdev; 2223 u8 battery_status; 2224 2225 if( make_local_pdev(adapter, &pdev) != 0 ) 2226 return 0; 2227 2228 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2229 goto free_pdev; 2230 2231 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2232 seq_puts(m, "Adapter inquiry failed.\n"); 2233 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2234 goto free_inquiry; 2235 } 2236 2237 if( adapter->flag & BOARD_40LD ) { 2238 battery_status = ((mega_inquiry3 *)inquiry)->battery_status; 2239 } 2240 else { 2241 battery_status = ((mraid_ext_inquiry *)inquiry)-> 2242 raid_inq.adapter_info.battery_status; 2243 } 2244 2245 /* 2246 * Decode the battery status 2247 */ 2248 seq_printf(m, "Battery Status:[%d]", battery_status); 2249 2250 if(battery_status == MEGA_BATT_CHARGE_DONE) 2251 seq_puts(m, " Charge Done"); 2252 2253 if(battery_status & MEGA_BATT_MODULE_MISSING) 2254 seq_puts(m, " Module Missing"); 2255 2256 if(battery_status & MEGA_BATT_LOW_VOLTAGE) 2257 seq_puts(m, " Low Voltage"); 2258 2259 if(battery_status & MEGA_BATT_TEMP_HIGH) 2260 seq_puts(m, " Temperature High"); 2261 2262 if(battery_status & MEGA_BATT_PACK_MISSING) 2263 seq_puts(m, " Pack Missing"); 2264 2265 if(battery_status & MEGA_BATT_CHARGE_INPROG) 2266 seq_puts(m, " Charge In-progress"); 2267 2268 if(battery_status & MEGA_BATT_CHARGE_FAIL) 2269 seq_puts(m, " Charge Fail"); 2270 2271 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) 2272 seq_puts(m, " Cycles Exceeded"); 2273 2274 seq_putc(m, '\n'); 2275 2276 free_inquiry: 2277 mega_free_inquiry(inquiry, dma_handle, pdev); 2278 free_pdev: 2279 free_local_pdev(pdev); 2280 return 0; 2281 } 2282 2283 2284 /* 2285 * Display scsi inquiry 2286 */ 2287 static void 2288 mega_print_inquiry(struct seq_file *m, char *scsi_inq) 2289 { 2290 int i; 2291 2292 seq_puts(m, " Vendor: "); 2293 seq_write(m, scsi_inq + 8, 8); 2294 seq_puts(m, " Model: "); 2295 seq_write(m, scsi_inq + 16, 16); 2296 seq_puts(m, " Rev: "); 2297 seq_write(m, scsi_inq + 32, 4); 2298 seq_putc(m, '\n'); 2299 2300 i = scsi_inq[0] & 0x1f; 2301 seq_printf(m, " Type: %s ", scsi_device_type(i)); 2302 2303 seq_printf(m, " ANSI SCSI revision: %02x", 2304 scsi_inq[2] & 0x07); 2305 2306 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) 2307 seq_puts(m, " CCS\n"); 2308 else 2309 seq_putc(m, '\n'); 2310 } 2311 2312 /** 2313 * proc_show_pdrv() 2314 * @m: Synthetic file construction data 2315 * @adapter: pointer to our soft state 2316 * @channel: channel 2317 * 2318 * Display information about the physical drives. 
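 *
 * The per-target state is taken from the pdrv_state array returned by the
 * adapter inquiry (mega_adapinq()); for disk-type devices a SCSI INQUIRY is
 * then issued via mega_internal_dev_inquiry() and decoded by
 * mega_print_inquiry().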
2319 */ 2320 static int 2321 proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel) 2322 { 2323 dma_addr_t dma_handle; 2324 char *scsi_inq; 2325 dma_addr_t scsi_inq_dma_handle; 2326 caddr_t inquiry; 2327 struct pci_dev *pdev; 2328 u8 *pdrv_state; 2329 u8 state; 2330 int tgt; 2331 int max_channels; 2332 int i; 2333 2334 if( make_local_pdev(adapter, &pdev) != 0 ) 2335 return 0; 2336 2337 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2338 goto free_pdev; 2339 2340 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2341 seq_puts(m, "Adapter inquiry failed.\n"); 2342 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2343 goto free_inquiry; 2344 } 2345 2346 2347 scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle, 2348 GFP_KERNEL); 2349 if( scsi_inq == NULL ) { 2350 seq_puts(m, "memory not available for scsi inq.\n"); 2351 goto free_inquiry; 2352 } 2353 2354 if( adapter->flag & BOARD_40LD ) { 2355 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state; 2356 } 2357 else { 2358 pdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2359 raid_inq.pdrv_info.pdrv_state; 2360 } 2361 2362 max_channels = adapter->product_info.nchannels; 2363 2364 if( channel >= max_channels ) { 2365 goto free_pci; 2366 } 2367 2368 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) { 2369 2370 i = channel*16 + tgt; 2371 2372 state = *(pdrv_state + i); 2373 switch( state & 0x0F ) { 2374 case PDRV_ONLINE: 2375 seq_printf(m, "Channel:%2d Id:%2d State: Online", 2376 channel, tgt); 2377 break; 2378 2379 case PDRV_FAILED: 2380 seq_printf(m, "Channel:%2d Id:%2d State: Failed", 2381 channel, tgt); 2382 break; 2383 2384 case PDRV_RBLD: 2385 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild", 2386 channel, tgt); 2387 break; 2388 2389 case PDRV_HOTSPARE: 2390 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare", 2391 channel, tgt); 2392 break; 2393 2394 default: 2395 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured", 2396 channel, tgt); 2397 break; 2398 } 2399 2400 /* 2401 * This interface displays inquiries for disk drives 2402 * only. Inquries for logical drives and non-disk 2403 * devices are available through /proc/scsi/scsi 2404 */ 2405 memset(scsi_inq, 0, 256); 2406 if( mega_internal_dev_inquiry(adapter, channel, tgt, 2407 scsi_inq_dma_handle) || 2408 (scsi_inq[0] & 0x1F) != TYPE_DISK ) { 2409 continue; 2410 } 2411 2412 /* 2413 * Check for overflow. We print less than 240 2414 * characters for inquiry 2415 */ 2416 seq_puts(m, ".\n"); 2417 mega_print_inquiry(m, scsi_inq); 2418 } 2419 2420 free_pci: 2421 dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle); 2422 free_inquiry: 2423 mega_free_inquiry(inquiry, dma_handle, pdev); 2424 free_pdev: 2425 free_local_pdev(pdev); 2426 return 0; 2427 } 2428 2429 /** 2430 * proc_show_pdrv_ch0() 2431 * @m: Synthetic file construction data 2432 * @v: File iterator 2433 * 2434 * Display information about the physical drives on physical channel 0. 2435 */ 2436 static int 2437 proc_show_pdrv_ch0(struct seq_file *m, void *v) 2438 { 2439 return proc_show_pdrv(m, m->private, 0); 2440 } 2441 2442 2443 /** 2444 * proc_show_pdrv_ch1() 2445 * @m: Synthetic file construction data 2446 * @v: File iterator 2447 * 2448 * Display information about the physical drives on physical channel 1. 
2449 */ 2450 static int 2451 proc_show_pdrv_ch1(struct seq_file *m, void *v) 2452 { 2453 return proc_show_pdrv(m, m->private, 1); 2454 } 2455 2456 2457 /** 2458 * proc_show_pdrv_ch2() 2459 * @m: Synthetic file construction data 2460 * @v: File iterator 2461 * 2462 * Display information about the physical drives on physical channel 2. 2463 */ 2464 static int 2465 proc_show_pdrv_ch2(struct seq_file *m, void *v) 2466 { 2467 return proc_show_pdrv(m, m->private, 2); 2468 } 2469 2470 2471 /** 2472 * proc_show_pdrv_ch3() 2473 * @m: Synthetic file construction data 2474 * @v: File iterator 2475 * 2476 * Display information about the physical drives on physical channel 3. 2477 */ 2478 static int 2479 proc_show_pdrv_ch3(struct seq_file *m, void *v) 2480 { 2481 return proc_show_pdrv(m, m->private, 3); 2482 } 2483 2484 2485 /** 2486 * proc_show_rdrv() 2487 * @m: Synthetic file construction data 2488 * @adapter: pointer to our soft state 2489 * @start: starting logical drive to display 2490 * @end: ending logical drive to display 2491 * 2492 * We do not print the inquiry information since its already available through 2493 * /proc/scsi/scsi interface 2494 */ 2495 static int 2496 proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end ) 2497 { 2498 dma_addr_t dma_handle; 2499 logdrv_param *lparam; 2500 megacmd_t mc; 2501 char *disk_array; 2502 dma_addr_t disk_array_dma_handle; 2503 caddr_t inquiry; 2504 struct pci_dev *pdev; 2505 u8 *rdrv_state; 2506 int num_ldrv; 2507 u32 array_sz; 2508 int i; 2509 2510 if( make_local_pdev(adapter, &pdev) != 0 ) 2511 return 0; 2512 2513 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) 2514 goto free_pdev; 2515 2516 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2517 seq_puts(m, "Adapter inquiry failed.\n"); 2518 dev_warn(&adapter->dev->dev, "inquiry failed\n"); 2519 goto free_inquiry; 2520 } 2521 2522 memset(&mc, 0, sizeof(megacmd_t)); 2523 2524 if( adapter->flag & BOARD_40LD ) { 2525 array_sz = sizeof(disk_array_40ld); 2526 2527 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state; 2528 2529 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv; 2530 } 2531 else { 2532 array_sz = sizeof(disk_array_8ld); 2533 2534 rdrv_state = ((mraid_ext_inquiry *)inquiry)-> 2535 raid_inq.logdrv_info.ldrv_state; 2536 2537 num_ldrv = ((mraid_ext_inquiry *)inquiry)-> 2538 raid_inq.logdrv_info.num_ldrv; 2539 } 2540 2541 disk_array = dma_alloc_coherent(&pdev->dev, array_sz, 2542 &disk_array_dma_handle, GFP_KERNEL); 2543 2544 if( disk_array == NULL ) { 2545 seq_puts(m, "memory not available.\n"); 2546 goto free_inquiry; 2547 } 2548 2549 mc.xferaddr = (u32)disk_array_dma_handle; 2550 2551 if( adapter->flag & BOARD_40LD ) { 2552 mc.cmd = FC_NEW_CONFIG; 2553 mc.opcode = OP_DCMD_READ_CONFIG; 2554 2555 if( mega_internal_command(adapter, &mc, NULL) ) { 2556 seq_puts(m, "40LD read config failed.\n"); 2557 goto free_pci; 2558 } 2559 2560 } 2561 else { 2562 mc.cmd = NEW_READ_CONFIG_8LD; 2563 2564 if( mega_internal_command(adapter, &mc, NULL) ) { 2565 mc.cmd = READ_CONFIG_8LD; 2566 if( mega_internal_command(adapter, &mc, NULL) ) { 2567 seq_puts(m, "8LD read config failed.\n"); 2568 goto free_pci; 2569 } 2570 } 2571 } 2572 2573 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) { 2574 2575 if( adapter->flag & BOARD_40LD ) { 2576 lparam = 2577 &((disk_array_40ld *)disk_array)->ldrv[i].lparam; 2578 } 2579 else { 2580 lparam = 2581 &((disk_array_8ld *)disk_array)->ldrv[i].lparam; 2582 } 2583 2584 /* 2585 * Check for overflow. 
We print less than 240 characters for
		 * information about each logical drive.
		 */
		seq_printf(m, "Logical drive:%2d:, ", i);

		switch( rdrv_state[i] & 0x0F ) {
		case RDRV_OFFLINE:
			seq_puts(m, "state: offline");
			break;
		case RDRV_DEGRADED:
			seq_puts(m, "state: degraded");
			break;
		case RDRV_OPTIMAL:
			seq_puts(m, "state: optimal");
			break;
		case RDRV_DELETED:
			seq_puts(m, "state: deleted");
			break;
		default:
			seq_puts(m, "state: unknown");
			break;
		}

		/*
		 * Check if check consistency or initialization is going on
		 * for this logical drive.
		 */
		if( (rdrv_state[i] & 0xF0) == 0x20 )
			seq_puts(m, ", check-consistency in progress");
		else if( (rdrv_state[i] & 0xF0) == 0x10 )
			seq_puts(m, ", initialization in progress");

		seq_putc(m, '\n');

		seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
		seq_printf(m, "RAID level:%3d, ", lparam->level);
		seq_printf(m, "Stripe size:%3d, ",
			lparam->stripe_sz ? lparam->stripe_sz/2: 128);
		seq_printf(m, "Row size:%3d\n", lparam->row_size);

		seq_puts(m, "Read Policy: ");
		switch(lparam->read_ahead) {
		case NO_READ_AHEAD:
			seq_puts(m, "No read ahead, ");
			break;
		case READ_AHEAD:
			seq_puts(m, "Read ahead, ");
			break;
		case ADAP_READ_AHEAD:
			seq_puts(m, "Adaptive, ");
			break;
		}

		seq_puts(m, "Write Policy: ");
		switch(lparam->write_mode) {
		case WRMODE_WRITE_THRU:
			seq_puts(m, "Write thru, ");
			break;
		case WRMODE_WRITE_BACK:
			seq_puts(m, "Write back, ");
			break;
		}

		seq_puts(m, "Cache Policy: ");
		switch(lparam->direct_io) {
		case CACHED_IO:
			seq_puts(m, "Cached IO\n\n");
			break;
		case DIRECT_IO:
			seq_puts(m, "Direct IO\n\n");
			break;
		}
	}

free_pci:
	dma_free_coherent(&pdev->dev, array_sz, disk_array,
			disk_array_dma_handle);
free_inquiry:
	mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
	free_local_pdev(pdev);
	return 0;
}

/**
 * proc_show_rdrv_10()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display real time information about the logical drives 0 through 9.
 */
static int
proc_show_rdrv_10(struct seq_file *m, void *v)
{
	return proc_show_rdrv(m, m->private, 0, 9);
}


/**
 * proc_show_rdrv_20()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display real time information about the logical drives 10 through 19.
 */
static int
proc_show_rdrv_20(struct seq_file *m, void *v)
{
	return proc_show_rdrv(m, m->private, 10, 19);
}


/**
 * proc_show_rdrv_30()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display real time information about the logical drives 20 through 29.
 */
static int
proc_show_rdrv_30(struct seq_file *m, void *v)
{
	return proc_show_rdrv(m, m->private, 20, 29);
}


/**
 * proc_show_rdrv_40()
 * @m: Synthetic file construction data
 * @v: File iterator
 *
 * Display real time information about the logical drives 30 through 39.
2718 */ 2719 static int 2720 proc_show_rdrv_40(struct seq_file *m, void *v) 2721 { 2722 return proc_show_rdrv(m, m->private, 30, 39); 2723 } 2724 2725 /** 2726 * mega_create_proc_entry() 2727 * @index: index in soft state array 2728 * @parent: parent node for this /proc entry 2729 * 2730 * Creates /proc entries for our controllers. 2731 */ 2732 static void 2733 mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2734 { 2735 adapter_t *adapter = hba_soft_state[index]; 2736 struct proc_dir_entry *dir; 2737 u8 string[16]; 2738 2739 sprintf(string, "hba%d", adapter->host->host_no); 2740 dir = proc_mkdir_data(string, 0, parent, adapter); 2741 if (!dir) { 2742 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n"); 2743 return; 2744 } 2745 2746 proc_create_single_data("config", S_IRUSR, dir, 2747 proc_show_config, adapter); 2748 proc_create_single_data("stat", S_IRUSR, dir, 2749 proc_show_stat, adapter); 2750 proc_create_single_data("mailbox", S_IRUSR, dir, 2751 proc_show_mbox, adapter); 2752 #if MEGA_HAVE_ENH_PROC 2753 proc_create_single_data("rebuild-rate", S_IRUSR, dir, 2754 proc_show_rebuild_rate, adapter); 2755 proc_create_single_data("battery-status", S_IRUSR, dir, 2756 proc_show_battery, adapter); 2757 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir, 2758 proc_show_pdrv_ch0, adapter); 2759 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir, 2760 proc_show_pdrv_ch1, adapter); 2761 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir, 2762 proc_show_pdrv_ch2, adapter); 2763 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir, 2764 proc_show_pdrv_ch3, adapter); 2765 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir, 2766 proc_show_rdrv_10, adapter); 2767 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir, 2768 proc_show_rdrv_20, adapter); 2769 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir, 2770 proc_show_rdrv_30, adapter); 2771 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir, 2772 proc_show_rdrv_40, adapter); 2773 #endif 2774 } 2775 2776 #else 2777 static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent) 2778 { 2779 } 2780 #endif 2781 2782 2783 /* 2784 * megaraid_biosparam() 2785 * 2786 * Return the disk geometry for a particular disk 2787 */ 2788 static int 2789 megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev, 2790 sector_t capacity, int geom[]) 2791 { 2792 adapter_t *adapter; 2793 int heads; 2794 int sectors; 2795 int cylinders; 2796 2797 /* Get pointer to host config structure */ 2798 adapter = (adapter_t *)sdev->host->hostdata; 2799 2800 if (IS_RAID_CH(adapter, sdev->channel)) { 2801 /* Default heads (64) & sectors (32) */ 2802 heads = 64; 2803 sectors = 32; 2804 cylinders = (ulong)capacity / (heads * sectors); 2805 2806 /* 2807 * Handle extended translation size for logical drives 2808 * > 1Gb 2809 */ 2810 if ((ulong)capacity >= 0x200000) { 2811 heads = 255; 2812 sectors = 63; 2813 cylinders = (ulong)capacity / (heads * sectors); 2814 } 2815 2816 /* return result */ 2817 geom[0] = heads; 2818 geom[1] = sectors; 2819 geom[2] = cylinders; 2820 } 2821 else { 2822 if (scsi_partsize(bdev, capacity, geom)) 2823 return 0; 2824 2825 dev_info(&adapter->dev->dev, 2826 "invalid partition on this disk on channel %d\n", 2827 sdev->channel); 2828 2829 /* Default heads (64) & sectors (32) */ 2830 heads = 64; 2831 sectors = 32; 2832 cylinders = (ulong)capacity / (heads * sectors); 2833 2834 /* Handle extended translation size for logical drives > 1Gb */ 2835 if ((ulong)capacity >= 0x200000) { 2836 
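			/* 0x200000 sectors x 512 bytes = 1 GiB, hence the larger translated geometry */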
heads = 255; 2837 sectors = 63; 2838 cylinders = (ulong)capacity / (heads * sectors); 2839 } 2840 2841 /* return result */ 2842 geom[0] = heads; 2843 geom[1] = sectors; 2844 geom[2] = cylinders; 2845 } 2846 2847 return 0; 2848 } 2849 2850 /** 2851 * mega_init_scb() 2852 * @adapter: pointer to our soft state 2853 * 2854 * Allocate memory for the various pointers in the scb structures: 2855 * scatter-gather list pointer, passthru and extended passthru structure 2856 * pointers. 2857 */ 2858 static int 2859 mega_init_scb(adapter_t *adapter) 2860 { 2861 scb_t *scb; 2862 int i; 2863 2864 for( i = 0; i < adapter->max_cmds; i++ ) { 2865 2866 scb = &adapter->scb_list[i]; 2867 2868 scb->sgl64 = NULL; 2869 scb->sgl = NULL; 2870 scb->pthru = NULL; 2871 scb->epthru = NULL; 2872 } 2873 2874 for( i = 0; i < adapter->max_cmds; i++ ) { 2875 2876 scb = &adapter->scb_list[i]; 2877 2878 scb->idx = i; 2879 2880 scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev, 2881 sizeof(mega_sgl64) * adapter->sglen, 2882 &scb->sgl_dma_addr, GFP_KERNEL); 2883 2884 scb->sgl = (mega_sglist *)scb->sgl64; 2885 2886 if( !scb->sgl ) { 2887 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n"); 2888 mega_free_sgl(adapter); 2889 return -1; 2890 } 2891 2892 scb->pthru = dma_alloc_coherent(&adapter->dev->dev, 2893 sizeof(mega_passthru), 2894 &scb->pthru_dma_addr, GFP_KERNEL); 2895 2896 if( !scb->pthru ) { 2897 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n"); 2898 mega_free_sgl(adapter); 2899 return -1; 2900 } 2901 2902 scb->epthru = dma_alloc_coherent(&adapter->dev->dev, 2903 sizeof(mega_ext_passthru), 2904 &scb->epthru_dma_addr, GFP_KERNEL); 2905 2906 if( !scb->epthru ) { 2907 dev_warn(&adapter->dev->dev, 2908 "Can't allocate extended passthru\n"); 2909 mega_free_sgl(adapter); 2910 return -1; 2911 } 2912 2913 2914 scb->dma_type = MEGA_DMA_TYPE_NONE; 2915 2916 /* 2917 * Link to free list 2918 * lock not required since we are loading the driver, so no 2919 * commands possible right now. 2920 */ 2921 scb->state = SCB_FREE; 2922 scb->cmd = NULL; 2923 list_add(&scb->list, &adapter->free_list); 2924 } 2925 2926 return 0; 2927 } 2928 2929 2930 /** 2931 * megadev_open() 2932 * @inode: unused 2933 * @filep: unused 2934 * 2935 * Routines for the character/ioctl interface to the driver. Find out if this 2936 * is a valid open. 2937 */ 2938 static int 2939 megadev_open (struct inode *inode, struct file *filep) 2940 { 2941 /* 2942 * Only allow superuser to access private ioctl interface 2943 */ 2944 if( !capable(CAP_SYS_ADMIN) ) return -EACCES; 2945 2946 return 0; 2947 } 2948 2949 2950 /** 2951 * megadev_ioctl() 2952 * @filep: Our device file 2953 * @cmd: ioctl command 2954 * @arg: user buffer 2955 * 2956 * ioctl entry point for our private ioctl interface. We move the data in from 2957 * the user space, prepare the command (if necessary, convert the old MIMD 2958 * ioctl to new ioctl command), and issue a synchronous command to the 2959 * controller. 
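 *
 * In outline: mega_m_to_n() validates the request and converts an old MIMD
 * ioctl into the NIT form; GET_DRIVER_VER, GET_N_ADAP and GET_ADAP_INFO are
 * answered directly, while MBOX_CMD requests get their buffers from a
 * 32-bit-DMA-capable copy of the pci_dev (make_local_pdev()) and are run
 * synchronously through mega_internal_command(). Status is copied back to
 * user space by mega_n_to_m().
 *
 * Purely illustrative (unverified) sketch of the older MIMD call from user
 * space, assuming a device node exists for this driver's character major;
 * the exact ioctl command encoding and the uioctl_t field types are defined
 * in megaraid.h and are not shown here:
 *
 *	struct uioctl_t u = { 0 };
 *	unsigned int nadap = 0;
 *
 *	u.ui.fcs.opcode    = 0x82;		// MIMD "query" class
 *	u.ui.fcs.subopcode = MEGAIOC_QNADAP;	// number of adapters
 *	u.data = (caddr_t)&nadap;	// driver put_user()s the count here
 *	ioctl(fd, cmd, &u);	// cmd must satisfy _IOC_TYPE(cmd) == MEGAIOC_MAGIC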
 */
static int
megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	adapter_t *adapter;
	nitioctl_t uioc;
	int adapno;
	int rval;
	mega_passthru __user *upthru;	/* user address for passthru */
	mega_passthru *pthru;		/* copy user passthru here */
	dma_addr_t pthru_dma_hndl;
	void *data = NULL;		/* data to be transferred */
	dma_addr_t data_dma_hndl;	/* dma handle for data xfer area */
	megacmd_t mc;
#if MEGA_HAVE_STATS
	megastat_t __user *ustats = NULL;
	int num_ldrv = 0;
#endif
	u32 uxferaddr = 0;
	struct pci_dev *pdev;

	/*
	 * Make sure only USCSICMD commands are issued through this interface.
	 * A MIMD application would still fire a different command.
	 */
	if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
		return -EINVAL;
	}

	/*
	 * Check and convert a possible MIMD command to a NIT command.
	 * mega_m_to_n() copies the data from the user space, so we do not
	 * have to do it here.
	 * NOTE: We will need some user address to copy out the data, therefore
	 * the interface layer will also provide us with the required user
	 * addresses.
	 */
	memset(&uioc, 0, sizeof(nitioctl_t));
	if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
		return rval;


	switch( uioc.opcode ) {

	case GET_DRIVER_VER:
		if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
			return (-EFAULT);

		break;

	case GET_N_ADAP:
		if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
			return (-EFAULT);

		/*
		 * Shucks. The MIMD interface returns a positive value for the
		 * number of adapters. TODO: Change it to return 0 when there
		 * is no application using the mimd interface.
3018 */ 3019 return hba_count; 3020 3021 case GET_ADAP_INFO: 3022 3023 /* 3024 * Which adapter 3025 */ 3026 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3027 return (-ENODEV); 3028 3029 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno, 3030 sizeof(struct mcontroller)) ) 3031 return (-EFAULT); 3032 break; 3033 3034 #if MEGA_HAVE_STATS 3035 3036 case GET_STATS: 3037 /* 3038 * Which adapter 3039 */ 3040 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3041 return (-ENODEV); 3042 3043 adapter = hba_soft_state[adapno]; 3044 3045 ustats = uioc.uioc_uaddr; 3046 3047 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) ) 3048 return (-EFAULT); 3049 3050 /* 3051 * Check for the validity of the logical drive number 3052 */ 3053 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL; 3054 3055 if( copy_to_user(ustats->nreads, adapter->nreads, 3056 num_ldrv*sizeof(u32)) ) 3057 return -EFAULT; 3058 3059 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks, 3060 num_ldrv*sizeof(u32)) ) 3061 return -EFAULT; 3062 3063 if( copy_to_user(ustats->nwrites, adapter->nwrites, 3064 num_ldrv*sizeof(u32)) ) 3065 return -EFAULT; 3066 3067 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks, 3068 num_ldrv*sizeof(u32)) ) 3069 return -EFAULT; 3070 3071 if( copy_to_user(ustats->rd_errors, adapter->rd_errors, 3072 num_ldrv*sizeof(u32)) ) 3073 return -EFAULT; 3074 3075 if( copy_to_user(ustats->wr_errors, adapter->wr_errors, 3076 num_ldrv*sizeof(u32)) ) 3077 return -EFAULT; 3078 3079 return 0; 3080 3081 #endif 3082 case MBOX_CMD: 3083 3084 /* 3085 * Which adapter 3086 */ 3087 if( (adapno = GETADAP(uioc.adapno)) >= hba_count ) 3088 return (-ENODEV); 3089 3090 adapter = hba_soft_state[adapno]; 3091 3092 /* 3093 * Deletion of logical drive is a special case. The adapter 3094 * should be quiescent before this command is issued. 3095 */ 3096 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV && 3097 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) { 3098 3099 /* 3100 * Do we support this feature 3101 */ 3102 if( !adapter->support_random_del ) { 3103 dev_warn(&adapter->dev->dev, "logdrv " 3104 "delete on non-supporting F/W\n"); 3105 3106 return (-EINVAL); 3107 } 3108 3109 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] ); 3110 3111 if( rval == 0 ) { 3112 memset(&mc, 0, sizeof(megacmd_t)); 3113 3114 mc.status = rval; 3115 3116 rval = mega_n_to_m((void __user *)arg, &mc); 3117 } 3118 3119 return rval; 3120 } 3121 /* 3122 * This interface only support the regular passthru commands. 3123 * Reject extended passthru and 64-bit passthru 3124 */ 3125 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || 3126 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { 3127 3128 dev_warn(&adapter->dev->dev, "rejected passthru\n"); 3129 3130 return (-EINVAL); 3131 } 3132 3133 /* 3134 * For all internal commands, the buffer must be allocated in 3135 * <4GB address range 3136 */ 3137 if( make_local_pdev(adapter, &pdev) != 0 ) 3138 return -EIO; 3139 3140 /* Is it a passthru command or a DCMD */ 3141 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) { 3142 /* Passthru commands */ 3143 3144 pthru = dma_alloc_coherent(&pdev->dev, 3145 sizeof(mega_passthru), 3146 &pthru_dma_hndl, GFP_KERNEL); 3147 3148 if( pthru == NULL ) { 3149 free_local_pdev(pdev); 3150 return (-ENOMEM); 3151 } 3152 3153 /* 3154 * The user passthru structure 3155 */ 3156 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr; 3157 3158 /* 3159 * Copy in the user passthru here. 
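		 * The dataxferaddr field still holds the user buffer address
		 * at this point; it is saved in uxferaddr and replaced with
		 * the kernel DMA handle further down, before the command is
		 * issued.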
3160 */ 3161 if( copy_from_user(pthru, upthru, 3162 sizeof(mega_passthru)) ) { 3163 3164 dma_free_coherent(&pdev->dev, 3165 sizeof(mega_passthru), 3166 pthru, pthru_dma_hndl); 3167 3168 free_local_pdev(pdev); 3169 3170 return (-EFAULT); 3171 } 3172 3173 /* 3174 * Is there a data transfer 3175 */ 3176 if( pthru->dataxferlen ) { 3177 data = dma_alloc_coherent(&pdev->dev, 3178 pthru->dataxferlen, 3179 &data_dma_hndl, 3180 GFP_KERNEL); 3181 3182 if( data == NULL ) { 3183 dma_free_coherent(&pdev->dev, 3184 sizeof(mega_passthru), 3185 pthru, 3186 pthru_dma_hndl); 3187 3188 free_local_pdev(pdev); 3189 3190 return (-ENOMEM); 3191 } 3192 3193 /* 3194 * Save the user address and point the kernel 3195 * address at just allocated memory 3196 */ 3197 uxferaddr = pthru->dataxferaddr; 3198 pthru->dataxferaddr = data_dma_hndl; 3199 } 3200 3201 3202 /* 3203 * Is data coming down-stream 3204 */ 3205 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) { 3206 /* 3207 * Get the user data 3208 */ 3209 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3210 pthru->dataxferlen) ) { 3211 rval = (-EFAULT); 3212 goto freemem_and_return; 3213 } 3214 } 3215 3216 memset(&mc, 0, sizeof(megacmd_t)); 3217 3218 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 3219 mc.xferaddr = (u32)pthru_dma_hndl; 3220 3221 /* 3222 * Issue the command 3223 */ 3224 mega_internal_command(adapter, &mc, pthru); 3225 3226 rval = mega_n_to_m((void __user *)arg, &mc); 3227 3228 if( rval ) goto freemem_and_return; 3229 3230 3231 /* 3232 * Is data going up-stream 3233 */ 3234 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { 3235 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3236 pthru->dataxferlen) ) { 3237 rval = (-EFAULT); 3238 } 3239 } 3240 3241 /* 3242 * Send the request sense data also, irrespective of 3243 * whether the user has asked for it or not. 
3244 */ 3245 if (copy_to_user(upthru->reqsensearea, 3246 pthru->reqsensearea, 14)) 3247 rval = -EFAULT; 3248 3249 freemem_and_return: 3250 if( pthru->dataxferlen ) { 3251 dma_free_coherent(&pdev->dev, 3252 pthru->dataxferlen, data, 3253 data_dma_hndl); 3254 } 3255 3256 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), 3257 pthru, pthru_dma_hndl); 3258 3259 free_local_pdev(pdev); 3260 3261 return rval; 3262 } 3263 else { 3264 /* DCMD commands */ 3265 3266 /* 3267 * Is there a data transfer 3268 */ 3269 if( uioc.xferlen ) { 3270 data = dma_alloc_coherent(&pdev->dev, 3271 uioc.xferlen, 3272 &data_dma_hndl, 3273 GFP_KERNEL); 3274 3275 if( data == NULL ) { 3276 free_local_pdev(pdev); 3277 return (-ENOMEM); 3278 } 3279 3280 uxferaddr = MBOX(uioc)->xferaddr; 3281 } 3282 3283 /* 3284 * Is data coming down-stream 3285 */ 3286 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) { 3287 /* 3288 * Get the user data 3289 */ 3290 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr, 3291 uioc.xferlen) ) { 3292 3293 dma_free_coherent(&pdev->dev, 3294 uioc.xferlen, data, 3295 data_dma_hndl); 3296 3297 free_local_pdev(pdev); 3298 3299 return (-EFAULT); 3300 } 3301 } 3302 3303 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t)); 3304 3305 mc.xferaddr = (u32)data_dma_hndl; 3306 3307 /* 3308 * Issue the command 3309 */ 3310 mega_internal_command(adapter, &mc, NULL); 3311 3312 rval = mega_n_to_m((void __user *)arg, &mc); 3313 3314 if( rval ) { 3315 if( uioc.xferlen ) { 3316 dma_free_coherent(&pdev->dev, 3317 uioc.xferlen, data, 3318 data_dma_hndl); 3319 } 3320 3321 free_local_pdev(pdev); 3322 3323 return rval; 3324 } 3325 3326 /* 3327 * Is data going up-stream 3328 */ 3329 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { 3330 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data, 3331 uioc.xferlen) ) { 3332 3333 rval = (-EFAULT); 3334 } 3335 } 3336 3337 if( uioc.xferlen ) { 3338 dma_free_coherent(&pdev->dev, uioc.xferlen, 3339 data, data_dma_hndl); 3340 } 3341 3342 free_local_pdev(pdev); 3343 3344 return rval; 3345 } 3346 3347 default: 3348 return (-EINVAL); 3349 } 3350 3351 return 0; 3352 } 3353 3354 static long 3355 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 3356 { 3357 int ret; 3358 3359 mutex_lock(&megadev_mutex); 3360 ret = megadev_ioctl(filep, cmd, arg); 3361 mutex_unlock(&megadev_mutex); 3362 3363 return ret; 3364 } 3365 3366 /** 3367 * mega_m_to_n() 3368 * @arg: user address 3369 * @uioc: new ioctl structure 3370 * 3371 * A thin layer to convert older mimd interface ioctl structure to NIT ioctl 3372 * structure 3373 * 3374 * Converts the older mimd ioctl structure to newer NIT structure 3375 */ 3376 static int 3377 mega_m_to_n(void __user *arg, nitioctl_t *uioc) 3378 { 3379 struct uioctl_t uioc_mimd; 3380 char signature[8] = {0}; 3381 u8 opcode; 3382 u8 subopcode; 3383 3384 3385 /* 3386 * check is the application conforms to NIT. We do not have to do much 3387 * in that case. 3388 * We exploit the fact that the signature is stored in the very 3389 * beginning of the structure. 3390 */ 3391 3392 if( copy_from_user(signature, arg, 7) ) 3393 return (-EFAULT); 3394 3395 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3396 3397 /* 3398 * NOTE NOTE: The nit ioctl is still under flux because of 3399 * change of mailbox definition, in HPE. No applications yet 3400 * use this interface and let's not have applications use this 3401 * interface till the new specifitions are in place. 
3402 */ 3403 return -EINVAL; 3404 #if 0 3405 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) ) 3406 return (-EFAULT); 3407 return 0; 3408 #endif 3409 } 3410 3411 /* 3412 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t 3413 * 3414 * Get the user ioctl structure 3415 */ 3416 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) ) 3417 return (-EFAULT); 3418 3419 3420 /* 3421 * Get the opcode and subopcode for the commands 3422 */ 3423 opcode = uioc_mimd.ui.fcs.opcode; 3424 subopcode = uioc_mimd.ui.fcs.subopcode; 3425 3426 switch (opcode) { 3427 case 0x82: 3428 3429 switch (subopcode) { 3430 3431 case MEGAIOC_QDRVRVER: /* Query driver version */ 3432 uioc->opcode = GET_DRIVER_VER; 3433 uioc->uioc_uaddr = uioc_mimd.data; 3434 break; 3435 3436 case MEGAIOC_QNADAP: /* Get # of adapters */ 3437 uioc->opcode = GET_N_ADAP; 3438 uioc->uioc_uaddr = uioc_mimd.data; 3439 break; 3440 3441 case MEGAIOC_QADAPINFO: /* Get adapter information */ 3442 uioc->opcode = GET_ADAP_INFO; 3443 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3444 uioc->uioc_uaddr = uioc_mimd.data; 3445 break; 3446 3447 default: 3448 return(-EINVAL); 3449 } 3450 3451 break; 3452 3453 3454 case 0x81: 3455 3456 uioc->opcode = MBOX_CMD; 3457 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3458 3459 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3460 3461 uioc->xferlen = uioc_mimd.ui.fcs.length; 3462 3463 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3464 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3465 3466 break; 3467 3468 case 0x80: 3469 3470 uioc->opcode = MBOX_CMD; 3471 uioc->adapno = uioc_mimd.ui.fcs.adapno; 3472 3473 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18); 3474 3475 /* 3476 * Choose the xferlen bigger of input and output data 3477 */ 3478 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ? 3479 uioc_mimd.outlen : uioc_mimd.inlen; 3480 3481 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD; 3482 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR; 3483 3484 break; 3485 3486 default: 3487 return (-EINVAL); 3488 3489 } 3490 3491 return 0; 3492 } 3493 3494 /* 3495 * mega_n_to_m() 3496 * @arg: user address 3497 * @mc: mailbox command 3498 * 3499 * Updates the status information to the application, depending on application 3500 * conforms to older mimd ioctl interface or newer NIT ioctl interface 3501 */ 3502 static int 3503 mega_n_to_m(void __user *arg, megacmd_t *mc) 3504 { 3505 nitioctl_t __user *uiocp; 3506 megacmd_t __user *umc; 3507 mega_passthru __user *upthru; 3508 struct uioctl_t __user *uioc_mimd; 3509 char signature[8] = {0}; 3510 3511 /* 3512 * check is the application conforms to NIT. 
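	 * Only status flows back this way: the mailbox status byte, plus the
	 * SCSI status byte in the user's mega_passthru for passthru commands.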
3513 */ 3514 if( copy_from_user(signature, arg, 7) ) 3515 return -EFAULT; 3516 3517 if( memcmp(signature, "MEGANIT", 7) == 0 ) { 3518 3519 uiocp = arg; 3520 3521 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) ) 3522 return (-EFAULT); 3523 3524 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3525 3526 umc = MBOX_P(uiocp); 3527 3528 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3529 return -EFAULT; 3530 3531 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus)) 3532 return (-EFAULT); 3533 } 3534 } 3535 else { 3536 uioc_mimd = arg; 3537 3538 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) ) 3539 return (-EFAULT); 3540 3541 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 3542 3543 umc = (megacmd_t __user *)uioc_mimd->mbox; 3544 3545 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr)) 3546 return (-EFAULT); 3547 3548 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) ) 3549 return (-EFAULT); 3550 } 3551 } 3552 3553 return 0; 3554 } 3555 3556 3557 /* 3558 * MEGARAID 'FW' commands. 3559 */ 3560 3561 /** 3562 * mega_is_bios_enabled() 3563 * @adapter: pointer to our soft state 3564 * 3565 * issue command to find out if the BIOS is enabled for this controller 3566 */ 3567 static int 3568 mega_is_bios_enabled(adapter_t *adapter) 3569 { 3570 struct mbox_out mbox; 3571 unsigned char *raw_mbox = (u8 *)&mbox; 3572 3573 memset(&mbox, 0, sizeof(mbox)); 3574 3575 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3576 3577 mbox.xferaddr = (u32)adapter->buf_dma_handle; 3578 3579 raw_mbox[0] = IS_BIOS_ENABLED; 3580 raw_mbox[2] = GET_BIOS; 3581 3582 issue_scb_block(adapter, raw_mbox); 3583 3584 return *(char *)adapter->mega_buffer; 3585 } 3586 3587 3588 /** 3589 * mega_enum_raid_scsi() 3590 * @adapter: pointer to our soft state 3591 * 3592 * Find out what channels are RAID/SCSI. This information is used to 3593 * differentiate the virtual channels and physical channels and to support 3594 * ROMB feature and non-disk devices. 3595 */ 3596 static void 3597 mega_enum_raid_scsi(adapter_t *adapter) 3598 { 3599 struct mbox_out mbox; 3600 unsigned char *raw_mbox = (u8 *)&mbox; 3601 int i; 3602 3603 memset(&mbox, 0, sizeof(mbox)); 3604 3605 /* 3606 * issue command to find out what channels are raid/scsi 3607 */ 3608 raw_mbox[0] = CHNL_CLASS; 3609 raw_mbox[2] = GET_CHNL_CLASS; 3610 3611 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3612 3613 mbox.xferaddr = (u32)adapter->buf_dma_handle; 3614 3615 /* 3616 * Non-ROMB firmware fail this command, so all channels 3617 * must be shown RAID 3618 */ 3619 adapter->mega_ch_class = 0xFF; 3620 3621 if(!issue_scb_block(adapter, raw_mbox)) { 3622 adapter->mega_ch_class = *((char *)adapter->mega_buffer); 3623 3624 } 3625 3626 for( i = 0; i < adapter->product_info.nchannels; i++ ) { 3627 if( (adapter->mega_ch_class >> i) & 0x01 ) { 3628 dev_info(&adapter->dev->dev, "channel[%d] is raid\n", 3629 i); 3630 } 3631 else { 3632 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n", 3633 i); 3634 } 3635 } 3636 3637 return; 3638 } 3639 3640 3641 /** 3642 * mega_get_boot_drv() 3643 * @adapter: pointer to our soft state 3644 * 3645 * Find out which device is the boot device. Note, any logical drive or any 3646 * phyical device (e.g., a CDROM) can be designated as a boot device. 
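 *
 * The private BIOS data block is validated with a simple byte checksum;
 * if the MSB of boot_drv is set, the boot device is a physical drive
 * encoded as (channel * 16 + target), otherwise boot_drv holds the boot
 * logical drive number.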
3647 */ 3648 static void 3649 mega_get_boot_drv(adapter_t *adapter) 3650 { 3651 struct private_bios_data *prv_bios_data; 3652 struct mbox_out mbox; 3653 unsigned char *raw_mbox = (u8 *)&mbox; 3654 u16 cksum = 0; 3655 u8 *cksum_p; 3656 u8 boot_pdrv; 3657 int i; 3658 3659 memset(&mbox, 0, sizeof(mbox)); 3660 3661 raw_mbox[0] = BIOS_PVT_DATA; 3662 raw_mbox[2] = GET_BIOS_PVT_DATA; 3663 3664 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3665 3666 mbox.xferaddr = (u32)adapter->buf_dma_handle; 3667 3668 adapter->boot_ldrv_enabled = 0; 3669 adapter->boot_ldrv = 0; 3670 3671 adapter->boot_pdrv_enabled = 0; 3672 adapter->boot_pdrv_ch = 0; 3673 adapter->boot_pdrv_tgt = 0; 3674 3675 if(issue_scb_block(adapter, raw_mbox) == 0) { 3676 prv_bios_data = 3677 (struct private_bios_data *)adapter->mega_buffer; 3678 3679 cksum = 0; 3680 cksum_p = (char *)prv_bios_data; 3681 for (i = 0; i < 14; i++ ) { 3682 cksum += (u16)(*cksum_p++); 3683 } 3684 3685 if (prv_bios_data->cksum == (u16)(0-cksum) ) { 3686 3687 /* 3688 * If MSB is set, a physical drive is set as boot 3689 * device 3690 */ 3691 if( prv_bios_data->boot_drv & 0x80 ) { 3692 adapter->boot_pdrv_enabled = 1; 3693 boot_pdrv = prv_bios_data->boot_drv & 0x7F; 3694 adapter->boot_pdrv_ch = boot_pdrv / 16; 3695 adapter->boot_pdrv_tgt = boot_pdrv % 16; 3696 } 3697 else { 3698 adapter->boot_ldrv_enabled = 1; 3699 adapter->boot_ldrv = prv_bios_data->boot_drv; 3700 } 3701 } 3702 } 3703 3704 } 3705 3706 /** 3707 * mega_support_random_del() 3708 * @adapter: pointer to our soft state 3709 * 3710 * Find out if this controller supports random deletion and addition of 3711 * logical drives 3712 */ 3713 static int 3714 mega_support_random_del(adapter_t *adapter) 3715 { 3716 struct mbox_out mbox; 3717 unsigned char *raw_mbox = (u8 *)&mbox; 3718 int rval; 3719 3720 memset(&mbox, 0, sizeof(mbox)); 3721 3722 /* 3723 * issue command 3724 */ 3725 raw_mbox[0] = FC_DEL_LOGDRV; 3726 raw_mbox[2] = OP_SUP_DEL_LOGDRV; 3727 3728 rval = issue_scb_block(adapter, raw_mbox); 3729 3730 return !rval; 3731 } 3732 3733 3734 /** 3735 * mega_support_ext_cdb() 3736 * @adapter: pointer to our soft state 3737 * 3738 * Find out if this firmware support cdblen > 10 3739 */ 3740 static int 3741 mega_support_ext_cdb(adapter_t *adapter) 3742 { 3743 struct mbox_out mbox; 3744 unsigned char *raw_mbox = (u8 *)&mbox; 3745 int rval; 3746 3747 memset(&mbox, 0, sizeof(mbox)); 3748 /* 3749 * issue command to find out if controller supports extended CDBs. 3750 */ 3751 raw_mbox[0] = 0xA4; 3752 raw_mbox[2] = 0x16; 3753 3754 rval = issue_scb_block(adapter, raw_mbox); 3755 3756 return !rval; 3757 } 3758 3759 3760 /** 3761 * mega_del_logdrv() 3762 * @adapter: pointer to our soft state 3763 * @logdrv: logical drive to be deleted 3764 * 3765 * Delete the specified logical drive. It is the responsibility of the user 3766 * app to let the OS know about this operation. 3767 */ 3768 static int 3769 mega_del_logdrv(adapter_t *adapter, int logdrv) 3770 { 3771 unsigned long flags; 3772 scb_t *scb; 3773 int rval; 3774 3775 /* 3776 * Stop sending commands to the controller, queue them internally. 3777 * When deletion is complete, ISR will flush the queue. 
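	 * While the adapter is quiescent, new requests collect on
	 * pending_list; once mega_do_del_logdrv() returns, queued logical
	 * drive ids are re-mapped (0x80 added) if the delete succeeded, and
	 * the queue is replayed via mega_runpendq().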
3778 */ 3779 atomic_set(&adapter->quiescent, 1); 3780 3781 /* 3782 * Wait till all the issued commands are complete and there are no 3783 * commands in the pending queue 3784 */ 3785 while (atomic_read(&adapter->pend_cmds) > 0 || 3786 !list_empty(&adapter->pending_list)) 3787 msleep(1000); /* sleep for 1s */ 3788 3789 rval = mega_do_del_logdrv(adapter, logdrv); 3790 3791 spin_lock_irqsave(&adapter->lock, flags); 3792 3793 /* 3794 * If delete operation was successful, add 0x80 to the logical drive 3795 * ids for commands in the pending queue. 3796 */ 3797 if (adapter->read_ldidmap) { 3798 struct list_head *pos; 3799 list_for_each(pos, &adapter->pending_list) { 3800 scb = list_entry(pos, scb_t, list); 3801 if (scb->pthru->logdrv < 0x80 ) 3802 scb->pthru->logdrv += 0x80; 3803 } 3804 } 3805 3806 atomic_set(&adapter->quiescent, 0); 3807 3808 mega_runpendq(adapter); 3809 3810 spin_unlock_irqrestore(&adapter->lock, flags); 3811 3812 return rval; 3813 } 3814 3815 3816 static int 3817 mega_do_del_logdrv(adapter_t *adapter, int logdrv) 3818 { 3819 megacmd_t mc; 3820 int rval; 3821 3822 memset( &mc, 0, sizeof(megacmd_t)); 3823 3824 mc.cmd = FC_DEL_LOGDRV; 3825 mc.opcode = OP_DEL_LOGDRV; 3826 mc.subopcode = logdrv; 3827 3828 rval = mega_internal_command(adapter, &mc, NULL); 3829 3830 /* log this event */ 3831 if(rval) { 3832 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv); 3833 return rval; 3834 } 3835 3836 /* 3837 * After deleting first logical drive, the logical drives must be 3838 * addressed by adding 0x80 to the logical drive id. 3839 */ 3840 adapter->read_ldidmap = 1; 3841 3842 return rval; 3843 } 3844 3845 3846 /** 3847 * mega_get_max_sgl() 3848 * @adapter: pointer to our soft state 3849 * 3850 * Find out the maximum number of scatter-gather elements supported by this 3851 * version of the firmware 3852 */ 3853 static void 3854 mega_get_max_sgl(adapter_t *adapter) 3855 { 3856 struct mbox_out mbox; 3857 unsigned char *raw_mbox = (u8 *)&mbox; 3858 3859 memset(&mbox, 0, sizeof(mbox)); 3860 3861 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3862 3863 mbox.xferaddr = (u32)adapter->buf_dma_handle; 3864 3865 raw_mbox[0] = MAIN_MISC_OPCODE; 3866 raw_mbox[2] = GET_MAX_SG_SUPPORT; 3867 3868 3869 if( issue_scb_block(adapter, raw_mbox) ) { 3870 /* 3871 * f/w does not support this command. Choose the default value 3872 */ 3873 adapter->sglen = MIN_SGLIST; 3874 } 3875 else { 3876 adapter->sglen = *((char *)adapter->mega_buffer); 3877 3878 /* 3879 * Make sure this is not more than the resources we are 3880 * planning to allocate 3881 */ 3882 if ( adapter->sglen > MAX_SGLIST ) 3883 adapter->sglen = MAX_SGLIST; 3884 } 3885 3886 return; 3887 } 3888 3889 3890 /** 3891 * mega_support_cluster() 3892 * @adapter: pointer to our soft state 3893 * 3894 * Find out if this firmware support cluster calls. 3895 */ 3896 static int 3897 mega_support_cluster(adapter_t *adapter) 3898 { 3899 struct mbox_out mbox; 3900 unsigned char *raw_mbox = (u8 *)&mbox; 3901 3902 memset(&mbox, 0, sizeof(mbox)); 3903 3904 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE); 3905 3906 mbox.xferaddr = (u32)adapter->buf_dma_handle; 3907 3908 /* 3909 * Try to get the initiator id. This command will succeed iff the 3910 * clustering is available on this HBA. 3911 */ 3912 raw_mbox[0] = MEGA_GET_TARGET_ID; 3913 3914 if( issue_scb_block(adapter, raw_mbox) == 0 ) { 3915 3916 /* 3917 * Cluster support available. Get the initiator target id. 3918 * Tell our id to mid-layer too. 
3919 */ 3920 adapter->this_id = *(u32 *)adapter->mega_buffer; 3921 adapter->host->this_id = adapter->this_id; 3922 3923 return 1; 3924 } 3925 3926 return 0; 3927 } 3928 3929 #ifdef CONFIG_PROC_FS 3930 /** 3931 * mega_adapinq() 3932 * @adapter: pointer to our soft state 3933 * @dma_handle: DMA address of the buffer 3934 * 3935 * Issue internal commands while interrupts are available. 3936 * We only issue direct mailbox commands from within the driver. ioctl() 3937 * interface using these routines can issue passthru commands. 3938 */ 3939 static int 3940 mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle) 3941 { 3942 megacmd_t mc; 3943 3944 memset(&mc, 0, sizeof(megacmd_t)); 3945 3946 if( adapter->flag & BOARD_40LD ) { 3947 mc.cmd = FC_NEW_CONFIG; 3948 mc.opcode = NC_SUBOP_ENQUIRY3; 3949 mc.subopcode = ENQ3_GET_SOLICITED_FULL; 3950 } 3951 else { 3952 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ; 3953 } 3954 3955 mc.xferaddr = (u32)dma_handle; 3956 3957 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) { 3958 return -1; 3959 } 3960 3961 return 0; 3962 } 3963 3964 3965 /** 3966 * mega_internal_dev_inquiry() 3967 * @adapter: pointer to our soft state 3968 * @ch: channel for this device 3969 * @tgt: ID of this device 3970 * @buf_dma_handle: DMA address of the buffer 3971 * 3972 * Issue the scsi inquiry for the specified device. 3973 */ 3974 static int 3975 mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, 3976 dma_addr_t buf_dma_handle) 3977 { 3978 mega_passthru *pthru; 3979 dma_addr_t pthru_dma_handle; 3980 megacmd_t mc; 3981 int rval; 3982 struct pci_dev *pdev; 3983 3984 3985 /* 3986 * For all internal commands, the buffer must be allocated in <4GB 3987 * address range 3988 */ 3989 if( make_local_pdev(adapter, &pdev) != 0 ) return -1; 3990 3991 pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru), 3992 &pthru_dma_handle, GFP_KERNEL); 3993 3994 if( pthru == NULL ) { 3995 free_local_pdev(pdev); 3996 return -1; 3997 } 3998 3999 pthru->timeout = 2; 4000 pthru->ars = 1; 4001 pthru->reqsenselen = 14; 4002 pthru->islogical = 0; 4003 4004 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch; 4005 4006 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt; 4007 4008 pthru->cdblen = 6; 4009 4010 pthru->cdb[0] = INQUIRY; 4011 pthru->cdb[1] = 0; 4012 pthru->cdb[2] = 0; 4013 pthru->cdb[3] = 0; 4014 pthru->cdb[4] = 255; 4015 pthru->cdb[5] = 0; 4016 4017 4018 pthru->dataxferaddr = (u32)buf_dma_handle; 4019 pthru->dataxferlen = 256; 4020 4021 memset(&mc, 0, sizeof(megacmd_t)); 4022 4023 mc.cmd = MEGA_MBOXCMD_PASSTHRU; 4024 mc.xferaddr = (u32)pthru_dma_handle; 4025 4026 rval = mega_internal_command(adapter, &mc, pthru); 4027 4028 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru, 4029 pthru_dma_handle); 4030 4031 free_local_pdev(pdev); 4032 4033 return rval; 4034 } 4035 #endif 4036 4037 /** 4038 * mega_internal_command() 4039 * @adapter: pointer to our soft state 4040 * @mc: the mailbox command 4041 * @pthru: Passthru structure for DCDB commands 4042 * 4043 * Issue the internal commands in interrupt mode. 4044 * The last argument is the address of the passthru structure if the command 4045 * to be fired is a passthru command 4046 * 4047 * Note: parameter 'pthru' is null for non-passthru commands. 4048 */ 4049 static int 4050 mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) 4051 { 4052 unsigned long flags; 4053 scb_t *scb; 4054 int rval; 4055 4056 /* 4057 * The internal commands share one command id and hence are 4058 * serialized. 
This is so because we want to reserve maximum number of 4059 * available command ids for the I/O commands. 4060 */ 4061 mutex_lock(&adapter->int_mtx); 4062 4063 scb = &adapter->int_scb; 4064 memset(scb, 0, sizeof(scb_t)); 4065 4066 scb->idx = CMDID_INT_CMDS; 4067 scb->state |= SCB_ACTIVE | SCB_PENDQ; 4068 4069 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t)); 4070 4071 /* 4072 * Is it a passthru command 4073 */ 4074 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU) 4075 scb->pthru = pthru; 4076 4077 spin_lock_irqsave(&adapter->lock, flags); 4078 list_add_tail(&scb->list, &adapter->pending_list); 4079 /* 4080 * Check if the HBA is in quiescent state, e.g., during a 4081 * delete logical drive opertion. If it is, don't run 4082 * the pending_list. 4083 */ 4084 if (atomic_read(&adapter->quiescent) == 0) 4085 mega_runpendq(adapter); 4086 spin_unlock_irqrestore(&adapter->lock, flags); 4087 4088 wait_for_completion(&adapter->int_waitq); 4089 4090 mc->status = rval = adapter->int_status; 4091 4092 /* 4093 * Print a debug message for all failed commands. Applications can use 4094 * this information. 4095 */ 4096 if (rval && trace_level) { 4097 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n", 4098 mc->cmd, mc->opcode, mc->subopcode, rval); 4099 } 4100 4101 mutex_unlock(&adapter->int_mtx); 4102 return rval; 4103 } 4104 4105 static struct scsi_host_template megaraid_template = { 4106 .module = THIS_MODULE, 4107 .name = "MegaRAID", 4108 .proc_name = "megaraid_legacy", 4109 .info = megaraid_info, 4110 .queuecommand = megaraid_queue, 4111 .bios_param = megaraid_biosparam, 4112 .max_sectors = MAX_SECTORS_PER_IO, 4113 .can_queue = MAX_COMMANDS, 4114 .this_id = DEFAULT_INITIATOR_ID, 4115 .sg_tablesize = MAX_SGLIST, 4116 .cmd_per_lun = DEF_CMD_PER_LUN, 4117 .eh_abort_handler = megaraid_abort, 4118 .eh_device_reset_handler = megaraid_reset, 4119 .eh_bus_reset_handler = megaraid_reset, 4120 .eh_host_reset_handler = megaraid_reset, 4121 .no_write_same = 1, 4122 }; 4123 4124 static int 4125 megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 4126 { 4127 struct Scsi_Host *host; 4128 adapter_t *adapter; 4129 unsigned long mega_baseport, tbase, flag = 0; 4130 u16 subsysid, subsysvid; 4131 u8 pci_bus, pci_dev_func; 4132 int irq, i, j; 4133 int error = -ENODEV; 4134 4135 if (hba_count >= MAX_CONTROLLERS) 4136 goto out; 4137 4138 if (pci_enable_device(pdev)) 4139 goto out; 4140 pci_set_master(pdev); 4141 4142 pci_bus = pdev->bus->number; 4143 pci_dev_func = pdev->devfn; 4144 4145 /* 4146 * The megaraid3 stuff reports the ID of the Intel part which is not 4147 * remotely specific to the megaraid 4148 */ 4149 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 4150 u16 magic; 4151 /* 4152 * Don't fall over the Compaq management cards using the same 4153 * PCI identifier 4154 */ 4155 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && 4156 pdev->subsystem_device == 0xC000) 4157 goto out_disable_device; 4158 /* Now check the magic signature byte */ 4159 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); 4160 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) 4161 goto out_disable_device; 4162 /* Ok it is probably a megaraid */ 4163 } 4164 4165 /* 4166 * For these vendor and device ids, signature offsets are not 4167 * valid and 64 bit is implicit 4168 */ 4169 if (id->driver_data & BOARD_64BIT) 4170 flag |= BOARD_64BIT; 4171 else { 4172 u32 magic64; 4173 4174 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64); 4175 if (magic64 == HBA_SIGNATURE_64BIT) 4176 flag |= BOARD_64BIT; 4177 } 4178 4179 
subsysvid = pdev->subsystem_vendor; 4180 subsysid = pdev->subsystem_device; 4181 4182 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n", 4183 id->vendor, id->device); 4184 4185 /* Read the base port and IRQ from PCI */ 4186 mega_baseport = pci_resource_start(pdev, 0); 4187 irq = pdev->irq; 4188 4189 tbase = mega_baseport; 4190 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) { 4191 flag |= BOARD_MEMMAP; 4192 4193 if (!request_mem_region(mega_baseport, 128, "megaraid")) { 4194 dev_warn(&pdev->dev, "mem region busy!\n"); 4195 goto out_disable_device; 4196 } 4197 4198 mega_baseport = (unsigned long)ioremap(mega_baseport, 128); 4199 if (!mega_baseport) { 4200 dev_warn(&pdev->dev, "could not map hba memory\n"); 4201 goto out_release_region; 4202 } 4203 } else { 4204 flag |= BOARD_IOMAP; 4205 mega_baseport += 0x10; 4206 4207 if (!request_region(mega_baseport, 16, "megaraid")) 4208 goto out_disable_device; 4209 } 4210 4211 /* Initialize SCSI Host structure */ 4212 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t)); 4213 if (!host) 4214 goto out_iounmap; 4215 4216 adapter = (adapter_t *)host->hostdata; 4217 memset(adapter, 0, sizeof(adapter_t)); 4218 4219 dev_notice(&pdev->dev, 4220 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", 4221 host->host_no, mega_baseport, irq); 4222 4223 adapter->base = mega_baseport; 4224 if (flag & BOARD_MEMMAP) 4225 adapter->mmio_base = (void __iomem *) mega_baseport; 4226 4227 INIT_LIST_HEAD(&adapter->free_list); 4228 INIT_LIST_HEAD(&adapter->pending_list); 4229 INIT_LIST_HEAD(&adapter->completed_list); 4230 4231 adapter->flag = flag; 4232 spin_lock_init(&adapter->lock); 4233 4234 host->cmd_per_lun = max_cmd_per_lun; 4235 host->max_sectors = max_sectors_per_io; 4236 4237 adapter->dev = pdev; 4238 adapter->host = host; 4239 4240 adapter->host->irq = irq; 4241 4242 if (flag & BOARD_MEMMAP) 4243 adapter->host->base = tbase; 4244 else { 4245 adapter->host->io_port = tbase; 4246 adapter->host->n_io_port = 16; 4247 } 4248 4249 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func; 4250 4251 /* 4252 * Allocate buffer to issue internal commands. 4253 */ 4254 adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev, 4255 MEGA_BUFFER_SIZE, 4256 &adapter->buf_dma_handle, 4257 GFP_KERNEL); 4258 if (!adapter->mega_buffer) { 4259 dev_warn(&pdev->dev, "out of RAM\n"); 4260 goto out_host_put; 4261 } 4262 4263 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t), 4264 GFP_KERNEL); 4265 if (!adapter->scb_list) { 4266 dev_warn(&pdev->dev, "out of RAM\n"); 4267 goto out_free_cmd_buffer; 4268 } 4269 4270 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ? 4271 megaraid_isr_memmapped : megaraid_isr_iomapped, 4272 IRQF_SHARED, "megaraid", adapter)) { 4273 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq); 4274 goto out_free_scb_list; 4275 } 4276 4277 if (mega_setup_mailbox(adapter)) 4278 goto out_free_irq; 4279 4280 if (mega_query_adapter(adapter)) 4281 goto out_free_mbox; 4282 4283 /* 4284 * Have checks for some buggy f/w 4285 */ 4286 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) { 4287 /* 4288 * Which firmware 4289 */ 4290 if (!strcmp(adapter->fw_version, "3.00") || 4291 !strcmp(adapter->fw_version, "3.01")) { 4292 4293 dev_warn(&pdev->dev, 4294 "Your card is a Dell PERC " 4295 "2/SC RAID controller with " 4296 "firmware\nmegaraid: 3.00 or 3.01. " 4297 "This driver is known to have " 4298 "corruption issues\nmegaraid: with " 4299 "those firmware versions on this " 4300 "specific card. 
In order\nmegaraid: " 4301 "to protect your data, please upgrade " 4302 "your firmware to version\nmegaraid: " 4303 "3.10 or later, available from the " 4304 "Dell Technical Support web\n" 4305 "megaraid: site at\nhttp://support." 4306 "dell.com/us/en/filelib/download/" 4307 "index.asp?fileid=2940\n" 4308 ); 4309 } 4310 } 4311 4312 /* 4313 * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with 4314 * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit 4315 * support, since this firmware cannot handle 64 bit 4316 * addressing 4317 */ 4318 if ((subsysvid == PCI_VENDOR_ID_HP) && 4319 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) { 4320 /* 4321 * which firmware 4322 */ 4323 if (!strcmp(adapter->fw_version, "H01.07") || 4324 !strcmp(adapter->fw_version, "H01.08") || 4325 !strcmp(adapter->fw_version, "H01.09") ) { 4326 dev_warn(&pdev->dev, 4327 "Firmware H.01.07, " 4328 "H.01.08, and H.01.09 on 1M/2M " 4329 "controllers\n" 4330 "do not support 64 bit " 4331 "addressing.\nDISABLING " 4332 "64 bit support.\n"); 4333 adapter->flag &= ~BOARD_64BIT; 4334 } 4335 } 4336 4337 if (mega_is_bios_enabled(adapter)) 4338 mega_hbas[hba_count].is_bios_enabled = 1; 4339 mega_hbas[hba_count].hostdata_addr = adapter; 4340 4341 /* 4342 * Find out which channel is raid and which is scsi. This is 4343 * for ROMB support. 4344 */ 4345 mega_enum_raid_scsi(adapter); 4346 4347 /* 4348 * Find out if a logical drive is set as the boot drive. If 4349 * there is one, will make that as the first logical drive. 4350 * ROMB: Do we have to boot from a physical drive. Then all 4351 * the physical drives would appear before the logical disks. 4352 * Else, all the physical drives would be exported to the mid 4353 * layer after logical drives. 4354 */ 4355 mega_get_boot_drv(adapter); 4356 4357 if (adapter->boot_pdrv_enabled) { 4358 j = adapter->product_info.nchannels; 4359 for( i = 0; i < j; i++ ) 4360 adapter->logdrv_chan[i] = 0; 4361 for( i = j; i < NVIRT_CHAN + j; i++ ) 4362 adapter->logdrv_chan[i] = 1; 4363 } else { 4364 for (i = 0; i < NVIRT_CHAN; i++) 4365 adapter->logdrv_chan[i] = 1; 4366 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++) 4367 adapter->logdrv_chan[i] = 0; 4368 adapter->mega_ch_class <<= NVIRT_CHAN; 4369 } 4370 4371 /* 4372 * Do we support random deletion and addition of logical 4373 * drives 4374 */ 4375 adapter->read_ldidmap = 0; /* set it after first logdrv 4376 delete cmd */ 4377 adapter->support_random_del = mega_support_random_del(adapter); 4378 4379 /* Initialize SCBs */ 4380 if (mega_init_scb(adapter)) 4381 goto out_free_mbox; 4382 4383 /* 4384 * Reset the pending commands counter 4385 */ 4386 atomic_set(&adapter->pend_cmds, 0); 4387 4388 /* 4389 * Reset the adapter quiescent flag 4390 */ 4391 atomic_set(&adapter->quiescent, 0); 4392 4393 hba_soft_state[hba_count] = adapter; 4394 4395 /* 4396 * Fill in the structure which needs to be passed back to the 4397 * application when it does an ioctl() for controller related 4398 * information. 
4399 */ 4400 i = hba_count; 4401 4402 mcontroller[i].base = mega_baseport; 4403 mcontroller[i].irq = irq; 4404 mcontroller[i].numldrv = adapter->numldrv; 4405 mcontroller[i].pcibus = pci_bus; 4406 mcontroller[i].pcidev = id->device; 4407 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func); 4408 mcontroller[i].pciid = -1; 4409 mcontroller[i].pcivendor = id->vendor; 4410 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func); 4411 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func; 4412 4413 4414 /* Set the Mode of addressing to 64 bit if we can */ 4415 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { 4416 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 4417 adapter->has_64bit_addr = 1; 4418 } else { 4419 dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4420 adapter->has_64bit_addr = 0; 4421 } 4422 4423 mutex_init(&adapter->int_mtx); 4424 init_completion(&adapter->int_waitq); 4425 4426 adapter->this_id = DEFAULT_INITIATOR_ID; 4427 adapter->host->this_id = DEFAULT_INITIATOR_ID; 4428 4429 #if MEGA_HAVE_CLUSTERING 4430 /* 4431 * Is cluster support enabled on this controller 4432 * Note: In a cluster the HBAs ( the initiators ) will have 4433 * different target IDs and we cannot assume it to be 7. Call 4434 * to mega_support_cluster() will get the target ids also if 4435 * the cluster support is available 4436 */ 4437 adapter->has_cluster = mega_support_cluster(adapter); 4438 if (adapter->has_cluster) { 4439 dev_notice(&pdev->dev, 4440 "Cluster driver, initiator id:%d\n", 4441 adapter->this_id); 4442 } 4443 #endif 4444 4445 pci_set_drvdata(pdev, host); 4446 4447 mega_create_proc_entry(hba_count, mega_proc_dir_entry); 4448 4449 error = scsi_add_host(host, &pdev->dev); 4450 if (error) 4451 goto out_free_mbox; 4452 4453 scsi_scan_host(host); 4454 hba_count++; 4455 return 0; 4456 4457 out_free_mbox: 4458 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t), 4459 adapter->una_mbox64, adapter->una_mbox64_dma); 4460 out_free_irq: 4461 free_irq(adapter->host->irq, adapter); 4462 out_free_scb_list: 4463 kfree(adapter->scb_list); 4464 out_free_cmd_buffer: 4465 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE, 4466 adapter->mega_buffer, adapter->buf_dma_handle); 4467 out_host_put: 4468 scsi_host_put(host); 4469 out_iounmap: 4470 if (flag & BOARD_MEMMAP) 4471 iounmap((void *)mega_baseport); 4472 out_release_region: 4473 if (flag & BOARD_MEMMAP) 4474 release_mem_region(tbase, 128); 4475 else 4476 release_region(mega_baseport, 16); 4477 out_disable_device: 4478 pci_disable_device(pdev); 4479 out: 4480 return error; 4481 } 4482 4483 static void 4484 __megaraid_shutdown(adapter_t *adapter) 4485 { 4486 u_char raw_mbox[sizeof(struct mbox_out)]; 4487 mbox_t *mbox = (mbox_t *)raw_mbox; 4488 int i; 4489 4490 /* Flush adapter cache */ 4491 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 4492 raw_mbox[0] = FLUSH_ADAPTER; 4493 4494 free_irq(adapter->host->irq, adapter); 4495 4496 /* Issue a blocking (interrupts disabled) command to the card */ 4497 issue_scb_block(adapter, raw_mbox); 4498 4499 /* Flush disks cache */ 4500 memset(&mbox->m_out, 0, sizeof(raw_mbox)); 4501 raw_mbox[0] = FLUSH_SYSTEM; 4502 4503 /* Issue a blocking (interrupts disabled) command to the card */ 4504 issue_scb_block(adapter, raw_mbox); 4505 4506 if (atomic_read(&adapter->pend_cmds) > 0) 4507 dev_warn(&adapter->dev->dev, "pending commands!!\n"); 4508 4509 /* 4510 * Have a delibrate delay to make sure all the caches are 4511 * actually flushed. 
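	 * This works out to roughly ten seconds of mdelay() busy-waiting in
	 * the loop below.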
4512 */ 4513 for (i = 0; i <= 10; i++) 4514 mdelay(1000); 4515 } 4516 4517 static void 4518 megaraid_remove_one(struct pci_dev *pdev) 4519 { 4520 struct Scsi_Host *host = pci_get_drvdata(pdev); 4521 adapter_t *adapter = (adapter_t *)host->hostdata; 4522 char buf[12] = { 0 }; 4523 4524 scsi_remove_host(host); 4525 4526 __megaraid_shutdown(adapter); 4527 4528 /* Free our resources */ 4529 if (adapter->flag & BOARD_MEMMAP) { 4530 iounmap((void *)adapter->base); 4531 release_mem_region(adapter->host->base, 128); 4532 } else 4533 release_region(adapter->base, 16); 4534 4535 mega_free_sgl(adapter); 4536 4537 sprintf(buf, "hba%d", adapter->host->host_no); 4538 remove_proc_subtree(buf, mega_proc_dir_entry); 4539 4540 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE, 4541 adapter->mega_buffer, adapter->buf_dma_handle); 4542 kfree(adapter->scb_list); 4543 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t), 4544 adapter->una_mbox64, adapter->una_mbox64_dma); 4545 4546 scsi_host_put(host); 4547 pci_disable_device(pdev); 4548 4549 hba_count--; 4550 } 4551 4552 static void 4553 megaraid_shutdown(struct pci_dev *pdev) 4554 { 4555 struct Scsi_Host *host = pci_get_drvdata(pdev); 4556 adapter_t *adapter = (adapter_t *)host->hostdata; 4557 4558 __megaraid_shutdown(adapter); 4559 } 4560 4561 static struct pci_device_id megaraid_pci_tbl[] = { 4562 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID, 4563 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 4564 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2, 4565 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 4566 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3, 4567 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 4568 {0,} 4569 }; 4570 MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); 4571 4572 static struct pci_driver megaraid_pci_driver = { 4573 .name = "megaraid_legacy", 4574 .id_table = megaraid_pci_tbl, 4575 .probe = megaraid_probe_one, 4576 .remove = megaraid_remove_one, 4577 .shutdown = megaraid_shutdown, 4578 }; 4579 4580 static int __init megaraid_init(void) 4581 { 4582 int error; 4583 4584 if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN)) 4585 max_cmd_per_lun = MAX_CMD_PER_LUN; 4586 if (max_mbox_busy_wait > MBOX_BUSY_WAIT) 4587 max_mbox_busy_wait = MBOX_BUSY_WAIT; 4588 4589 #ifdef CONFIG_PROC_FS 4590 mega_proc_dir_entry = proc_mkdir("megaraid", NULL); 4591 if (!mega_proc_dir_entry) { 4592 printk(KERN_WARNING 4593 "megaraid: failed to create megaraid root\n"); 4594 } 4595 #endif 4596 error = pci_register_driver(&megaraid_pci_driver); 4597 if (error) { 4598 #ifdef CONFIG_PROC_FS 4599 remove_proc_entry("megaraid", NULL); 4600 #endif 4601 return error; 4602 } 4603 4604 /* 4605 * Register the driver as a character device, for applications 4606 * to access it for ioctls. 4607 * First argument (major) to register_chrdev implies a dynamic 4608 * major number allocation. 4609 */ 4610 major = register_chrdev(0, "megadev_legacy", &megadev_fops); 4611 if (!major) { 4612 printk(KERN_WARNING 4613 "megaraid: failed to register char device\n"); 4614 } 4615 4616 return 0; 4617 } 4618 4619 static void __exit megaraid_exit(void) 4620 { 4621 /* 4622 * Unregister the character device interface to the driver. 4623 */ 4624 unregister_chrdev(major, "megadev_legacy"); 4625 4626 pci_unregister_driver(&megaraid_pci_driver); 4627 4628 #ifdef CONFIG_PROC_FS 4629 remove_proc_entry("megaraid", NULL); 4630 #endif 4631 } 4632 4633 module_init(megaraid_init); 4634 module_exit(megaraid_exit); 4635 4636 /* vi: set ts=8 sw=8 tw=78: */ 4637