1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Linux MegaRAID device driver
5 *
6 * Copyright (c) 2002 LSI Logic Corporation.
7 *
8 * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
9 * - fixes
10 * - speed-ups (list handling fixes, issued_list, optimizations.)
11 * - lots of cleanups.
12 *
13 * Copyright (c) 2003 Christoph Hellwig <hch@lst.de>
14 * - new-style, hotplug-aware pci probing and scsi registration
15 *
16 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
17 * <Seokmann.Ju@lsil.com>
18 *
19 * Description: Linux device driver for LSI Logic MegaRAID controller
20 *
21 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
22 * 518, 520, 531, 532
23 *
24 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
25 * and others. Please send updates to the mailing list
26 * linux-scsi@vger.kernel.org .
27 */
28
29 #include <linux/mm.h>
30 #include <linux/fs.h>
31 #include <linux/blkdev.h>
32 #include <linux/uaccess.h>
33 #include <asm/io.h>
34 #include <linux/completion.h>
35 #include <linux/delay.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/reboot.h>
39 #include <linux/module.h>
40 #include <linux/list.h>
41 #include <linux/interrupt.h>
42 #include <linux/pci.h>
43 #include <linux/init.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/mutex.h>
46 #include <linux/slab.h>
47
48 #include <scsi/scsi.h>
49 #include <scsi/scsi_cmnd.h>
50 #include <scsi/scsi_device.h>
51 #include <scsi/scsi_eh.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsi_tcq.h>
54 #include <scsi/scsicam.h>
55
56 #include "megaraid.h"
57
58 #define MEGARAID_MODULE_VERSION "2.00.4"
59
60 MODULE_AUTHOR ("sju@lsil.com");
61 MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
62 MODULE_LICENSE ("GPL");
63 MODULE_VERSION(MEGARAID_MODULE_VERSION);
64
65 static DEFINE_MUTEX(megadev_mutex);
66 static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
67 module_param(max_cmd_per_lun, uint, 0);
68 MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");
69
70 static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
71 module_param(max_sectors_per_io, ushort, 0);
72 MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");
73
74
75 static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
76 module_param(max_mbox_busy_wait, ushort, 0);
77 MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
78
79 #define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
80 #define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
81 #define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
82 #define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
83
84 /*
85 * Global variables
86 */
87
88 static int hba_count;
89 static adapter_t *hba_soft_state[MAX_CONTROLLERS];
90 static struct proc_dir_entry *mega_proc_dir_entry;
91
92 /* For controller re-ordering */
93 static struct mega_hbas mega_hbas[MAX_CONTROLLERS];
94
95 static long
96 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
97
98 /*
99 * The File Operations structure for the serial/ioctl interface of the driver
100 */
101 static const struct file_operations megadev_fops = {
102 .owner = THIS_MODULE,
103 .unlocked_ioctl = megadev_unlocked_ioctl,
104 .open = megadev_open,
105 .llseek = noop_llseek,
106 };
107
108 /*
109 * Array of structures for storing information about the controllers. This
110 * information is sent to user-level applications when they issue an ioctl
111 * for this information.
112 */
113 static struct mcontroller mcontroller[MAX_CONTROLLERS];
114
115 /* The current driver version */
116 static u32 driver_ver = 0x02000000;
117
118 /* major number used by the device for character interface */
119 static int major;
120
121 #define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01)
122
123
124 /*
125 * Debug variable to print some diagnostic messages
126 */
127 static int trace_level;
128
129 /**
130 * mega_setup_mailbox()
131 * @adapter: pointer to our soft state
132 *
133 * Allocates 8-byte-aligned memory for the handshake mailbox.
134 */
135 static int
136 mega_setup_mailbox(adapter_t *adapter)
137 {
138 unsigned long align;
139
140 adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
141 sizeof(mbox64_t),
142 &adapter->una_mbox64_dma,
143 GFP_KERNEL);
144
145 if( !adapter->una_mbox64 ) return -1;
146
147 adapter->mbox = &adapter->una_mbox64->mbox;
148
149 adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
150 (~0UL ^ 0xFUL));
151
152 adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);
153
154 align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);
155
156 adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;
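/*
 * Worked example of the alignment math above (hypothetical addresses,
 * for illustration only): if una_mbox64 were returned at 0x...1000 with
 * a matching bus address, &una_mbox64->mbox would be 0x...1008 (the two
 * 32-bit xfer_segment words come first), rounding up with (+15 & ~0xF)
 * would leave adapter->mbox at the 16-byte boundary 0x...1010 and
 * adapter->mbox64 eight bytes before it at 0x...1008, so align == 8 and
 * adapter->mbox_dma == una_mbox64_dma + 8 + 8 -- the same 0x10 offset
 * in bus-address space that the CPU sees.
 */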
157
158 /*
159 * Register the mailbox if the controller is an io-mapped controller
160 */
161 if( adapter->flag & BOARD_IOMAP ) {
162
163 outb(adapter->mbox_dma & 0xFF,
164 adapter->host->io_port + MBOX_PORT0);
165
166 outb((adapter->mbox_dma >> 8) & 0xFF,
167 adapter->host->io_port + MBOX_PORT1);
168
169 outb((adapter->mbox_dma >> 16) & 0xFF,
170 adapter->host->io_port + MBOX_PORT2);
171
172 outb((adapter->mbox_dma >> 24) & 0xFF,
173 adapter->host->io_port + MBOX_PORT3);
174
175 outb(ENABLE_MBOX_BYTE,
176 adapter->host->io_port + ENABLE_MBOX_REGION);
177
178 irq_ack(adapter);
179
180 irq_enable(adapter);
181 }
182
183 return 0;
184 }
185
186
187 /*
188 * mega_query_adapter()
189 * @adapter - pointer to our soft state
190 *
191 * Issue the adapter inquiry commands to the controller and find out
192 * information and parameters about the attached devices
193 */
194 static int
195 mega_query_adapter(adapter_t *adapter)
196 {
197 dma_addr_t prod_info_dma_handle;
198 mega_inquiry3 *inquiry3;
199 struct mbox_out mbox;
200 u8 *raw_mbox = (u8 *)&mbox;
201 int retval;
202
203 /* Initialize adapter inquiry mailbox */
204
205 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
206 memset(&mbox, 0, sizeof(mbox));
207
208 /*
209 * Try to issue Inquiry3 command
210 * if it does not succeed, issue the MEGA_MBOXCMD_ADAPTERINQ command and
211 * update the enquiry3 structure
212 */
213 mbox.xferaddr = (u32)adapter->buf_dma_handle;
214
215 inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
216
217 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
218 raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */
219 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
220
221 /* Issue a blocking command to the card */
222 if (issue_scb_block(adapter, raw_mbox)) {
223 /* the adapter does not support 40ld */
224
225 mraid_ext_inquiry *ext_inq;
226 mraid_inquiry *inq;
227 dma_addr_t dma_handle;
228
229 ext_inq = dma_alloc_coherent(&adapter->dev->dev,
230 sizeof(mraid_ext_inquiry),
231 &dma_handle, GFP_KERNEL);
232
233 if( ext_inq == NULL ) return -1;
234
235 inq = &ext_inq->raid_inq;
236
237 mbox.xferaddr = (u32)dma_handle;
238
239 /*issue old 0x04 command to adapter */
240 mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ;
241
242 issue_scb_block(adapter, raw_mbox);
243
244 /*
245 * update Enquiry3 and ProductInfo structures with
246 * mraid_inquiry structure
247 */
248 mega_8_to_40ld(inq, inquiry3,
249 (mega_product_info *)&adapter->product_info);
250
251 dma_free_coherent(&adapter->dev->dev,
252 sizeof(mraid_ext_inquiry), ext_inq,
253 dma_handle);
254
255 } else { /*adapter supports 40ld */
256 adapter->flag |= BOARD_40LD;
257
258 /*
259 * get product_info, which is static information and will be
260 * unchanged
261 */
262 prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
263 (void *)&adapter->product_info,
264 sizeof(mega_product_info),
265 DMA_FROM_DEVICE);
266
267 mbox.xferaddr = prod_info_dma_handle;
268
269 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
270 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
271
272 if ((retval = issue_scb_block(adapter, raw_mbox)))
273 dev_warn(&adapter->dev->dev,
274 "Product_info cmd failed with error: %d\n",
275 retval);
276
277 dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
278 sizeof(mega_product_info), DMA_FROM_DEVICE);
279 }
280
281
282 /*
283 * kernel scans the channels from 0 to <= max_channel
284 */
285 adapter->host->max_channel =
286 adapter->product_info.nchannels + NVIRT_CHAN -1;
287
288 adapter->host->max_id = 16; /* max targets per channel */
289
290 adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */
291
292 adapter->host->cmd_per_lun = max_cmd_per_lun;
293
294 adapter->numldrv = inquiry3->num_ldrv;
295
296 adapter->max_cmds = adapter->product_info.max_commands;
297
298 if(adapter->max_cmds > MAX_COMMANDS)
299 adapter->max_cmds = MAX_COMMANDS;
300
301 adapter->host->can_queue = adapter->max_cmds - 1;
302
303 /*
304 * Get the maximum number of scatter-gather elements supported by this
305 * firmware
306 */
307 mega_get_max_sgl(adapter);
308
309 adapter->host->sg_tablesize = adapter->sglen;
310
311 /* use HP firmware and bios version encoding
312 Note: fw_version[0|1] and bios_version[0|1] were originally shifted
313 right 8 bits making them zero. This 0 value was hardcoded to fix
314 sparse warnings. */
315 if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
316 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
317 "%c%d%d.%d%d",
318 adapter->product_info.fw_version[2],
319 0,
320 adapter->product_info.fw_version[1] & 0x0f,
321 0,
322 adapter->product_info.fw_version[0] & 0x0f);
323 snprintf(adapter->bios_version, sizeof(adapter->fw_version),
324 "%c%d%d.%d%d",
325 adapter->product_info.bios_version[2],
326 0,
327 adapter->product_info.bios_version[1] & 0x0f,
328 0,
329 adapter->product_info.bios_version[0] & 0x0f);
330 } else {
331 memcpy(adapter->fw_version,
332 (char *)adapter->product_info.fw_version, 4);
333 adapter->fw_version[4] = 0;
334
335 memcpy(adapter->bios_version,
336 (char *)adapter->product_info.bios_version, 4);
337
338 adapter->bios_version[4] = 0;
339 }
340
341 dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
342 adapter->fw_version, adapter->bios_version, adapter->numldrv);
343
344 /*
345 * Do we support extended (>10 bytes) cdbs
346 */
347 adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
348 if (adapter->support_ext_cdb)
349 dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
350
351
352 return 0;
353 }
354
355 /**
356 * mega_runpendq()
357 * @adapter: pointer to our soft state
358 *
359 * Runs through the list of pending requests.
360 */
361 static inline void
362 mega_runpendq(adapter_t *adapter)
363 {
364 if(!list_empty(&adapter->pending_list))
365 __mega_runpendq(adapter);
366 }
367
368 /*
369 * megaraid_queue()
370 * @scmd - Issue this scsi command
372 *
373 * The command queuing entry point for the mid-layer.
374 */
375 static int megaraid_queue_lck(struct scsi_cmnd *scmd)
376 {
377 adapter_t *adapter;
378 scb_t *scb;
379 int busy=0;
380 unsigned long flags;
381
382 adapter = (adapter_t *)scmd->device->host->hostdata;
383
384 /*
385 * Allocate and build a SCB request
386 * The busy flag will be set if mega_build_cmd() could not allocate
387 * an scb; we return a non-zero status in that case.
388 * NOTE: scb can be NULL even though certain commands completed
389 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; we return
390 * 0 in that case.
391 */
392
393 spin_lock_irqsave(&adapter->lock, flags);
394 scb = mega_build_cmd(adapter, scmd, &busy);
395 if (!scb)
396 goto out;
397
398 scb->state |= SCB_PENDQ;
399 list_add_tail(&scb->list, &adapter->pending_list);
400
401 /*
402 * Check if the HBA is in quiescent state, e.g., during a
403 * delete logical drive operation. If it is, don't run
404 * the pending_list.
405 */
406 if (atomic_read(&adapter->quiescent) == 0)
407 mega_runpendq(adapter);
408
409 busy = 0;
410 out:
411 spin_unlock_irqrestore(&adapter->lock, flags);
412 return busy;
413 }
414
415 static DEF_SCSI_QCMD(megaraid_queue)
416
417 /**
418 * mega_allocate_scb()
419 * @adapter: pointer to our soft state
420 * @cmd: scsi command from the mid-layer
421 *
422 * Allocate a SCB structure. This is the central structure for controller
423 * commands.
424 */
425 static inline scb_t *
426 mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
427 {
428 struct list_head *head = &adapter->free_list;
429 scb_t *scb;
430
431 /* Unlink command from Free List */
432 if( !list_empty(head) ) {
433
434 scb = list_entry(head->next, scb_t, list);
435
436 list_del_init(head->next);
437
438 scb->state = SCB_ACTIVE;
439 scb->cmd = cmd;
440 scb->dma_type = MEGA_DMA_TYPE_NONE;
441
442 return scb;
443 }
444
445 return NULL;
446 }
447
448 /**
449 * mega_get_ldrv_num()
450 * @adapter: pointer to our soft state
451 * @cmd: scsi mid layer command
452 * @channel: channel on the controller
453 *
454 * Calculate the logical drive number based on the information in scsi command
455 * and the channel number.
456 */
457 static inline int
458 mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
459 {
460 int tgt;
461 int ldrv_num;
462
463 tgt = cmd->device->id;
464
465 if ( tgt > adapter->this_id )
466 tgt--; /* we do not get inquiries for the initiator id */
467
468 ldrv_num = (channel * 15) + tgt;
469
470
471 /*
472 * If we have a logical drive with boot enabled, present it first
473 */
474 if( adapter->boot_ldrv_enabled ) {
475 if( ldrv_num == 0 ) {
476 ldrv_num = adapter->boot_ldrv;
477 }
478 else {
479 if( ldrv_num <= adapter->boot_ldrv ) {
480 ldrv_num--;
481 }
482 }
483 }
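/*
 * Illustrative mapping for the re-ordering above (hypothetical values):
 * with boot_ldrv_enabled set and boot_ldrv == 2, scsi-visible drive 0
 * maps to logical drive 2 (the boot drive is presented first), drive 1
 * maps to 0, drive 2 maps to 1, and drives above boot_ldrv map to
 * themselves.
 */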
484
485 /*
486 * If "delete logical drive" feature is enabled on this controller.
487 * Do only if at least one delete logical drive operation was done.
488 *
489 * Also, after logical drive deletion, instead of logical drive number,
490 * the value returned should be 0x80+logical drive id.
491 *
492 * These is valid only for IO commands.
493 */
494
495 if (adapter->support_random_del && adapter->read_ldidmap )
496 switch (cmd->cmnd[0]) {
497 case READ_6:
498 case WRITE_6:
499 case READ_10:
500 case WRITE_10:
501 ldrv_num += 0x80;
502 }
503
504 return ldrv_num;
505 }
506
507 /**
508 * mega_build_cmd()
509 * @adapter: pointer to our soft state
510 * @cmd: Prepare using this scsi command
511 * @busy: busy flag if no resources
512 *
513 * Prepares a command and scatter gather list for the controller. This routine
514 * also finds out if the command is intended for a logical drive or a
515 * physical device and prepares the controller command accordingly.
516 *
517 * We also re-order the logical drives and physical devices based on their
518 * boot settings.
519 */
520 static scb_t *
521 mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
522 {
523 mega_passthru *pthru;
524 scb_t *scb;
525 mbox_t *mbox;
526 u32 seg;
527 char islogical;
528 int max_ldrv_num;
529 int channel = 0;
530 int target = 0;
531 int ldrv_num = 0; /* logical drive number */
532
533 /*
534 * We know what channels our logical drives are on - mega_find_card()
535 */
536 islogical = adapter->logdrv_chan[cmd->device->channel];
537
538 /*
539 * The theory: if a physical drive is chosen for boot, all the physical
540 * devices are exported before the logical drives; otherwise the physical
541 * devices are pushed after the logical drives, in which case the kernel
542 * sees the physical devices on a virtual channel, which is then converted
543 * to the actual channel on the HBA.
544 */
545 if( adapter->boot_pdrv_enabled ) {
546 if( islogical ) {
547 /* logical channel */
548 channel = cmd->device->channel -
549 adapter->product_info.nchannels;
550 }
551 else {
552 /* this is physical channel */
553 channel = cmd->device->channel;
554 target = cmd->device->id;
555
556 /*
557 * When booting from a physical disk, that disk needs to
558 * be exposed first. If both channels are SCSI, then
559 * booting from the second channel is not allowed.
560 */
561 if( target == 0 ) {
562 target = adapter->boot_pdrv_tgt;
563 }
564 else if( target == adapter->boot_pdrv_tgt ) {
565 target = 0;
566 }
567 }
568 }
569 else {
570 if( islogical ) {
571 /* this is the logical channel */
572 channel = cmd->device->channel;
573 }
574 else {
575 /* physical channel */
576 channel = cmd->device->channel - NVIRT_CHAN;
577 target = cmd->device->id;
578 }
579 }
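/*
 * A short example of the remapping above (assumed configuration, for
 * illustration only): with NVIRT_CHAN == 4 and boot_pdrv_enabled clear,
 * the logical drives occupy the first four scsi channels and the
 * physical channels appear after them, so a command arriving on scsi
 * channel 4 is issued to real channel 4 - NVIRT_CHAN == 0 on the HBA.
 */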
580
581
582 if(islogical) {
583
584 /* have just LUN 0 for each target on virtual channels */
585 if (cmd->device->lun) {
586 cmd->result = (DID_BAD_TARGET << 16);
587 scsi_done(cmd);
588 return NULL;
589 }
590
591 ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);
592
593
594 max_ldrv_num = (adapter->flag & BOARD_40LD) ?
595 MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;
596
597 /*
598 * max_ldrv_num increases by 0x80 if some logical drive was
599 * deleted.
600 */
601 if(adapter->read_ldidmap)
602 max_ldrv_num += 0x80;
603
604 if(ldrv_num > max_ldrv_num ) {
605 cmd->result = (DID_BAD_TARGET << 16);
606 scsi_done(cmd);
607 return NULL;
608 }
609
610 }
611 else {
612 if( cmd->device->lun > 7) {
613 /*
614 * Do not support lun >7 for physically accessed
615 * devices
616 */
617 cmd->result = (DID_BAD_TARGET << 16);
618 scsi_done(cmd);
619 return NULL;
620 }
621 }
622
623 /*
624 *
625 * Logical drive commands
626 *
627 */
628 if(islogical) {
629 switch (cmd->cmnd[0]) {
630 case TEST_UNIT_READY:
631 #if MEGA_HAVE_CLUSTERING
632 /*
633 * Do we support clustering and is the support enabled?
634 * If not, always return success.
635 */
636 if( !adapter->has_cluster ) {
637 cmd->result = (DID_OK << 16);
638 scsi_done(cmd);
639 return NULL;
640 }
641
642 if(!(scb = mega_allocate_scb(adapter, cmd))) {
643 *busy = 1;
644 return NULL;
645 }
646
647 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
648 scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
649 scb->raw_mbox[3] = ldrv_num;
650
651 scb->dma_direction = DMA_NONE;
652
653 return scb;
654 #else
655 cmd->result = (DID_OK << 16);
656 scsi_done(cmd);
657 return NULL;
658 #endif
659
660 case MODE_SENSE: {
661 char *buf;
662 struct scatterlist *sg;
663
664 sg = scsi_sglist(cmd);
665 buf = kmap_atomic(sg_page(sg)) + sg->offset;
666
667 memset(buf, 0, cmd->cmnd[4]);
668 kunmap_atomic(buf - sg->offset);
669
670 cmd->result = (DID_OK << 16);
671 scsi_done(cmd);
672 return NULL;
673 }
674
675 case READ_CAPACITY:
676 case INQUIRY:
677
678 if(!(adapter->flag & (1L << cmd->device->channel))) {
679
680 dev_notice(&adapter->dev->dev,
681 "scsi%d: scanning scsi channel %d "
682 "for logical drives\n",
683 adapter->host->host_no,
684 cmd->device->channel);
685
686 adapter->flag |= (1L << cmd->device->channel);
687 }
688
689 /* Allocate a SCB and initialize passthru */
690 if(!(scb = mega_allocate_scb(adapter, cmd))) {
691 *busy = 1;
692 return NULL;
693 }
694 pthru = scb->pthru;
695
696 mbox = (mbox_t *)scb->raw_mbox;
697 memset(mbox, 0, sizeof(scb->raw_mbox));
698 memset(pthru, 0, sizeof(mega_passthru));
699
700 pthru->timeout = 0;
701 pthru->ars = 1;
702 pthru->reqsenselen = 14;
703 pthru->islogical = 1;
704 pthru->logdrv = ldrv_num;
705 pthru->cdblen = cmd->cmd_len;
706 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
707
708 if( adapter->has_64bit_addr ) {
709 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
710 }
711 else {
712 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
713 }
714
715 scb->dma_direction = DMA_FROM_DEVICE;
716
717 pthru->numsgelements = mega_build_sglist(adapter, scb,
718 &pthru->dataxferaddr, &pthru->dataxferlen);
719
720 mbox->m_out.xferaddr = scb->pthru_dma_addr;
721
722 return scb;
723
724 case READ_6:
725 case WRITE_6:
726 case READ_10:
727 case WRITE_10:
728 case READ_12:
729 case WRITE_12:
730
731 /* Allocate a SCB and initialize mailbox */
732 if(!(scb = mega_allocate_scb(adapter, cmd))) {
733 *busy = 1;
734 return NULL;
735 }
736 mbox = (mbox_t *)scb->raw_mbox;
737
738 memset(mbox, 0, sizeof(scb->raw_mbox));
739 mbox->m_out.logdrv = ldrv_num;
740
741 /*
742 * A little hack: 2nd bit is zero for all scsi read
743 * commands and is set for all scsi write commands
744 */
745 if( adapter->has_64bit_addr ) {
746 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
747 MEGA_MBOXCMD_LWRITE64:
748 MEGA_MBOXCMD_LREAD64 ;
749 }
750 else {
751 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
752 MEGA_MBOXCMD_LWRITE:
753 MEGA_MBOXCMD_LREAD ;
754 }
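/*
 * For reference, the opcodes handled here are READ_6=0x08/WRITE_6=0x0A,
 * READ_10=0x28/WRITE_10=0x2A and READ_12=0xA8/WRITE_12=0xAA, so bit 1
 * (0x02) of the opcode is set exactly for the write variants, which is
 * what the test above relies on.
 */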
755
756 /*
757 * 6-byte READ(0x08) or WRITE(0x0A) cdb
758 */
759 if( cmd->cmd_len == 6 ) {
760 mbox->m_out.numsectors = (u32) cmd->cmnd[4];
761 mbox->m_out.lba =
762 ((u32)cmd->cmnd[1] << 16) |
763 ((u32)cmd->cmnd[2] << 8) |
764 (u32)cmd->cmnd[3];
765
766 mbox->m_out.lba &= 0x1FFFFF;
767
768 #if MEGA_HAVE_STATS
769 /*
770 * Take modulo 0x80, since the logical drive
771 * number increases by 0x80 when a logical
772 * drive was deleted
773 */
774 if (*cmd->cmnd == READ_6) {
775 adapter->nreads[ldrv_num%0x80]++;
776 adapter->nreadblocks[ldrv_num%0x80] +=
777 mbox->m_out.numsectors;
778 } else {
779 adapter->nwrites[ldrv_num%0x80]++;
780 adapter->nwriteblocks[ldrv_num%0x80] +=
781 mbox->m_out.numsectors;
782 }
783 #endif
784 }
785
786 /*
787 * 10-byte READ(0x28) or WRITE(0x2A) cdb
788 */
789 if( cmd->cmd_len == 10 ) {
790 mbox->m_out.numsectors =
791 (u32)cmd->cmnd[8] |
792 ((u32)cmd->cmnd[7] << 8);
793 mbox->m_out.lba =
794 ((u32)cmd->cmnd[2] << 24) |
795 ((u32)cmd->cmnd[3] << 16) |
796 ((u32)cmd->cmnd[4] << 8) |
797 (u32)cmd->cmnd[5];
798
799 #if MEGA_HAVE_STATS
800 if (*cmd->cmnd == READ_10) {
801 adapter->nreads[ldrv_num%0x80]++;
802 adapter->nreadblocks[ldrv_num%0x80] +=
803 mbox->m_out.numsectors;
804 } else {
805 adapter->nwrites[ldrv_num%0x80]++;
806 adapter->nwriteblocks[ldrv_num%0x80] +=
807 mbox->m_out.numsectors;
808 }
809 #endif
810 }
811
812 /*
813 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
814 */
815 if( cmd->cmd_len == 12 ) {
816 mbox->m_out.lba =
817 ((u32)cmd->cmnd[2] << 24) |
818 ((u32)cmd->cmnd[3] << 16) |
819 ((u32)cmd->cmnd[4] << 8) |
820 (u32)cmd->cmnd[5];
821
822 mbox->m_out.numsectors =
823 ((u32)cmd->cmnd[6] << 24) |
824 ((u32)cmd->cmnd[7] << 16) |
825 ((u32)cmd->cmnd[8] << 8) |
826 (u32)cmd->cmnd[9];
827
828 #if MEGA_HAVE_STATS
829 if (*cmd->cmnd == READ_12) {
830 adapter->nreads[ldrv_num%0x80]++;
831 adapter->nreadblocks[ldrv_num%0x80] +=
832 mbox->m_out.numsectors;
833 } else {
834 adapter->nwrites[ldrv_num%0x80]++;
835 adapter->nwriteblocks[ldrv_num%0x80] +=
836 mbox->m_out.numsectors;
837 }
838 #endif
839 }
840
841 /*
842 * If it is a read command
843 */
844 if( (*cmd->cmnd & 0x0F) == 0x08 ) {
845 scb->dma_direction = DMA_FROM_DEVICE;
846 }
847 else {
848 scb->dma_direction = DMA_TO_DEVICE;
849 }
850
851 /* Calculate Scatter-Gather info */
852 mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
853 (u32 *)&mbox->m_out.xferaddr, &seg);
854
855 return scb;
856
857 #if MEGA_HAVE_CLUSTERING
858 case RESERVE:
859 case RELEASE:
860
861 /*
862 * Do we support clustering and is the support enabled?
863 */
864 if( ! adapter->has_cluster ) {
865
866 cmd->result = (DID_BAD_TARGET << 16);
867 scsi_done(cmd);
868 return NULL;
869 }
870
871 /* Allocate a SCB and initialize mailbox */
872 if(!(scb = mega_allocate_scb(adapter, cmd))) {
873 *busy = 1;
874 return NULL;
875 }
876
877 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
878 scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
879 MEGA_RESERVE_LD : MEGA_RELEASE_LD;
880
881 scb->raw_mbox[3] = ldrv_num;
882
883 scb->dma_direction = DMA_NONE;
884
885 return scb;
886 #endif
887
888 default:
889 cmd->result = (DID_BAD_TARGET << 16);
890 scsi_done(cmd);
891 return NULL;
892 }
893 }
894
895 /*
896 * Passthru drive commands
897 */
898 else {
899 /* Allocate a SCB and initialize passthru */
900 if(!(scb = mega_allocate_scb(adapter, cmd))) {
901 *busy = 1;
902 return NULL;
903 }
904
905 mbox = (mbox_t *)scb->raw_mbox;
906 memset(mbox, 0, sizeof(scb->raw_mbox));
907
908 if( adapter->support_ext_cdb ) {
909
910 mega_prepare_extpassthru(adapter, scb, cmd,
911 channel, target);
912
913 mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;
914
915 mbox->m_out.xferaddr = scb->epthru_dma_addr;
916
917 }
918 else {
919
920 pthru = mega_prepare_passthru(adapter, scb, cmd,
921 channel, target);
922
923 /* Initialize mailbox */
924 if( adapter->has_64bit_addr ) {
925 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
926 }
927 else {
928 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
929 }
930
931 mbox->m_out.xferaddr = scb->pthru_dma_addr;
932
933 }
934 return scb;
935 }
936 return NULL;
937 }
938
939
940 /**
941 * mega_prepare_passthru()
942 * @adapter: pointer to our soft state
943 * @scb: our scsi control block
944 * @cmd: scsi command from the mid-layer
945 * @channel: actual channel on the controller
946 * @target: actual id on the controller.
947 *
948 * prepare a command for the scsi physical devices.
949 */
950 static mega_passthru *
951 mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
952 int channel, int target)
953 {
954 mega_passthru *pthru;
955
956 pthru = scb->pthru;
957 memset(pthru, 0, sizeof (mega_passthru));
958
959 /* 0=6sec/1=60sec/2=10min/3=3hrs */
960 pthru->timeout = 2;
961
962 pthru->ars = 1;
963 pthru->reqsenselen = 14;
964 pthru->islogical = 0;
965
966 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
967
968 pthru->target = (adapter->flag & BOARD_40LD) ?
969 (channel << 4) | target : target;
970
971 pthru->cdblen = cmd->cmd_len;
972 pthru->logdrv = cmd->device->lun;
973
974 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
975
976 /* Not sure about the direction */
977 scb->dma_direction = DMA_BIDIRECTIONAL;
978
979 /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
980 switch (cmd->cmnd[0]) {
981 case INQUIRY:
982 case READ_CAPACITY:
983 if(!(adapter->flag & (1L << cmd->device->channel))) {
984
985 dev_notice(&adapter->dev->dev,
986 "scsi%d: scanning scsi channel %d [P%d] "
987 "for physical devices\n",
988 adapter->host->host_no,
989 cmd->device->channel, channel);
990
991 adapter->flag |= (1L << cmd->device->channel);
992 }
993 fallthrough;
994 default:
995 pthru->numsgelements = mega_build_sglist(adapter, scb,
996 &pthru->dataxferaddr, &pthru->dataxferlen);
997 break;
998 }
999 return pthru;
1000 }
1001
1002
1003 /**
1004 * mega_prepare_extpassthru()
1005 * @adapter: pointer to our soft state
1006 * @scb: our scsi control block
1007 * @cmd: scsi command from the mid-layer
1008 * @channel: actual channel on the controller
1009 * @target: actual id on the controller.
1010 *
1011 * prepare a command for the scsi physical devices. This routine prepares
1012 * commands for devices which can take extended CDBs (>10 bytes)
1013 */
1014 static mega_ext_passthru *
1015 mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
1016 struct scsi_cmnd *cmd,
1017 int channel, int target)
1018 {
1019 mega_ext_passthru *epthru;
1020
1021 epthru = scb->epthru;
1022 memset(epthru, 0, sizeof(mega_ext_passthru));
1023
1024 /* 0=6sec/1=60sec/2=10min/3=3hrs */
1025 epthru->timeout = 2;
1026
1027 epthru->ars = 1;
1028 epthru->reqsenselen = 14;
1029 epthru->islogical = 0;
1030
1031 epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
1032 epthru->target = (adapter->flag & BOARD_40LD) ?
1033 (channel << 4) | target : target;
1034
1035 epthru->cdblen = cmd->cmd_len;
1036 epthru->logdrv = cmd->device->lun;
1037
1038 memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
1039
1040 /* Not sure about the direction */
1041 scb->dma_direction = DMA_BIDIRECTIONAL;
1042
1043 switch(cmd->cmnd[0]) {
1044 case INQUIRY:
1045 case READ_CAPACITY:
1046 if(!(adapter->flag & (1L << cmd->device->channel))) {
1047
1048 dev_notice(&adapter->dev->dev,
1049 "scsi%d: scanning scsi channel %d [P%d] "
1050 "for physical devices\n",
1051 adapter->host->host_no,
1052 cmd->device->channel, channel);
1053
1054 adapter->flag |= (1L << cmd->device->channel);
1055 }
1056 fallthrough;
1057 default:
1058 epthru->numsgelements = mega_build_sglist(adapter, scb,
1059 &epthru->dataxferaddr, &epthru->dataxferlen);
1060 break;
1061 }
1062
1063 return epthru;
1064 }
1065
1066 static void
1067 __mega_runpendq(adapter_t *adapter)
1068 {
1069 scb_t *scb;
1070 struct list_head *pos, *next;
1071
1072 /* Issue any pending commands to the card */
1073 list_for_each_safe(pos, next, &adapter->pending_list) {
1074
1075 scb = list_entry(pos, scb_t, list);
1076
1077 if( !(scb->state & SCB_ISSUED) ) {
1078
1079 if( issue_scb(adapter, scb) != 0 )
1080 return;
1081 }
1082 }
1083
1084 return;
1085 }
1086
1087
1088 /**
1089 * issue_scb()
1090 * @adapter: pointer to our soft state
1091 * @scb: scsi control block
1092 *
1093 * Post a command to the card if the mailbox is available, otherwise return
1094 * busy. We also take the scb from the pending list if the mailbox is
1095 * available.
1096 */
1097 static int
1098 issue_scb(adapter_t *adapter, scb_t *scb)
1099 {
1100 volatile mbox64_t *mbox64 = adapter->mbox64;
1101 volatile mbox_t *mbox = adapter->mbox;
1102 unsigned int i = 0;
1103
1104 if(unlikely(mbox->m_in.busy)) {
1105 do {
1106 udelay(1);
1107 i++;
1108 } while( mbox->m_in.busy && (i < max_mbox_busy_wait) );
1109
1110 if(mbox->m_in.busy) return -1;
1111 }
1112
1113 /* Copy mailbox data into host structure */
1114 memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
1115 sizeof(struct mbox_out));
1116
1117 mbox->m_out.cmdid = scb->idx; /* Set cmdid */
1118 mbox->m_in.busy = 1; /* Set busy */
1119
1120
1121 /*
1122 * Increment the pending queue counter
1123 */
1124 atomic_inc(&adapter->pend_cmds);
1125
1126 switch (mbox->m_out.cmd) {
1127 case MEGA_MBOXCMD_LREAD64:
1128 case MEGA_MBOXCMD_LWRITE64:
1129 case MEGA_MBOXCMD_PASSTHRU64:
1130 case MEGA_MBOXCMD_EXTPTHRU:
1131 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1132 mbox64->xfer_segment_hi = 0;
1133 mbox->m_out.xferaddr = 0xFFFFFFFF;
1134 break;
1135 default:
1136 mbox64->xfer_segment_lo = 0;
1137 mbox64->xfer_segment_hi = 0;
1138 }
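/*
 * Note on the sentinel above: for the 64-bit capable commands the real
 * buffer/SG address goes into mbox64->xfer_segment_lo (the high half is
 * unused here) and the legacy 32-bit xferaddr field is set to
 * 0xFFFFFFFF, presumably so the firmware picks the address up from the
 * extended mailbox instead of the legacy field.
 */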
1139
1140 /*
1141 * post the command
1142 */
1143 scb->state |= SCB_ISSUED;
1144
1145 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1146 mbox->m_in.poll = 0;
1147 mbox->m_in.ack = 0;
1148 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1149 }
1150 else {
1151 irq_enable(adapter);
1152 issue_command(adapter);
1153 }
1154
1155 return 0;
1156 }
1157
1158 /*
1159 * Wait until the controller's mailbox is available
1160 */
1161 static inline int
1162 mega_busywait_mbox (adapter_t *adapter)
1163 {
1164 if (adapter->mbox->m_in.busy)
1165 return __mega_busywait_mbox(adapter);
1166 return 0;
1167 }
1168
1169 /**
1170 * issue_scb_block()
1171 * @adapter: pointer to our soft state
1172 * @raw_mbox: the mailbox
1173 *
1174 * Issue a scb in synchronous and non-interrupt mode
1175 */
1176 static int
1177 issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
1178 {
1179 volatile mbox64_t *mbox64 = adapter->mbox64;
1180 volatile mbox_t *mbox = adapter->mbox;
1181 u8 byte;
1182
1183 /* Wait until mailbox is free */
1184 if(mega_busywait_mbox (adapter))
1185 goto bug_blocked_mailbox;
1186
1187 /* Copy mailbox data into host structure */
1188 memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
1189 mbox->m_out.cmdid = 0xFE;
1190 mbox->m_in.busy = 1;
1191
1192 switch (raw_mbox[0]) {
1193 case MEGA_MBOXCMD_LREAD64:
1194 case MEGA_MBOXCMD_LWRITE64:
1195 case MEGA_MBOXCMD_PASSTHRU64:
1196 case MEGA_MBOXCMD_EXTPTHRU:
1197 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1198 mbox64->xfer_segment_hi = 0;
1199 mbox->m_out.xferaddr = 0xFFFFFFFF;
1200 break;
1201 default:
1202 mbox64->xfer_segment_lo = 0;
1203 mbox64->xfer_segment_hi = 0;
1204 }
1205
1206 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1207 mbox->m_in.poll = 0;
1208 mbox->m_in.ack = 0;
1209 mbox->m_in.numstatus = 0xFF;
1210 mbox->m_in.status = 0xFF;
1211 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1212
1213 while((volatile u8)mbox->m_in.numstatus == 0xFF)
1214 cpu_relax();
1215
1216 mbox->m_in.numstatus = 0xFF;
1217
1218 while( (volatile u8)mbox->m_in.poll != 0x77 )
1219 cpu_relax();
1220
1221 mbox->m_in.poll = 0;
1222 mbox->m_in.ack = 0x77;
1223
1224 WRINDOOR(adapter, adapter->mbox_dma | 0x2);
1225
1226 while(RDINDOOR(adapter) & 0x2)
1227 cpu_relax();
1228 }
1229 else {
1230 irq_disable(adapter);
1231 issue_command(adapter);
1232
1233 while (!((byte = irq_state(adapter)) & INTR_VALID))
1234 cpu_relax();
1235
1236 set_irq_state(adapter, byte);
1237 irq_enable(adapter);
1238 irq_ack(adapter);
1239 }
1240
1241 return mbox->m_in.status;
1242
1243 bug_blocked_mailbox:
1244 dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
1245 udelay (1000);
1246 return -1;
1247 }
1248
1249
1250 /**
1251 * megaraid_isr_iomapped()
1252 * @irq: irq
1253 * @devp: pointer to our soft state
1254 *
1255 * Interrupt service routine for io-mapped controllers.
1256 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1257 * and service the completed commands.
1258 */
1259 static irqreturn_t
1260 megaraid_isr_iomapped(int irq, void *devp)
1261 {
1262 adapter_t *adapter = devp;
1263 unsigned long flags;
1264 u8 status;
1265 u8 nstatus;
1266 u8 completed[MAX_FIRMWARE_STATUS];
1267 u8 byte;
1268 int handled = 0;
1269
1270
1271 /*
1272 * Loop as long as the F/W has more commands for us to complete.
1273 */
1274 spin_lock_irqsave(&adapter->lock, flags);
1275
1276 do {
1277 /* Check if a valid interrupt is pending */
1278 byte = irq_state(adapter);
1279 if( (byte & VALID_INTR_BYTE) == 0 ) {
1280 /*
1281 * No more pending commands
1282 */
1283 goto out_unlock;
1284 }
1285 set_irq_state(adapter, byte);
1286
1287 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1288 == 0xFF)
1289 cpu_relax();
1290 adapter->mbox->m_in.numstatus = 0xFF;
1291
1292 status = adapter->mbox->m_in.status;
1293
1294 /*
1295 * decrement the pending queue counter
1296 */
1297 atomic_sub(nstatus, &adapter->pend_cmds);
1298
1299 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1300 nstatus);
1301
1302 /* Acknowledge interrupt */
1303 irq_ack(adapter);
1304
1305 mega_cmd_done(adapter, completed, nstatus, status);
1306
1307 mega_rundoneq(adapter);
1308
1309 handled = 1;
1310
1311 /* Loop through any pending requests */
1312 if(atomic_read(&adapter->quiescent) == 0) {
1313 mega_runpendq(adapter);
1314 }
1315
1316 } while(1);
1317
1318 out_unlock:
1319
1320 spin_unlock_irqrestore(&adapter->lock, flags);
1321
1322 return IRQ_RETVAL(handled);
1323 }
1324
1325
1326 /**
1327 * megaraid_isr_memmapped()
1328 * @irq: irq
1329 * @devp: pointer to our soft state
1330 *
1331 * Interrupt service routine for memory-mapped controllers.
1332 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1333 * and service the completed commands.
1334 */
1335 static irqreturn_t
1336 megaraid_isr_memmapped(int irq, void *devp)
1337 {
1338 adapter_t *adapter = devp;
1339 unsigned long flags;
1340 u8 status;
1341 u32 dword = 0;
1342 u8 nstatus;
1343 u8 completed[MAX_FIRMWARE_STATUS];
1344 int handled = 0;
1345
1346
1347 /*
1348 * Loop as long as the F/W has more commands for us to complete.
1349 */
1350 spin_lock_irqsave(&adapter->lock, flags);
1351
1352 do {
1353 /* Check if a valid interrupt is pending */
1354 dword = RDOUTDOOR(adapter);
1355 if(dword != 0x10001234) {
1356 /*
1357 * No more pending commands
1358 */
1359 goto out_unlock;
1360 }
1361 WROUTDOOR(adapter, 0x10001234);
1362
1363 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1364 == 0xFF) {
1365 cpu_relax();
1366 }
1367 adapter->mbox->m_in.numstatus = 0xFF;
1368
1369 status = adapter->mbox->m_in.status;
1370
1371 /*
1372 * decrement the pending queue counter
1373 */
1374 atomic_sub(nstatus, &adapter->pend_cmds);
1375
1376 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1377 nstatus);
1378
1379 /* Acknowledge interrupt */
1380 WRINDOOR(adapter, 0x2);
1381
1382 handled = 1;
1383
1384 while( RDINDOOR(adapter) & 0x02 )
1385 cpu_relax();
1386
1387 mega_cmd_done(adapter, completed, nstatus, status);
1388
1389 mega_rundoneq(adapter);
1390
1391 /* Loop through any pending requests */
1392 if(atomic_read(&adapter->quiescent) == 0) {
1393 mega_runpendq(adapter);
1394 }
1395
1396 } while(1);
1397
1398 out_unlock:
1399
1400 spin_unlock_irqrestore(&adapter->lock, flags);
1401
1402 return IRQ_RETVAL(handled);
1403 }
1404 /**
1405 * mega_cmd_done()
1406 * @adapter: pointer to our soft state
1407 * @completed: array of ids of completed commands
1408 * @nstatus: number of completed commands
1409 * @status: status of the last command completed
1410 *
1411 * Complete the commands and call the scsi mid-layer callback hooks.
1412 */
1413 static void
1414 mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1415 {
1416 mega_ext_passthru *epthru = NULL;
1417 struct scatterlist *sgl;
1418 struct scsi_cmnd *cmd = NULL;
1419 mega_passthru *pthru = NULL;
1420 mbox_t *mbox = NULL;
1421 u8 c;
1422 scb_t *scb;
1423 int islogical;
1424 int cmdid;
1425 int i;
1426
1427 /*
1428 * for all the commands completed, call the mid-layer callback routine
1429 * and free the scb.
1430 */
1431 for( i = 0; i < nstatus; i++ ) {
1432
1433 cmdid = completed[i];
1434
1435 /*
1436 * Only free SCBs for the commands coming down from the
1437 * mid-layer, not for those which were issued internally.
1438 *
1439 * For internal commands, restore the status returned by the
1440 * firmware so that the user can interpret it.
1441 */
1442 if (cmdid == CMDID_INT_CMDS) {
1443 scb = &adapter->int_scb;
1444 cmd = scb->cmd;
1445
1446 list_del_init(&scb->list);
1447 scb->state = SCB_FREE;
1448
1449 adapter->int_status = status;
1450 complete(&adapter->int_waitq);
1451 } else {
1452 scb = &adapter->scb_list[cmdid];
1453
1454 /*
1455 * Make sure f/w has completed a valid command
1456 */
1457 if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
1458 dev_crit(&adapter->dev->dev, "invalid command "
1459 "Id %d, scb->state:%x, scsi cmd:%p\n",
1460 cmdid, scb->state, scb->cmd);
1461
1462 continue;
1463 }
1464
1465 /*
1466 * Was an abort issued for this command?
1467 */
1468 if( scb->state & SCB_ABORT ) {
1469
1470 dev_warn(&adapter->dev->dev,
1471 "aborted cmd [%x] complete\n",
1472 scb->idx);
1473
1474 scb->cmd->result = (DID_ABORT << 16);
1475
1476 list_add_tail(SCSI_LIST(scb->cmd),
1477 &adapter->completed_list);
1478
1479 mega_free_scb(adapter, scb);
1480
1481 continue;
1482 }
1483
1484 /*
1485 * Was a reset issued for this command?
1486 */
1487 if( scb->state & SCB_RESET ) {
1488
1489 dev_warn(&adapter->dev->dev,
1490 "reset cmd [%x] complete\n",
1491 scb->idx);
1492
1493 scb->cmd->result = (DID_RESET << 16);
1494
1495 list_add_tail(SCSI_LIST(scb->cmd),
1496 &adapter->completed_list);
1497
1498 mega_free_scb (adapter, scb);
1499
1500 continue;
1501 }
1502
1503 cmd = scb->cmd;
1504 pthru = scb->pthru;
1505 epthru = scb->epthru;
1506 mbox = (mbox_t *)scb->raw_mbox;
1507
1508 #if MEGA_HAVE_STATS
1509 {
1510
1511 int logdrv = mbox->m_out.logdrv;
1512
1513 islogical = adapter->logdrv_chan[cmd->device->channel];
1514 /*
1515 * Maintain an error counter for the logical drive.
1516 * Some applications, like an SNMP agent, need such
1517 * statistics.
1518 */
1519 if( status && islogical && (cmd->cmnd[0] == READ_6 ||
1520 cmd->cmnd[0] == READ_10 ||
1521 cmd->cmnd[0] == READ_12)) {
1522 /*
1523 * Logical drive number increases by 0x80 when
1524 * a logical drive is deleted
1525 */
1526 adapter->rd_errors[logdrv%0x80]++;
1527 }
1528
1529 if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
1530 cmd->cmnd[0] == WRITE_10 ||
1531 cmd->cmnd[0] == WRITE_12)) {
1532 /*
1533 * Logical drive number increases by 0x80 when
1534 * a logical drive is deleted
1535 */
1536 adapter->wr_errors[logdrv%0x80]++;
1537 }
1538
1539 }
1540 #endif
1541 }
1542
1543 /*
1544 * Do not report the presence of a hard disk on the channel:
1545 * if an INQUIRY was sent to a non-logical device and the
1546 * returned data indicates a (removable) hard disk, the
1547 * request should return failure! - PJ
1548 */
1549 islogical = adapter->logdrv_chan[cmd->device->channel];
1550 if( cmd->cmnd[0] == INQUIRY && !islogical ) {
1551
1552 sgl = scsi_sglist(cmd);
1553 if( sg_page(sgl) ) {
1554 c = *(unsigned char *) sg_virt(&sgl[0]);
1555 } else {
1556 dev_warn(&adapter->dev->dev, "invalid sg\n");
1557 c = 0;
1558 }
1559
1560 if(IS_RAID_CH(adapter, cmd->device->channel) &&
1561 ((c & 0x1F ) == TYPE_DISK)) {
1562 status = 0xF0;
1563 }
1564 }
1565
1566 /* clear result; otherwise, success returns corrupt value */
1567 cmd->result = 0;
1568
1569 /* Convert MegaRAID status to Linux error code */
1570 switch (status) {
1571 case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */
1572 cmd->result |= (DID_OK << 16);
1573 break;
1574
1575 case 0x02: /* ERROR_ABORTED, i.e.
1576 SCSI_STATUS_CHECK_CONDITION */
1577
1578 /* set sense_buffer and result fields */
1579 if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
1580 mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {
1581
1582 memcpy(cmd->sense_buffer, pthru->reqsensearea,
1583 14);
1584
1585 cmd->result = SAM_STAT_CHECK_CONDITION;
1586 }
1587 else {
1588 if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
1589
1590 memcpy(cmd->sense_buffer,
1591 epthru->reqsensearea, 14);
1592
1593 cmd->result = SAM_STAT_CHECK_CONDITION;
1594 } else
1595 scsi_build_sense(cmd, 0,
1596 ABORTED_COMMAND, 0, 0);
1597 }
1598 break;
1599
1600 case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e.
1601 SCSI_STATUS_BUSY */
1602 cmd->result |= (DID_BUS_BUSY << 16) | status;
1603 break;
1604
1605 default:
1606 #if MEGA_HAVE_CLUSTERING
1607 /*
1608 * If TEST_UNIT_READY fails, we know
1609 * MEGA_RESERVATION_STATUS failed
1610 */
1611 if( cmd->cmnd[0] == TEST_UNIT_READY ) {
1612 cmd->result |= (DID_ERROR << 16) |
1613 SAM_STAT_RESERVATION_CONFLICT;
1614 }
1615 else
1616 /*
1617 * Error code returned is 1 if Reserve or Release
1618 * failed or the input parameter is invalid
1619 */
1620 if( status == 1 &&
1621 (cmd->cmnd[0] == RESERVE ||
1622 cmd->cmnd[0] == RELEASE) ) {
1623
1624 cmd->result |= (DID_ERROR << 16) |
1625 SAM_STAT_RESERVATION_CONFLICT;
1626 }
1627 else
1628 #endif
1629 cmd->result |= (DID_BAD_TARGET << 16)|status;
1630 }
1631
1632 mega_free_scb(adapter, scb);
1633
1634 /* Add Scsi_Command to end of completed queue */
1635 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
1636 }
1637 }
1638
1639
1640 /*
1641 * mega_rundoneq()
1642 *
1643 * Run through the list of completed requests and finish them.
1644 */
1645 static void
1646 mega_rundoneq (adapter_t *adapter)
1647 {
1648 struct megaraid_cmd_priv *cmd_priv;
1649
1650 list_for_each_entry(cmd_priv, &adapter->completed_list, entry)
1651 scsi_done(megaraid_to_scsi_cmd(cmd_priv));
1652
1653 INIT_LIST_HEAD(&adapter->completed_list);
1654 }
1655
1656
1657 /*
1658 * Free an SCB structure.
1659 * Note: we assume the scsi command associated with this scb has not been freed yet.
1660 */
1661 static void
1662 mega_free_scb(adapter_t *adapter, scb_t *scb)
1663 {
1664 switch( scb->dma_type ) {
1665
1666 case MEGA_DMA_TYPE_NONE:
1667 break;
1668
1669 case MEGA_SGLIST:
1670 scsi_dma_unmap(scb->cmd);
1671 break;
1672 default:
1673 break;
1674 }
1675
1676 /*
1677 * Remove from the pending list
1678 */
1679 list_del_init(&scb->list);
1680
1681 /* Link the scb back into free list */
1682 scb->state = SCB_FREE;
1683 scb->cmd = NULL;
1684
1685 list_add(&scb->list, &adapter->free_list);
1686 }
1687
1688
1689 static int
1690 __mega_busywait_mbox (adapter_t *adapter)
1691 {
1692 volatile mbox_t *mbox = adapter->mbox;
1693 long counter;
1694
1695 for (counter = 0; counter < 10000; counter++) {
1696 if (!mbox->m_in.busy)
1697 return 0;
1698 udelay(100);
1699 cond_resched();
1700 }
1701 return -1; /* give up after 1 second */
1702 }
1703
1704 /*
1705 * Copies data to SGLIST
1706 * Note: For 64 bit cards, we need a minimum of one SG element for read/write
1707 */
1708 static int
1709 mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1710 {
1711 struct scatterlist *sg;
1712 struct scsi_cmnd *cmd;
1713 int sgcnt;
1714 int idx;
1715
1716 cmd = scb->cmd;
1717
1718 /*
1719 * Copy Scatter-Gather list info into controller structure.
1720 *
1721 * The number of sg elements returned must not exceed our limit
1722 */
1723 sgcnt = scsi_dma_map(cmd);
1724
1725 scb->dma_type = MEGA_SGLIST;
1726
1727 BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
1728
1729 *len = 0;
1730
1731 if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
1732 sg = scsi_sglist(cmd);
1733 scb->dma_h_bulkdata = sg_dma_address(sg);
1734 *buf = (u32)scb->dma_h_bulkdata;
1735 *len = sg_dma_len(sg);
1736 return 0;
1737 }
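/*
 * Otherwise the controller is handed the scatter-gather list itself:
 * *buf is set below to the DMA address of the per-scb SG array, the
 * array is filled from the mapped segments, and the element count
 * returned here ends up in the mailbox/passthru numsgelements field at
 * the call sites.  (The single-element fast path above returns 0,
 * meaning "direct buffer, no SG list".)
 */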
1738
1739 scsi_for_each_sg(cmd, sg, sgcnt, idx) {
1740 if (adapter->has_64bit_addr) {
1741 scb->sgl64[idx].address = sg_dma_address(sg);
1742 *len += scb->sgl64[idx].length = sg_dma_len(sg);
1743 } else {
1744 scb->sgl[idx].address = sg_dma_address(sg);
1745 *len += scb->sgl[idx].length = sg_dma_len(sg);
1746 }
1747 }
1748
1749 /* Reset pointer and length fields */
1750 *buf = scb->sgl_dma_addr;
1751
1752 /* Return count of SG requests */
1753 return sgcnt;
1754 }
1755
1756
1757 /*
1758 * mega_8_to_40ld()
1759 *
1760 * takes all info in AdapterInquiry structure and puts it into ProductInfo and
1761 * Enquiry3 structures for later use
1762 */
1763 static void
1764 mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
1765 mega_product_info *product_info)
1766 {
1767 int i;
1768
1769 product_info->max_commands = inquiry->adapter_info.max_commands;
1770 enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
1771 product_info->nchannels = inquiry->adapter_info.nchannels;
1772
1773 for (i = 0; i < 4; i++) {
1774 product_info->fw_version[i] =
1775 inquiry->adapter_info.fw_version[i];
1776
1777 product_info->bios_version[i] =
1778 inquiry->adapter_info.bios_version[i];
1779 }
1780 enquiry3->cache_flush_interval =
1781 inquiry->adapter_info.cache_flush_interval;
1782
1783 product_info->dram_size = inquiry->adapter_info.dram_size;
1784
1785 enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;
1786
1787 for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
1788 enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
1789 enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
1790 enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
1791 }
1792
1793 for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
1794 enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
1795 }
1796
1797 static inline void
1798 mega_free_sgl(adapter_t *adapter)
1799 {
1800 scb_t *scb;
1801 int i;
1802
1803 for(i = 0; i < adapter->max_cmds; i++) {
1804
1805 scb = &adapter->scb_list[i];
1806
1807 if( scb->sgl64 ) {
1808 dma_free_coherent(&adapter->dev->dev,
1809 sizeof(mega_sgl64) * adapter->sglen,
1810 scb->sgl64, scb->sgl_dma_addr);
1811
1812 scb->sgl64 = NULL;
1813 }
1814
1815 if( scb->pthru ) {
1816 dma_free_coherent(&adapter->dev->dev,
1817 sizeof(mega_passthru), scb->pthru,
1818 scb->pthru_dma_addr);
1819
1820 scb->pthru = NULL;
1821 }
1822
1823 if( scb->epthru ) {
1824 dma_free_coherent(&adapter->dev->dev,
1825 sizeof(mega_ext_passthru),
1826 scb->epthru, scb->epthru_dma_addr);
1827
1828 scb->epthru = NULL;
1829 }
1830
1831 }
1832 }
1833
1834
1835 /*
1836 * Get information about the card/driver
1837 */
1838 const char *
1839 megaraid_info(struct Scsi_Host *host)
1840 {
1841 static char buffer[512];
1842 adapter_t *adapter;
1843
1844 adapter = (adapter_t *)host->hostdata;
1845
1846 sprintf (buffer,
1847 "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
1848 adapter->fw_version, adapter->product_info.max_commands,
1849 adapter->host->max_id, adapter->host->max_channel,
1850 (u32)adapter->host->max_lun);
1851 return buffer;
1852 }
1853
1854 /*
1855 * Abort a previous SCSI request. Only commands on the pending list can be
1856 * aborted. All the commands issued to the F/W must complete.
1857 */
1858 static int
1859 megaraid_abort(struct scsi_cmnd *cmd)
1860 {
1861 adapter_t *adapter;
1862 int rval;
1863
1864 adapter = (adapter_t *)cmd->device->host->hostdata;
1865
1866 rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);
1867
1868 /*
1869 * This is required here so that any completed requests
1870 * are communicated over to the mid-layer.
1871 */
1872 mega_rundoneq(adapter);
1873
1874 return rval;
1875 }
1876
1877
1878 static int
1879 megaraid_reset(struct scsi_cmnd *cmd)
1880 {
1881 adapter_t *adapter;
1882 megacmd_t mc;
1883 int rval;
1884
1885 adapter = (adapter_t *)cmd->device->host->hostdata;
1886
1887 #if MEGA_HAVE_CLUSTERING
1888 mc.cmd = MEGA_CLUSTER_CMD;
1889 mc.opcode = MEGA_RESET_RESERVATIONS;
1890
1891 if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
1892 dev_warn(&adapter->dev->dev, "reservation reset failed\n");
1893 }
1894 else {
1895 dev_info(&adapter->dev->dev, "reservation reset\n");
1896 }
1897 #endif
1898
1899 spin_lock_irq(&adapter->lock);
1900
1901 rval = megaraid_abort_and_reset(adapter, NULL, SCB_RESET);
1902
1903 /*
1904 * This is required here so that any completed requests
1905 * are communicated over to the mid-layer.
1906 */
1907 mega_rundoneq(adapter);
1908 spin_unlock_irq(&adapter->lock);
1909
1910 return rval;
1911 }
1912
1913 /**
1914 * megaraid_abort_and_reset()
1915 * @adapter: megaraid soft state
1916 * @cmd: scsi command to be aborted or reset
1917 * @aor: abort or reset flag
1918 *
1919 * Try to locate the scsi command in the pending queue. If it is found and has
1920 * not been issued to the controller, abort/reset it. Otherwise return failure.
1921 */
1922 static int
1923 megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
1924 {
1925 struct list_head *pos, *next;
1926 scb_t *scb;
1927
1928 if (aor == SCB_ABORT)
1929 dev_warn(&adapter->dev->dev,
1930 "ABORTING cmd=%x <c=%d t=%d l=%d>\n",
1931 cmd->cmnd[0], cmd->device->channel,
1932 cmd->device->id, (u32)cmd->device->lun);
1933 else
1934 dev_warn(&adapter->dev->dev, "RESETTING\n");
1935
1936 if(list_empty(&adapter->pending_list))
1937 return FAILED;
1938
1939 list_for_each_safe(pos, next, &adapter->pending_list) {
1940
1941 scb = list_entry(pos, scb_t, list);
1942
1943 if (!cmd || scb->cmd == cmd) { /* Found command */
1944
1945 scb->state |= aor;
1946
1947 /*
1948 * Check if this command has firmware ownership. If
1949 * yes, we cannot reset this command. Whenever f/w
1950 * completes this command, we will return appropriate
1951 * status from ISR.
1952 */
1953 if( scb->state & SCB_ISSUED ) {
1954
1955 dev_warn(&adapter->dev->dev,
1956 "%s[%x], fw owner\n",
1957 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1958 scb->idx);
1959
1960 return FAILED;
1961 }
1962 /*
1963 * Not yet issued! Remove from the pending
1964 * list
1965 */
1966 dev_warn(&adapter->dev->dev,
1967 "%s-[%x], driver owner\n",
1968 (cmd) ? "ABORTING":"RESET",
1969 scb->idx);
1970 mega_free_scb(adapter, scb);
1971
1972 if (cmd) {
1973 cmd->result = (DID_ABORT << 16);
1974 list_add_tail(SCSI_LIST(cmd),
1975 &adapter->completed_list);
1976 }
1977
1978 return SUCCESS;
1979 }
1980 }
1981
1982 return FAILED;
1983 }
1984
1985 static inline int
1986 make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
1987 {
1988 *pdev = pci_alloc_dev(NULL);
1989
1990 if( *pdev == NULL ) return -1;
1991
1992 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
1993
1994 if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
1995 kfree(*pdev);
1996 return -1;
1997 }
1998
1999 return 0;
2000 }
2001
2002 static inline void
2003 free_local_pdev(struct pci_dev *pdev)
2004 {
2005 kfree(pdev);
2006 }
2007
2008 /**
2009 * mega_allocate_inquiry()
2010 * @dma_handle: handle returned for dma address
2011 * @pdev: handle to pci device
2012 *
2013 * allocates memory for inquiry structure
2014 */
2015 static inline void *
2016 mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
2017 {
2018 return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
2019 dma_handle, GFP_KERNEL);
2020 }
2021
2022
2023 static inline void
2024 mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
2025 {
2026 dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
2027 dma_handle);
2028 }
2029
2030
2031 #ifdef CONFIG_PROC_FS
2032 /* Following code handles /proc fs */
2033
2034 /**
2035 * proc_show_config()
2036 * @m: Synthetic file construction data
2037 * @v: File iterator
2038 *
2039 * Display configuration information about the controller.
2040 */
2041 static int
2042 proc_show_config(struct seq_file *m, void *v)
2043 {
2044
2045 adapter_t *adapter = m->private;
2046
2047 seq_puts(m, MEGARAID_VERSION);
2048 if(adapter->product_info.product_name[0])
2049 seq_printf(m, "%s\n", adapter->product_info.product_name);
2050
2051 seq_puts(m, "Controller Type: ");
2052
2053 if( adapter->flag & BOARD_MEMMAP )
2054 seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
2055 else
2056 seq_puts(m, "418/428/434\n");
2057
2058 if(adapter->flag & BOARD_40LD)
2059 seq_puts(m, "Controller Supports 40 Logical Drives\n");
2060
2061 if(adapter->flag & BOARD_64BIT)
2062 seq_puts(m, "Controller capable of 64-bit memory addressing\n");
2063 if( adapter->has_64bit_addr )
2064 seq_puts(m, "Controller using 64-bit memory addressing\n");
2065 else
2066 seq_puts(m, "Controller is not using 64-bit memory addressing\n");
2067
2068 seq_printf(m, "Base = %08lx, Irq = %d, ",
2069 adapter->base, adapter->host->irq);
2070
2071 seq_printf(m, "Logical Drives = %d, Channels = %d\n",
2072 adapter->numldrv, adapter->product_info.nchannels);
2073
2074 seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
2075 adapter->fw_version, adapter->bios_version,
2076 adapter->product_info.dram_size);
2077
2078 seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
2079 adapter->product_info.max_commands, adapter->max_cmds);
2080
2081 seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
2082 seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
2083 seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
2084 seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
2085 seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
2086 seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
2087 seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
2088 seq_printf(m, "quiescent = %d\n",
2089 atomic_read(&adapter->quiescent));
2090 seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);
2091
2092 seq_puts(m, "\nModule Parameters:\n");
2093 seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
2094 seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
2095 return 0;
2096 }
2097
2098 /**
2099 * proc_show_stat()
2100 * @m: Synthetic file construction data
2101 * @v: File iterator
2102 *
2103 * Display statistical information about the I/O activity.
2104 */
2105 static int
2106 proc_show_stat(struct seq_file *m, void *v)
2107 {
2108 adapter_t *adapter = m->private;
2109 #if MEGA_HAVE_STATS
2110 int i;
2111 #endif
2112
2113 seq_puts(m, "Statistical Information for this controller\n");
2114 seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
2115 #if MEGA_HAVE_STATS
2116 for(i = 0; i < adapter->numldrv; i++) {
2117 seq_printf(m, "Logical Drive %d:\n", i);
2118 seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
2119 adapter->nreads[i], adapter->nwrites[i]);
2120 seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
2121 adapter->nreadblocks[i], adapter->nwriteblocks[i]);
2122 seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
2123 adapter->rd_errors[i], adapter->wr_errors[i]);
2124 }
2125 #else
2126 seq_puts(m, "IO and error counters not compiled in driver.\n");
2127 #endif
2128 return 0;
2129 }
2130
2131
2132 /**
2133 * proc_show_mbox()
2134 * @m: Synthetic file construction data
2135 * @v: File iterator
2136 *
2137 * Display mailbox information for the last command issued. This information
2138 * is good for debugging.
2139 */
2140 static int
proc_show_mbox(struct seq_file *m, void *v)
2142 {
2143 adapter_t *adapter = m->private;
2144 volatile mbox_t *mbox = adapter->mbox;
2145
2146 seq_puts(m, "Contents of Mail Box Structure\n");
2147 seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
2148 seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
2149 seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
2150 seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
2151 seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
2152 seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
2153 seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
2154 seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
2155 seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
2156 return 0;
2157 }
2158
2159
2160 /**
2161 * proc_show_rebuild_rate()
2162 * @m: Synthetic file construction data
2163 * @v: File iterator
2164 *
2165 * Display current rebuild rate
2166 */
2167 static int
proc_show_rebuild_rate(struct seq_file *m, void *v)
2169 {
2170 adapter_t *adapter = m->private;
2171 dma_addr_t dma_handle;
2172 caddr_t inquiry;
2173 struct pci_dev *pdev;
2174
2175 if( make_local_pdev(adapter, &pdev) != 0 )
2176 return 0;
2177
2178 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2179 goto free_pdev;
2180
2181 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2182 seq_puts(m, "Adapter inquiry failed.\n");
2183 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2184 goto free_inquiry;
2185 }
2186
2187 if( adapter->flag & BOARD_40LD )
2188 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2189 ((mega_inquiry3 *)inquiry)->rebuild_rate);
2190 else
2191 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2192 ((mraid_ext_inquiry *)
2193 inquiry)->raid_inq.adapter_info.rebuild_rate);
2194
2195 free_inquiry:
2196 mega_free_inquiry(inquiry, dma_handle, pdev);
2197 free_pdev:
2198 free_local_pdev(pdev);
2199 return 0;
2200 }
2201
2202
2203 /**
2204 * proc_show_battery()
2205 * @m: Synthetic file construction data
2206 * @v: File iterator
2207 *
2208 * Display information about the battery module on the controller.
2209 */
2210 static int
proc_show_battery(struct seq_file *m, void *v)
2212 {
2213 adapter_t *adapter = m->private;
2214 dma_addr_t dma_handle;
2215 caddr_t inquiry;
2216 struct pci_dev *pdev;
2217 u8 battery_status;
2218
2219 if( make_local_pdev(adapter, &pdev) != 0 )
2220 return 0;
2221
2222 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2223 goto free_pdev;
2224
2225 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2226 seq_puts(m, "Adapter inquiry failed.\n");
2227 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2228 goto free_inquiry;
2229 }
2230
2231 if( adapter->flag & BOARD_40LD ) {
2232 battery_status = ((mega_inquiry3 *)inquiry)->battery_status;
2233 }
2234 else {
2235 battery_status = ((mraid_ext_inquiry *)inquiry)->
2236 raid_inq.adapter_info.battery_status;
2237 }
2238
2239 /*
2240 * Decode the battery status
2241 */
2242 seq_printf(m, "Battery Status:[%d]", battery_status);
2243
2244 if(battery_status == MEGA_BATT_CHARGE_DONE)
2245 seq_puts(m, " Charge Done");
2246
2247 if(battery_status & MEGA_BATT_MODULE_MISSING)
2248 seq_puts(m, " Module Missing");
2249
2250 if(battery_status & MEGA_BATT_LOW_VOLTAGE)
2251 seq_puts(m, " Low Voltage");
2252
2253 if(battery_status & MEGA_BATT_TEMP_HIGH)
2254 seq_puts(m, " Temperature High");
2255
2256 if(battery_status & MEGA_BATT_PACK_MISSING)
2257 seq_puts(m, " Pack Missing");
2258
2259 if(battery_status & MEGA_BATT_CHARGE_INPROG)
2260 seq_puts(m, " Charge In-progress");
2261
2262 if(battery_status & MEGA_BATT_CHARGE_FAIL)
2263 seq_puts(m, " Charge Fail");
2264
2265 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED)
2266 seq_puts(m, " Cycles Exceeded");
2267
2268 seq_putc(m, '\n');
2269
2270 free_inquiry:
2271 mega_free_inquiry(inquiry, dma_handle, pdev);
2272 free_pdev:
2273 free_local_pdev(pdev);
2274 return 0;
2275 }
2276
2277
2278 /*
2279 * Display scsi inquiry
2280 */
2281 static void
mega_print_inquiry(struct seq_file *m, char *scsi_inq)
2283 {
2284 int i;
2285
2286 seq_puts(m, " Vendor: ");
2287 seq_write(m, scsi_inq + 8, 8);
2288 seq_puts(m, " Model: ");
2289 seq_write(m, scsi_inq + 16, 16);
2290 seq_puts(m, " Rev: ");
2291 seq_write(m, scsi_inq + 32, 4);
2292 seq_putc(m, '\n');
2293
2294 i = scsi_inq[0] & 0x1f;
2295 seq_printf(m, " Type: %s ", scsi_device_type(i));
2296
2297 seq_printf(m, " ANSI SCSI revision: %02x",
2298 scsi_inq[2] & 0x07);
2299
2300 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
2301 seq_puts(m, " CCS\n");
2302 else
2303 seq_putc(m, '\n');
2304 }
2305
2306 /**
2307 * proc_show_pdrv()
2308 * @m: Synthetic file construction data
2309 * @adapter: pointer to our soft state
2310 * @channel: channel
2311 *
2312 * Display information about the physical drives.
2313 */
2314 static int
proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
2316 {
2317 dma_addr_t dma_handle;
2318 char *scsi_inq;
2319 dma_addr_t scsi_inq_dma_handle;
2320 caddr_t inquiry;
2321 struct pci_dev *pdev;
2322 u8 *pdrv_state;
2323 u8 state;
2324 int tgt;
2325 int max_channels;
2326 int i;
2327
2328 if( make_local_pdev(adapter, &pdev) != 0 )
2329 return 0;
2330
2331 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2332 goto free_pdev;
2333
2334 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2335 seq_puts(m, "Adapter inquiry failed.\n");
2336 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2337 goto free_inquiry;
2338 }
2339
2340
2341 scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle,
2342 GFP_KERNEL);
2343 if( scsi_inq == NULL ) {
2344 seq_puts(m, "memory not available for scsi inq.\n");
2345 goto free_inquiry;
2346 }
2347
2348 if( adapter->flag & BOARD_40LD ) {
2349 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state;
2350 }
2351 else {
2352 pdrv_state = ((mraid_ext_inquiry *)inquiry)->
2353 raid_inq.pdrv_info.pdrv_state;
2354 }
2355
2356 max_channels = adapter->product_info.nchannels;
2357
2358 if( channel >= max_channels ) {
2359 goto free_pci;
2360 }
2361
2362 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) {
2363
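		/*
		 * The firmware reports physical drive state as a flat array
		 * with 16 target slots per channel, hence this indexing.
		 */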
2364 i = channel*16 + tgt;
2365
2366 state = *(pdrv_state + i);
2367 switch( state & 0x0F ) {
2368 case PDRV_ONLINE:
2369 seq_printf(m, "Channel:%2d Id:%2d State: Online",
2370 channel, tgt);
2371 break;
2372
2373 case PDRV_FAILED:
2374 seq_printf(m, "Channel:%2d Id:%2d State: Failed",
2375 channel, tgt);
2376 break;
2377
2378 case PDRV_RBLD:
2379 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild",
2380 channel, tgt);
2381 break;
2382
2383 case PDRV_HOTSPARE:
2384 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare",
2385 channel, tgt);
2386 break;
2387
2388 default:
2389 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured",
2390 channel, tgt);
2391 break;
2392 }
2393
		/*
		 * This interface displays inquiries for disk drives
		 * only. Inquiries for logical drives and non-disk
		 * devices are available through /proc/scsi/scsi
		 */
2399 memset(scsi_inq, 0, 256);
2400 if( mega_internal_dev_inquiry(adapter, channel, tgt,
2401 scsi_inq_dma_handle) ||
2402 (scsi_inq[0] & 0x1F) != TYPE_DISK ) {
2403 continue;
2404 }
2405
2406 /*
2407 * Check for overflow. We print less than 240
2408 * characters for inquiry
2409 */
2410 seq_puts(m, ".\n");
2411 mega_print_inquiry(m, scsi_inq);
2412 }
2413
2414 free_pci:
2415 dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle);
2416 free_inquiry:
2417 mega_free_inquiry(inquiry, dma_handle, pdev);
2418 free_pdev:
2419 free_local_pdev(pdev);
2420 return 0;
2421 }
2422
2423 /**
2424 * proc_show_pdrv_ch0()
2425 * @m: Synthetic file construction data
2426 * @v: File iterator
2427 *
2428 * Display information about the physical drives on physical channel 0.
2429 */
2430 static int
proc_show_pdrv_ch0(struct seq_file *m, void *v)
2432 {
2433 return proc_show_pdrv(m, m->private, 0);
2434 }
2435
2436
2437 /**
2438 * proc_show_pdrv_ch1()
2439 * @m: Synthetic file construction data
2440 * @v: File iterator
2441 *
2442 * Display information about the physical drives on physical channel 1.
2443 */
2444 static int
proc_show_pdrv_ch1(struct seq_file *m, void *v)
2446 {
2447 return proc_show_pdrv(m, m->private, 1);
2448 }
2449
2450
2451 /**
2452 * proc_show_pdrv_ch2()
2453 * @m: Synthetic file construction data
2454 * @v: File iterator
2455 *
2456 * Display information about the physical drives on physical channel 2.
2457 */
2458 static int
proc_show_pdrv_ch2(struct seq_file *m, void *v)
2460 {
2461 return proc_show_pdrv(m, m->private, 2);
2462 }
2463
2464
2465 /**
2466 * proc_show_pdrv_ch3()
2467 * @m: Synthetic file construction data
2468 * @v: File iterator
2469 *
2470 * Display information about the physical drives on physical channel 3.
2471 */
2472 static int
proc_show_pdrv_ch3(struct seq_file *m, void *v)
2474 {
2475 return proc_show_pdrv(m, m->private, 3);
2476 }
2477
2478
2479 /**
2480 * proc_show_rdrv()
2481 * @m: Synthetic file construction data
2482 * @adapter: pointer to our soft state
2483 * @start: starting logical drive to display
2484 * @end: ending logical drive to display
2485 *
 * We do not print the inquiry information since it's already available
 * through the /proc/scsi/scsi interface
2488 */
2489 static int
proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end)
2491 {
2492 dma_addr_t dma_handle;
2493 logdrv_param *lparam;
2494 megacmd_t mc;
2495 char *disk_array;
2496 dma_addr_t disk_array_dma_handle;
2497 caddr_t inquiry;
2498 struct pci_dev *pdev;
2499 u8 *rdrv_state;
2500 int num_ldrv;
2501 u32 array_sz;
2502 int i;
2503
2504 if( make_local_pdev(adapter, &pdev) != 0 )
2505 return 0;
2506
2507 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2508 goto free_pdev;
2509
2510 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2511 seq_puts(m, "Adapter inquiry failed.\n");
2512 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2513 goto free_inquiry;
2514 }
2515
2516 memset(&mc, 0, sizeof(megacmd_t));
2517
2518 if( adapter->flag & BOARD_40LD ) {
2519 array_sz = sizeof(disk_array_40ld);
2520
2521 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state;
2522
2523 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv;
2524 }
2525 else {
2526 array_sz = sizeof(disk_array_8ld);
2527
2528 rdrv_state = ((mraid_ext_inquiry *)inquiry)->
2529 raid_inq.logdrv_info.ldrv_state;
2530
2531 num_ldrv = ((mraid_ext_inquiry *)inquiry)->
2532 raid_inq.logdrv_info.num_ldrv;
2533 }
2534
2535 disk_array = dma_alloc_coherent(&pdev->dev, array_sz,
2536 &disk_array_dma_handle, GFP_KERNEL);
2537
2538 if( disk_array == NULL ) {
2539 seq_puts(m, "memory not available.\n");
2540 goto free_inquiry;
2541 }
2542
2543 mc.xferaddr = (u32)disk_array_dma_handle;
2544
2545 if( adapter->flag & BOARD_40LD ) {
2546 mc.cmd = FC_NEW_CONFIG;
2547 mc.opcode = OP_DCMD_READ_CONFIG;
2548
2549 if( mega_internal_command(adapter, &mc, NULL) ) {
2550 seq_puts(m, "40LD read config failed.\n");
2551 goto free_pci;
2552 }
2553
2554 }
2555 else {
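		/*
		 * Try the newer 8LD read-config opcode first and fall back
		 * to the original opcode if the firmware rejects it.
		 */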
2556 mc.cmd = NEW_READ_CONFIG_8LD;
2557
2558 if( mega_internal_command(adapter, &mc, NULL) ) {
2559 mc.cmd = READ_CONFIG_8LD;
2560 if( mega_internal_command(adapter, &mc, NULL) ) {
2561 seq_puts(m, "8LD read config failed.\n");
2562 goto free_pci;
2563 }
2564 }
2565 }
2566
2567 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) {
2568
2569 if( adapter->flag & BOARD_40LD ) {
2570 lparam =
2571 &((disk_array_40ld *)disk_array)->ldrv[i].lparam;
2572 }
2573 else {
2574 lparam =
2575 &((disk_array_8ld *)disk_array)->ldrv[i].lparam;
2576 }
2577
2578 /*
2579 * Check for overflow. We print less than 240 characters for
2580 * information about each logical drive.
2581 */
2582 seq_printf(m, "Logical drive:%2d:, ", i);
2583
2584 switch( rdrv_state[i] & 0x0F ) {
2585 case RDRV_OFFLINE:
2586 seq_puts(m, "state: offline");
2587 break;
2588 case RDRV_DEGRADED:
2589 seq_puts(m, "state: degraded");
2590 break;
2591 case RDRV_OPTIMAL:
2592 seq_puts(m, "state: optimal");
2593 break;
2594 case RDRV_DELETED:
2595 seq_puts(m, "state: deleted");
2596 break;
2597 default:
2598 seq_puts(m, "state: unknown");
2599 break;
2600 }
2601
2602 /*
2603 * Check if check consistency or initialization is going on
2604 * for this logical drive.
2605 */
2606 if( (rdrv_state[i] & 0xF0) == 0x20 )
2607 seq_puts(m, ", check-consistency in progress");
2608 else if( (rdrv_state[i] & 0xF0) == 0x10 )
2609 seq_puts(m, ", initialization in progress");
2610
2611 seq_putc(m, '\n');
2612
2613 seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
2614 seq_printf(m, "RAID level:%3d, ", lparam->level);
2615 seq_printf(m, "Stripe size:%3d, ",
2616 lparam->stripe_sz ? lparam->stripe_sz/2: 128);
2617 seq_printf(m, "Row size:%3d\n", lparam->row_size);
2618
2619 seq_puts(m, "Read Policy: ");
2620 switch(lparam->read_ahead) {
2621 case NO_READ_AHEAD:
2622 seq_puts(m, "No read ahead, ");
2623 break;
2624 case READ_AHEAD:
2625 seq_puts(m, "Read ahead, ");
2626 break;
2627 case ADAP_READ_AHEAD:
2628 seq_puts(m, "Adaptive, ");
2629 break;
2630
2631 }
2632
2633 seq_puts(m, "Write Policy: ");
2634 switch(lparam->write_mode) {
2635 case WRMODE_WRITE_THRU:
2636 seq_puts(m, "Write thru, ");
2637 break;
2638 case WRMODE_WRITE_BACK:
2639 seq_puts(m, "Write back, ");
2640 break;
2641 }
2642
2643 seq_puts(m, "Cache Policy: ");
2644 switch(lparam->direct_io) {
2645 case CACHED_IO:
2646 seq_puts(m, "Cached IO\n\n");
2647 break;
2648 case DIRECT_IO:
2649 seq_puts(m, "Direct IO\n\n");
2650 break;
2651 }
2652 }
2653
2654 free_pci:
2655 dma_free_coherent(&pdev->dev, array_sz, disk_array,
2656 disk_array_dma_handle);
2657 free_inquiry:
2658 mega_free_inquiry(inquiry, dma_handle, pdev);
2659 free_pdev:
2660 free_local_pdev(pdev);
2661 return 0;
2662 }
2663
2664 /**
2665 * proc_show_rdrv_10()
2666 * @m: Synthetic file construction data
2667 * @v: File iterator
2668 *
2669 * Display real time information about the logical drives 0 through 9.
2670 */
2671 static int
proc_show_rdrv_10(struct seq_file *m, void *v)
2673 {
2674 return proc_show_rdrv(m, m->private, 0, 9);
2675 }
2676
2677
2678 /**
2679 * proc_show_rdrv_20()
2680 * @m: Synthetic file construction data
2681 * @v: File iterator
2682 *
 * Display real time information about the logical drives 10 through 19.
2684 */
2685 static int
proc_show_rdrv_20(struct seq_file *m, void *v)
2687 {
2688 return proc_show_rdrv(m, m->private, 10, 19);
2689 }
2690
2691
2692 /**
2693 * proc_show_rdrv_30()
2694 * @m: Synthetic file construction data
2695 * @v: File iterator
2696 *
 * Display real time information about the logical drives 20 through 29.
2698 */
2699 static int
proc_show_rdrv_30(struct seq_file *m, void *v)
2701 {
2702 return proc_show_rdrv(m, m->private, 20, 29);
2703 }
2704
2705
2706 /**
2707 * proc_show_rdrv_40()
2708 * @m: Synthetic file construction data
2709 * @v: File iterator
2710 *
 * Display real time information about the logical drives 30 through 39.
2712 */
2713 static int
proc_show_rdrv_40(struct seq_file *m, void *v)
2715 {
2716 return proc_show_rdrv(m, m->private, 30, 39);
2717 }
2718
2719 /**
2720 * mega_create_proc_entry()
2721 * @index: index in soft state array
2722 * @parent: parent node for this /proc entry
2723 *
2724 * Creates /proc entries for our controllers.
2725 */
2726 static void
mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2728 {
2729 adapter_t *adapter = hba_soft_state[index];
2730 struct proc_dir_entry *dir;
2731 u8 string[16];
2732
2733 sprintf(string, "hba%d", adapter->host->host_no);
2734 dir = proc_mkdir_data(string, 0, parent, adapter);
2735 if (!dir) {
2736 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
2737 return;
2738 }
2739
2740 proc_create_single_data("config", S_IRUSR, dir,
2741 proc_show_config, adapter);
2742 proc_create_single_data("stat", S_IRUSR, dir,
2743 proc_show_stat, adapter);
2744 proc_create_single_data("mailbox", S_IRUSR, dir,
2745 proc_show_mbox, adapter);
2746 #if MEGA_HAVE_ENH_PROC
2747 proc_create_single_data("rebuild-rate", S_IRUSR, dir,
2748 proc_show_rebuild_rate, adapter);
2749 proc_create_single_data("battery-status", S_IRUSR, dir,
2750 proc_show_battery, adapter);
2751 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir,
2752 proc_show_pdrv_ch0, adapter);
2753 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir,
2754 proc_show_pdrv_ch1, adapter);
2755 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir,
2756 proc_show_pdrv_ch2, adapter);
2757 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir,
2758 proc_show_pdrv_ch3, adapter);
2759 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir,
2760 proc_show_rdrv_10, adapter);
2761 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir,
2762 proc_show_rdrv_20, adapter);
2763 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir,
2764 proc_show_rdrv_30, adapter);
2765 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir,
2766 proc_show_rdrv_40, adapter);
2767 #endif
2768 }
2769
2770 #else
static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2772 {
2773 }
2774 #endif
2775
2776
2777 /*
2778 * megaraid_biosparam()
2779 *
2780 * Return the disk geometry for a particular disk
2781 */
2782 static int
megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		sector_t capacity, int geom[])
2785 {
2786 adapter_t *adapter;
2787 int heads;
2788 int sectors;
2789 int cylinders;
2790
2791 /* Get pointer to host config structure */
2792 adapter = (adapter_t *)sdev->host->hostdata;
2793
2794 if (IS_RAID_CH(adapter, sdev->channel)) {
2795 /* Default heads (64) & sectors (32) */
2796 heads = 64;
2797 sectors = 32;
2798 cylinders = (ulong)capacity / (heads * sectors);
2799
2800 /*
2801 * Handle extended translation size for logical drives
2802 * > 1Gb
2803 */
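		/* 0x200000 sectors * 512 bytes = 1 GiB, the extended-translation threshold */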
2804 if ((ulong)capacity >= 0x200000) {
2805 heads = 255;
2806 sectors = 63;
2807 cylinders = (ulong)capacity / (heads * sectors);
2808 }
2809
2810 /* return result */
2811 geom[0] = heads;
2812 geom[1] = sectors;
2813 geom[2] = cylinders;
2814 }
2815 else {
2816 if (scsi_partsize(bdev, capacity, geom))
2817 return 0;
2818
2819 dev_info(&adapter->dev->dev,
2820 "invalid partition on this disk on channel %d\n",
2821 sdev->channel);
2822
2823 /* Default heads (64) & sectors (32) */
2824 heads = 64;
2825 sectors = 32;
2826 cylinders = (ulong)capacity / (heads * sectors);
2827
2828 /* Handle extended translation size for logical drives > 1Gb */
2829 if ((ulong)capacity >= 0x200000) {
2830 heads = 255;
2831 sectors = 63;
2832 cylinders = (ulong)capacity / (heads * sectors);
2833 }
2834
2835 /* return result */
2836 geom[0] = heads;
2837 geom[1] = sectors;
2838 geom[2] = cylinders;
2839 }
2840
2841 return 0;
2842 }
2843
2844 /**
2845 * mega_init_scb()
2846 * @adapter: pointer to our soft state
2847 *
2848 * Allocate memory for the various pointers in the scb structures:
2849 * scatter-gather list pointer, passthru and extended passthru structure
2850 * pointers.
2851 */
2852 static int
mega_init_scb(adapter_t *adapter)
2854 {
2855 scb_t *scb;
2856 int i;
2857
2858 for( i = 0; i < adapter->max_cmds; i++ ) {
2859
2860 scb = &adapter->scb_list[i];
2861
2862 scb->sgl64 = NULL;
2863 scb->sgl = NULL;
2864 scb->pthru = NULL;
2865 scb->epthru = NULL;
2866 }
2867
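	/*
	 * All pointers were cleared above so that mega_free_sgl() can be
	 * called safely if any of the allocations below fails.
	 */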
2868 for( i = 0; i < adapter->max_cmds; i++ ) {
2869
2870 scb = &adapter->scb_list[i];
2871
2872 scb->idx = i;
2873
2874 scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev,
2875 sizeof(mega_sgl64) * adapter->sglen,
2876 &scb->sgl_dma_addr, GFP_KERNEL);
2877
2878 scb->sgl = (mega_sglist *)scb->sgl64;
2879
2880 if( !scb->sgl ) {
2881 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
2882 mega_free_sgl(adapter);
2883 return -1;
2884 }
2885
2886 scb->pthru = dma_alloc_coherent(&adapter->dev->dev,
2887 sizeof(mega_passthru),
2888 &scb->pthru_dma_addr, GFP_KERNEL);
2889
2890 if( !scb->pthru ) {
2891 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
2892 mega_free_sgl(adapter);
2893 return -1;
2894 }
2895
2896 scb->epthru = dma_alloc_coherent(&adapter->dev->dev,
2897 sizeof(mega_ext_passthru),
2898 &scb->epthru_dma_addr, GFP_KERNEL);
2899
2900 if( !scb->epthru ) {
2901 dev_warn(&adapter->dev->dev,
2902 "Can't allocate extended passthru\n");
2903 mega_free_sgl(adapter);
2904 return -1;
2905 }
2906
2907
2908 scb->dma_type = MEGA_DMA_TYPE_NONE;
2909
2910 /*
2911 * Link to free list
2912 * lock not required since we are loading the driver, so no
2913 * commands possible right now.
2914 */
2915 scb->state = SCB_FREE;
2916 scb->cmd = NULL;
2917 list_add(&scb->list, &adapter->free_list);
2918 }
2919
2920 return 0;
2921 }
2922
2923
2924 /**
2925 * megadev_open()
2926 * @inode: unused
2927 * @filep: unused
2928 *
2929 * Routines for the character/ioctl interface to the driver. Find out if this
2930 * is a valid open.
2931 */
2932 static int
megadev_open (struct inode *inode, struct file *filep)
2934 {
2935 /*
2936 * Only allow superuser to access private ioctl interface
2937 */
2938 if( !capable(CAP_SYS_ADMIN) ) return -EACCES;
2939
2940 return 0;
2941 }
2942
2943
2944 /**
2945 * megadev_ioctl()
2946 * @filep: Our device file
2947 * @cmd: ioctl command
2948 * @arg: user buffer
2949 *
2950 * ioctl entry point for our private ioctl interface. We move the data in from
2951 * the user space, prepare the command (if necessary, convert the old MIMD
2952 * ioctl to new ioctl command), and issue a synchronous command to the
2953 * controller.
2954 */
2955 static int
megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2957 {
2958 adapter_t *adapter;
2959 nitioctl_t uioc;
2960 int adapno;
2961 int rval;
2962 mega_passthru __user *upthru; /* user address for passthru */
2963 mega_passthru *pthru; /* copy user passthru here */
2964 dma_addr_t pthru_dma_hndl;
2965 void *data = NULL; /* data to be transferred */
2966 dma_addr_t data_dma_hndl; /* dma handle for data xfer area */
2967 megacmd_t mc;
2968 #if MEGA_HAVE_STATS
2969 megastat_t __user *ustats = NULL;
2970 int num_ldrv = 0;
2971 #endif
2972 u32 uxferaddr = 0;
2973 struct pci_dev *pdev;
2974
2975 /*
2976 * Make sure only USCSICMD are issued through this interface.
	 * A MIMD application could still fire a different command.
2978 */
2979 if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
2980 return -EINVAL;
2981 }
2982
2983 /*
2984 * Check and convert a possible MIMD command to NIT command.
2985 * mega_m_to_n() copies the data from the user space, so we do not
2986 * have to do it here.
2987 * NOTE: We will need some user address to copyout the data, therefore
	 * the interface layer will also provide us with the required user
2989 * addresses.
2990 */
2991 memset(&uioc, 0, sizeof(nitioctl_t));
2992 if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
2993 return rval;
2994
2995
2996 switch( uioc.opcode ) {
2997
2998 case GET_DRIVER_VER:
2999 if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
3000 return (-EFAULT);
3001
3002 break;
3003
3004 case GET_N_ADAP:
3005 if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
3006 return (-EFAULT);
3007
3008 /*
3009 * Shucks. MIMD interface returns a positive value for number
3010 * of adapters. TODO: Change it to return 0 when there is no
		 * application using the mimd interface.
3012 */
3013 return hba_count;
3014
3015 case GET_ADAP_INFO:
3016
3017 /*
3018 * Which adapter
3019 */
3020 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3021 return (-ENODEV);
3022
3023 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno,
3024 sizeof(struct mcontroller)) )
3025 return (-EFAULT);
3026 break;
3027
3028 #if MEGA_HAVE_STATS
3029
3030 case GET_STATS:
3031 /*
3032 * Which adapter
3033 */
3034 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3035 return (-ENODEV);
3036
3037 adapter = hba_soft_state[adapno];
3038
3039 ustats = uioc.uioc_uaddr;
3040
3041 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) )
3042 return (-EFAULT);
3043
3044 /*
3045 * Check for the validity of the logical drive number
3046 */
3047 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL;
3048
3049 if( copy_to_user(ustats->nreads, adapter->nreads,
3050 num_ldrv*sizeof(u32)) )
3051 return -EFAULT;
3052
3053 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks,
3054 num_ldrv*sizeof(u32)) )
3055 return -EFAULT;
3056
3057 if( copy_to_user(ustats->nwrites, adapter->nwrites,
3058 num_ldrv*sizeof(u32)) )
3059 return -EFAULT;
3060
3061 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks,
3062 num_ldrv*sizeof(u32)) )
3063 return -EFAULT;
3064
3065 if( copy_to_user(ustats->rd_errors, adapter->rd_errors,
3066 num_ldrv*sizeof(u32)) )
3067 return -EFAULT;
3068
3069 if( copy_to_user(ustats->wr_errors, adapter->wr_errors,
3070 num_ldrv*sizeof(u32)) )
3071 return -EFAULT;
3072
3073 return 0;
3074
3075 #endif
3076 case MBOX_CMD:
3077
3078 /*
3079 * Which adapter
3080 */
3081 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3082 return (-ENODEV);
3083
3084 adapter = hba_soft_state[adapno];
3085
3086 /*
3087 * Deletion of logical drive is a special case. The adapter
3088 * should be quiescent before this command is issued.
3089 */
3090 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV &&
3091 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) {
3092
3093 /*
3094 * Do we support this feature
3095 */
3096 if( !adapter->support_random_del ) {
3097 dev_warn(&adapter->dev->dev, "logdrv "
3098 "delete on non-supporting F/W\n");
3099
3100 return (-EINVAL);
3101 }
3102
3103 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] );
3104
3105 if( rval == 0 ) {
3106 memset(&mc, 0, sizeof(megacmd_t));
3107
3108 mc.status = rval;
3109
3110 rval = mega_n_to_m((void __user *)arg, &mc);
3111 }
3112
3113 return rval;
3114 }
3115 /*
		 * This interface only supports the regular passthru commands.
3117 * Reject extended passthru and 64-bit passthru
3118 */
3119 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
3120 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
3121
3122 dev_warn(&adapter->dev->dev, "rejected passthru\n");
3123
3124 return (-EINVAL);
3125 }
3126
3127 /*
3128 * For all internal commands, the buffer must be allocated in
3129 * <4GB address range
3130 */
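		/* the mailbox xferaddr field is only 32 bits wide */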
3131 if( make_local_pdev(adapter, &pdev) != 0 )
3132 return -EIO;
3133
3134 /* Is it a passthru command or a DCMD */
3135 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
3136 /* Passthru commands */
3137
3138 pthru = dma_alloc_coherent(&pdev->dev,
3139 sizeof(mega_passthru),
3140 &pthru_dma_hndl, GFP_KERNEL);
3141
3142 if( pthru == NULL ) {
3143 free_local_pdev(pdev);
3144 return (-ENOMEM);
3145 }
3146
3147 /*
3148 * The user passthru structure
3149 */
3150 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
3151
3152 /*
3153 * Copy in the user passthru here.
3154 */
3155 if( copy_from_user(pthru, upthru,
3156 sizeof(mega_passthru)) ) {
3157
3158 dma_free_coherent(&pdev->dev,
3159 sizeof(mega_passthru),
3160 pthru, pthru_dma_hndl);
3161
3162 free_local_pdev(pdev);
3163
3164 return (-EFAULT);
3165 }
3166
3167 /*
3168 * Is there a data transfer
3169 */
3170 if( pthru->dataxferlen ) {
3171 data = dma_alloc_coherent(&pdev->dev,
3172 pthru->dataxferlen,
3173 &data_dma_hndl,
3174 GFP_KERNEL);
3175
3176 if( data == NULL ) {
3177 dma_free_coherent(&pdev->dev,
3178 sizeof(mega_passthru),
3179 pthru,
3180 pthru_dma_hndl);
3181
3182 free_local_pdev(pdev);
3183
3184 return (-ENOMEM);
3185 }
3186
3187 /*
3188 * Save the user address and point the kernel
3189 * address at just allocated memory
3190 */
3191 uxferaddr = pthru->dataxferaddr;
3192 pthru->dataxferaddr = data_dma_hndl;
3193 }
3194
3195
3196 /*
3197 * Is data coming down-stream
3198 */
3199 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) {
3200 /*
3201 * Get the user data
3202 */
3203 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3204 pthru->dataxferlen) ) {
3205 rval = (-EFAULT);
3206 goto freemem_and_return;
3207 }
3208 }
3209
3210 memset(&mc, 0, sizeof(megacmd_t));
3211
3212 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
3213 mc.xferaddr = (u32)pthru_dma_hndl;
3214
3215 /*
3216 * Issue the command
3217 */
3218 mega_internal_command(adapter, &mc, pthru);
3219
3220 rval = mega_n_to_m((void __user *)arg, &mc);
3221
3222 if( rval ) goto freemem_and_return;
3223
3224
3225 /*
3226 * Is data going up-stream
3227 */
3228 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
3229 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3230 pthru->dataxferlen) ) {
3231 rval = (-EFAULT);
3232 }
3233 }
3234
3235 /*
3236 * Send the request sense data also, irrespective of
3237 * whether the user has asked for it or not.
3238 */
3239 if (copy_to_user(upthru->reqsensearea,
3240 pthru->reqsensearea, 14))
3241 rval = -EFAULT;
3242
3243 freemem_and_return:
3244 if( pthru->dataxferlen ) {
3245 dma_free_coherent(&pdev->dev,
3246 pthru->dataxferlen, data,
3247 data_dma_hndl);
3248 }
3249
3250 dma_free_coherent(&pdev->dev, sizeof(mega_passthru),
3251 pthru, pthru_dma_hndl);
3252
3253 free_local_pdev(pdev);
3254
3255 return rval;
3256 }
3257 else {
3258 /* DCMD commands */
3259
3260 /*
3261 * Is there a data transfer
3262 */
3263 if( uioc.xferlen ) {
3264 data = dma_alloc_coherent(&pdev->dev,
3265 uioc.xferlen,
3266 &data_dma_hndl,
3267 GFP_KERNEL);
3268
3269 if( data == NULL ) {
3270 free_local_pdev(pdev);
3271 return (-ENOMEM);
3272 }
3273
3274 uxferaddr = MBOX(uioc)->xferaddr;
3275 }
3276
3277 /*
3278 * Is data coming down-stream
3279 */
3280 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) {
3281 /*
3282 * Get the user data
3283 */
3284 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3285 uioc.xferlen) ) {
3286
3287 dma_free_coherent(&pdev->dev,
3288 uioc.xferlen, data,
3289 data_dma_hndl);
3290
3291 free_local_pdev(pdev);
3292
3293 return (-EFAULT);
3294 }
3295 }
3296
3297 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t));
3298
3299 mc.xferaddr = (u32)data_dma_hndl;
3300
3301 /*
3302 * Issue the command
3303 */
3304 mega_internal_command(adapter, &mc, NULL);
3305
3306 rval = mega_n_to_m((void __user *)arg, &mc);
3307
3308 if( rval ) {
3309 if( uioc.xferlen ) {
3310 dma_free_coherent(&pdev->dev,
3311 uioc.xferlen, data,
3312 data_dma_hndl);
3313 }
3314
3315 free_local_pdev(pdev);
3316
3317 return rval;
3318 }
3319
3320 /*
3321 * Is data going up-stream
3322 */
3323 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
3324 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3325 uioc.xferlen) ) {
3326
3327 rval = (-EFAULT);
3328 }
3329 }
3330
3331 if( uioc.xferlen ) {
3332 dma_free_coherent(&pdev->dev, uioc.xferlen,
3333 data, data_dma_hndl);
3334 }
3335
3336 free_local_pdev(pdev);
3337
3338 return rval;
3339 }
3340
3341 default:
3342 return (-EINVAL);
3343 }
3344
3345 return 0;
3346 }
3347
3348 static long
megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3350 {
3351 int ret;
3352
3353 mutex_lock(&megadev_mutex);
3354 ret = megadev_ioctl(filep, cmd, arg);
3355 mutex_unlock(&megadev_mutex);
3356
3357 return ret;
3358 }
3359
3360 /**
3361 * mega_m_to_n()
3362 * @arg: user address
3363 * @uioc: new ioctl structure
3364 *
 * A thin layer that converts the older MIMD interface ioctl structure to the
 * newer NIT ioctl structure
3369 */
3370 static int
mega_m_to_n(void __user *arg, nitioctl_t *uioc)
3372 {
3373 struct uioctl_t uioc_mimd;
3374 char signature[8] = {0};
3375 u8 opcode;
3376 u8 subopcode;
3377
3378
3379 /*
	 * check if the application conforms to NIT. We do not have to do much
3381 * in that case.
3382 * We exploit the fact that the signature is stored in the very
3383 * beginning of the structure.
3384 */
3385
3386 if( copy_from_user(signature, arg, 7) )
3387 return (-EFAULT);
3388
3389 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3390
3391 /*
3392 * NOTE NOTE: The nit ioctl is still under flux because of
3393 * change of mailbox definition, in HPE. No applications yet
3394 * use this interface and let's not have applications use this
		 * interface till the new specifications are in place.
3396 */
3397 return -EINVAL;
3398 #if 0
3399 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) )
3400 return (-EFAULT);
3401 return 0;
3402 #endif
3403 }
3404
3405 /*
3406 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t
3407 *
3408 * Get the user ioctl structure
3409 */
3410 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) )
3411 return (-EFAULT);
3412
3413
3414 /*
3415 * Get the opcode and subopcode for the commands
3416 */
3417 opcode = uioc_mimd.ui.fcs.opcode;
3418 subopcode = uioc_mimd.ui.fcs.subopcode;
3419
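	/*
	 * Opcode 0x82 covers driver/adapter queries; opcodes 0x81 and 0x80
	 * are mailbox commands, differing only in how the transfer length
	 * is derived (fcs.length vs. the larger of outlen/inlen).
	 */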
3420 switch (opcode) {
3421 case 0x82:
3422
3423 switch (subopcode) {
3424
3425 case MEGAIOC_QDRVRVER: /* Query driver version */
3426 uioc->opcode = GET_DRIVER_VER;
3427 uioc->uioc_uaddr = uioc_mimd.data;
3428 break;
3429
3430 case MEGAIOC_QNADAP: /* Get # of adapters */
3431 uioc->opcode = GET_N_ADAP;
3432 uioc->uioc_uaddr = uioc_mimd.data;
3433 break;
3434
3435 case MEGAIOC_QADAPINFO: /* Get adapter information */
3436 uioc->opcode = GET_ADAP_INFO;
3437 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3438 uioc->uioc_uaddr = uioc_mimd.data;
3439 break;
3440
3441 default:
3442 return(-EINVAL);
3443 }
3444
3445 break;
3446
3447
3448 case 0x81:
3449
3450 uioc->opcode = MBOX_CMD;
3451 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3452
3453 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3454
3455 uioc->xferlen = uioc_mimd.ui.fcs.length;
3456
3457 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3458 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3459
3460 break;
3461
3462 case 0x80:
3463
3464 uioc->opcode = MBOX_CMD;
3465 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3466
3467 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3468
3469 /*
3470 * Choose the xferlen bigger of input and output data
3471 */
3472 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ?
3473 uioc_mimd.outlen : uioc_mimd.inlen;
3474
3475 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3476 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3477
3478 break;
3479
3480 default:
3481 return (-EINVAL);
3482
3483 }
3484
3485 return 0;
3486 }
3487
3488 /*
3489 * mega_n_to_m()
3490 * @arg: user address
3491 * @mc: mailbox command
3492 *
3493 * Updates the status information to the application, depending on application
3494 * conforms to older mimd ioctl interface or newer NIT ioctl interface
3495 */
3496 static int
mega_n_to_m(void __user *arg, megacmd_t *mc)
3498 {
3499 nitioctl_t __user *uiocp;
3500 megacmd_t __user *umc;
3501 mega_passthru __user *upthru;
3502 struct uioctl_t __user *uioc_mimd;
3503 char signature[8] = {0};
3504
3505 /*
	 * check if the application conforms to NIT.
3507 */
3508 if( copy_from_user(signature, arg, 7) )
3509 return -EFAULT;
3510
3511 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3512
3513 uiocp = arg;
3514
3515 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) )
3516 return (-EFAULT);
3517
3518 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3519
3520 umc = MBOX_P(uiocp);
3521
3522 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3523 return -EFAULT;
3524
3525 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus))
3526 return (-EFAULT);
3527 }
3528 }
3529 else {
3530 uioc_mimd = arg;
3531
3532 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) )
3533 return (-EFAULT);
3534
3535 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3536
3537 umc = (megacmd_t __user *)uioc_mimd->mbox;
3538
3539 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3540 return (-EFAULT);
3541
3542 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) )
3543 return (-EFAULT);
3544 }
3545 }
3546
3547 return 0;
3548 }
3549
3550
3551 /*
3552 * MEGARAID 'FW' commands.
3553 */
3554
3555 /**
3556 * mega_is_bios_enabled()
3557 * @adapter: pointer to our soft state
3558 *
3559 * issue command to find out if the BIOS is enabled for this controller
3560 */
3561 static int
mega_is_bios_enabled(adapter_t *adapter)
3563 {
3564 struct mbox_out mbox;
3565 unsigned char *raw_mbox = (u8 *)&mbox;
3566
3567 memset(&mbox, 0, sizeof(mbox));
3568
3569 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3570
3571 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3572
3573 raw_mbox[0] = IS_BIOS_ENABLED;
3574 raw_mbox[2] = GET_BIOS;
3575
3576 issue_scb_block(adapter, raw_mbox);
3577
3578 return *(char *)adapter->mega_buffer;
3579 }
3580
3581
3582 /**
3583 * mega_enum_raid_scsi()
3584 * @adapter: pointer to our soft state
3585 *
3586 * Find out what channels are RAID/SCSI. This information is used to
3587 * differentiate the virtual channels and physical channels and to support
3588 * ROMB feature and non-disk devices.
3589 */
3590 static void
mega_enum_raid_scsi(adapter_t *adapter)
3592 {
3593 struct mbox_out mbox;
3594 unsigned char *raw_mbox = (u8 *)&mbox;
3595 int i;
3596
3597 memset(&mbox, 0, sizeof(mbox));
3598
3599 /*
3600 * issue command to find out what channels are raid/scsi
3601 */
3602 raw_mbox[0] = CHNL_CLASS;
3603 raw_mbox[2] = GET_CHNL_CLASS;
3604
3605 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3606
3607 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3608
3609 /*
	 * Non-ROMB firmware fails this command, so all channels
	 * must be shown as RAID
3612 */
3613 adapter->mega_ch_class = 0xFF;
3614
3615 if(!issue_scb_block(adapter, raw_mbox)) {
3616 adapter->mega_ch_class = *((char *)adapter->mega_buffer);
3617
3618 }
3619
3620 for( i = 0; i < adapter->product_info.nchannels; i++ ) {
3621 if( (adapter->mega_ch_class >> i) & 0x01 ) {
3622 dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
3623 i);
3624 }
3625 else {
3626 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
3627 i);
3628 }
3629 }
3630
3631 return;
3632 }
3633
3634
3635 /**
3636 * mega_get_boot_drv()
3637 * @adapter: pointer to our soft state
3638 *
3639 * Find out which device is the boot device. Note, any logical drive or any
 * physical device (e.g., a CDROM) can be designated as the boot device.
3641 */
3642 static void
mega_get_boot_drv(adapter_t *adapter)
3644 {
3645 struct private_bios_data *prv_bios_data;
3646 struct mbox_out mbox;
3647 unsigned char *raw_mbox = (u8 *)&mbox;
3648 u16 cksum = 0;
3649 u8 *cksum_p;
3650 u8 boot_pdrv;
3651 int i;
3652
3653 memset(&mbox, 0, sizeof(mbox));
3654
3655 raw_mbox[0] = BIOS_PVT_DATA;
3656 raw_mbox[2] = GET_BIOS_PVT_DATA;
3657
3658 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3659
3660 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3661
3662 adapter->boot_ldrv_enabled = 0;
3663 adapter->boot_ldrv = 0;
3664
3665 adapter->boot_pdrv_enabled = 0;
3666 adapter->boot_pdrv_ch = 0;
3667 adapter->boot_pdrv_tgt = 0;
3668
3669 if(issue_scb_block(adapter, raw_mbox) == 0) {
3670 prv_bios_data =
3671 (struct private_bios_data *)adapter->mega_buffer;
3672
3673 cksum = 0;
3674 cksum_p = (char *)prv_bios_data;
3675 for (i = 0; i < 14; i++ ) {
3676 cksum += (u16)(*cksum_p++);
3677 }
3678
3679 if (prv_bios_data->cksum == (u16)(0-cksum) ) {
3680
3681 /*
3682 * If MSB is set, a physical drive is set as boot
3683 * device
3684 */
3685 if( prv_bios_data->boot_drv & 0x80 ) {
3686 adapter->boot_pdrv_enabled = 1;
3687 boot_pdrv = prv_bios_data->boot_drv & 0x7F;
3688 adapter->boot_pdrv_ch = boot_pdrv / 16;
3689 adapter->boot_pdrv_tgt = boot_pdrv % 16;
3690 }
3691 else {
3692 adapter->boot_ldrv_enabled = 1;
3693 adapter->boot_ldrv = prv_bios_data->boot_drv;
3694 }
3695 }
3696 }
3697
3698 }
3699
3700 /**
3701 * mega_support_random_del()
3702 * @adapter: pointer to our soft state
3703 *
3704 * Find out if this controller supports random deletion and addition of
3705 * logical drives
3706 */
3707 static int
mega_support_random_del(adapter_t *adapter)
3709 {
3710 struct mbox_out mbox;
3711 unsigned char *raw_mbox = (u8 *)&mbox;
3712 int rval;
3713
3714 memset(&mbox, 0, sizeof(mbox));
3715
3716 /*
3717 * issue command
3718 */
3719 raw_mbox[0] = FC_DEL_LOGDRV;
3720 raw_mbox[2] = OP_SUP_DEL_LOGDRV;
3721
3722 rval = issue_scb_block(adapter, raw_mbox);
3723
3724 return !rval;
3725 }
3726
3727
3728 /**
3729 * mega_support_ext_cdb()
3730 * @adapter: pointer to our soft state
3731 *
 * Find out if this firmware supports CDBs longer than 10 bytes
3733 */
3734 static int
mega_support_ext_cdb(adapter_t *adapter)
3736 {
3737 struct mbox_out mbox;
3738 unsigned char *raw_mbox = (u8 *)&mbox;
3739 int rval;
3740
3741 memset(&mbox, 0, sizeof(mbox));
3742 /*
3743 * issue command to find out if controller supports extended CDBs.
3744 */
3745 raw_mbox[0] = 0xA4;
3746 raw_mbox[2] = 0x16;
3747
3748 rval = issue_scb_block(adapter, raw_mbox);
3749
3750 return !rval;
3751 }
3752
3753
3754 /**
3755 * mega_del_logdrv()
3756 * @adapter: pointer to our soft state
3757 * @logdrv: logical drive to be deleted
3758 *
3759 * Delete the specified logical drive. It is the responsibility of the user
3760 * app to let the OS know about this operation.
3761 */
3762 static int
mega_del_logdrv(adapter_t *adapter, int logdrv)
3764 {
3765 unsigned long flags;
3766 scb_t *scb;
3767 int rval;
3768
3769 /*
3770 * Stop sending commands to the controller, queue them internally.
3771 * When deletion is complete, ISR will flush the queue.
3772 */
3773 atomic_set(&adapter->quiescent, 1);
3774
3775 /*
3776 * Wait till all the issued commands are complete and there are no
3777 * commands in the pending queue
3778 */
3779 while (atomic_read(&adapter->pend_cmds) > 0 ||
3780 !list_empty(&adapter->pending_list))
3781 msleep(1000); /* sleep for 1s */
3782
3783 rval = mega_do_del_logdrv(adapter, logdrv);
3784
3785 spin_lock_irqsave(&adapter->lock, flags);
3786
3787 /*
3788 * If delete operation was successful, add 0x80 to the logical drive
3789 * ids for commands in the pending queue.
3790 */
3791 if (adapter->read_ldidmap) {
3792 struct list_head *pos;
3793 list_for_each(pos, &adapter->pending_list) {
3794 scb = list_entry(pos, scb_t, list);
3795 if (scb->pthru->logdrv < 0x80 )
3796 scb->pthru->logdrv += 0x80;
3797 }
3798 }
3799
3800 atomic_set(&adapter->quiescent, 0);
3801
3802 mega_runpendq(adapter);
3803
3804 spin_unlock_irqrestore(&adapter->lock, flags);
3805
3806 return rval;
3807 }
3808
3809
3810 static int
mega_do_del_logdrv(adapter_t *adapter, int logdrv)
3812 {
3813 megacmd_t mc;
3814 int rval;
3815
3816 memset( &mc, 0, sizeof(megacmd_t));
3817
3818 mc.cmd = FC_DEL_LOGDRV;
3819 mc.opcode = OP_DEL_LOGDRV;
3820 mc.subopcode = logdrv;
3821
3822 rval = mega_internal_command(adapter, &mc, NULL);
3823
3824 /* log this event */
3825 if(rval) {
3826 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv);
3827 return rval;
3828 }
3829
3830 /*
3831 * After deleting first logical drive, the logical drives must be
3832 * addressed by adding 0x80 to the logical drive id.
3833 */
3834 adapter->read_ldidmap = 1;
3835
3836 return rval;
3837 }
3838
3839
3840 /**
3841 * mega_get_max_sgl()
3842 * @adapter: pointer to our soft state
3843 *
3844 * Find out the maximum number of scatter-gather elements supported by this
3845 * version of the firmware
3846 */
3847 static void
mega_get_max_sgl(adapter_t *adapter)
3849 {
3850 struct mbox_out mbox;
3851 unsigned char *raw_mbox = (u8 *)&mbox;
3852
3853 memset(&mbox, 0, sizeof(mbox));
3854
3855 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3856
3857 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3858
3859 raw_mbox[0] = MAIN_MISC_OPCODE;
3860 raw_mbox[2] = GET_MAX_SG_SUPPORT;
3861
3862
3863 if( issue_scb_block(adapter, raw_mbox) ) {
3864 /*
3865 * f/w does not support this command. Choose the default value
3866 */
3867 adapter->sglen = MIN_SGLIST;
3868 }
3869 else {
3870 adapter->sglen = *((char *)adapter->mega_buffer);
3871
3872 /*
3873 * Make sure this is not more than the resources we are
3874 * planning to allocate
3875 */
3876 if ( adapter->sglen > MAX_SGLIST )
3877 adapter->sglen = MAX_SGLIST;
3878 }
3879
3880 return;
3881 }
3882
3883
3884 /**
3885 * mega_support_cluster()
3886 * @adapter: pointer to our soft state
3887 *
 * Find out if this firmware supports cluster calls.
3889 */
3890 static int
mega_support_cluster(adapter_t *adapter)
3892 {
3893 struct mbox_out mbox;
3894 unsigned char *raw_mbox = (u8 *)&mbox;
3895
3896 memset(&mbox, 0, sizeof(mbox));
3897
3898 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3899
3900 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3901
3902 /*
3903 * Try to get the initiator id. This command will succeed iff the
3904 * clustering is available on this HBA.
3905 */
3906 raw_mbox[0] = MEGA_GET_TARGET_ID;
3907
3908 if( issue_scb_block(adapter, raw_mbox) == 0 ) {
3909
3910 /*
3911 * Cluster support available. Get the initiator target id.
3912 * Tell our id to mid-layer too.
3913 */
3914 adapter->this_id = *(u32 *)adapter->mega_buffer;
3915 adapter->host->this_id = adapter->this_id;
3916
3917 return 1;
3918 }
3919
3920 return 0;
3921 }
3922
3923 #ifdef CONFIG_PROC_FS
3924 /**
3925 * mega_adapinq()
3926 * @adapter: pointer to our soft state
3927 * @dma_handle: DMA address of the buffer
3928 *
3929 * Issue internal commands while interrupts are available.
 * We only issue direct mailbox commands from within the driver; the ioctl()
 * interface, which uses these routines, can also issue passthru commands.
3932 */
3933 static int
mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle)
3935 {
3936 megacmd_t mc;
3937
3938 memset(&mc, 0, sizeof(megacmd_t));
3939
3940 if( adapter->flag & BOARD_40LD ) {
3941 mc.cmd = FC_NEW_CONFIG;
3942 mc.opcode = NC_SUBOP_ENQUIRY3;
3943 mc.subopcode = ENQ3_GET_SOLICITED_FULL;
3944 }
3945 else {
3946 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ;
3947 }
3948
3949 mc.xferaddr = (u32)dma_handle;
3950
3951 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) {
3952 return -1;
3953 }
3954
3955 return 0;
3956 }
3957
3958
3959 /**
3960 * mega_internal_dev_inquiry()
3961 * @adapter: pointer to our soft state
3962 * @ch: channel for this device
3963 * @tgt: ID of this device
3964 * @buf_dma_handle: DMA address of the buffer
3965 *
3966 * Issue the scsi inquiry for the specified device.
3967 */
3968 static int
mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
			  dma_addr_t buf_dma_handle)
3971 {
3972 mega_passthru *pthru;
3973 dma_addr_t pthru_dma_handle;
3974 megacmd_t mc;
3975 int rval;
3976 struct pci_dev *pdev;
3977
3978
3979 /*
3980 * For all internal commands, the buffer must be allocated in <4GB
3981 * address range
3982 */
3983 if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
3984
3985 pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru),
3986 &pthru_dma_handle, GFP_KERNEL);
3987
3988 if( pthru == NULL ) {
3989 free_local_pdev(pdev);
3990 return -1;
3991 }
3992
3993 pthru->timeout = 2;
3994 pthru->ars = 1;
3995 pthru->reqsenselen = 14;
3996 pthru->islogical = 0;
3997
3998 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch;
3999
4000 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt;
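	/* on 40LD firmware the channel is packed into the upper nibble of target */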
4001
4002 pthru->cdblen = 6;
4003
4004 pthru->cdb[0] = INQUIRY;
4005 pthru->cdb[1] = 0;
4006 pthru->cdb[2] = 0;
4007 pthru->cdb[3] = 0;
4008 pthru->cdb[4] = 255;
4009 pthru->cdb[5] = 0;
4010
4011
4012 pthru->dataxferaddr = (u32)buf_dma_handle;
4013 pthru->dataxferlen = 256;
4014
4015 memset(&mc, 0, sizeof(megacmd_t));
4016
4017 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
4018 mc.xferaddr = (u32)pthru_dma_handle;
4019
4020 rval = mega_internal_command(adapter, &mc, pthru);
4021
4022 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru,
4023 pthru_dma_handle);
4024
4025 free_local_pdev(pdev);
4026
4027 return rval;
4028 }
4029 #endif
4030
4031 /**
4032 * mega_internal_command()
4033 * @adapter: pointer to our soft state
4034 * @mc: the mailbox command
4035 * @pthru: Passthru structure for DCDB commands
4036 *
4037 * Issue the internal commands in interrupt mode.
4038 * The last argument is the address of the passthru structure if the command
4039 * to be fired is a passthru command
4040 *
4041 * Note: parameter 'pthru' is null for non-passthru commands.
4042 */
4043 static int
mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4045 {
4046 unsigned long flags;
4047 scb_t *scb;
4048 int rval;
4049
4050 /*
4051 * The internal commands share one command id and hence are
	 * serialized. This is because we want to reserve the maximum number of
4053 * available command ids for the I/O commands.
4054 */
4055 mutex_lock(&adapter->int_mtx);
4056
4057 scb = &adapter->int_scb;
4058 memset(scb, 0, sizeof(scb_t));
4059
4060 scb->idx = CMDID_INT_CMDS;
4061 scb->state |= SCB_ACTIVE | SCB_PENDQ;
4062
4063 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
4064
4065 /*
4066 * Is it a passthru command
4067 */
4068 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
4069 scb->pthru = pthru;
4070
4071 spin_lock_irqsave(&adapter->lock, flags);
4072 list_add_tail(&scb->list, &adapter->pending_list);
4073 /*
4074 * Check if the HBA is in quiescent state, e.g., during a
	 * delete logical drive operation. If it is, don't run
4076 * the pending_list.
4077 */
4078 if (atomic_read(&adapter->quiescent) == 0)
4079 mega_runpendq(adapter);
4080 spin_unlock_irqrestore(&adapter->lock, flags);
4081
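	/*
	 * The command completion path is expected to store the firmware
	 * status in adapter->int_status and signal int_waitq for
	 * CMDID_INT_CMDS; we simply sleep until that happens.
	 */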
4082 wait_for_completion(&adapter->int_waitq);
4083
4084 mc->status = rval = adapter->int_status;
4085
4086 /*
4087 * Print a debug message for all failed commands. Applications can use
4088 * this information.
4089 */
4090 if (rval && trace_level) {
4091 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
4092 mc->cmd, mc->opcode, mc->subopcode, rval);
4093 }
4094
4095 mutex_unlock(&adapter->int_mtx);
4096 return rval;
4097 }
4098
4099 static const struct scsi_host_template megaraid_template = {
4100 .module = THIS_MODULE,
4101 .name = "MegaRAID",
4102 .proc_name = "megaraid_legacy",
4103 .info = megaraid_info,
4104 .queuecommand = megaraid_queue,
4105 .bios_param = megaraid_biosparam,
4106 .max_sectors = MAX_SECTORS_PER_IO,
4107 .can_queue = MAX_COMMANDS,
4108 .this_id = DEFAULT_INITIATOR_ID,
4109 .sg_tablesize = MAX_SGLIST,
4110 .cmd_per_lun = DEF_CMD_PER_LUN,
4111 .eh_abort_handler = megaraid_abort,
4112 .eh_host_reset_handler = megaraid_reset,
4113 .no_write_same = 1,
4114 .cmd_size = sizeof(struct megaraid_cmd_priv),
4115 };
4116
4117 static int
megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4119 {
4120 struct Scsi_Host *host;
4121 adapter_t *adapter;
4122 unsigned long mega_baseport, tbase, flag = 0;
4123 u16 subsysid, subsysvid;
4124 u8 pci_bus, pci_dev_func;
4125 int irq, i, j;
4126 int error = -ENODEV;
4127
4128 if (hba_count >= MAX_CONTROLLERS)
4129 goto out;
4130
4131 if (pci_enable_device(pdev))
4132 goto out;
4133 pci_set_master(pdev);
4134
4135 pci_bus = pdev->bus->number;
4136 pci_dev_func = pdev->devfn;
4137
4138 /*
4139 * The megaraid3 stuff reports the ID of the Intel part which is not
4140 * remotely specific to the megaraid
4141 */
4142 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
4143 u16 magic;
4144 /*
4145 * Don't fall over the Compaq management cards using the same
4146 * PCI identifier
4147 */
4148 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
4149 pdev->subsystem_device == 0xC000)
4150 goto out_disable_device;
4151 /* Now check the magic signature byte */
4152 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
4153 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
4154 goto out_disable_device;
4155 /* Ok it is probably a megaraid */
4156 }
4157
4158 /*
4159 * For these vendor and device ids, signature offsets are not
4160 * valid and 64 bit is implicit
4161 */
4162 if (id->driver_data & BOARD_64BIT)
4163 flag |= BOARD_64BIT;
4164 else {
4165 u32 magic64;
4166
4167 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64);
4168 if (magic64 == HBA_SIGNATURE_64BIT)
4169 flag |= BOARD_64BIT;
4170 }
4171
4172 subsysvid = pdev->subsystem_vendor;
4173 subsysid = pdev->subsystem_device;
4174
4175 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
4176 id->vendor, id->device);
4177
4178 /* Read the base port and IRQ from PCI */
4179 mega_baseport = pci_resource_start(pdev, 0);
4180 irq = pdev->irq;
4181
4182 tbase = mega_baseport;
4183 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) {
4184 flag |= BOARD_MEMMAP;
4185
4186 if (!request_mem_region(mega_baseport, 128, "megaraid")) {
4187 dev_warn(&pdev->dev, "mem region busy!\n");
4188 goto out_disable_device;
4189 }
4190
4191 mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
4192 if (!mega_baseport) {
4193 dev_warn(&pdev->dev, "could not map hba memory\n");
4194 goto out_release_region;
4195 }
4196 } else {
4197 flag |= BOARD_IOMAP;
4198 mega_baseport += 0x10;
4199
4200 if (!request_region(mega_baseport, 16, "megaraid"))
4201 goto out_disable_device;
4202 }
4203
4204 /* Initialize SCSI Host structure */
4205 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t));
4206 if (!host)
4207 goto out_iounmap;
4208
4209 adapter = (adapter_t *)host->hostdata;
4210 memset(adapter, 0, sizeof(adapter_t));
4211
4212 dev_notice(&pdev->dev,
4213 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
4214 host->host_no, mega_baseport, irq);
4215
4216 adapter->base = mega_baseport;
4217 if (flag & BOARD_MEMMAP)
4218 adapter->mmio_base = (void __iomem *) mega_baseport;
4219
4220 INIT_LIST_HEAD(&adapter->free_list);
4221 INIT_LIST_HEAD(&adapter->pending_list);
4222 INIT_LIST_HEAD(&adapter->completed_list);
4223
4224 adapter->flag = flag;
4225 spin_lock_init(&adapter->lock);
4226
4227 host->cmd_per_lun = max_cmd_per_lun;
4228 host->max_sectors = max_sectors_per_io;
4229
4230 adapter->dev = pdev;
4231 adapter->host = host;
4232
4233 adapter->host->irq = irq;
4234
4235 if (flag & BOARD_MEMMAP)
4236 adapter->host->base = tbase;
4237 else {
4238 adapter->host->io_port = tbase;
4239 adapter->host->n_io_port = 16;
4240 }
4241
4242 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func;
4243
4244 /*
4245 * Allocate buffer to issue internal commands.
4246 */
4247 adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev,
4248 MEGA_BUFFER_SIZE,
4249 &adapter->buf_dma_handle,
4250 GFP_KERNEL);
4251 if (!adapter->mega_buffer) {
4252 dev_warn(&pdev->dev, "out of RAM\n");
4253 goto out_host_put;
4254 }
4255
4256 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t),
4257 GFP_KERNEL);
4258 if (!adapter->scb_list) {
4259 dev_warn(&pdev->dev, "out of RAM\n");
4260 goto out_free_cmd_buffer;
4261 }
4262
4263 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
4264 megaraid_isr_memmapped : megaraid_isr_iomapped,
4265 IRQF_SHARED, "megaraid", adapter)) {
4266 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
4267 goto out_free_scb_list;
4268 }
4269
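/*
* Set up the mailbox used to post commands to the firmware, then
* query the controller for its product data (firmware version,
* number of logical drives, channels, etc.).
*/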
4270 if (mega_setup_mailbox(adapter))
4271 goto out_free_irq;
4272
4273 if (mega_query_adapter(adapter))
4274 goto out_free_mbox;
4275
4276 /*
4277 * Check for some known-buggy firmware
4278 */
4279 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) {
4280 /*
4281 * Which firmware
4282 */
4283 if (!strcmp(adapter->fw_version, "3.00") ||
4284 !strcmp(adapter->fw_version, "3.01")) {
4285
4286 dev_warn(&pdev->dev,
4287 "Your card is a Dell PERC "
4288 "2/SC RAID controller with "
4289 "firmware\nmegaraid: 3.00 or 3.01. "
4290 "This driver is known to have "
4291 "corruption issues\nmegaraid: with "
4292 "those firmware versions on this "
4293 "specific card. In order\nmegaraid: "
4294 "to protect your data, please upgrade "
4295 "your firmware to version\nmegaraid: "
4296 "3.10 or later, available from the "
4297 "Dell Technical Support web\n"
4298 "megaraid: site at\nhttp://support."
4299 "dell.com/us/en/filelib/download/"
4300 "index.asp?fileid=2940\n"
4301 );
4302 }
4303 }
4304
4305 /*
4306 * If we have an HP 1M(0x60E7)/2M(0x60E8) controller with
4307 * firmware H.01.07, H.01.08, or H.01.09, disable 64-bit
4308 * support, since this firmware cannot handle 64-bit
4309 * addressing
4310 */
4311 if ((subsysvid == PCI_VENDOR_ID_HP) &&
4312 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) {
4313 /*
4314 * which firmware
4315 */
4316 if (!strcmp(adapter->fw_version, "H01.07") ||
4317 !strcmp(adapter->fw_version, "H01.08") ||
4318 !strcmp(adapter->fw_version, "H01.09") ) {
4319 dev_warn(&pdev->dev,
4320 "Firmware H.01.07, "
4321 "H.01.08, and H.01.09 on 1M/2M "
4322 "controllers\n"
4323 "do not support 64 bit "
4324 "addressing.\nDISABLING "
4325 "64 bit support.\n");
4326 adapter->flag &= ~BOARD_64BIT;
4327 }
4328 }
4329
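/*
* Note whether the adapter BIOS is enabled and record this HBA's
* soft state.
*/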
4330 if (mega_is_bios_enabled(adapter))
4331 mega_hbas[hba_count].is_bios_enabled = 1;
4332 mega_hbas[hba_count].hostdata_addr = adapter;
4333
4334 /*
4335 * Find out which channel is RAID and which is SCSI. This is
4336 * for ROMB support.
4337 */
4338 mega_enum_raid_scsi(adapter);
4339
4340 /*
4341 * Find out if a logical drive is set as the boot drive. If
4342 * there is one, it is exposed as the first logical drive.
4343 * ROMB: if we have to boot from a physical drive, all the
4344 * physical drives appear before the logical disks. Otherwise,
4345 * all the physical drives are exported to the mid layer after
4346 * the logical drives.
4347 */
4348 mega_get_boot_drv(adapter);
4349
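/*
* Build the channel map: logdrv_chan[] marks which exported channels
* carry logical drives (the NVIRT_CHAN virtual channels) as opposed
* to physical channels, honouring the boot-device ordering determined
* above.
*/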
4350 if (adapter->boot_pdrv_enabled) {
4351 j = adapter->product_info.nchannels;
4352 for( i = 0; i < j; i++ )
4353 adapter->logdrv_chan[i] = 0;
4354 for( i = j; i < NVIRT_CHAN + j; i++ )
4355 adapter->logdrv_chan[i] = 1;
4356 } else {
4357 for (i = 0; i < NVIRT_CHAN; i++)
4358 adapter->logdrv_chan[i] = 1;
4359 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++)
4360 adapter->logdrv_chan[i] = 0;
4361 adapter->mega_ch_class <<= NVIRT_CHAN;
4362 }
4363
4364 /*
4365 * Do we support random deletion and addition of logical
4366 * drives?
4367 */
4368 adapter->read_ldidmap = 0; /* set after the first logical
4369 drive delete command */
4370 adapter->support_random_del = mega_support_random_del(adapter);
4371
4372 /* Initialize SCBs */
4373 if (mega_init_scb(adapter))
4374 goto out_free_mbox;
4375
4376 /*
4377 * Reset the pending commands counter
4378 */
4379 atomic_set(&adapter->pend_cmds, 0);
4380
4381 /*
4382 * Reset the adapter quiescent flag
4383 */
4384 atomic_set(&adapter->quiescent, 0);
4385
4386 hba_soft_state[hba_count] = adapter;
4387
4388 /*
4389 * Fill in the structure which needs to be passed back to the
4390 * application when it does an ioctl() for controller related
4391 * information.
4392 */
4393 i = hba_count;
4394
4395 mcontroller[i].base = mega_baseport;
4396 mcontroller[i].irq = irq;
4397 mcontroller[i].numldrv = adapter->numldrv;
4398 mcontroller[i].pcibus = pci_bus;
4399 mcontroller[i].pcidev = id->device;
4400 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func);
4401 mcontroller[i].pciid = -1;
4402 mcontroller[i].pcivendor = id->vendor;
4403 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func);
4404 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func;
4405
4406
4407 /* Use 64-bit DMA addressing if both the board and dma_addr_t support it */
4408 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
4409 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4410 adapter->has_64bit_addr = 1;
4411 } else {
4412 dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4413 adapter->has_64bit_addr = 0;
4414 }
4415
4416 mutex_init(&adapter->int_mtx);
4417 init_completion(&adapter->int_waitq);
4418
4419 adapter->this_id = DEFAULT_INITIATOR_ID;
4420 adapter->host->this_id = DEFAULT_INITIATOR_ID;
4421
4422 #if MEGA_HAVE_CLUSTERING
4423 /*
4424 * Is cluster support enabled on this controller?
4425 * Note: in a cluster the HBAs (the initiators) will have
4426 * different target IDs and we cannot assume they are 7. The
4427 * call to mega_support_cluster() also retrieves the target id
4428 * if cluster support is available
4429 */
4430 adapter->has_cluster = mega_support_cluster(adapter);
4431 if (adapter->has_cluster) {
4432 dev_notice(&pdev->dev,
4433 "Cluster driver, initiator id:%d\n",
4434 adapter->this_id);
4435 }
4436 #endif
4437
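/*
* Stash the Scsi_Host pointer in the PCI device; remove() and
* shutdown() retrieve it with pci_get_drvdata().
*/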
4438 pci_set_drvdata(pdev, host);
4439
4440 mega_create_proc_entry(hba_count, mega_proc_dir_entry);
4441
4442 error = scsi_add_host(host, &pdev->dev);
4443 if (error)
4444 goto out_free_mbox;
4445
4446 scsi_scan_host(host);
4447 hba_count++;
4448 return 0;
4449
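/* Error unwind: release resources in the reverse order of allocation. */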
4450 out_free_mbox:
4451 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
4452 adapter->una_mbox64, adapter->una_mbox64_dma);
4453 out_free_irq:
4454 free_irq(adapter->host->irq, adapter);
4455 out_free_scb_list:
4456 kfree(adapter->scb_list);
4457 out_free_cmd_buffer:
4458 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
4459 adapter->mega_buffer, adapter->buf_dma_handle);
4460 out_host_put:
4461 scsi_host_put(host);
4462 out_iounmap:
4463 if (flag & BOARD_MEMMAP)
4464 iounmap((void *)mega_baseport);
4465 out_release_region:
4466 if (flag & BOARD_MEMMAP)
4467 release_mem_region(tbase, 128);
4468 else
4469 release_region(mega_baseport, 16);
4470 out_disable_device:
4471 pci_disable_device(pdev);
4472 out:
4473 return error;
4474 }
4475
4476 static void
4477 __megaraid_shutdown(adapter_t *adapter)
4478 {
4479 u_char raw_mbox[sizeof(struct mbox_out)];
4480 mbox_t *mbox = (mbox_t *)raw_mbox;
4481 int i;
4482
4483 /* Flush adapter cache */
4484 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4485 raw_mbox[0] = FLUSH_ADAPTER;
4486
4487 free_irq(adapter->host->irq, adapter);
4488
4489 /* Issue a blocking (interrupts disabled) command to the card */
4490 issue_scb_block(adapter, raw_mbox);
4491
4492 /* Flush disks cache */
4493 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4494 raw_mbox[0] = FLUSH_SYSTEM;
4495
4496 /* Issue a blocking (interrupts disabled) command to the card */
4497 issue_scb_block(adapter, raw_mbox);
4498
4499 if (atomic_read(&adapter->pend_cmds) > 0)
4500 dev_warn(&adapter->dev->dev, "pending commands!!\n");
4501
4502 /*
4503 * Have a deliberate delay to make sure all the caches are
4504 * actually flushed.
4505 */
4506 for (i = 0; i <= 10; i++)
4507 mdelay(1000);
4508 }
4509
4510 static void
4511 megaraid_remove_one(struct pci_dev *pdev)
4512 {
4513 struct Scsi_Host *host = pci_get_drvdata(pdev);
4514 adapter_t *adapter = (adapter_t *)host->hostdata;
4515 char buf[12] = { 0 };
4516
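/*
* Detach from the SCSI midlayer, flush the adapter and disk caches,
* then release resources in the reverse order of probe.
*/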
4517 scsi_remove_host(host);
4518
4519 __megaraid_shutdown(adapter);
4520
4521 /* Free our resources */
4522 if (adapter->flag & BOARD_MEMMAP) {
4523 iounmap((void *)adapter->base);
4524 release_mem_region(adapter->host->base, 128);
4525 } else
4526 release_region(adapter->base, 16);
4527
4528 mega_free_sgl(adapter);
4529
4530 sprintf(buf, "hba%d", adapter->host->host_no);
4531 remove_proc_subtree(buf, mega_proc_dir_entry);
4532
4533 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
4534 adapter->mega_buffer, adapter->buf_dma_handle);
4535 kfree(adapter->scb_list);
4536 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
4537 adapter->una_mbox64, adapter->una_mbox64_dma);
4538
4539 scsi_host_put(host);
4540 pci_disable_device(pdev);
4541
4542 hba_count--;
4543 }
4544
4545 static void
4546 megaraid_shutdown(struct pci_dev *pdev)
4547 {
4548 struct Scsi_Host *host = pci_get_drvdata(pdev);
4549 adapter_t *adapter = (adapter_t *)host->hostdata;
4550
4551 __megaraid_shutdown(adapter);
4552 }
4553
4554 static struct pci_device_id megaraid_pci_tbl[] = {
4555 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
4556 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4557 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
4558 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4559 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
4560 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4561 {0,}
4562 };
4563 MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
4564
4565 static struct pci_driver megaraid_pci_driver = {
4566 .name = "megaraid_legacy",
4567 .id_table = megaraid_pci_tbl,
4568 .probe = megaraid_probe_one,
4569 .remove = megaraid_remove_one,
4570 .shutdown = megaraid_shutdown,
4571 };
4572
4573 static int __init megaraid_init(void)
4574 {
4575 int error;
4576
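/* Clamp module parameters to their supported ranges. */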
4577 if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
4578 max_cmd_per_lun = MAX_CMD_PER_LUN;
4579 if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
4580 max_mbox_busy_wait = MBOX_BUSY_WAIT;
4581
4582 #ifdef CONFIG_PROC_FS
4583 mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
4584 if (!mega_proc_dir_entry) {
4585 printk(KERN_WARNING
4586 "megaraid: failed to create megaraid root\n");
4587 }
4588 #endif
4589 error = pci_register_driver(&megaraid_pci_driver);
4590 if (error) {
4591 #ifdef CONFIG_PROC_FS
4592 remove_proc_entry("megaraid", NULL);
4593 #endif
4594 return error;
4595 }
4596
4597 /*
4598 * Register the driver as a character device, for applications
4599 * to access it for ioctls.
4600 * A first argument (major) of 0 to register_chrdev() requests
4601 * dynamic major number allocation.
4602 */
4603 major = register_chrdev(0, "megadev_legacy", &megadev_fops);
4604 if (major < 0) {
4605 printk(KERN_WARNING
4606 "megaraid: failed to register char device\n");
4607 }
4608
4609 return 0;
4610 }
4611
4612 static void __exit megaraid_exit(void)
4613 {
4614 /*
4615 * Unregister the character device interface to the driver.
4616 */
4617 unregister_chrdev(major, "megadev_legacy");
4618
4619 pci_unregister_driver(&megaraid_pci_driver);
4620
4621 #ifdef CONFIG_PROC_FS
4622 remove_proc_entry("megaraid", NULL);
4623 #endif
4624 }
4625
4626 module_init(megaraid_init);
4627 module_exit(megaraid_exit);
4628
4629 /* vi: set ts=8 sw=8 tw=78: */
4630