/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 */
/*
 * Copyright (c) 1999,2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 2002 Eric Moore
 * Copyright (c) 2002 LSI Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The party using or redistributing the source code and binary forms
 *    agrees to the disclaimer below and the terms and conditions set forth
 *    herein.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/int_types.h>
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/scsi/targets/sddef.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/ksynch.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/byteorder.h>

#include "amrreg.h"
#include "amrvar.h"

/* dynamic debug symbol */
int amr_debug_var = 0;

#define	AMR_DELAY(cond, count, done_flag) { \
		int local_counter = 0; \
		done_flag = 1; \
		while (!(cond)) { \
			delay(drv_usectohz(100)); \
			if ((local_counter) > count) { \
				done_flag = 0; \
				break; \
			} \
			(local_counter)++; \
		} \
	}

#define	AMR_BUSYWAIT(cond, count, done_flag) { \
		int local_counter = 0; \
		done_flag = 1; \
		while (!(cond)) { \
			drv_usecwait(100); \
			if ((local_counter) > count) { \
				done_flag = 0; \
				break; \
			} \
			(local_counter)++; \
		} \
	}
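
/*
 * AMR_DELAY sleeps between polls via delay(9F) and so may only be used
 * where blocking is allowed, while AMR_BUSYWAIT spins with
 * drv_usecwait(9F) and is safe where blocking is not (e.g. at panic
 * time in amr_tran_reset() below).  An illustrative sketch of the
 * intended usage, assuming a softs pointer is in scope:
 *
 *	uint32_t done_flag;
 *
 *	// wait up to ~100ms (1000 polls x 100us) for the mailbox to idle
 *	AMR_DELAY((softs->mailbox->mb_busy == 0), 1000, done_flag);
 *	if (!done_flag)
 *		return (DDI_FAILURE);
 */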

/*
 * driver interfaces
 */

static uint_t amr_intr(caddr_t arg);
static void amr_done(struct amr_softs *softs);

static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
static int amr_attach(dev_info_t *, ddi_attach_cmd_t);
static int amr_detach(dev_info_t *, ddi_detach_cmd_t);

static int amr_setup_mbox(struct amr_softs *softs);
static int amr_setup_sg(struct amr_softs *softs);

/*
 * Command wrappers
 */
static int amr_query_controller(struct amr_softs *softs);
static void *amr_enquiry(struct amr_softs *softs, size_t bufsize,
    uint8_t cmd, uint8_t cmdsub, uint8_t cmdqual);
static int amr_flush(struct amr_softs *softs);

/*
 * Command processing.
 */
static void amr_rw_command(struct amr_softs *softs,
    struct scsi_pkt *pkt, int lun);
static void amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp,
    unsigned int capacity);
static void amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key);
static int amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size);
static void amr_enquiry_unmapcmd(struct amr_command *ac);
static int amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg);
static void amr_unmapcmd(struct amr_command *ac);

/*
 * Status monitoring
 */
static void amr_periodic(void *data);

/*
 * Interface-specific shims
 */
static int amr_poll_command(struct amr_command *ac);
static void amr_start_waiting_queue(void *softp);
static void amr_call_pkt_comp(struct amr_command *head);

/*
 * SCSI interface
 */
static int amr_setup_tran(dev_info_t *dip, struct amr_softs *softp);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd);
static int amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int amr_tran_reset(struct scsi_address *ap, int level);
static int amr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *amr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static void amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);

static ddi_dma_attr_t buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffull,	/* highest usable address */
	0x00ffffffull,	/* maximum DMAable byte count */
	4,		/* alignment */
	1,		/* burst sizes */
	1,		/* minimum transfer */
	0xffffffffull,	/* maximum transfer */
	0xffffffffull,	/* maximum segment length */
	AMR_NSEG,	/* maximum number of segments */
	AMR_BLKSIZE,	/* granularity */
	0,		/* flags (reserved) */
};

static ddi_dma_attr_t addr_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffull,	/* highest usable address */
	0x7fffffff,	/* maximum DMAable byte count */
	4,		/* alignment */
	1,		/* burst sizes */
	1,		/* minimum transfer */
	0xffffffffull,	/* maximum transfer */
	0xffffffffull,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
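
/*
 * Two attribute sets are used: buffer_dma_attr describes data buffers
 * and allows up to AMR_NSEG scatter/gather segments at AMR_BLKSIZE
 * granularity, while addr_dma_attr (a single segment, byte granularity)
 * is used for control structures -- the mailbox and the s/g tables
 * themselves -- which the code below requires to be physically
 * contiguous (see the cookie-count checks in amr_setup_mbox() and
 * amr_setup_sg()).
 */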

static struct dev_ops amr_ops = {
	DEVO_REV,			/* devo_rev, */
	0,				/* refcnt */
	amr_info,			/* info */
	nulldev,			/* identify */
	nulldev,			/* probe */
	amr_attach,			/* attach */
	amr_detach,			/* detach */
	nodev,				/* reset */
	NULL,				/* driver operations */
	(struct bus_ops *)0,		/* bus operations */
	0,				/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};


extern struct mod_ops mod_driverops;
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. driver here */
	"AMR Driver",		/* Name of the module. */
	&amr_ops,		/* Driver ops vector */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/* DMA access attributes */
static ddi_device_acc_attr_t accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static struct amr_softs *amr_softstatep;


int
_init(void)
{
	int error;

	error = ddi_soft_state_init((void *)&amr_softstatep,
	    sizeof (struct amr_softs), 0);

	if (error != 0)
		goto error_out;

	if ((error = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini((void *)&amr_softstatep);
		goto error_out;
	}

	error = mod_install(&modlinkage);
	if (error != 0) {
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini((void *)&amr_softstatep);
		goto error_out;
	}

	return (error);

error_out:
	cmn_err(CE_NOTE, "_init failed");
	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0) {
		return (error);
	}

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini((void *)&amr_softstatep);
	return (error);
}

static int
amr_attach(dev_info_t *dev, ddi_attach_cmd_t cmd)
{
	struct amr_softs *softs;
	int error;
	uint32_t command, i;
	int instance;
	caddr_t cfgaddr;

	instance = ddi_get_instance(dev);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		return (DDI_FAILURE);

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Initialize softs.
	 */
	if (ddi_soft_state_zalloc(amr_softstatep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	softs = ddi_get_soft_state(amr_softstatep, instance);
	softs->state |= AMR_STATE_SOFT_STATE_SETUP;

	softs->dev_info_p = dev;

	AMRDB_PRINT((CE_NOTE, "softs: %p; busy_slot addr: %p",
	    (void *)softs, (void *)&(softs->amr_busyslots)));

	if (pci_config_setup(dev, &(softs->pciconfig_handle))
	    != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_PCI_CONFIG_SETUP;

	error = ddi_regs_map_setup(dev, 1, &cfgaddr, 0, 0,
	    &accattr, &(softs->regsmap_handle));
	if (error != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_PCI_MEM_MAPPED;

	/*
	 * Read the PCI command register.
	 */
	command = pci_config_get16(softs->pciconfig_handle, PCI_CONF_COMM);

	/*
	 * Make sure we are going to be able to talk to this board.
	 */
	if ((command & PCI_COMM_MAE) == 0) {
		AMRDB_PRINT((CE_NOTE, "memory window not available"));
		goto error_out;
	}

	/* force the busmaster enable bit on */
	if (!(command & PCI_COMM_ME)) {
		command |= PCI_COMM_ME;
		pci_config_put16(softs->pciconfig_handle,
		    PCI_CONF_COMM, command);
		command = pci_config_get16(softs->pciconfig_handle,
		    PCI_CONF_COMM);
		if (!(command & PCI_COMM_ME))
			goto error_out;
	}

	/*
	 * Allocate and connect our interrupt.
	 */
	if (ddi_intr_hilevel(dev, 0) != 0) {
		AMRDB_PRINT((CE_NOTE,
		    "High level interrupt is not supported!"));
		goto error_out;
	}

	if (ddi_get_iblock_cookie(dev, 0, &softs->iblock_cookiep)
	    != DDI_SUCCESS) {
		goto error_out;
	}

	mutex_init(&softs->cmd_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* may be acquired in interrupt context */
	mutex_init(&softs->queue_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* may be acquired in interrupt context */
	mutex_init(&softs->periodic_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* may be acquired in interrupt context */
	/* synchronize waits for busy slots via this cv */
	cv_init(&softs->cmd_cv, NULL, CV_DRIVER, NULL);
	softs->state |= AMR_STATE_KMUTEX_INITED;

	/*
	 * Do bus-independent initialisation, bring controller online.
	 */
	if (amr_setup_mbox(softs) != DDI_SUCCESS)
		goto error_out;
	softs->state |= AMR_STATE_MAILBOX_SETUP;

	if (amr_setup_sg(softs) != DDI_SUCCESS)
		goto error_out;

	softs->state |= AMR_STATE_SG_TABLES_SETUP;

	if (amr_query_controller(softs) != DDI_SUCCESS)
		goto error_out;

	/*
	 * Create a taskq for dispatching the waiting-queue processing
	 * threads.  The number of threads equals the number of logical
	 * drives, or 1 if no logical drive is configured for this
	 * instance.
	 */
	if ((softs->amr_taskq = ddi_taskq_create(dev, "amr_taskq",
	    MAX(softs->amr_nlogdrives, 1), TASKQ_DEFAULTPRI, 0)) == NULL) {
		goto error_out;
	}
	softs->state |= AMR_STATE_TASKQ_SETUP;

	if (ddi_add_intr(dev, 0, &softs->iblock_cookiep, NULL,
	    amr_intr, (caddr_t)softs) != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_INTR_SETUP;

	/* set up the tran interface */
	if (amr_setup_tran(softs->dev_info_p, softs) != DDI_SUCCESS) {
		AMRDB_PRINT((CE_NOTE, "setup tran failed"));
		goto error_out;
	}
	softs->state |= AMR_STATE_TRAN_SETUP;

	/* schedule a thread for periodic check */
	mutex_enter(&softs->periodic_mutex);
	softs->timeout_t = timeout(amr_periodic, (void *)softs,
	    drv_usectohz(500000 * AMR_PERIODIC_TIMEOUT));
	softs->state |= AMR_STATE_TIMEOUT_ENABLED;
	mutex_exit(&softs->periodic_mutex);

	/* print firmware information in verbose mode */
	cmn_err(CE_CONT, "?MegaRaid %s %s attached.",
	    softs->amr_product_info.pi_product_name,
	    softs->amr_product_info.pi_firmware_ver);

	/* clear any interrupts */
	AMR_QCLEAR_INTR(softs);
	return (DDI_SUCCESS);

error_out:
	if (softs->state & AMR_STATE_INTR_SETUP) {
		ddi_remove_intr(dev, 0, softs->iblock_cookiep);
	}
	if (softs->state & AMR_STATE_TASKQ_SETUP) {
		ddi_taskq_destroy(softs->amr_taskq);
	}
	if (softs->state & AMR_STATE_SG_TABLES_SETUP) {
		for (i = 0; i < softs->sg_max_count; i++) {
			(void) ddi_dma_unbind_handle(
			    softs->sg_items[i].sg_handle);
			(void) ddi_dma_mem_free(
			    &((softs->sg_items[i]).sg_acc_handle));
			(void) ddi_dma_free_handle(
			    &(softs->sg_items[i].sg_handle));
		}
	}
	if (softs->state & AMR_STATE_MAILBOX_SETUP) {
		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
		(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
	}
	if (softs->state & AMR_STATE_KMUTEX_INITED) {
		mutex_destroy(&softs->queue_mutex);
		mutex_destroy(&softs->cmd_mutex);
		mutex_destroy(&softs->periodic_mutex);
		cv_destroy(&softs->cmd_cv);
	}
	if (softs->state & AMR_STATE_PCI_MEM_MAPPED)
		ddi_regs_map_free(&softs->regsmap_handle);
	if (softs->state & AMR_STATE_PCI_CONFIG_SETUP)
		pci_config_teardown(&softs->pciconfig_handle);
	if (softs->state & AMR_STATE_SOFT_STATE_SETUP)
		ddi_soft_state_free(amr_softstatep, instance);
	return (DDI_FAILURE);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 * This function is called during detach and at system shutdown.
 *
 * Note that we can assume that the bufq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
/*ARGSUSED*/
static int amr_detach(dev_info_t *dev, ddi_detach_cmd_t cmd)
{
	struct amr_softs *softs;
	int instance;
	uint32_t i, done_flag;

	instance = ddi_get_instance(dev);
	softs = ddi_get_soft_state(amr_softstatep, instance);

	/* flush the controller */
	if (amr_flush(softs) != 0) {
		AMRDB_PRINT((CE_NOTE, "device shutdown failed"));
		return (EIO);
	}

	/* release the amr timer */
	mutex_enter(&softs->periodic_mutex);
	softs->state &= ~AMR_STATE_TIMEOUT_ENABLED;
	if (softs->timeout_t) {
		(void) untimeout(softs->timeout_t);
		softs->timeout_t = 0;
	}
	mutex_exit(&softs->periodic_mutex);

	for (i = 0; i < softs->sg_max_count; i++) {
		(void) ddi_dma_unbind_handle(
		    softs->sg_items[i].sg_handle);
		(void) ddi_dma_mem_free(
		    &((softs->sg_items[i]).sg_acc_handle));
		(void) ddi_dma_free_handle(
		    &(softs->sg_items[i].sg_handle));
	}

	(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
	(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
	(void) ddi_dma_free_handle(&softs->mbox_dma_handle);

	/* disconnect the interrupt handler */
	ddi_remove_intr(softs->dev_info_p, 0, softs->iblock_cookiep);

	/* wait for any in-progress interrupts to complete */
	AMR_DELAY((softs->amr_interrupts_counter == 0), 1000, done_flag);
	if (!done_flag) {
		cmn_err(CE_WARN, "Suspicious interrupts still in progress.");
	}

	ddi_taskq_destroy(softs->amr_taskq);

	(void) scsi_hba_detach(dev);
	scsi_hba_tran_free(softs->hba_tran);
	ddi_regs_map_free(&softs->regsmap_handle);
	pci_config_teardown(&softs->pciconfig_handle);

	mutex_destroy(&softs->queue_mutex);
	mutex_destroy(&softs->cmd_mutex);
	mutex_destroy(&softs->periodic_mutex);
	cv_destroy(&softs->cmd_cv);

	/* print firmware information in verbose mode */
	cmn_err(CE_NOTE, "?MegaRaid %s %s detached.",
	    softs->amr_product_info.pi_product_name,
	    softs->amr_product_info.pi_firmware_ver);

	ddi_soft_state_free(amr_softstatep, instance);

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result)
{
	struct amr_softs *softs;
	int instance;

	instance = ddi_get_instance(dip);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		softs = ddi_get_soft_state(amr_softstatep, instance);
		if (softs != NULL) {
			*result = softs->dev_info_p;
			return (DDI_SUCCESS);
		} else {
			*result = NULL;
			return (DDI_FAILURE);
		}
	case DDI_INFO_DEVT2INSTANCE:
		*(int *)result = instance;
		break;
	default:
		break;
	}
	return (DDI_SUCCESS);
}

/*
 * Take an interrupt, or be poked by other code to look for interrupt-worthy
 * status.
 */
static uint_t
amr_intr(caddr_t arg)
{
	struct amr_softs *softs = (struct amr_softs *)arg;

	softs->amr_interrupts_counter++;

	if (AMR_QGET_ODB(softs) != AMR_QODB_READY) {
		softs->amr_interrupts_counter--;
		return (DDI_INTR_UNCLAIMED);
	}

	/* collect finished commands, queue anything waiting */
	amr_done(softs);

	softs->amr_interrupts_counter--;

	return (DDI_INTR_CLAIMED);
}

/*
 * Setup the amr mailbox
 */
static int
amr_setup_mbox(struct amr_softs *softs)
{
	uint32_t move;
	size_t mbox_len;

	if (ddi_dma_alloc_handle(
	    softs->dev_info_p,
	    &addr_dma_attr,
	    DDI_DMA_SLEEP,
	    NULL,
	    &softs->mbox_dma_handle) != DDI_SUCCESS) {
		AMRDB_PRINT((CE_NOTE, "Cannot alloc dma handle for mailbox"));
		goto error_out;
	}

	if (ddi_dma_mem_alloc(
	    softs->mbox_dma_handle,
	    sizeof (struct amr_mailbox) + 16,
	    &accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    (caddr_t *)(&softs->mbox),
	    &mbox_len,
	    &softs->mbox_acc_handle) !=
	    DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN, "Cannot alloc dma memory for mailbox"));
		goto error_out;
	}

	if (ddi_dma_addr_bind_handle(
	    softs->mbox_dma_handle,
	    NULL,
	    (caddr_t)softs->mbox,
	    mbox_len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &softs->mbox_dma_cookie,
	    &softs->mbox_dma_cookien) != DDI_DMA_MAPPED) {

		AMRDB_PRINT((CE_NOTE, "Cannot bind dma memory for mailbox"));
		goto error_out;
	}

	if (softs->mbox_dma_cookien != 1)
		goto error_out;

	/*
	 * The physical address of the mailbox must be aligned on a
	 * 16-byte boundary.
	 */
	move = 16 - (((uint32_t)softs->mbox_dma_cookie.dmac_address) & 0xf);
	softs->mbox_phyaddr =
	    (softs->mbox_dma_cookie.dmac_address + move);

	softs->mailbox =
	    (struct amr_mailbox *)(((uintptr_t)softs->mbox) + move);
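
	/*
	 * Worked example: were dmac_address 0x12340004, move would be 12
	 * and the mailbox would start at 0x12340010.  An already-aligned
	 * cookie yields move == 16, which is still 16-byte aligned; the
	 * extra 16 bytes requested from ddi_dma_mem_alloc() above absorb
	 * the shift in either case.
	 */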

	AMRDB_PRINT((CE_NOTE, "phyaddr=%x, mailbox=%p, softs->mbox=%p, move=%x",
	    softs->mbox_phyaddr, (void *)softs->mailbox,
	    softs->mbox, move));

	return (DDI_SUCCESS);

error_out:
	if (softs->mbox_dma_cookien)
		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
	if (softs->mbox_acc_handle) {
		(void) ddi_dma_mem_free(&(softs->mbox_acc_handle));
		softs->mbox_acc_handle = NULL;
	}
	if (softs->mbox_dma_handle) {
		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
		softs->mbox_dma_handle = NULL;
	}

	return (DDI_FAILURE);
}

/*
 * Perform a periodic check of the controller status
 */
static void
amr_periodic(void *data)
{
	uint32_t i;
	struct amr_softs *softs = (struct amr_softs *)data;
	struct scsi_pkt *pkt;
	struct amr_command *ac;

	for (i = 0; i < softs->sg_max_count; i++) {
		if (softs->busycmd[i] == NULL)
			continue;

		mutex_enter(&softs->cmd_mutex);

		if (softs->busycmd[i] == NULL) {
			mutex_exit(&softs->cmd_mutex);
			continue;
		}

		pkt = softs->busycmd[i]->pkt;

		if ((pkt->pkt_time != 0) &&
		    (ddi_get_time() -
		    softs->busycmd[i]->ac_timestamp >
		    pkt->pkt_time)) {

			cmn_err(CE_WARN,
			    "!timed out packet detected, "
			    "sc = %p, pkt = %p, index = %d, ac = %p",
			    (void *)softs,
			    (void *)pkt,
			    i,
			    (void *)softs->busycmd[i]);

			ac = softs->busycmd[i];
			ac->ac_next = NULL;

			/* pull command from the busy index */
			softs->busycmd[i] = NULL;
			if (softs->amr_busyslots > 0)
				softs->amr_busyslots--;
			if (softs->amr_busyslots == 0)
				cv_broadcast(&softs->cmd_cv);

			mutex_exit(&softs->cmd_mutex);

			pkt = ac->pkt;
			*pkt->pkt_scbp = 0;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_reason = CMD_TIMEOUT;
			if (!(pkt->pkt_flags & FLAG_NOINTR)) {
				/* call pkt callback */
				scsi_hba_pkt_comp(pkt);
			}

		} else {
			mutex_exit(&softs->cmd_mutex);
		}
	}

	/* restart the amr timer */
	mutex_enter(&softs->periodic_mutex);
	if (softs->state & AMR_STATE_TIMEOUT_ENABLED)
		softs->timeout_t = timeout(amr_periodic, (void *)softs,
		    drv_usectohz(500000 * AMR_PERIODIC_TIMEOUT));
	mutex_exit(&softs->periodic_mutex);
}

/*
 * Interrogate the controller for the operational parameters we require.
 */
static int
amr_query_controller(struct amr_softs *softs)
{
	struct amr_enquiry3 *aex;
	struct amr_prodinfo *ap;
	struct amr_enquiry *ae;
	uint32_t ldrv;
	int instance;

	/*
	 * If we haven't yet found the real command-count limit, allow a
	 * couple of outstanding commands so that we can probe.
	 */
	if (softs->maxio == 0)
		softs->maxio = 2;

	instance = ddi_get_instance(softs->dev_info_p);

	/*
	 * Try to issue an ENQUIRY3 command
	 */
	if ((aex = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE, AMR_CMD_CONFIG,
	    AMR_CONFIG_ENQ3, AMR_CONFIG_ENQ3_SOLICITED_FULL)) != NULL) {

		AMRDB_PRINT((CE_NOTE, "First enquiry"));

		for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
			softs->logic_drive[ldrv].al_size =
			    aex->ae_drivesize[ldrv];
			softs->logic_drive[ldrv].al_state =
			    aex->ae_drivestate[ldrv];
			softs->logic_drive[ldrv].al_properties =
			    aex->ae_driveprop[ldrv];
			AMRDB_PRINT((CE_NOTE,
			    "  drive %d: size: %d state %x properties %x\n",
			    ldrv,
			    softs->logic_drive[ldrv].al_size,
			    softs->logic_drive[ldrv].al_state,
			    softs->logic_drive[ldrv].al_properties));

			if (softs->logic_drive[ldrv].al_state ==
			    AMR_LDRV_OFFLINE)
				cmn_err(CE_NOTE,
				    "!instance %d log-drive %d is offline",
				    instance, ldrv);
			else
				softs->amr_nlogdrives++;
		}
		kmem_free(aex, AMR_ENQ_BUFFER_SIZE);

		if ((ap = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE,
		    AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) == NULL) {
			AMRDB_PRINT((CE_NOTE,
			    "Cannot obtain product data from controller"));
			return (EIO);
		}

		softs->maxdrives = AMR_40LD_MAXDRIVES;
		softs->maxchan = ap->ap_nschan;
		softs->maxio = ap->ap_maxio;

		bcopy(ap->ap_firmware, softs->amr_product_info.pi_firmware_ver,
		    AMR_FIRMWARE_VER_SIZE);
		softs->amr_product_info.
		    pi_firmware_ver[AMR_FIRMWARE_VER_SIZE] = 0;

		bcopy(ap->ap_product, softs->amr_product_info.pi_product_name,
		    AMR_PRODUCT_INFO_SIZE);
		softs->amr_product_info.
		    pi_product_name[AMR_PRODUCT_INFO_SIZE] = 0;

		kmem_free(ap, AMR_ENQ_BUFFER_SIZE);
		AMRDB_PRINT((CE_NOTE, "maxio=%d", softs->maxio));
	} else {

		AMRDB_PRINT((CE_NOTE,
		    "First enquiry failed, so try another way"));

		/* failed, try the 8LD ENQUIRY commands */
		if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
		    AMR_ENQ_BUFFER_SIZE, AMR_CMD_EXT_ENQUIRY2, 0, 0))
		    == NULL) {

			if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
			    AMR_ENQ_BUFFER_SIZE, AMR_CMD_ENQUIRY, 0, 0))
			    == NULL) {
				AMRDB_PRINT((CE_NOTE,
				    "Cannot obtain configuration data"));
				return (EIO);
			}
			ae->ae_signature = 0;
		}

		/*
		 * Fetch current state of logical drives.
		 */
		for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
			softs->logic_drive[ldrv].al_size =
			    ae->ae_ldrv.al_size[ldrv];
			softs->logic_drive[ldrv].al_state =
			    ae->ae_ldrv.al_state[ldrv];
			softs->logic_drive[ldrv].al_properties =
			    ae->ae_ldrv.al_properties[ldrv];
			AMRDB_PRINT((CE_NOTE,
			    " ********* drive %d: size %d state %x properties %x",
			    ldrv,
			    softs->logic_drive[ldrv].al_size,
			    softs->logic_drive[ldrv].al_state,
			    softs->logic_drive[ldrv].al_properties));

			if (softs->logic_drive[ldrv].al_state ==
			    AMR_LDRV_OFFLINE)
				cmn_err(CE_NOTE,
				    "!instance %d log-drive %d is offline",
				    instance, ldrv);
			else
				softs->amr_nlogdrives++;
		}

		softs->maxdrives = AMR_8LD_MAXDRIVES;
		softs->maxchan = ae->ae_adapter.aa_channels;
		softs->maxio = ae->ae_adapter.aa_maxio;
		kmem_free(ae, AMR_ENQ_BUFFER_SIZE);
	}

	/*
	 * Mark remaining drives as unused.
	 */
	for (; ldrv < AMR_MAXLD; ldrv++)
		softs->logic_drive[ldrv].al_state = AMR_LDRV_OFFLINE;

	/*
	 * Cap the maximum number of outstanding I/Os.  AMI's driver
	 * doesn't trust the controller's reported value, and lockups have
	 * been seen when we do.
	 */
	softs->maxio = MIN(softs->maxio, AMR_LIMITCMD);

	return (DDI_SUCCESS);
}

/*
 * Run a generic enquiry-style command.
 */
static void *
amr_enquiry(struct amr_softs *softs, size_t bufsize, uint8_t cmd,
    uint8_t cmdsub, uint8_t cmdqual)
{
	struct amr_command ac;
	void *result;

	result = NULL;

	bzero(&ac, sizeof (struct amr_command));
	ac.ac_softs = softs;

	/* set command flags */
	ac.ac_flags |= AMR_CMD_DATAOUT;

	/* build the command proper */
	ac.mailbox.mb_command = cmd;
	ac.mailbox.mb_cmdsub = cmdsub;
	ac.mailbox.mb_cmdqual = cmdqual;

	if (amr_enquiry_mapcmd(&ac, bufsize) != DDI_SUCCESS)
		return (NULL);

	if (amr_poll_command(&ac) || ac.ac_status != 0) {
		AMRDB_PRINT((CE_NOTE, "cannot poll command, bail out"));
		amr_enquiry_unmapcmd(&ac);
		return (NULL);
	}

	/* allocate the response structure */
	result = kmem_zalloc(bufsize, KM_SLEEP);

	bcopy(ac.ac_data, result, bufsize);

	amr_enquiry_unmapcmd(&ac);
	return (result);
}

/*
 * Flush the controller's internal cache, return status.
 */
static int
amr_flush(struct amr_softs *softs)
{
	struct amr_command ac;
	int error = 0;

	bzero(&ac, sizeof (struct amr_command));
	ac.ac_softs = softs;

	ac.ac_flags |= AMR_CMD_DATAOUT;

	/* build the command proper */
	ac.mailbox.mb_command = AMR_CMD_FLUSH;

	/* have to poll, as the system may be going down or otherwise damaged */
	if ((error = amr_poll_command(&ac)) != 0) {
		AMRDB_PRINT((CE_NOTE, "cannot poll this cmd"));
		return (error);
	}

	return (error);
}

/*
 * Take a command, submit it to the controller and wait for it to return.
 * Returns nonzero on error.  Can be safely called with interrupts enabled.
 */
static int
amr_poll_command(struct amr_command *ac)
{
	struct amr_softs *softs = ac->ac_softs;
	volatile uint32_t done_flag;

	AMRDB_PRINT((CE_NOTE, "Amr_Poll bcopy(%p, %p, %d)",
	    (void *)&ac->mailbox,
	    (void *)softs->mailbox,
	    (uint32_t)AMR_MBOX_CMDSIZE));

	mutex_enter(&softs->cmd_mutex);

	while (softs->amr_busyslots != 0)
		cv_wait(&softs->cmd_cv, &softs->cmd_mutex);

	/*
	 * For read/write commands the scatter/gather table must already
	 * be filled in; the last s/g table entry is reserved for polled
	 * I/O and is used here.
	 */
	if ((ac->mailbox.mb_command == AMR_CMD_LREAD) ||
	    (ac->mailbox.mb_command == AMR_CMD_LWRITE)) {
		bcopy(ac->sgtable,
		    softs->sg_items[softs->sg_max_count - 1].sg_table,
		    sizeof (struct amr_sgentry) * AMR_NSEG);

		(void) ddi_dma_sync(
		    softs->sg_items[softs->sg_max_count - 1].sg_handle,
		    0, 0, DDI_DMA_SYNC_FORDEV);

		ac->mailbox.mb_physaddr =
		    softs->sg_items[softs->sg_max_count - 1].sg_phyaddr;
	}

	bcopy(&ac->mailbox, (void *)softs->mailbox, AMR_MBOX_CMDSIZE);

	/* sync the dma memory */
	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* clear the poll/ack fields in the mailbox */
	softs->mailbox->mb_ident = AMR_POLL_COMMAND_ID;
	softs->mailbox->mb_nstatus = AMR_POLL_DEFAULT_NSTATUS;
	softs->mailbox->mb_status = AMR_POLL_DEFAULT_STATUS;
	softs->mailbox->mb_poll = 0;
	softs->mailbox->mb_ack = 0;
	softs->mailbox->mb_busy = 1;

	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_SUBMIT);

	/* sync the dma memory */
	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
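
	/*
	 * Polled handshake, as implemented below: wait for the firmware
	 * to post a status (mb_nstatus changes from the default), latch
	 * mb_status, wait for the poll flag, then acknowledge through
	 * the inbound doorbell and wait for the ack bit to clear.
	 */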

	AMR_DELAY((softs->mailbox->mb_nstatus != AMR_POLL_DEFAULT_NSTATUS),
	    1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	ac->ac_status = softs->mailbox->mb_status;

	AMR_DELAY((softs->mailbox->mb_poll == AMR_POLL_ACK), 1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	softs->mailbox->mb_poll = 0;
	softs->mailbox->mb_ack = AMR_POLL_ACK;

	/* acknowledge that we have the commands */
	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

	AMR_DELAY(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK), 1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	mutex_exit(&softs->cmd_mutex);
	return (ac->ac_status != AMR_STATUS_SUCCESS);
}

/*
 * setup the scatter/gather table
 */
static int
amr_setup_sg(struct amr_softs *softs)
{
	uint32_t i;
	size_t len;
	ddi_dma_cookie_t cookie;
	uint_t cookien;

	softs->sg_max_count = 0;

	for (i = 0; i < AMR_MAXCMD; i++) {

		/* reset the cookien */
		cookien = 0;

		(softs->sg_items[i]).sg_handle = NULL;
		if (ddi_dma_alloc_handle(
		    softs->dev_info_p,
		    &addr_dma_attr,
		    DDI_DMA_SLEEP,
		    NULL,
		    &((softs->sg_items[i]).sg_handle)) != DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot alloc dma handle for s/g table"));
			goto error_out;
		}

		if (ddi_dma_mem_alloc((softs->sg_items[i]).sg_handle,
		    sizeof (struct amr_sgentry) * AMR_NSEG,
		    &accattr,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)(&(softs->sg_items[i]).sg_table),
		    &len,
		    &(softs->sg_items[i]).sg_acc_handle)
		    != DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot allocate DMA memory"));
			goto error_out;
		}

		if (ddi_dma_addr_bind_handle(
		    (softs->sg_items[i]).sg_handle,
		    NULL,
		    (caddr_t)((softs->sg_items[i]).sg_table),
		    len,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP,
		    NULL,
		    &cookie,
		    &cookien) != DDI_DMA_MAPPED) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot bind communication area for s/g table"));
			goto error_out;
		}

		if (cookien != 1)
			goto error_out;

		softs->sg_items[i].sg_phyaddr = cookie.dmac_address;
		softs->sg_max_count++;
	}

	return (DDI_SUCCESS);

error_out:
	/*
	 * Couldn't allocate/initialize all of the sg table entries.
	 * Clean up the partially-initialized entry before returning.
	 */
	if (cookien) {
		(void) ddi_dma_unbind_handle((softs->sg_items[i]).sg_handle);
	}
	if ((softs->sg_items[i]).sg_acc_handle) {
		(void) ddi_dma_mem_free(&((softs->sg_items[i]).sg_acc_handle));
		(softs->sg_items[i]).sg_acc_handle = NULL;
	}
	if ((softs->sg_items[i]).sg_handle) {
		(void) ddi_dma_free_handle(&((softs->sg_items[i]).sg_handle));
		(softs->sg_items[i]).sg_handle = NULL;
	}

	/*
	 * At least two sg table entries are needed.  One is for regular data
	 * I/O commands, the other is for poll I/O commands.
	 */
	return (softs->sg_max_count > 1 ? DDI_SUCCESS : DDI_FAILURE);
}

/*
 * Map/unmap (ac)'s data in the controller's addressable space as required.
 *
 * These functions may be safely called multiple times on a given command.
 */
static void
amr_setup_dmamap(struct amr_command *ac, ddi_dma_cookie_t *buffer_dma_cookiep,
    int nsegments)
{
	struct amr_sgentry *sg;
	uint32_t i, size;

	sg = ac->sgtable;

	size = 0;

	ac->mailbox.mb_nsgelem = (uint8_t)nsegments;
	for (i = 0; i < nsegments; i++, sg++) {
		sg->sg_addr = buffer_dma_cookiep->dmac_address;
		sg->sg_count = buffer_dma_cookiep->dmac_size;
		size += sg->sg_count;

		/*
		 * Advance to the next cookie unless this was the last
		 * one in the current DMA window.
		 */
		if ((ac->current_cookie + i + 1) != ac->num_of_cookie)
			ddi_dma_nextcookie(ac->buffer_dma_handle,
			    buffer_dma_cookiep);
	}

	ac->transfer_size = size;
	ac->data_transfered += size;
}


/*
 * map the amr command for enquiry, allocate the DMA resource
 */
static int
amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size)
{
	struct amr_softs *softs = ac->ac_softs;
	size_t len;
	uint_t dma_flags;

	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_mapcmd called, ac=%p, flags=%x",
	    (void *)ac, ac->ac_flags));

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		dma_flags = DDI_DMA_READ;
	} else {
		dma_flags = DDI_DMA_WRITE;
	}

	dma_flags |= DDI_DMA_CONSISTENT;

	/* process the DMA by address bind mode */
	if (ddi_dma_alloc_handle(softs->dev_info_p,
	    &addr_dma_attr, DDI_DMA_SLEEP, NULL,
	    &ac->buffer_dma_handle) !=
	    DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN,
		    "Cannot allocate addr DMA tag"));
		goto error_out;
	}

	if (ddi_dma_mem_alloc(ac->buffer_dma_handle,
	    data_size,
	    &accattr,
	    dma_flags,
	    DDI_DMA_SLEEP,
	    NULL,
	    (caddr_t *)&ac->ac_data,
	    &len,
	    &ac->buffer_acc_handle) !=
	    DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN,
		    "Cannot allocate DMA memory"));
		goto error_out;
	}

	if ((ddi_dma_addr_bind_handle(
	    ac->buffer_dma_handle,
	    NULL, ac->ac_data, len, dma_flags,
	    DDI_DMA_SLEEP, NULL, &ac->buffer_dma_cookie,
	    &ac->num_of_cookie)) != DDI_DMA_MAPPED) {

		AMRDB_PRINT((CE_WARN,
		    "Cannot bind addr for dma"));
		goto error_out;
	}

	ac->ac_dataphys = (&ac->buffer_dma_cookie)->dmac_address;

	((struct amr_mailbox *)&(ac->mailbox))->mb_param = 0;
	ac->mailbox.mb_nsgelem = 0;
	ac->mailbox.mb_physaddr = ac->ac_dataphys;

	ac->ac_flags |= AMR_CMD_MAPPED;

	return (DDI_SUCCESS);

error_out:
	if (ac->num_of_cookie)
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
	if (ac->buffer_acc_handle) {
		ddi_dma_mem_free(&ac->buffer_acc_handle);
		ac->buffer_acc_handle = NULL;
	}
	if (ac->buffer_dma_handle) {
		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
		ac->buffer_dma_handle = NULL;
	}

	return (DDI_FAILURE);
}

/*
 * unmap the amr command for enquiry, free the DMA resource
 */
static void
amr_enquiry_unmapcmd(struct amr_command *ac)
{
	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_unmapcmd called, ac=%p",
	    (void *)ac));

	/* if the command involved data at all and was mapped */
	if ((ac->ac_flags & AMR_CMD_MAPPED) && ac->ac_data) {
		if (ac->buffer_dma_handle)
			(void) ddi_dma_unbind_handle(
			    ac->buffer_dma_handle);
		if (ac->buffer_acc_handle) {
			ddi_dma_mem_free(&ac->buffer_acc_handle);
			ac->buffer_acc_handle = NULL;
		}
		if (ac->buffer_dma_handle) {
			(void) ddi_dma_free_handle(
			    &ac->buffer_dma_handle);
			ac->buffer_dma_handle = NULL;
		}
	}

	ac->ac_flags &= ~AMR_CMD_MAPPED;
}

/*
 * map the amr command, allocate the DMA resource
 */
static int
amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg)
{
	uint_t dma_flags;
	off_t off;
	size_t len;
	int error;
	int (*cb)(caddr_t);

	AMRDB_PRINT((CE_NOTE, "Amr_mapcmd called, ac=%p, flags=%x",
	    (void *)ac, ac->ac_flags));

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		dma_flags = DDI_DMA_READ;
	} else {
		dma_flags = DDI_DMA_WRITE;
	}

	if (ac->ac_flags & AMR_CMD_PKT_CONSISTENT) {
		dma_flags |= DDI_DMA_CONSISTENT;
	}
	if (ac->ac_flags & AMR_CMD_PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	if ((!(ac->ac_flags & AMR_CMD_MAPPED)) && (ac->ac_buf == NULL)) {
		ac->ac_flags |= AMR_CMD_MAPPED;
		return (DDI_SUCCESS);
	}

	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* if the command involves data at all, and hasn't been mapped */
	if (!(ac->ac_flags & AMR_CMD_MAPPED)) {
		/* process the DMA by buffer bind mode */
		error = ddi_dma_buf_bind_handle(ac->buffer_dma_handle,
		    ac->ac_buf,
		    dma_flags,
		    cb,
		    arg,
		    &ac->buffer_dma_cookie,
		    &ac->num_of_cookie);
		switch (error) {
		case DDI_DMA_PARTIAL_MAP:
			if (ddi_dma_numwin(ac->buffer_dma_handle,
			    &ac->num_of_win) == DDI_FAILURE) {

				AMRDB_PRINT((CE_WARN,
				    "Cannot get dma num win"));
				(void) ddi_dma_unbind_handle(
				    ac->buffer_dma_handle);
				(void) ddi_dma_free_handle(
				    &ac->buffer_dma_handle);
				ac->buffer_dma_handle = NULL;
				return (DDI_FAILURE);
			}
			ac->current_win = 0;
			break;

		case DDI_DMA_MAPPED:
			ac->num_of_win = 1;
			ac->current_win = 0;
			break;

		default:
			AMRDB_PRINT((CE_WARN,
			    "Cannot bind buf for dma"));

			(void) ddi_dma_free_handle(
			    &ac->buffer_dma_handle);
			ac->buffer_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		ac->current_cookie = 0;

		ac->ac_flags |= AMR_CMD_MAPPED;
	} else if (ac->current_cookie == AMR_LAST_COOKIE_TAG) {
		/* get the next window */
		ac->current_win++;
		(void) ddi_dma_getwin(ac->buffer_dma_handle,
		    ac->current_win, &off, &len,
		    &ac->buffer_dma_cookie,
		    &ac->num_of_cookie);
		ac->current_cookie = 0;
	}
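
	/*
	 * The controller takes at most AMR_NSEG s/g entries per command,
	 * so consume up to AMR_NSEG cookies here and remember where we
	 * stopped; once the window's cookies are exhausted,
	 * current_cookie is tagged so that the next call moves on to the
	 * next DMA window (see above).
	 */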
	if ((ac->num_of_cookie - ac->current_cookie) > AMR_NSEG) {
		amr_setup_dmamap(ac, &ac->buffer_dma_cookie, AMR_NSEG);
		ac->current_cookie += AMR_NSEG;
	} else {
		amr_setup_dmamap(ac, &ac->buffer_dma_cookie,
		    ac->num_of_cookie - ac->current_cookie);
		ac->current_cookie = AMR_LAST_COOKIE_TAG;
	}

	return (DDI_SUCCESS);
}

/*
 * unmap the amr command, free the DMA resource
 */
static void
amr_unmapcmd(struct amr_command *ac)
{
	AMRDB_PRINT((CE_NOTE, "Amr_unmapcmd called, ac=%p",
	    (void *)ac));

	/* if the command involved data at all and was mapped */
	if ((ac->ac_flags & AMR_CMD_MAPPED) &&
	    ac->ac_buf && ac->buffer_dma_handle)
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);

	ac->ac_flags &= ~AMR_CMD_MAPPED;
}

static int
amr_setup_tran(dev_info_t *dip, struct amr_softs *softp)
{
	softp->hba_tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

	/*
	 * hba_private always points to the amr_softs struct
	 */
	softp->hba_tran->tran_hba_private = softp;
	softp->hba_tran->tran_tgt_init = amr_tran_tgt_init;
	softp->hba_tran->tran_tgt_probe = scsi_hba_probe;
	softp->hba_tran->tran_start = amr_tran_start;
	softp->hba_tran->tran_reset = amr_tran_reset;
	softp->hba_tran->tran_getcap = amr_tran_getcap;
	softp->hba_tran->tran_setcap = amr_tran_setcap;
	softp->hba_tran->tran_init_pkt = amr_tran_init_pkt;
	softp->hba_tran->tran_destroy_pkt = amr_tran_destroy_pkt;
	softp->hba_tran->tran_dmafree = amr_tran_dmafree;
	softp->hba_tran->tran_sync_pkt = amr_tran_sync_pkt;
	softp->hba_tran->tran_abort = NULL;
	softp->hba_tran->tran_tgt_free = NULL;
	softp->hba_tran->tran_quiesce = NULL;
	softp->hba_tran->tran_unquiesce = NULL;
	softp->hba_tran->tran_sd = NULL;

	if (scsi_hba_attach_setup(dip, &buffer_dma_attr, softp->hba_tran,
	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		scsi_hba_tran_free(softp->hba_tran);
		softp->hba_tran = NULL;
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}

/*ARGSUSED*/
static int
amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct amr_softs *softs;
	ushort_t target = sd->sd_address.a_target;
	uchar_t lun = sd->sd_address.a_lun;

	softs = (struct amr_softs *)
	    (sd->sd_address.a_hba_tran->tran_hba_private);

	if ((lun == 0) && (target < AMR_MAXLD))
		if (softs->logic_drive[target].al_state != AMR_LDRV_OFFLINE)
			return (DDI_SUCCESS);

	return (DDI_FAILURE);
}

static int
amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct amr_softs *softs;
	struct buf *bp = NULL;
	union scsi_cdb *cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
	int ret;
	uint32_t capacity;
	struct amr_command *ac;

	AMRDB_PRINT((CE_NOTE, "amr_tran_start, cmd=%X,target=%d,lun=%d",
	    cdbp->scc_cmd, ap->a_target, ap->a_lun));

	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
	if ((ap->a_lun != 0) || (ap->a_target >= AMR_MAXLD) ||
	    (softs->logic_drive[ap->a_target].al_state ==
	    AMR_LDRV_OFFLINE)) {
		cmn_err(CE_WARN, "target or lun is not correct!");
		ret = TRAN_BADPKT;
		return (ret);
	}

	ac = (struct amr_command *)pkt->pkt_ha_private;
	bp = ac->ac_buf;

	AMRDB_PRINT((CE_NOTE, "scsi cmd accepted, cmd=%X", cdbp->scc_cmd));

	switch (cdbp->scc_cmd) {
	case SCMD_READ:			/* read */
	case SCMD_READ_G1:		/* read g1 */
	case SCMD_READ_BUFFER:		/* read buffer */
	case SCMD_WRITE:		/* write */
	case SCMD_WRITE_G1:		/* write g1 */
	case SCMD_WRITE_BUFFER:		/* write buffer */
		amr_rw_command(softs, pkt, ap->a_target);

		if (pkt->pkt_flags & FLAG_NOINTR) {
			(void) amr_poll_command(ac);
			pkt->pkt_state |= (STATE_GOT_BUS
			    | STATE_GOT_TARGET
			    | STATE_SENT_CMD
			    | STATE_XFERRED_DATA);
			*pkt->pkt_scbp = 0;
			pkt->pkt_statistics |= STAT_SYNC;
			pkt->pkt_reason = CMD_CMPLT;
		} else {
			mutex_enter(&softs->queue_mutex);
			if (softs->waiting_q_head == NULL) {
				ac->ac_prev = NULL;
				ac->ac_next = NULL;
				softs->waiting_q_head = ac;
				softs->waiting_q_tail = ac;
			} else {
				ac->ac_next = NULL;
				ac->ac_prev = softs->waiting_q_tail;
				softs->waiting_q_tail->ac_next = ac;
				softs->waiting_q_tail = ac;
			}
			mutex_exit(&softs->queue_mutex);
			amr_start_waiting_queue((void *)softs);
		}
		ret = TRAN_ACCEPT;
		break;

	case SCMD_INQUIRY: /* inquiry */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			struct scsi_inquiry inqp;
			uint8_t *sinq_p = (uint8_t *)&inqp;

			bzero(&inqp, sizeof (struct scsi_inquiry));

			if (((char *)cdbp)[1] || ((char *)cdbp)[2]) {
				/*
				 * EVPD and page code are not
				 * supported
				 */
				sinq_p[1] = 0xFF;
				sinq_p[2] = 0x0;
			} else {
				inqp.inq_len = AMR_INQ_ADDITIONAL_LEN;
				inqp.inq_ansi = AMR_INQ_ANSI_VER;
				inqp.inq_rdf = AMR_INQ_RESP_DATA_FORMAT;
				/* Enable Tag Queue */
				inqp.inq_cmdque = 1;
				bcopy("MegaRaid", inqp.inq_vid,
				    sizeof (inqp.inq_vid));
				bcopy(softs->amr_product_info.pi_product_name,
				    inqp.inq_pid,
				    AMR_PRODUCT_INFO_SIZE);
				bcopy(softs->amr_product_info.pi_firmware_ver,
				    inqp.inq_revision,
				    AMR_FIRMWARE_VER_SIZE);
			}

			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bcopy(&inqp, bp->b_un.b_addr,
			    sizeof (struct scsi_inquiry));

			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			scsi_hba_pkt_comp(pkt);
		break;

	case SCMD_READ_CAPACITY: /* read capacity */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			struct scsi_capacity cp;

			capacity = softs->logic_drive[ap->a_target].al_size - 1;
			cp.capacity = BE_32(capacity);
			cp.lbasize = BE_32(512);

			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bcopy(&cp, bp->b_un.b_addr, 8);
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD
		    | STATE_XFERRED_DATA);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			scsi_hba_pkt_comp(pkt);
		break;

	case SCMD_MODE_SENSE:		/* mode sense */
	case SCMD_MODE_SENSE_G1:	/* mode sense g1 */
		amr_unmapcmd(ac);

		capacity = softs->logic_drive[ap->a_target].al_size - 1;
		amr_mode_sense(cdbp, bp, capacity);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD
		    | STATE_XFERRED_DATA);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			scsi_hba_pkt_comp(pkt);
		break;

	case SCMD_TEST_UNIT_READY:	/* test unit ready */
	case SCMD_REQUEST_SENSE:	/* request sense */
	case SCMD_FORMAT:		/* format */
	case SCMD_START_STOP:		/* start stop */
	case SCMD_SYNCHRONIZE_CACHE:	/* synchronize cache */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bzero(bp->b_un.b_addr, bp->b_bcount);

			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD);
		ret = TRAN_ACCEPT;
		*pkt->pkt_scbp = 0;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			scsi_hba_pkt_comp(pkt);
		break;

	default: /* any other commands */
		amr_unmapcmd(ac);
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state = (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD
		    | STATE_GOT_STATUS
		    | STATE_ARQ_DONE);
		ret = TRAN_ACCEPT;
		*pkt->pkt_scbp = 0;
		amr_set_arq_data(pkt, KEY_ILLEGAL_REQUEST);
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			scsi_hba_pkt_comp(pkt);
		break;
	}

	return (ret);
}

/*
 * tran_reset() should reset the bus/target/adapter to support fault
 * recovery, according to the "level" argument.  However, LSI has
 * confirmed that these HBA cards do not support any commands to reset
 * the bus, target, adapter or channel.
 *
 * If tran_reset() returned FAILURE to sd, the system would not continue
 * to dump core, yet a core dump is a crucial aid in analyzing a panic.
 * As a workaround we return a fake SUCCESS to sd during panic, which
 * forces the system to continue dumping core, although the dump may be
 * flawed in some situations because in-flight commands can still be
 * DMAing data into memory.  The workaround may also fail outright if
 * the panic was caused by the HBA itself, so it is not a good model
 * for implementing tran_reset(); the proper approach would be to send
 * a reset command to the adapter.
 */
/*ARGSUSED*/
static int
amr_tran_reset(struct scsi_address *ap, int level)
{
	struct amr_softs *softs;
	volatile uint32_t done_flag;

	if (ddi_in_panic()) {
		softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);

		/* Acknowledge the card while commands remain outstanding */
		while (softs->amr_busyslots > 0) {
			AMR_DELAY((softs->mailbox->mb_busy == 0),
			    AMR_RETRYCOUNT, done_flag);
			if (!done_flag) {
				/*
				 * command not completed; report the
				 * problem and give up
				 */
				cmn_err(CE_WARN,
				    "AMR command is not completed");
				return (0);
			}

			AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

			/* wait for the acknowledge from hardware */
			AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
			    AMR_RETRYCOUNT, done_flag);
			if (!done_flag) {
				/*
				 * no acknowledge from the hardware;
				 * report it and give up
				 */
				cmn_err(CE_WARN, "No answer from the hardware");

				return (0);
			}

			softs->amr_busyslots -= softs->mailbox->mb_nstatus;
		}

		/* flush the controller */
		(void) amr_flush(softs);

		/*
		 * If the system is in panic, tran_reset() returns a fake
		 * SUCCESS to sd, and the system then continues to dump
		 * core using polled commands.  This is a workaround for
		 * dumping core in panic.
		 *
		 * Note: in-flight commands may continue DMAing data into
		 *	 memory while the core is being dumped, which may
		 *	 cause some flaws in the dumped core file, so a
		 *	 cmn_err() is printed to warn users.  For most
		 *	 cases, however, the core file will be fine.
		 */
		cmn_err(CE_WARN, "This system contains a SCSI HBA card/driver "
		    "that doesn't support software reset. This "
		    "means that memory being used by the HBA for "
		    "DMA based reads could have been updated after "
		    "we panic'd.");
		return (1);
	} else {
		/* return failure to sd */
		return (0);
	}
}

/*ARGSUSED*/
static int
amr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	struct amr_softs *softs;

	/*
	 * We don't allow inquiring about capabilities for other targets
	 */
	if (cap == NULL || whom == 0)
		return (-1);

	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:
		return (1);
	case SCSI_CAP_GEOMETRY:
		return ((AMR_DEFAULT_HEADS << 16) | AMR_DEFAULT_CYLINDERS);
	case SCSI_CAP_SECTOR_SIZE:
		return (AMR_DEFAULT_SECTORS);
	case SCSI_CAP_TOTAL_SECTORS:
		/* number of sectors */
		return (softs->logic_drive[ap->a_target].al_size);
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_TAGGED_QING:
		return (1);
	default:
		return (-1);
	}
}

/*ARGSUSED*/
static int
amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom)
{
	/*
	 * We don't allow setting capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		AMRDB_PRINT((CE_NOTE,
		    "Set Cap not supported, string = %s, whom=%d",
		    cap, whom));
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:
		return (1);
	case SCSI_CAP_TOTAL_SECTORS:
		return (1);
	case SCSI_CAP_SECTOR_SIZE:
		return (1);
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_TAGGED_QING:
		return ((value == 1) ? 1 : 0);
	default:
		return (0);
	}
}

static struct scsi_pkt *
amr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg)
{
	struct amr_softs *softs;
	struct amr_command *ac;
	uint32_t slen;

	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);

	if ((ap->a_lun != 0) || (ap->a_target >= AMR_MAXLD) ||
	    (softs->logic_drive[ap->a_target].al_state ==
	    AMR_LDRV_OFFLINE)) {
		return (NULL);
	}

	if (pkt == NULL) {
		/* force auto request sense */
		slen = MAX(statuslen, sizeof (struct scsi_arq_status));

		pkt = scsi_hba_pkt_alloc(softs->dev_info_p, ap, cmdlen,
		    slen, tgtlen, sizeof (struct amr_command),
		    callback, arg);
		if (pkt == NULL) {
			AMRDB_PRINT((CE_WARN, "scsi_hba_pkt_alloc failed"));
			return (NULL);
		}
		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;

		ac = (struct amr_command *)pkt->pkt_ha_private;
		ac->ac_buf = bp;
		ac->cmdlen = cmdlen;
		ac->ac_softs = softs;
		ac->pkt = pkt;
		ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
		ac->ac_flags &= ~AMR_CMD_BUSY;

		if ((bp == NULL) || (bp->b_bcount == 0)) {
			return (pkt);
		}

		if (ddi_dma_alloc_handle(softs->dev_info_p, &buffer_dma_attr,
		    DDI_DMA_SLEEP, NULL,
		    &ac->buffer_dma_handle) != DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot allocate buffer DMA tag"));
			scsi_hba_pkt_free(ap, pkt);
			return (NULL);

		}

	} else {
		if ((bp == NULL) || (bp->b_bcount == 0)) {
			return (pkt);
		}
		ac = (struct amr_command *)pkt->pkt_ha_private;
	}

	ASSERT(ac != NULL);

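	/*
	 * Note (editorial, hedged): the AMR_CMD_DATAOUT/DATAIN flags appear
	 * to be named from the controller's point of view, as in the
	 * original FreeBSD driver: a host read (B_READ) moves data *out*
	 * of the controller, hence AMR_CMD_DATAOUT, while a host write
	 * moves data *in*.  amr_rw_command() and amr_tran_sync_pkt() rely
	 * on this same convention, so the apparent inversion below is
	 * consistent rather than a bug.
	 */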
	if (bp->b_flags & B_READ) {
		ac->ac_flags |= AMR_CMD_DATAOUT;
	} else {
		ac->ac_flags |= AMR_CMD_DATAIN;
	}

	if (flags & PKT_CONSISTENT) {
		ac->ac_flags |= AMR_CMD_PKT_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		ac->ac_flags |= AMR_CMD_PKT_DMA_PARTIAL;
	}

	if (amr_mapcmd(ac, callback, arg) != DDI_SUCCESS) {
		scsi_hba_pkt_free(ap, pkt);
		return (NULL);
	}

	pkt->pkt_resid = bp->b_bcount - ac->data_transfered;

	AMRDB_PRINT((CE_NOTE,
	    "init pkt, pkt_resid=%d, b_bcount=%d, data_transfered=%d",
	    (uint32_t)pkt->pkt_resid, (uint32_t)bp->b_bcount,
	    ac->data_transfered));

	ASSERT(pkt->pkt_resid >= 0);

	return (pkt);
}

static void
amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;

	amr_unmapcmd(ac);

	if (ac->buffer_dma_handle) {
		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
		ac->buffer_dma_handle = NULL;
	}

	scsi_hba_pkt_free(ap, pkt);
	AMRDB_PRINT((CE_NOTE, "Destroy pkt called"));
}

/*ARGSUSED*/
static void
amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;

	if (ac->buffer_dma_handle) {
		(void) ddi_dma_sync(ac->buffer_dma_handle, 0, 0,
		    (ac->ac_flags & AMR_CMD_DATAIN) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
}

/*ARGSUSED*/
static void
amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;

	if (ac->ac_flags & AMR_CMD_MAPPED) {
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
		ac->buffer_dma_handle = NULL;
		ac->ac_flags &= ~AMR_CMD_MAPPED;
	}
}

/*ARGSUSED*/
static void
amr_rw_command(struct amr_softs *softs, struct scsi_pkt *pkt, int target)
{
	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
	union scsi_cdb *cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
	uint8_t cmd;

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		cmd = AMR_CMD_LREAD;
	} else {
		cmd = AMR_CMD_LWRITE;
	}

	ac->mailbox.mb_command = cmd;
	ac->mailbox.mb_blkcount =
	    (ac->transfer_size + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
	ac->mailbox.mb_lba = (ac->cmdlen == 10) ?
	    GETG1ADDR(cdbp) : GETG0ADDR(cdbp);
	ac->mailbox.mb_drive = (uint8_t)target;
}
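
/*
 * Note (editorial, hedged): mb_blkcount uses the usual ceiling-division
 * idiom, (n + d - 1) / d, so a transfer that is not a whole multiple of
 * AMR_BLKSIZE still reserves enough blocks.  For example, assuming
 * AMR_BLKSIZE is 512, a 1536-byte transfer yields 3 blocks while a
 * 1537-byte transfer yields 4.  The LBA is taken from a 10-byte CDB
 * (GETG1ADDR) when cmdlen is 10, otherwise from a 6-byte CDB
 * (GETG0ADDR).
 */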

static void
amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp, unsigned int capacity)
{
	uchar_t pagecode;
	struct mode_format *page3p;
	struct mode_geometry *page4p;
	struct mode_header *headerp;
	uint32_t ncyl;

	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
		return;

	if (bp->b_flags & (B_PHYS | B_PAGEIO))
		bp_mapin(bp);

	pagecode = cdbp->cdb_un.sg.scsi[0];
	switch (pagecode) {
	case SD_MODE_SENSE_PAGE3_CODE:
		headerp = (struct mode_header *)(bp->b_un.b_addr);
		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;

		page3p = (struct mode_format *)((caddr_t)headerp +
		    MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
		page3p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE3_CODE);
		page3p->mode_page.length = BE_8(sizeof (struct mode_format));
		page3p->data_bytes_sect = BE_16(AMR_DEFAULT_SECTORS);
		page3p->sect_track = BE_16(AMR_DEFAULT_CYLINDERS);

		return;

	case SD_MODE_SENSE_PAGE4_CODE:
		headerp = (struct mode_header *)(bp->b_un.b_addr);
		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;

		page4p = (struct mode_geometry *)((caddr_t)headerp +
		    MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
		page4p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE4_CODE);
		page4p->mode_page.length = BE_8(sizeof (struct mode_geometry));
		page4p->heads = BE_8(AMR_DEFAULT_HEADS);
		page4p->rpm = BE_16(AMR_DEFAULT_ROTATIONS);

		ncyl = capacity / (AMR_DEFAULT_HEADS * AMR_DEFAULT_CYLINDERS);
		page4p->cyl_lb = BE_8(ncyl & 0xff);
		page4p->cyl_mb = BE_8((ncyl >> 8) & 0xff);
		page4p->cyl_ub = BE_8((ncyl >> 16) & 0xff);

		return;
	default:
		bzero(bp->b_un.b_addr, bp->b_bcount);
		return;
	}
}
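
/*
 * Note (editorial, hedged): the page 4 cylinder count is the capacity
 * (in sectors) divided by heads * sectors-per-track, spread over three
 * single-byte fields.  For example, assuming AMR_DEFAULT_HEADS is 255
 * and AMR_DEFAULT_CYLINDERS (serving as sectors per track) is 63, a
 * 16777216-sector (8 GB at 512 bytes/sector) drive gives
 * ncyl = 16777216 / (255 * 63) = 1044 = 0x414, stored as
 * cyl_ub = 0x00, cyl_mb = 0x04, cyl_lb = 0x14.
 */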

static void
amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key)
{
	struct scsi_arq_status *arqstat;

	arqstat = (struct scsi_arq_status *)(pkt->pkt_scbp);
	arqstat->sts_status.sts_chk = 1; /* CHECK CONDITION */
	arqstat->sts_rqpkt_reason = CMD_CMPLT;
	arqstat->sts_rqpkt_resid = 0;
	arqstat->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA;
	arqstat->sts_rqpkt_statistics = 0;
	arqstat->sts_sensedata.es_valid = 1;
	arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
	arqstat->sts_sensedata.es_key = key;
}
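
/*
 * Note (editorial, hedged): because amr_tran_init_pkt() always sizes
 * pkt_scbp to hold at least a struct scsi_arq_status, the driver can
 * fake auto request sense entirely in software: it reports CHECK
 * CONDITION and fills in a minimal extended-sense block carrying just
 * the sense key, rather than issuing a separate REQUEST SENSE to the
 * controller.
 */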

static void
amr_start_waiting_queue(void *softp)
{
	uint32_t slot;
	struct amr_command *ac;
	volatile uint32_t done_flag;
	struct amr_softs *softs = (struct amr_softs *)softp;

	/* only one command allowed at the same time */
	mutex_enter(&softs->queue_mutex);
	mutex_enter(&softs->cmd_mutex);

	while ((ac = softs->waiting_q_head) != NULL) {
		/*
		 * Find an available slot; the last slot is
		 * reserved for the poll I/O command.
		 */
		for (slot = 0; slot < (softs->sg_max_count - 1); slot++) {
			if (softs->busycmd[slot] == NULL) {
				if (AMR_QGET_IDB(softs) & AMR_QIDB_SUBMIT) {
					/*
					 * only one command allowed at the
					 * same time
					 */
					mutex_exit(&softs->cmd_mutex);
					mutex_exit(&softs->queue_mutex);
					return;
				}

				ac->ac_timestamp = ddi_get_time();

				if (!(ac->ac_flags & AMR_CMD_GOT_SLOT)) {

					softs->busycmd[slot] = ac;
					ac->ac_slot = slot;
					softs->amr_busyslots++;

					bcopy(ac->sgtable,
					    softs->sg_items[slot].sg_table,
					    sizeof (struct amr_sgentry) *
					    AMR_NSEG);

					(void) ddi_dma_sync(
					    softs->sg_items[slot].sg_handle,
					    0, 0, DDI_DMA_SYNC_FORDEV);

					ac->mailbox.mb_physaddr =
					    softs->sg_items[slot].sg_phyaddr;
				}

				/* take the cmd from the queue */
				softs->waiting_q_head = ac->ac_next;

				ac->mailbox.mb_ident = ac->ac_slot + 1;
				ac->mailbox.mb_busy = 1;
				ac->ac_next = NULL;
				ac->ac_prev = NULL;
				ac->ac_flags |= AMR_CMD_GOT_SLOT;

				/* clear the poll/ack fields in the mailbox */
				softs->mailbox->mb_poll = 0;
				softs->mailbox->mb_ack = 0;

				AMR_DELAY((softs->mailbox->mb_busy == 0),
				    AMR_RETRYCOUNT, done_flag);
				if (!done_flag) {
					/*
					 * The command did not complete;
					 * report the problem and move on
					 * to the next ac.
					 */
					cmn_err(CE_WARN,
					    "AMR command is not completed");
					break;
				}

				bcopy(&ac->mailbox, (void *)softs->mailbox,
				    AMR_MBOX_CMDSIZE);
				ac->ac_flags |= AMR_CMD_BUSY;

				(void) ddi_dma_sync(softs->mbox_dma_handle,
				    0, 0, DDI_DMA_SYNC_FORDEV);

				AMR_QPUT_IDB(softs,
				    softs->mbox_phyaddr | AMR_QIDB_SUBMIT);

				/*
				 * The current ac has been submitted, so
				 * quit the for-loop and fetch the next ac.
				 */
				break;
			}
		}

		/* no slot, finish our task */
		if (slot == softs->maxio)
			break;
	}

	/* only one command allowed at the same time */
	mutex_exit(&softs->cmd_mutex);
	mutex_exit(&softs->queue_mutex);
}
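
/*
 * Note (editorial, hedged): the submit handshake above follows the
 * MegaRAID doorbell mailbox protocol as this driver uses it: back off
 * if the inbound doorbell still shows AMR_QIDB_SUBMIT, wait for
 * mb_busy to clear, copy the per-command mailbox over the shared one,
 * sync it for the device, then ring the doorbell with the mailbox
 * physical address OR'ed with AMR_QIDB_SUBMIT.  mb_ident is slot + 1
 * so a completion identifier of 0 never collides with slot 0.  The
 * post-loop test compares slot against softs->maxio while the loop
 * bound is sg_max_count - 1; this presumably assumes the two values
 * agree, and the "no free slot" break would be missed if they ever
 * diverged.
 */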

static void
amr_done(struct amr_softs *softs)
{

	uint32_t i, idx;
	volatile uint32_t done_flag;
	struct amr_mailbox *mbox, mbsave;
	struct amr_command *ac, *head, *tail;

	head = tail = NULL;

	AMR_QPUT_ODB(softs, AMR_QODB_READY);

	/* acknowledge interrupt */
	(void) AMR_QGET_ODB(softs);

	mutex_enter(&softs->cmd_mutex);

	if (softs->mailbox->mb_nstatus != 0) {
		(void) ddi_dma_sync(softs->mbox_dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		/* save mailbox, which contains a list of completed commands */
		bcopy((void *)(uintptr_t)(volatile void *)softs->mailbox,
		    &mbsave, sizeof (mbsave));

		mbox = &mbsave;

		AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

		/* wait for the acknowledgement from the hardware */
		AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
		    AMR_RETRYCOUNT, done_flag);
		if (!done_flag) {
			/*
			 * The ack did not complete; return from the current
			 * interrupt and wait for the next one.
			 */
			cmn_err(CE_WARN, "No answer from the hardware");

			mutex_exit(&softs->cmd_mutex);
			return;
		}

		for (i = 0; i < mbox->mb_nstatus; i++) {
			idx = mbox->mb_completed[i] - 1;
			ac = softs->busycmd[idx];

			if (ac != NULL) {
				/* pull the command from the busy index */
				softs->busycmd[idx] = NULL;
				if (softs->amr_busyslots > 0)
					softs->amr_busyslots--;
				if (softs->amr_busyslots == 0)
					cv_broadcast(&softs->cmd_cv);

				ac->ac_flags &= ~AMR_CMD_BUSY;
				ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
				ac->ac_status = mbox->mb_status;

				/* enqueue here */
				if (head) {
					tail->ac_next = ac;
					tail = ac;
					tail->ac_next = NULL;
				} else {
					tail = head = ac;
					ac->ac_next = NULL;
				}
			} else {
				AMRDB_PRINT((CE_WARN,
				    "ac in mailbox is NULL!"));
			}
		}
	} else {
		AMRDB_PRINT((CE_WARN, "mailbox is not ready for copy out!"));
	}

	mutex_exit(&softs->cmd_mutex);

	if (head != NULL) {
		amr_call_pkt_comp(head);
	}

	/* dispatch a thread to process the pending I/O if there is any */
	if ((ddi_taskq_dispatch(softs->amr_taskq, amr_start_waiting_queue,
	    (void *)softs, DDI_NOSLEEP)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "No memory available to dispatch taskq");
	}
}
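
/*
 * Note (editorial, hedged): the completion path deliberately snapshots
 * the shared mailbox into mbsave before acknowledging with
 * AMR_QIDB_ACK, since the controller may reuse the mailbox as soon as
 * it sees the ack.  Completed commands are moved onto a private list
 * under cmd_mutex, and their scsi_pkt callbacks run only after the
 * mutex is dropped, which avoids calling back into the SCSA midlayer
 * (and potentially re-entering this driver) while holding the lock.
 */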

static void
amr_call_pkt_comp(register struct amr_command *head)
{
	register struct scsi_pkt *pkt;
	register struct amr_command *ac, *localhead;

	localhead = head;

	while (localhead) {
		ac = localhead;
		localhead = ac->ac_next;
		ac->ac_next = NULL;

		pkt = ac->pkt;
		*pkt->pkt_scbp = 0;

		if (ac->ac_status == AMR_STATUS_SUCCESS) {
			pkt->pkt_state |= (STATE_GOT_BUS
			    | STATE_GOT_TARGET
			    | STATE_SENT_CMD
			    | STATE_XFERRED_DATA);
			pkt->pkt_reason = CMD_CMPLT;
		} else {
			pkt->pkt_state |= STATE_GOT_BUS
			    | STATE_ARQ_DONE;
			pkt->pkt_reason = CMD_INCOMPLETE;
			amr_set_arq_data(pkt, KEY_HARDWARE_ERROR);
		}
		if (!(pkt->pkt_flags & FLAG_NOINTR)) {
			scsi_hba_pkt_comp(pkt);
		}
	}
}
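
/*
 * Note (editorial, hedged): packets issued with FLAG_NOINTR are being
 * polled for completion by their submitter, so their callback is
 * skipped here; any failed command still reports its error through
 * the synthesized auto-request-sense data (KEY_HARDWARE_ERROR) set up
 * by amr_set_arq_data().
 */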