1 /*
2 * megaraid_sas.c: source for mega_sas driver
3 *
4 * MegaRAID device driver for SAS controllers
5 * Copyright (c) 2005-2008, LSI Logic Corporation.
6 * All rights reserved.
7 *
8 * Version:
9 * Author:
10 * Rajesh Prabhakaran <Rajesh.Prabhakaran@lsil.com>
11 * Seokmann Ju
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright notice,
17 * this list of conditions and the following disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above copyright notice,
20 * this list of conditions and the following disclaimer in the documentation
21 * and/or other materials provided with the distribution.
22 *
23 * 3. Neither the name of the author nor the names of its contributors may be
24 * used to endorse or promote products derived from this software without
25 * specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
37 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 */
40
41 /*
42 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
43 * Use is subject to license terms.
44 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
45 */
46
47 #include <sys/types.h>
48 #include <sys/param.h>
49 #include <sys/file.h>
50 #include <sys/errno.h>
51 #include <sys/open.h>
52 #include <sys/cred.h>
53 #include <sys/modctl.h>
54 #include <sys/conf.h>
55 #include <sys/devops.h>
56 #include <sys/cmn_err.h>
57 #include <sys/kmem.h>
58 #include <sys/stat.h>
59 #include <sys/mkdev.h>
60 #include <sys/pci.h>
61 #include <sys/scsi/scsi.h>
62 #include <sys/ddi.h>
63 #include <sys/sunddi.h>
64 #include <sys/atomic.h>
65 #include <sys/signal.h>
66
67 #include "megaraid_sas.h"
68
69 /*
70 * FMA header files
71 */
72 #include <sys/ddifm.h>
73 #include <sys/fm/protocol.h>
74 #include <sys/fm/util.h>
75 #include <sys/fm/io/ddi.h>
76
77 /*
78 * Local static data
79 */
80 static void *megasas_state = NULL;
81 static int debug_level_g = CL_ANN;
82
83 #pragma weak scsi_hba_open
84 #pragma weak scsi_hba_close
85 #pragma weak scsi_hba_ioctl
86
87 static ddi_dma_attr_t megasas_generic_dma_attr = {
88 DMA_ATTR_V0, /* dma_attr_version */
89 0, /* low DMA address range */
90 0xFFFFFFFFU, /* high DMA address range */
91 0xFFFFFFFFU, /* DMA counter register */
92 8, /* DMA address alignment */
93 0x07, /* DMA burstsizes */
94 1, /* min DMA size */
95 0xFFFFFFFFU, /* max DMA size */
96 0xFFFFFFFFU, /* segment boundary */
97 MEGASAS_MAX_SGE_CNT, /* dma_attr_sglen */
98 512, /* granularity of device */
99 0 /* bus specific DMA flags */
100 };
101
102 int32_t megasas_max_cap_maxxfer = 0x1000000;
103
104 /*
105 * cb_ops contains base level routines
106 */
107 static struct cb_ops megasas_cb_ops = {
108 megasas_open, /* open */
109 megasas_close, /* close */
110 nodev, /* strategy */
111 nodev, /* print */
112 nodev, /* dump */
113 nodev, /* read */
114 nodev, /* write */
115 megasas_ioctl, /* ioctl */
116 nodev, /* devmap */
117 nodev, /* mmap */
118 nodev, /* segmap */
119 nochpoll, /* poll */
120 nodev, /* cb_prop_op */
121 0, /* streamtab */
122 D_NEW | D_HOTPLUG, /* cb_flag */
123 CB_REV, /* cb_rev */
124 nodev, /* cb_aread */
125 nodev /* cb_awrite */
126 };
127
128 /*
129 * dev_ops contains configuration routines
130 */
131 static struct dev_ops megasas_ops = {
132 DEVO_REV, /* rev, */
133 0, /* refcnt */
134 megasas_getinfo, /* getinfo */
135 nulldev, /* identify */
136 nulldev, /* probe */
137 megasas_attach, /* attach */
138 megasas_detach, /* detach */
139 megasas_reset, /* reset */
140 &megasas_cb_ops, /* char/block ops */
141 NULL, /* bus ops */
142 NULL, /* power */
143 ddi_quiesce_not_supported, /* devo_quiesce */
144 };
145
146 static struct modldrv modldrv = {
147 &mod_driverops, /* module type - driver */
148 MEGASAS_VERSION,
149 &megasas_ops, /* driver ops */
150 };
151
152 static struct modlinkage modlinkage = {
153 MODREV_1, /* ml_rev - must be MODREV_1 */
154 &modldrv, /* ml_linkage */
155 NULL /* end of driver linkage */
156 };
157
158 static struct ddi_device_acc_attr endian_attr = {
159 DDI_DEVICE_ATTR_V1,
160 DDI_STRUCTURE_LE_ACC,
161 DDI_STRICTORDER_ACC,
162 DDI_DEFAULT_ACC
163 };
164
165
166 /*
167 * ************************************************************************** *
168 * *
169 * common entry points - for loadable kernel modules *
170 * *
171 * ************************************************************************** *
172 */
173
174 /*
175 * _init - initialize a loadable module
176 * @void
177 *
178 * The driver should perform any one-time resource allocation or data
179 * initialization during driver loading in _init(). For example, the driver
180 * should initialize any mutexes global to the driver in this routine.
181 * The driver should not, however, use _init() to allocate or initialize
182 * anything that has to do with a particular instance of the device.
183 * Per-instance initialization must be done in attach().
184 */
185 int
186 _init(void)
187 {
188 int ret;
189
190 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
191
192 ret = ddi_soft_state_init(&megasas_state,
193 sizeof (struct megasas_instance), 0);
194
195 if (ret != 0) {
196 con_log(CL_ANN, (CE_WARN, "megaraid: could not init state"));
197 return (ret);
198 }
199
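	/* register this HBA driver with the SCSA framework before mod_install() */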
200 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
201 con_log(CL_ANN, (CE_WARN, "megaraid: could not init scsi hba"));
202 ddi_soft_state_fini(&megasas_state);
203 return (ret);
204 }
205
206 ret = mod_install(&modlinkage);
207
208 if (ret != 0) {
209 con_log(CL_ANN, (CE_WARN, "megaraid: mod_install failed"));
210 scsi_hba_fini(&modlinkage);
211 ddi_soft_state_fini(&megasas_state);
212 }
213
214 return (ret);
215 }
216
217 /*
218 * _info - returns information about a loadable module.
219 * @void
220 *
221 * _info() is called to return module information. This is a typical entry
222 * point with a predefined role: it simply calls mod_info().
223 */
224 int
225 _info(struct modinfo *modinfop)
226 {
227 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
228
229 return (mod_info(&modlinkage, modinfop));
230 }
231
232 /*
233 * _fini - prepare a loadable module for unloading
234 * @void
235 *
236 * In _fini(), the driver should release any resources that were allocated in
237 * _init(). The driver must remove itself from the system module list.
238 */
239 int
240 _fini(void)
241 {
242 int ret;
243
244 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
245
246 if ((ret = mod_remove(&modlinkage)) != 0)
247 return (ret);
248
249 scsi_hba_fini(&modlinkage);
250
251 ddi_soft_state_fini(&megasas_state);
252
253 return (ret);
254 }
255
256
257 /*
258 * ************************************************************************** *
259 * *
260 * common entry points - for autoconfiguration *
261 * *
262 * ************************************************************************** *
263 */
264 /*
265 * attach - adds a device to the system as part of initialization
266 * @dip:
267 * @cmd:
268 *
269 * The kernel calls a driver's attach() entry point to attach an instance of
270 * a device (for MegaRAID, an instance of a controller) or to resume
271 * operation for an instance of a device that has been suspended or has been
272 * shut down by the power management framework.
273 * The attach() entry point typically includes the following types of
274 * processing:
275 * - allocate a soft-state structure for the device instance (for MegaRAID,
276 * controller instance)
277 * - initialize per-instance mutexes
278 * - initialize condition variables
279 * - register the device's interrupts (for MegaRAID, controller's interrupts)
280 * - map the registers and memory of the device instance (for MegaRAID,
281 * controller instance)
282 * - create minor device nodes for the device instance (for MegaRAID,
283 * controller instance)
284 * - report that the device instance (for MegaRAID, controller instance) has
285 * attached
286 */
287 static int
288 megasas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
289 {
290 int instance_no;
291 int nregs;
292 uint8_t added_isr_f = 0;
293 uint8_t added_soft_isr_f = 0;
294 uint8_t create_devctl_node_f = 0;
295 uint8_t create_scsi_node_f = 0;
296 uint8_t create_ioc_node_f = 0;
297 uint8_t tran_alloc_f = 0;
298 uint8_t irq;
299 uint16_t vendor_id;
300 uint16_t device_id;
301 uint16_t subsysvid;
302 uint16_t subsysid;
303 uint16_t command;
304
305 scsi_hba_tran_t *tran;
306 ddi_dma_attr_t tran_dma_attr;
307 struct megasas_instance *instance;
308
309 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
310
311 /* CONSTCOND */
312 ASSERT(NO_COMPETING_THREADS);
313
314 instance_no = ddi_get_instance(dip);
315
316 /*
317 * Since we know that some instantiations of this device can be
318 * plugged into slave-only SBus slots, check to see whether this is
319 * one such.
320 */
321 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
322 con_log(CL_ANN, (CE_WARN,
323 "mega%d: Device in slave-only slot, unused", instance_no));
324 return (DDI_FAILURE);
325 }
326
327 switch (cmd) {
328 case DDI_ATTACH:
329 con_log(CL_DLEVEL1, (CE_NOTE, "megasas: DDI_ATTACH"));
330 /* allocate the soft state for the instance */
331 if (ddi_soft_state_zalloc(megasas_state, instance_no)
332 != DDI_SUCCESS) {
333 con_log(CL_ANN, (CE_WARN,
334 "mega%d: Failed to allocate soft state",
335 instance_no));
336
337 return (DDI_FAILURE);
338 }
339
340 instance = (struct megasas_instance *)ddi_get_soft_state
341 (megasas_state, instance_no);
342
343 if (instance == NULL) {
344 con_log(CL_ANN, (CE_WARN,
345 "mega%d: Bad soft state", instance_no));
346
347 ddi_soft_state_free(megasas_state, instance_no);
348
349 return (DDI_FAILURE);
350 }
351
352 bzero((caddr_t)instance,
353 sizeof (struct megasas_instance));
354
355 instance->func_ptr = kmem_zalloc(
356 sizeof (struct megasas_func_ptr), KM_SLEEP);
357 ASSERT(instance->func_ptr);
358
359 /* Setup the PCI configuration space handles */
360 if (pci_config_setup(dip, &instance->pci_handle) !=
361 DDI_SUCCESS) {
362 con_log(CL_ANN, (CE_WARN,
363 "mega%d: pci config setup failed ",
364 instance_no));
365
366 kmem_free(instance->func_ptr,
367 sizeof (struct megasas_func_ptr));
368 ddi_soft_state_free(megasas_state, instance_no);
369
370 return (DDI_FAILURE);
371 }
372
373 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
374 con_log(CL_ANN, (CE_WARN,
375 "megaraid: failed to get registers."));
376
377 pci_config_teardown(&instance->pci_handle);
378 kmem_free(instance->func_ptr,
379 sizeof (struct megasas_func_ptr));
380 ddi_soft_state_free(megasas_state, instance_no);
381
382 return (DDI_FAILURE);
383 }
384
385 vendor_id = pci_config_get16(instance->pci_handle,
386 PCI_CONF_VENID);
387 device_id = pci_config_get16(instance->pci_handle,
388 PCI_CONF_DEVID);
389
390 subsysvid = pci_config_get16(instance->pci_handle,
391 PCI_CONF_SUBVENID);
392 subsysid = pci_config_get16(instance->pci_handle,
393 PCI_CONF_SUBSYSID);
394
395 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
396 (pci_config_get16(instance->pci_handle,
397 PCI_CONF_COMM) | PCI_COMM_ME));
398 irq = pci_config_get8(instance->pci_handle,
399 PCI_CONF_ILINE);
400
401 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
402 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
403 instance_no, vendor_id, device_id, subsysvid,
404 subsysid, irq, MEGASAS_VERSION));
405
406 /* enable bus-mastering */
407 command = pci_config_get16(instance->pci_handle,
408 PCI_CONF_COMM);
409
410 if (!(command & PCI_COMM_ME)) {
411 command |= PCI_COMM_ME;
412
413 pci_config_put16(instance->pci_handle,
414 PCI_CONF_COMM, command);
415
416 con_log(CL_ANN, (CE_CONT, "megaraid%d: "
417 "enable bus-mastering\n", instance_no));
418 } else {
419 con_log(CL_DLEVEL1, (CE_CONT, "megaraid%d: "
420 "bus-mastering already set\n", instance_no));
421 }
422
423 /* initialize function pointers */
424 if ((device_id == PCI_DEVICE_ID_LSI_1078) ||
425 (device_id == PCI_DEVICE_ID_LSI_1078DE)) {
426 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
427 "1078R/DE detected\n", instance_no));
428 instance->func_ptr->read_fw_status_reg =
429 read_fw_status_reg_ppc;
430 instance->func_ptr->issue_cmd = issue_cmd_ppc;
431 instance->func_ptr->issue_cmd_in_sync_mode =
432 issue_cmd_in_sync_mode_ppc;
433 instance->func_ptr->issue_cmd_in_poll_mode =
434 issue_cmd_in_poll_mode_ppc;
435 instance->func_ptr->enable_intr =
436 enable_intr_ppc;
437 instance->func_ptr->disable_intr =
438 disable_intr_ppc;
439 instance->func_ptr->intr_ack = intr_ack_ppc;
440 } else {
441 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
442 "1064/8R detected\n", instance_no));
443 instance->func_ptr->read_fw_status_reg =
444 read_fw_status_reg_xscale;
445 instance->func_ptr->issue_cmd =
446 issue_cmd_xscale;
447 instance->func_ptr->issue_cmd_in_sync_mode =
448 issue_cmd_in_sync_mode_xscale;
449 instance->func_ptr->issue_cmd_in_poll_mode =
450 issue_cmd_in_poll_mode_xscale;
451 instance->func_ptr->enable_intr =
452 enable_intr_xscale;
453 instance->func_ptr->disable_intr =
454 disable_intr_xscale;
455 instance->func_ptr->intr_ack =
456 intr_ack_xscale;
457 }
458
459 instance->baseaddress = pci_config_get32(
460 instance->pci_handle, PCI_CONF_BASE0);
461 instance->baseaddress &= 0x0fffc;
462
463 instance->dip = dip;
464 instance->vendor_id = vendor_id;
465 instance->device_id = device_id;
466 instance->subsysvid = subsysvid;
467 instance->subsysid = subsysid;
468
469 /* Initialize FMA */
470 instance->fm_capabilities = ddi_prop_get_int(
471 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
472 "fm-capable", DDI_FM_EREPORT_CAPABLE |
473 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
474 | DDI_FM_ERRCB_CAPABLE);
475
476 megasas_fm_init(instance);
477
478 /* setup the mfi based low level driver */
479 if (init_mfi(instance) != DDI_SUCCESS) {
480 con_log(CL_ANN, (CE_WARN, "megaraid: "
481 "could not initialize the low level driver"));
482
483 goto fail_attach;
484 }
485
486 /*
487 * Allocate the interrupt blocking cookie.
488 * It represents the information the framework
489 * needs to block interrupts. This cookie will
490 * be used by the locks shared across our ISR.
491 * These locks must be initialized before we
492 * register our ISR.
493 * ddi_add_intr(9F)
494 */
495 if (ddi_get_iblock_cookie(dip, 0,
496 &instance->iblock_cookie) != DDI_SUCCESS) {
497
498 goto fail_attach;
499 }
500
501 if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH,
502 &instance->soft_iblock_cookie) != DDI_SUCCESS) {
503
504 goto fail_attach;
505 }
506
507 /*
508 * Initialize the driver mutexes common to
509 * normal/high level isr
510 */
511 if (ddi_intr_hilevel(dip, 0)) {
512 instance->isr_level = HIGH_LEVEL_INTR;
513 mutex_init(&instance->cmd_pool_mtx,
514 "cmd_pool_mtx", MUTEX_DRIVER,
515 instance->soft_iblock_cookie);
516 mutex_init(&instance->cmd_pend_mtx,
517 "cmd_pend_mtx", MUTEX_DRIVER,
518 instance->soft_iblock_cookie);
519 } else {
520 /*
521 * Initialize the driver mutexes
522 * specific to soft-isr
523 */
524 instance->isr_level = NORMAL_LEVEL_INTR;
525 mutex_init(&instance->cmd_pool_mtx,
526 "cmd_pool_mtx", MUTEX_DRIVER,
527 instance->iblock_cookie);
528 mutex_init(&instance->cmd_pend_mtx,
529 "cmd_pend_mtx", MUTEX_DRIVER,
530 instance->iblock_cookie);
531 }
532
533 mutex_init(&instance->completed_pool_mtx,
534 "completed_pool_mtx", MUTEX_DRIVER,
535 instance->iblock_cookie);
536 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
537 MUTEX_DRIVER, instance->iblock_cookie);
538 mutex_init(&instance->aen_cmd_mtx, "aen_cmd_mtx",
539 MUTEX_DRIVER, instance->iblock_cookie);
540 mutex_init(&instance->abort_cmd_mtx, "abort_cmd_mtx",
541 MUTEX_DRIVER, instance->iblock_cookie);
542
543 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
544 cv_init(&instance->abort_cmd_cv, NULL, CV_DRIVER, NULL);
545
546 INIT_LIST_HEAD(&instance->completed_pool_list);
547
548 /* Register our isr. */
549 if (ddi_add_intr(dip, 0, NULL, NULL, megasas_isr,
550 (caddr_t)instance) != DDI_SUCCESS) {
551 con_log(CL_ANN, (CE_WARN,
552 " ISR did not register"));
553
554 goto fail_attach;
555 }
556
557 added_isr_f = 1;
558
559 /* Register our soft-isr for highlevel interrupts. */
560 if (instance->isr_level == HIGH_LEVEL_INTR) {
561 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH,
562 &instance->soft_intr_id, NULL, NULL,
563 megasas_softintr, (caddr_t)instance) !=
564 DDI_SUCCESS) {
565 con_log(CL_ANN, (CE_WARN,
566 " Software ISR did not register"));
567
568 goto fail_attach;
569 }
570
571 added_soft_isr_f = 1;
572 }
573
574 /* Allocate a transport structure */
575 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
576
577 if (tran == NULL) {
578 con_log(CL_ANN, (CE_WARN,
579 "scsi_hba_tran_alloc failed"));
580 goto fail_attach;
581 }
582
583 tran_alloc_f = 1;
584
585 instance->tran = tran;
586
587 tran->tran_hba_private = instance;
588 tran->tran_tgt_private = NULL;
589 tran->tran_tgt_init = megasas_tran_tgt_init;
590 tran->tran_tgt_probe = scsi_hba_probe;
591 tran->tran_tgt_free = (void (*)())NULL;
592 tran->tran_init_pkt = megasas_tran_init_pkt;
593 tran->tran_start = megasas_tran_start;
594 tran->tran_abort = megasas_tran_abort;
595 tran->tran_reset = megasas_tran_reset;
596 tran->tran_bus_reset = megasas_tran_bus_reset;
597 tran->tran_getcap = megasas_tran_getcap;
598 tran->tran_setcap = megasas_tran_setcap;
599 tran->tran_destroy_pkt = megasas_tran_destroy_pkt;
600 tran->tran_dmafree = megasas_tran_dmafree;
601 tran->tran_sync_pkt = megasas_tran_sync_pkt;
602 tran->tran_reset_notify = NULL;
603 tran->tran_quiesce = megasas_tran_quiesce;
604 tran->tran_unquiesce = megasas_tran_unquiesce;
605
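		/*
		 * Use a copy of the generic DMA attributes, but limit the
		 * SG list length to this instance's max_num_sge.
		 */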
606 tran_dma_attr = megasas_generic_dma_attr;
607 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
608
609 /* Attach this instance of the hba */
610 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
611 != DDI_SUCCESS) {
612 con_log(CL_ANN, (CE_WARN,
613 "scsi_hba_attach failed\n"));
614
615 goto fail_attach;
616 }
617
618 /* create devctl node for cfgadm command */
619 if (ddi_create_minor_node(dip, "devctl",
620 S_IFCHR, INST2DEVCTL(instance_no),
621 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
622 con_log(CL_ANN, (CE_WARN,
623 "megaraid: failed to create devctl node."));
624
625 goto fail_attach;
626 }
627
628 create_devctl_node_f = 1;
629
630 /* create scsi node for cfgadm command */
631 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
632 INST2SCSI(instance_no),
633 DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
634 DDI_FAILURE) {
635 con_log(CL_ANN, (CE_WARN,
636 "megaraid: failed to create scsi node."));
637
638 goto fail_attach;
639 }
640
641 create_scsi_node_f = 1;
642
643 (void) sprintf(instance->iocnode, "%d:lsirdctl",
644 instance_no);
645
646 /*
647 * Create a node for applications
648 * for issuing ioctl to the driver.
649 */
650 if (ddi_create_minor_node(dip, instance->iocnode,
651 S_IFCHR, INST2LSIRDCTL(instance_no),
652 DDI_PSEUDO, 0) == DDI_FAILURE) {
653 con_log(CL_ANN, (CE_WARN,
654 "megaraid: failed to create ioctl node."));
655
656 goto fail_attach;
657 }
658
659 create_ioc_node_f = 1;
660
661 /* enable interrupt */
662 instance->func_ptr->enable_intr(instance);
663
664 /* initiate AEN */
665 if (start_mfi_aen(instance)) {
666 con_log(CL_ANN, (CE_WARN,
667 "megaraid: failed to initiate AEN."));
668 goto fail_initiate_aen;
669 }
670
671 con_log(CL_DLEVEL1, (CE_NOTE,
672 "AEN started for instance %d.", instance_no));
673
674 /* Finally! We are on the air. */
675 ddi_report_dev(dip);
676
677 if (megasas_check_acc_handle(instance->regmap_handle) !=
678 DDI_SUCCESS) {
679 goto fail_attach;
680 }
681 if (megasas_check_acc_handle(instance->pci_handle) !=
682 DDI_SUCCESS) {
683 goto fail_attach;
684 }
685 break;
686 case DDI_PM_RESUME:
687 con_log(CL_ANN, (CE_NOTE,
688 "megasas: DDI_PM_RESUME"));
689 break;
690 case DDI_RESUME:
691 con_log(CL_ANN, (CE_NOTE,
692 "megasas: DDI_RESUME"));
693 break;
694 default:
695 con_log(CL_ANN, (CE_WARN,
696 "megasas: invalid attach cmd=%x", cmd));
697 return (DDI_FAILURE);
698 }
699
700 return (DDI_SUCCESS);
701
702 fail_initiate_aen:
703 fail_attach:
704 if (create_devctl_node_f) {
705 ddi_remove_minor_node(dip, "devctl");
706 }
707
708 if (create_scsi_node_f) {
709 ddi_remove_minor_node(dip, "scsi");
710 }
711
712 if (create_ioc_node_f) {
713 ddi_remove_minor_node(dip, instance->iocnode);
714 }
715
716 if (tran_alloc_f) {
717 scsi_hba_tran_free(tran);
718 }
719
720
721 if (added_soft_isr_f) {
722 ddi_remove_softintr(instance->soft_intr_id);
723 }
724
725 if (added_isr_f) {
726 ddi_remove_intr(dip, 0, instance->iblock_cookie);
727 }
728
729 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
730 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
731
732 megasas_fm_fini(instance);
733
734 pci_config_teardown(&instance->pci_handle);
735
736 ddi_soft_state_free(megasas_state, instance_no);
737
738 con_log(CL_ANN, (CE_NOTE,
739 "megasas: return failure from mega_attach\n"));
740
741 return (DDI_FAILURE);
742 }
743
744 /*
745 * getinfo - gets device information
746 * @dip:
747 * @cmd:
748 * @arg:
749 * @resultp:
750 *
751 * The system calls getinfo() to obtain configuration information that only
752 * the driver knows. The mapping of minor numbers to device instance is
753 * entirely under the control of the driver. The system sometimes needs to ask
754 * the driver which device a particular dev_t represents.
755 * Given the device number, return the devinfo pointer from the driver's
756 * soft state for that instance.
757 */
758 /*ARGSUSED*/
759 static int
760 megasas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
761 {
762 int rval;
763 int megasas_minor = getminor((dev_t)arg);
764
765 struct megasas_instance *instance;
766
767 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
768
769 switch (cmd) {
770 case DDI_INFO_DEVT2DEVINFO:
771 instance = (struct megasas_instance *)
772 ddi_get_soft_state(megasas_state,
773 MINOR2INST(megasas_minor));
774
775 if (instance == NULL) {
776 *resultp = NULL;
777 rval = DDI_FAILURE;
778 } else {
779 *resultp = instance->dip;
780 rval = DDI_SUCCESS;
781 }
782 break;
783 case DDI_INFO_DEVT2INSTANCE:
784 *resultp = (void *)(uintptr_t)MINOR2INST(megasas_minor);
785 rval = DDI_SUCCESS;
786 break;
787 default:
788 *resultp = NULL;
789 rval = DDI_FAILURE;
790 }
791
792 return (rval);
793 }
794
795 /*
796 * detach - detaches a device from the system
797 * @dip: pointer to the device's dev_info structure
798 * @cmd: type of detach
799 *
800 * A driver's detach() entry point is called to detach an instance of a device
801 * that is bound to the driver. The entry point is called with the instance of
802 * the device node to be detached and with DDI_DETACH, which is specified as
803 * the cmd argument to the entry point.
804 * This routine is called during driver unload. We free all the allocated
805 * resources and call the corresponding LLD so that it can also release all
806 * its resources.
807 */
808 static int
809 megasas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
810 {
811 int instance_no;
812
813 struct megasas_instance *instance;
814
815 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
816
817 /* CONSTCOND */
818 ASSERT(NO_COMPETING_THREADS);
819
820 instance_no = ddi_get_instance(dip);
821
822 instance = (struct megasas_instance *)ddi_get_soft_state(megasas_state,
823 instance_no);
824
825 if (!instance) {
826 con_log(CL_ANN, (CE_WARN,
827 "megasas:%d could not get instance in detach",
828 instance_no));
829
830 return (DDI_FAILURE);
831 }
832
833 con_log(CL_ANN, (CE_NOTE,
834 "megasas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n",
835 instance_no, instance->vendor_id, instance->device_id,
836 instance->subsysvid, instance->subsysid));
837
838 switch (cmd) {
839 case DDI_DETACH:
840 con_log(CL_ANN, (CE_NOTE,
841 "megasas_detach: DDI_DETACH\n"));
842
843 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
844 con_log(CL_ANN, (CE_WARN,
845 "megasas:%d failed to detach",
846 instance_no));
847
848 return (DDI_FAILURE);
849 }
850
851 scsi_hba_tran_free(instance->tran);
852
853 if (abort_aen_cmd(instance, instance->aen_cmd)) {
854 con_log(CL_ANN, (CE_WARN, "megasas_detach: "
855 "failed to abort prevous AEN command\n"));
856
857 return (DDI_FAILURE);
858 }
859
860 instance->func_ptr->disable_intr(instance);
861
862 if (instance->isr_level == HIGH_LEVEL_INTR) {
863 ddi_remove_softintr(instance->soft_intr_id);
864 }
865
866 ddi_remove_intr(dip, 0, instance->iblock_cookie);
867
868 free_space_for_mfi(instance);
869
870 megasas_fm_fini(instance);
871
872 pci_config_teardown(&instance->pci_handle);
873
874 kmem_free(instance->func_ptr,
875 sizeof (struct megasas_func_ptr));
876
877 ddi_soft_state_free(megasas_state, instance_no);
878 break;
879 case DDI_PM_SUSPEND:
880 con_log(CL_ANN, (CE_NOTE,
881 "megasas_detach: DDI_PM_SUSPEND\n"));
882
883 break;
884 case DDI_SUSPEND:
885 con_log(CL_ANN, (CE_NOTE,
886 "megasas_detach: DDI_SUSPEND\n"));
887
888 break;
889 default:
890 con_log(CL_ANN, (CE_WARN,
891 "invalid detach command:0x%x", cmd));
892 return (DDI_FAILURE);
893 }
894
895 return (DDI_SUCCESS);
896 }
897
898 /*
899 * ************************************************************************** *
900 * *
901 * common entry points - for character driver types *
902 * *
903 * ************************************************************************** *
904 */
905 /*
906 * open - gets access to a device
907 * @dev:
908 * @openflags:
909 * @otyp:
910 * @credp:
911 *
912 * Access to a device by one or more application programs is controlled
913 * through the open() and close() entry points. The primary function of
914 * open() is to verify that the open request is allowed.
915 */
916 static int
917 megasas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
918 {
919 int rval = 0;
920
921 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
922
923 /* Check root permissions */
924 if (drv_priv(credp) != 0) {
925 con_log(CL_ANN, (CE_WARN,
926 "megaraid: Non-root ioctl access tried!"));
927 return (EPERM);
928 }
929
930 /* Verify we are being opened as a character device */
931 if (otyp != OTYP_CHR) {
932 con_log(CL_ANN, (CE_WARN,
933 "megaraid: ioctl node must be a char node\n"));
934 return (EINVAL);
935 }
936
937 if (ddi_get_soft_state(megasas_state, MINOR2INST(getminor(*dev)))
938 == NULL) {
939 return (ENXIO);
940 }
941
942 if (scsi_hba_open) {
943 rval = scsi_hba_open(dev, openflags, otyp, credp);
944 }
945
946 return (rval);
947 }
948
949 /*
950 * close - gives up access to a device
951 * @dev:
952 * @openflags:
953 * @otyp:
954 * @credp:
955 *
956 * close() should perform any cleanup necessary to finish using the minor
957 * device, and prepare the device (and driver) to be opened again.
958 */
959 static int
960 megasas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
961 {
962 int rval = 0;
963
964 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
965
966 /* no need for locks! */
967
968 if (scsi_hba_close) {
969 rval = scsi_hba_close(dev, openflags, otyp, credp);
970 }
971
972 return (rval);
973 }
974
975 /*
976 * ioctl - performs a range of I/O commands for character drivers
977 * @dev:
978 * @cmd:
979 * @arg:
980 * @mode:
981 * @credp:
982 * @rvalp:
983 *
984 * ioctl() routine must make sure that user data is copied into or out of the
985 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
986 * and ddi_copyout(), as appropriate.
987 * This is a wrapper routine to serialize access to the actual ioctl routine.
988 * ioctl() should return 0 on success, or the appropriate error number. The
989 * driver may also set the value returned to the calling process through rvalp.
990 */
991 static int
992 megasas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
993 int *rvalp)
994 {
995 int rval = 0;
996
997 struct megasas_instance *instance;
998 struct megasas_ioctl ioctl;
999 struct megasas_aen aen;
1000
1001 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1002
1003 instance = ddi_get_soft_state(megasas_state, MINOR2INST(getminor(dev)));
1004
1005 if (instance == NULL) {
1006 /* invalid minor number */
1007 con_log(CL_ANN, (CE_WARN, "megaraid: adapter not found."));
1008 return (ENXIO);
1009 }
1010
1011 switch ((uint_t)cmd) {
1012 case MEGASAS_IOCTL_FIRMWARE:
1013 if (ddi_copyin((void *) arg, &ioctl,
1014 sizeof (struct megasas_ioctl), mode)) {
1015 con_log(CL_ANN, (CE_WARN, "megasas_ioctl: "
1016 "ERROR IOCTL copyin"));
1017 return (EFAULT);
1018 }
1019
1020 if (ioctl.control_code == MR_DRIVER_IOCTL_COMMON) {
1021 rval = handle_drv_ioctl(instance, &ioctl, mode);
1022 } else {
1023 rval = handle_mfi_ioctl(instance, &ioctl, mode);
1024 }
1025
1026 if (ddi_copyout((void *) &ioctl, (void *)arg,
1027 (sizeof (struct megasas_ioctl) - 1), mode)) {
1028 con_log(CL_ANN, (CE_WARN,
1029 "megasas_ioctl: copy_to_user failed\n"));
1030 rval = 1;
1031 }
1032
1033 break;
1034 case MEGASAS_IOCTL_AEN:
1035 if (ddi_copyin((void *) arg, &aen,
1036 sizeof (struct megasas_aen), mode)) {
1037 con_log(CL_ANN, (CE_WARN,
1038 "megasas_ioctl: ERROR AEN copyin"));
1039 return (EFAULT);
1040 }
1041
1042 rval = handle_mfi_aen(instance, &aen);
1043
1044 if (ddi_copyout((void *) &aen, (void *)arg,
1045 sizeof (struct megasas_aen), mode)) {
1046 con_log(CL_ANN, (CE_WARN,
1047 "megasas_ioctl: copy_to_user failed\n"));
1048 rval = 1;
1049 }
1050
1051 break;
1052 default:
1053 rval = scsi_hba_ioctl(dev, cmd, arg,
1054 mode, credp, rvalp);
1055
1056 con_log(CL_DLEVEL1, (CE_NOTE, "megasas_ioctl: "
1057 "scsi_hba_ioctl called, ret = %x.", rval));
1058 }
1059
1060 return (rval);
1061 }
1062
1063 /*
1064 * ************************************************************************** *
1065 * *
1066 * common entry points - for block driver types *
1067 * *
1068 * ************************************************************************** *
1069 */
1070 /*
1071 * reset - TBD
1072 * @dip:
1073 * @cmd:
1074 *
1075 * TBD
1076 */
1077 /*ARGSUSED*/
1078 static int
1079 megasas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1080 {
1081 int instance_no;
1082
1083 struct megasas_instance *instance;
1084
1085 instance_no = ddi_get_instance(dip);
1086 instance = (struct megasas_instance *)ddi_get_soft_state
1087 (megasas_state, instance_no);
1088
1089 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1090
1091 if (!instance) {
1092 con_log(CL_ANN, (CE_WARN,
1093 "megaraid:%d could not get adapter in reset",
1094 instance_no));
1095 return (DDI_FAILURE);
1096 }
1097
1098 con_log(CL_ANN, (CE_NOTE, "flushing cache for instance %d ..",
1099 instance_no));
1100
1101 flush_cache(instance);
1102
1103 return (DDI_SUCCESS);
1104 }
1105
1106
1107 /*
1108 * ************************************************************************** *
1109 * *
1110 * entry points (SCSI HBA) *
1111 * *
1112 * ************************************************************************** *
1113 */
1114 /*
1115 * tran_tgt_init - initialize a target device instance
1116 * @hba_dip:
1117 * @tgt_dip:
1118 * @tran:
1119 * @sd:
1120 *
1121 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1122 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1123 * the device's address as valid and supportable for that particular HBA.
1124 * By returning DDI_FAILURE, the instance of the target driver for that device
1125 * is not probed or attached.
1126 */
1127 /*ARGSUSED*/
1128 static int
1129 megasas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1130 scsi_hba_tran_t *tran, struct scsi_device *sd)
1131 {
1132 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1133
1134 return (DDI_SUCCESS);
1135 }
1136
1137 /*
1138 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1139 * @ap:
1140 * @pkt:
1141 * @bp:
1142 * @cmdlen:
1143 * @statuslen:
1144 * @tgtlen:
1145 * @flags:
1146 * @callback:
1147 *
1148 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1149 * structure and DMA resources for a target driver request. The
1150 * tran_init_pkt() entry point is called when the target driver calls the
1151 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1152 * is a request to perform one or more of three possible services:
1153 * - allocation and initialization of a scsi_pkt structure
1154 * - allocation of DMA resources for data transfer
1155 * - reallocation of DMA resources for the next portion of the data transfer
1156 */
1157 static struct scsi_pkt *
1158 megasas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1159 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1160 int flags, int (*callback)(), caddr_t arg)
1161 {
1162 struct scsa_cmd *acmd;
1163 struct megasas_instance *instance;
1164 struct scsi_pkt *new_pkt;
1165
1166 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1167
1168 instance = ADDR2MEGA(ap);
1169
1170 /* step #1 : pkt allocation */
1171 if (pkt == NULL) {
1172 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1173 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1174 if (pkt == NULL) {
1175 return (NULL);
1176 }
1177
1178 acmd = PKT2CMD(pkt);
1179
1180 /*
1181 * Initialize the new pkt - we redundantly initialize
1182 * all the fields for illustrative purposes.
1183 */
1184 acmd->cmd_pkt = pkt;
1185 acmd->cmd_flags = 0;
1186 acmd->cmd_scblen = statuslen;
1187 acmd->cmd_cdblen = cmdlen;
1188 acmd->cmd_dmahandle = NULL;
1189 acmd->cmd_ncookies = 0;
1190 acmd->cmd_cookie = 0;
1191 acmd->cmd_cookiecnt = 0;
1192 acmd->cmd_nwin = 0;
1193
1194 pkt->pkt_address = *ap;
1195 pkt->pkt_comp = (void (*)())NULL;
1196 pkt->pkt_flags = 0;
1197 pkt->pkt_time = 0;
1198 pkt->pkt_resid = 0;
1199 pkt->pkt_state = 0;
1200 pkt->pkt_statistics = 0;
1201 pkt->pkt_reason = 0;
1202 new_pkt = pkt;
1203 } else {
1204 acmd = PKT2CMD(pkt);
1205 new_pkt = NULL;
1206 }
1207
1208 /* step #2 : dma allocation/move */
1209 if (bp && bp->b_bcount != 0) {
1210 if (acmd->cmd_dmahandle == NULL) {
1211 if (megasas_dma_alloc(instance, pkt, bp, flags,
1212 callback) == -1) {
1213 if (new_pkt) {
1214 scsi_hba_pkt_free(ap, new_pkt);
1215 }
1216
1217 return ((struct scsi_pkt *)NULL);
1218 }
1219 } else {
1220 if (megasas_dma_move(instance, pkt, bp) == -1) {
1221 return ((struct scsi_pkt *)NULL);
1222 }
1223 }
1224 }
1225
1226 return (pkt);
1227 }
1228
1229 /*
1230 * tran_start - transport a SCSI command to the addressed target
1231 * @ap:
1232 * @pkt:
1233 *
1234 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1235 * SCSI command to the addressed target. The SCSI command is described
1236 * entirely within the scsi_pkt structure, which the target driver allocated
1237 * through the HBA driver's tran_init_pkt() entry point. If the command
1238 * involves a data transfer, DMA resources must also have been allocated for
1239 * the scsi_pkt structure.
1240 *
1241 * Return Values :
1242 * TRAN_BUSY - request queue is full, no more free scbs
1243 * TRAN_ACCEPT - pkt has been submitted to the instance
1244 */
1245 static int
1246 megasas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
1247 {
1248 uchar_t cmd_done = 0;
1249
1250 struct megasas_instance *instance = ADDR2MEGA(ap);
1251 struct megasas_cmd *cmd;
1252
1253 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
1254 __func__, __LINE__, pkt->pkt_cdbp[0]));
1255
1256 pkt->pkt_reason = CMD_CMPLT;
1257 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1258
1259 cmd = build_cmd(instance, ap, pkt, &cmd_done);
1260
1261 /*
1262 * Check whether the command was already completed by the build_cmd()
1263 * routine. In that case cmd_done is set and the packet is completed
1264 * immediately, with the appropriate reason in the pkt_reason field.
1265 */
1266 if (cmd_done) {
1267 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1268 scsi_hba_pkt_comp(pkt);
1269 }
1270 pkt->pkt_reason = CMD_CMPLT;
1271 pkt->pkt_scbp[0] = STATUS_GOOD;
1272 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1273 | STATE_SENT_CMD;
1274 return (TRAN_ACCEPT);
1275 }
1276
1277 if (cmd == NULL) {
1278 return (TRAN_BUSY);
1279 }
1280
1281 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
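		/*
		 * Normal (interrupt-driven) path: throttle back to the
		 * target driver if the firmware already has more than
		 * max_fw_cmds commands outstanding.
		 */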
1282 if (instance->fw_outstanding > instance->max_fw_cmds) {
1283 con_log(CL_ANN, (CE_CONT, "megasas:Firmware busy"));
1284 return_mfi_pkt(instance, cmd);
1285 return (TRAN_BUSY);
1286 }
1287
1288 /* Synchronize the cmd frame for the controller */
1289 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1290 DDI_DMA_SYNC_FORDEV);
1291
1292 instance->func_ptr->issue_cmd(cmd, instance);
1293
1294 } else {
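		/*
		 * FLAG_NOINTR path: issue the command in polled mode and
		 * complete the packet here without relying on the ISR.
		 */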
1295 struct megasas_header *hdr = &cmd->frame->hdr;
1296
1297 cmd->sync_cmd = MEGASAS_TRUE;
1298
1299 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1300
1301 pkt->pkt_reason = CMD_CMPLT;
1302 pkt->pkt_statistics = 0;
1303 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1304
1305 switch (hdr->cmd_status) {
1306 case MFI_STAT_OK:
1307 pkt->pkt_scbp[0] = STATUS_GOOD;
1308 break;
1309
1310 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1311
1312 pkt->pkt_reason = CMD_CMPLT;
1313 pkt->pkt_statistics = 0;
1314
1315 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1316 break;
1317
1318 case MFI_STAT_DEVICE_NOT_FOUND:
1319 pkt->pkt_reason = CMD_DEV_GONE;
1320 pkt->pkt_statistics = STAT_DISCON;
1321 break;
1322
1323 default:
1324 ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
1325 }
1326
1327 return_mfi_pkt(instance, cmd);
1328 (void) megasas_common_check(instance, cmd);
1329
1330 scsi_hba_pkt_comp(pkt);
1331
1332 }
1333
1334 return (TRAN_ACCEPT);
1335 }
1336
1337 /*
1338 * tran_abort - Abort any commands that are currently in transport
1339 * @ap:
1340 * @pkt:
1341 *
1342 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1343 * commands that are currently in transport for a particular target. This entry
1344 * point is called when a target driver calls scsi_abort(). The tran_abort()
1345 * entry point should attempt to abort the command denoted by the pkt
1346 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1347 * abort all outstanding commands in the transport layer for the particular
1348 * target or logical unit.
1349 */
1350 /*ARGSUSED*/
1351 static int
1352 megasas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1353 {
1354 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1355
1356 /* aborting command not supported by H/W */
1357
1358 return (DDI_FAILURE);
1359 }
1360
1361 /*
1362 * tran_reset - reset either the SCSI bus or target
1363 * @ap:
1364 * @level:
1365 *
1366 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
1367 * the SCSI bus or a particular SCSI target device. This entry point is called
1368 * when a target driver calls scsi_reset(). The tran_reset() entry point must
1369 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
1370 * particular target or logical unit must be reset.
1371 */
1372 /*ARGSUSED*/
1373 static int
1374 megasas_tran_reset(struct scsi_address *ap, int level)
1375 {
1376 struct megasas_instance *instance = ADDR2MEGA(ap);
1377
1378 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1379
1380 if (wait_for_outstanding(instance)) {
1381 return (DDI_FAILURE);
1382 } else {
1383 return (DDI_SUCCESS);
1384 }
1385 }
1386
1387 /*
1388 * tran_bus_reset - reset the SCSI bus
1389 * @dip:
1390 * @level:
1391 *
1392 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
1393 * initialized during the HBA driver's attach(). The vector should point to
1394 * an HBA entry point that is to be called when a user initiates a bus reset.
1395 * Implementation is hardware specific. If the HBA driver cannot reset the
1396 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
1397 * or not initialize this vector.
1398 */
1399 /*ARGSUSED*/
1400 static int
1401 megasas_tran_bus_reset(dev_info_t *dip, int level)
1402 {
1403 int instance_no = ddi_get_instance(dip);
1404
1405 struct megasas_instance *instance = ddi_get_soft_state(megasas_state,
1406 instance_no);
1407
1408 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1409
1410 if (wait_for_outstanding(instance)) {
1411 return (DDI_FAILURE);
1412 } else {
1413 return (DDI_SUCCESS);
1414 }
1415 }
1416
1417 /*
1418 * tran_getcap - get one of a set of SCSA-defined capabilities
1419 * @ap:
1420 * @cap:
1421 * @whom:
1422 *
1423 * The target driver can request the current setting of the capability for a
1424 * particular target by setting the whom parameter to nonzero. A whom value of
1425 * zero indicates a request for the current setting of the general capability
1426 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
1427 * for undefined capabilities or the current value of the requested capability.
1428 */
1429 /*ARGSUSED*/
1430 static int
1431 megasas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1432 {
1433 int rval = 0;
1434
1435 struct megasas_instance *instance = ADDR2MEGA(ap);
1436
1437 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1438
1439 /* we do allow inquiring about capabilities for other targets */
1440 if (cap == NULL) {
1441 return (-1);
1442 }
1443
1444 switch (scsi_hba_lookup_capstr(cap)) {
1445 case SCSI_CAP_DMA_MAX:
1446 /* Limit to 16MB max transfer */
1447 rval = megasas_max_cap_maxxfer;
1448 break;
1449 case SCSI_CAP_MSG_OUT:
1450 rval = 1;
1451 break;
1452 case SCSI_CAP_DISCONNECT:
1453 rval = 0;
1454 break;
1455 case SCSI_CAP_SYNCHRONOUS:
1456 rval = 0;
1457 break;
1458 case SCSI_CAP_WIDE_XFER:
1459 rval = 1;
1460 break;
1461 case SCSI_CAP_TAGGED_QING:
1462 rval = 1;
1463 break;
1464 case SCSI_CAP_UNTAGGED_QING:
1465 rval = 1;
1466 break;
1467 case SCSI_CAP_PARITY:
1468 rval = 1;
1469 break;
1470 case SCSI_CAP_INITIATOR_ID:
1471 rval = instance->init_id;
1472 break;
1473 case SCSI_CAP_ARQ:
1474 rval = 1;
1475 break;
1476 case SCSI_CAP_LINKED_CMDS:
1477 rval = 0;
1478 break;
1479 case SCSI_CAP_RESET_NOTIFICATION:
1480 rval = 1;
1481 break;
1482 case SCSI_CAP_GEOMETRY:
1483 rval = -1;
1484
1485 break;
1486 default:
1487 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
1488 scsi_hba_lookup_capstr(cap)));
1489 rval = -1;
1490 break;
1491 }
1492
1493 return (rval);
1494 }
1495
1496 /*
1497 * tran_setcap - set one of a set of SCSA-defined capabilities
1498 * @ap:
1499 * @cap:
1500 * @value:
1501 * @whom:
1502 *
1503 * The target driver might request that the new value be set for a particular
1504 * target by setting the whom parameter to nonzero. A whom value of zero
1505 * means that request is to set the new value for the SCSI bus or for adapter
1506 * hardware in general.
1507 * The tran_setcap() should return the following values as appropriate:
1508 * - -1 for undefined capabilities
1509 * - 0 if the HBA driver cannot set the capability to the requested value
1510 * - 1 if the HBA driver is able to set the capability to the requested value
1511 */
1512 /*ARGSUSED*/
1513 static int
1514 megasas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1515 {
1516 int rval = 1;
1517
1518 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1519
1520 /* We don't allow setting general (whom == 0) capabilities */
1521 if (cap == NULL || whom == 0) {
1522 return (-1);
1523 }
1524
1525 switch (scsi_hba_lookup_capstr(cap)) {
1526 case SCSI_CAP_DMA_MAX:
1527 case SCSI_CAP_MSG_OUT:
1528 case SCSI_CAP_PARITY:
1529 case SCSI_CAP_LINKED_CMDS:
1530 case SCSI_CAP_RESET_NOTIFICATION:
1531 case SCSI_CAP_DISCONNECT:
1532 case SCSI_CAP_SYNCHRONOUS:
1533 case SCSI_CAP_UNTAGGED_QING:
1534 case SCSI_CAP_WIDE_XFER:
1535 case SCSI_CAP_INITIATOR_ID:
1536 case SCSI_CAP_ARQ:
1537 /*
1538 * None of these are settable via
1539 * the capability interface.
1540 */
1541 break;
1542 case SCSI_CAP_TAGGED_QING:
1543 rval = 1;
1544 break;
1545 case SCSI_CAP_SECTOR_SIZE:
1546 rval = 1;
1547 break;
1548
1549 case SCSI_CAP_TOTAL_SECTORS:
1550 rval = 1;
1551 break;
1552 default:
1553 rval = -1;
1554 break;
1555 }
1556
1557 return (rval);
1558 }
1559
1560 /*
1561 * tran_destroy_pkt - deallocate scsi_pkt structure
1562 * @ap:
1563 * @pkt:
1564 *
1565 * The tran_destroy_pkt() entry point is the HBA driver function that
1566 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
1567 * called when the target driver calls scsi_destroy_pkt(). The
1568 * tran_destroy_pkt() entry point must free any DMA resources that have been
1569 * allocated for the packet. An implicit DMA synchronization occurs if the
1570 * DMA resources are freed and any cached data remains after the completion
1571 * of the transfer.
1572 */
1573 static void
1574 megasas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1575 {
1576 struct scsa_cmd *acmd = PKT2CMD(pkt);
1577
1578 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1579
1580 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1581 acmd->cmd_flags &= ~CFLAG_DMAVALID;
1582
1583 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
1584
1585 ddi_dma_free_handle(&acmd->cmd_dmahandle);
1586
1587 acmd->cmd_dmahandle = NULL;
1588 }
1589
1590 /* free the pkt */
1591 scsi_hba_pkt_free(ap, pkt);
1592 }
1593
1594 /*
1595 * tran_dmafree - deallocates DMA resources
1596 * @ap:
1597 * @pkt:
1598 *
1599 * The tran_dmafree() entry point deallocates DMA resources that have been
1600 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
1601 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
1602 * free only DMA resources allocated for a scsi_pkt structure, not the
1603 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
1604 * implicitly performed.
1605 */
1606 /*ARGSUSED*/
1607 static void
1608 megasas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1609 {
1610 register struct scsa_cmd *acmd = PKT2CMD(pkt);
1611
1612 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1613
1614 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1615 acmd->cmd_flags &= ~CFLAG_DMAVALID;
1616
1617 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
1618
1619 ddi_dma_free_handle(&acmd->cmd_dmahandle);
1620
1621 acmd->cmd_dmahandle = NULL;
1622 }
1623 }
1624
1625 /*
1626 * tran_sync_pkt - synchronize the DMA object allocated
1627 * @ap:
1628 * @pkt:
1629 *
1630 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
1631 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
1632 * entry point is called when the target driver calls scsi_sync_pkt(). If the
1633 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
1634 * must synchronize the CPU's view of the data. If the data transfer direction
1635 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
1636 * device's view of the data.
1637 */
1638 /*ARGSUSED*/
1639 static void
1640 megasas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1641 {
1642 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1643
1644 /*
1645 * the ddi_dma_sync() call below is already performed for each I/O
1646 * in the ISR, so it is compiled out here
1647 */
1648 #if 0
1649 int i;
1650
1651 register struct scsa_cmd *acmd = PKT2CMD(pkt);
1652
1653 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1654 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
1655 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
1656 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
1657 }
1658 #endif
1659 }
1660
1661 /*ARGSUSED*/
1662 static int
1663 megasas_tran_quiesce(dev_info_t *dip)
1664 {
1665 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1666
1667 return (1);
1668 }
1669
1670 /*ARGSUSED*/
1671 static int
1672 megasas_tran_unquiesce(dev_info_t *dip)
1673 {
1674 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1675
1676 return (1);
1677 }
1678
1679 /*
1680 * megasas_isr(caddr_t)
1681 *
1682 * The Interrupt Service Routine
1683 *
1684 * Collect status for all completed commands and do callback
1685 *
1686 */
1687 static uint_t
1688 megasas_isr(struct megasas_instance *instance)
1689 {
1690 int need_softintr;
1691 uint32_t producer;
1692 uint32_t consumer;
1693 uint32_t context;
1694
1695 struct megasas_cmd *cmd;
1696
1697 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1698
1699 ASSERT(instance);
1700 if (!instance->func_ptr->intr_ack(instance)) {
1701 return (DDI_INTR_UNCLAIMED);
1702 }
1703
1704 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
1705 0, 0, DDI_DMA_SYNC_FORCPU);
1706
1707 if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
1708 != DDI_SUCCESS) {
1709 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
1710 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
1711 return (DDI_INTR_UNCLAIMED);
1712 }
1713
1714 producer = *instance->producer;
1715 consumer = *instance->consumer;
1716
1717 con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
1718 producer, consumer));
1719
1720 mutex_enter(&instance->completed_pool_mtx);
1721
1722 while (consumer != producer) {
1723 context = instance->reply_queue[consumer];
1724 cmd = instance->cmd_list[context];
1725 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
1726
1727 consumer++;
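		/* wrap around: the reply queue holds max_fw_cmds + 1 entries */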
1728 if (consumer == (instance->max_fw_cmds + 1)) {
1729 consumer = 0;
1730 }
1731 }
1732
1733 mutex_exit(&instance->completed_pool_mtx);
1734
1735 *instance->consumer = consumer;
1736 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
1737 0, 0, DDI_DMA_SYNC_FORDEV);
1738
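	/* trigger the soft interrupt only if it is not already running */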
1739 if (instance->softint_running) {
1740 need_softintr = 0;
1741 } else {
1742 need_softintr = 1;
1743 }
1744
1745 if (instance->isr_level == HIGH_LEVEL_INTR) {
1746 if (need_softintr) {
1747 ddi_trigger_softintr(instance->soft_intr_id);
1748 }
1749 } else {
1750 /*
1751 * Not a high-level interrupt, therefore call the soft level
1752 * interrupt explicitly
1753 */
1754 (void) megasas_softintr(instance);
1755 }
1756
1757 return (DDI_INTR_CLAIMED);
1758 }
1759
1760
1761 /*
1762 * ************************************************************************** *
1763 * *
1764 * libraries *
1765 * *
1766 * ************************************************************************** *
1767 */
1768 /*
1769 * get_mfi_pkt : Get a command from the free pool
1770 */
1771 static struct megasas_cmd *
1772 get_mfi_pkt(struct megasas_instance *instance)
1773 {
1774 mlist_t *head = &instance->cmd_pool_list;
1775 struct megasas_cmd *cmd = NULL;
1776
1777 mutex_enter(&instance->cmd_pool_mtx);
1778 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
1779
1780 if (!mlist_empty(head)) {
1781 cmd = mlist_entry(head->next, struct megasas_cmd, list);
1782 mlist_del_init(head->next);
1783 }
1784 if (cmd != NULL)
1785 cmd->pkt = NULL;
1786 mutex_exit(&instance->cmd_pool_mtx);
1787
1788 return (cmd);
1789 }
1790
1791 /*
1792 * return_mfi_pkt : Return a cmd to free command pool
1793 */
1794 static void
1795 return_mfi_pkt(struct megasas_instance *instance, struct megasas_cmd *cmd)
1796 {
1797 mutex_enter(&instance->cmd_pool_mtx);
1798 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
1799
1800 mlist_add(&cmd->list, &instance->cmd_pool_list);
1801
1802 mutex_exit(&instance->cmd_pool_mtx);
1803 }
1804
1805 /*
1806 * destroy_mfi_frame_pool
1807 */
1808 static void
1809 destroy_mfi_frame_pool(struct megasas_instance *instance)
1810 {
1811 int i;
1812 uint32_t max_cmd = instance->max_fw_cmds;
1813
1814 struct megasas_cmd *cmd;
1815
1816 /* return all frames to pool */
1817 for (i = 0; i < max_cmd; i++) {
1818
1819 cmd = instance->cmd_list[i];
1820
1821 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
1822 (void) mega_free_dma_obj(instance, cmd->frame_dma_obj);
1823
1824 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
1825 }
1826
1827 }
1828
1829 /*
1830 * create_mfi_frame_pool
1831 */
1832 static int
1833 create_mfi_frame_pool(struct megasas_instance *instance)
1834 {
1835 int i = 0;
1836 int cookie_cnt;
1837 uint16_t max_cmd;
1838 uint16_t sge_sz;
1839 uint32_t sgl_sz;
1840 uint32_t tot_frame_size;
1841
1842 struct megasas_cmd *cmd;
1843
1844 max_cmd = instance->max_fw_cmds;
1845
1846 sge_sz = sizeof (struct megasas_sge64);
1847
1848 /* calculate the SGL size and the total frame size */
1849 sgl_sz = sge_sz * instance->max_num_sge;
1850 tot_frame_size = sgl_sz + MEGAMFI_FRAME_SIZE + SENSE_LENGTH;
1851
1852 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
1853 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
1854
1855 while (i < max_cmd) {
1856 cmd = instance->cmd_list[i];
1857
1858 cmd->frame_dma_obj.size = tot_frame_size;
1859 cmd->frame_dma_obj.dma_attr = megasas_generic_dma_attr;
1860 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1861 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1862 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
1863 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
1864
1865
1866 cookie_cnt = mega_alloc_dma_obj(instance, &cmd->frame_dma_obj);
1867
1868 if (cookie_cnt == -1 || cookie_cnt > 1) {
1869 con_log(CL_ANN, (CE_WARN,
1870 "create_mfi_frame_pool: could not alloc."));
1871 return (DDI_FAILURE);
1872 }
1873
1874 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
1875
1876 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
1877 cmd->frame = (union megasas_frame *)cmd->frame_dma_obj.buffer;
1878 cmd->frame_phys_addr =
1879 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
1880
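		/*
		 * The sense buffer occupies the last SENSE_LENGTH bytes of
		 * each frame allocation.
		 */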
1881 cmd->sense = (uint8_t *)(((unsigned long)
1882 cmd->frame_dma_obj.buffer) +
1883 tot_frame_size - SENSE_LENGTH);
1884 cmd->sense_phys_addr =
1885 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
1886 tot_frame_size - SENSE_LENGTH;
1887
1888 if (!cmd->frame || !cmd->sense) {
1889 con_log(CL_ANN, (CE_NOTE,
1890 "megasas: pci_pool_alloc failed \n"));
1891
1892 return (-ENOMEM);
1893 }
1894
1895 cmd->frame->io.context = cmd->index;
1896 i++;
1897
1898 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
1899 cmd->frame->io.context, cmd->frame_phys_addr));
1900 }
1901
1902 return (DDI_SUCCESS);
1903 }
1904
1905 /*
1906 * free_additional_dma_buffer
1907 */
1908 static void
1909 free_additional_dma_buffer(struct megasas_instance *instance)
1910 {
1911 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
1912 (void) mega_free_dma_obj(instance,
1913 instance->mfi_internal_dma_obj);
1914 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
1915 }
1916
1917 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
1918 (void) mega_free_dma_obj(instance,
1919 instance->mfi_evt_detail_obj);
1920 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
1921 }
1922 }
1923
1924 /*
1925 * alloc_additional_dma_buffer
1926 */
1927 static int
1928 alloc_additional_dma_buffer(struct megasas_instance *instance)
1929 {
1930 uint32_t reply_q_sz;
1931 uint32_t internal_buf_size = PAGESIZE*2;
1932
1933 /* max cmds plus 1 + producer & consumer */
1934 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
1935
1936 instance->mfi_internal_dma_obj.size = internal_buf_size;
1937 instance->mfi_internal_dma_obj.dma_attr = megasas_generic_dma_attr;
1938 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1939 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
1940 0xFFFFFFFFU;
1941 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
1942
1943 if (mega_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj)
1944 != 1) {
1945 con_log(CL_ANN, (CE_WARN, "megaraid: could not alloc reply Q"));
1946 return (DDI_FAILURE);
1947 }
1948
1949 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
1950
1951 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
1952
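/*
 * A sketch of how the single internal DMA buffer is carved up by the
 * assignments below (byte offsets as computed in this function, not taken
 * from the firmware spec):
 *
 *   offset 0        4           8                       8 + reply_q_sz
 *   +-----------+-----------+-------------------------+----------------+
 *   | producer  | consumer  | reply queue entries     | internal_buf   |
 *   | (uint32)  | (uint32)  | (reply_q_sz bytes set   | (rest of the   |
 *   |           |           |  aside)                 |  two pages)    |
 *   +-----------+-----------+-------------------------+----------------+
 */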
1953 instance->producer = (uint32_t *)((unsigned long)
1954 instance->mfi_internal_dma_obj.buffer);
1955 instance->consumer = (uint32_t *)((unsigned long)
1956 instance->mfi_internal_dma_obj.buffer + 4);
1957 instance->reply_queue = (uint32_t *)((unsigned long)
1958 instance->mfi_internal_dma_obj.buffer + 8);
1959 instance->internal_buf = (caddr_t)(((unsigned long)
1960 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
1961 instance->internal_buf_dmac_add =
1962 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
1963 reply_q_sz;
1964 instance->internal_buf_size = internal_buf_size -
1965 (reply_q_sz + 8);
1966
1967 /* allocate evt_detail */
1968 instance->mfi_evt_detail_obj.size = sizeof (struct megasas_evt_detail);
1969 instance->mfi_evt_detail_obj.dma_attr = megasas_generic_dma_attr;
1970 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1971 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1972 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
1973 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
1974
1975 if (mega_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj) != 1) {
1976 con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
1977 "could not data transfer buffer alloc."));
1978 return (DDI_FAILURE);
1979 }
1980
1981 bzero(instance->mfi_evt_detail_obj.buffer,
1982 sizeof (struct megasas_evt_detail));
1983
1984 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
1985
1986 return (DDI_SUCCESS);
1987 }
1988
1989 /*
1990 * free_space_for_mfi
1991 */
1992 static void
1993 free_space_for_mfi(struct megasas_instance *instance)
1994 {
1995 int i;
1996 uint32_t max_cmd = instance->max_fw_cmds;
1997
1998 /* already freed */
1999 if (instance->cmd_list == NULL) {
2000 return;
2001 }
2002
2003 free_additional_dma_buffer(instance);
2004
2005 /* first free the MFI frame pool */
2006 destroy_mfi_frame_pool(instance);
2007
2008 /* free all the commands in the cmd_list */
2009 for (i = 0; i < instance->max_fw_cmds; i++) {
2010 kmem_free(instance->cmd_list[i],
2011 sizeof (struct megasas_cmd));
2012
2013 instance->cmd_list[i] = NULL;
2014 }
2015
2016 /* free the cmd_list buffer itself */
2017 kmem_free(instance->cmd_list,
2018 sizeof (struct megasas_cmd *) * max_cmd);
2019
2020 instance->cmd_list = NULL;
2021
2022 INIT_LIST_HEAD(&instance->cmd_pool_list);
2023 }
2024
2025 /*
2026 * alloc_space_for_mfi
2027 */
2028 static int
2029 alloc_space_for_mfi(struct megasas_instance *instance)
2030 {
2031 int i;
2032 uint32_t max_cmd;
2033 size_t sz;
2034
2035 struct megasas_cmd *cmd;
2036
2037 max_cmd = instance->max_fw_cmds;
2038 sz = sizeof (struct megasas_cmd *) * max_cmd;
2039
2040 /*
2041 * instance->cmd_list is an array of struct megasas_cmd pointers.
2042 * Allocate the dynamic array first and then allocate individual
2043 * commands.
2044 */
2045 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
2046 ASSERT(instance->cmd_list);
2047
2048 for (i = 0; i < max_cmd; i++) {
2049 instance->cmd_list[i] = kmem_zalloc(sizeof (struct megasas_cmd),
2050 KM_SLEEP);
2051 ASSERT(instance->cmd_list[i]);
2052 }
2053
2054 INIT_LIST_HEAD(&instance->cmd_pool_list);
2055
2056 /* add all the commands to the command pool (instance->cmd_pool_list) */
2057 for (i = 0; i < max_cmd; i++) {
2058 cmd = instance->cmd_list[i];
2059 cmd->index = i;
2060
2061 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2062 }
2063
2064 /* create a frame pool and assign one frame to each cmd */
2065 if (create_mfi_frame_pool(instance)) {
2066 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
2067 return (DDI_FAILURE);
2068 }
2069
2070 /* allocate the additional DMA buffers: reply queue, internal buffer, evt_detail */
2071 if (alloc_additional_dma_buffer(instance)) {
2072 con_log(CL_ANN, (CE_NOTE, "error allocating additional DMA buffers\n"));
2073 return (DDI_FAILURE);
2074 }
2075
2076 return (DDI_SUCCESS);
2077 }
2078
2079 /*
2080 * get_ctrl_info
2081 */
2082 static int
2083 get_ctrl_info(struct megasas_instance *instance,
2084 struct megasas_ctrl_info *ctrl_info)
2085 {
2086 int ret = 0;
2087
2088 struct megasas_cmd *cmd;
2089 struct megasas_dcmd_frame *dcmd;
2090 struct megasas_ctrl_info *ci;
2091
2092 cmd = get_mfi_pkt(instance);
2093
2094 if (!cmd) {
2095 con_log(CL_ANN, (CE_WARN,
2096 "Failed to get a cmd for ctrl info\n"));
2097 return (DDI_FAILURE);
2098 }
2099
2100 dcmd = &cmd->frame->dcmd;
2101
2102 ci = (struct megasas_ctrl_info *)instance->internal_buf;
2103
2104 if (!ci) {
2105 con_log(CL_ANN, (CE_WARN,
2106 "Failed to alloc mem for ctrl info\n"));
2107 return_mfi_pkt(instance, cmd);
2108 return (DDI_FAILURE);
2109 }
2110
2111 (void) memset(ci, 0, sizeof (struct megasas_ctrl_info));
2112
2113 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
2114 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
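	/*
	 * What follows is the MFI DCMD pattern used throughout this file:
	 * fill in a DCMD frame with the opcode and a single read SGE that
	 * points at a driver-owned DMA buffer (here the pre-allocated
	 * internal_buf), issue it in polled mode, then copy the result out.
	 */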
2115
2116 dcmd->cmd = MFI_CMD_OP_DCMD;
2117 dcmd->cmd_status = MFI_CMD_STATUS_POLL_MODE;
2118 dcmd->sge_count = 1;
2119 dcmd->flags = MFI_FRAME_DIR_READ;
2120 dcmd->timeout = 0;
2121 dcmd->data_xfer_len = sizeof (struct megasas_ctrl_info);
2122 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2123 dcmd->sgl.sge32[0].phys_addr = instance->internal_buf_dmac_add;
2124 dcmd->sgl.sge32[0].length = sizeof (struct megasas_ctrl_info);
2125
2126 cmd->frame_count = 1;
2127
2128 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2129 ret = 0;
2130 (void) memcpy(ctrl_info, ci, sizeof (struct megasas_ctrl_info));
2131 } else {
2132 con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed\n"));
2133 ret = -1;
2134 }
2135
2136 return_mfi_pkt(instance, cmd);
2137 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2138 ret = -1;
2139 }
2140
2141 return (ret);
2142 }
2143
2144 /*
2145 * abort_aen_cmd
2146 */
2147 static int
2148 abort_aen_cmd(struct megasas_instance *instance,
2149 struct megasas_cmd *cmd_to_abort)
2150 {
2151 int ret = 0;
2152
2153 struct megasas_cmd *cmd;
2154 struct megasas_abort_frame *abort_fr;
2155
2156 cmd = get_mfi_pkt(instance);
2157
2158 if (!cmd) {
2159 con_log(CL_ANN, (CE_WARN,
2160 "Failed to get a cmd for ctrl info\n"));
2161 return (DDI_FAILURE);
2162 }
2163
2164 abort_fr = &cmd->frame->abort;
2165
2166 /* prepare and issue the abort frame */
2167 abort_fr->cmd = MFI_CMD_OP_ABORT;
2168 abort_fr->cmd_status = MFI_CMD_STATUS_SYNC_MODE;
2169 abort_fr->flags = 0;
2170 abort_fr->abort_context = cmd_to_abort->index;
2171 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
2172 abort_fr->abort_mfi_phys_addr_hi = 0;
2173
2174 instance->aen_cmd->abort_aen = 1;
2175
2176 cmd->sync_cmd = MEGASAS_TRUE;
2177 cmd->frame_count = 1;
2178
2179 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2180 con_log(CL_ANN, (CE_WARN,
2181 "abort_aen_cmd: issue_cmd_in_sync_mode failed\n"));
2182 ret = -1;
2183 } else {
2184 ret = 0;
2185 }
2186
2187 instance->aen_cmd->abort_aen = 1;
2188 instance->aen_cmd = 0;
2189
2190 return_mfi_pkt(instance, cmd);
2191 (void) megasas_common_check(instance, cmd);
2192
2193 return (ret);
2194 }
2195
2196 /*
2197 * init_mfi
2198 */
2199 static int
2200 init_mfi(struct megasas_instance *instance)
2201 {
2202 off_t reglength;
2203 struct megasas_cmd *cmd;
2204 struct megasas_ctrl_info ctrl_info;
2205 struct megasas_init_frame *init_frame;
2206 struct megasas_init_queue_info *initq_info;
2207
2208 if ((ddi_dev_regsize(instance->dip, REGISTER_SET_IO, &reglength)
2209 != DDI_SUCCESS) || reglength < MINIMUM_MFI_MEM_SZ) {
2210 return (DDI_FAILURE);
2211 }
2212
2213 if (reglength > DEFAULT_MFI_MEM_SZ) {
2214 reglength = DEFAULT_MFI_MEM_SZ;
2215 con_log(CL_DLEVEL1, (CE_NOTE,
2216 "mega: register length to map is 0x%lx bytes", reglength));
2217 }
2218
2219 if (ddi_regs_map_setup(instance->dip, REGISTER_SET_IO,
2220 &instance->regmap, 0, reglength, &endian_attr,
2221 &instance->regmap_handle) != DDI_SUCCESS) {
2222 con_log(CL_ANN, (CE_NOTE,
2223 "megaraid: couldn't map control registers"));
2224
2225 goto fail_mfi_reg_setup;
2226 }
2227
2228 /* we expect the FW state to be READY */
2229 if (mfi_state_transition_to_ready(instance)) {
2230 con_log(CL_ANN, (CE_WARN, "megaraid: F/W is not ready"));
2231 goto fail_ready_state;
2232 }
2233
2234 /* get various operational parameters from status register */
2235 instance->max_num_sge =
2236 (instance->func_ptr->read_fw_status_reg(instance) &
2237 0xFF0000) >> 0x10;
2238 /*
2239 * Reduce the max supported cmds by 1. This is to ensure that the
2240 * reply_q_sz (1 more than the max cmd that driver may send)
2241 * does not exceed max cmds that the FW can support
2242 */
2243 instance->max_fw_cmds =
2244 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
2245 instance->max_fw_cmds = instance->max_fw_cmds - 1;
2246
2247 instance->max_num_sge =
2248 (instance->max_num_sge > MEGASAS_MAX_SGE_CNT) ?
2249 MEGASAS_MAX_SGE_CNT : instance->max_num_sge;
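	/*
	 * Illustration only (the field layout comes from the reads above,
	 * not from documentation quoted here): a status register value of,
	 * say, 0x00201000 would yield max_num_sge = 0x20 and
	 * max_fw_cmds = 0x1000 - 1 = 0x0FFF, before the MEGASAS_MAX_SGE_CNT
	 * clamp is applied.
	 */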
2250
2251 /* create a pool of commands */
2252 if (alloc_space_for_mfi(instance))
2253 goto fail_alloc_fw_space;
2254
2255 /* disable interrupt for initial preparation */
2256 instance->func_ptr->disable_intr(instance);
2257
2258 /*
2259 * Prepare a init frame. Note the init frame points to queue info
2260 * structure. Each frame has SGL allocated after first 64 bytes. For
2261 * this frame - since we don't need any SGL - we use SGL's space as
2262 * queue info structure
2263 */
2264 cmd = get_mfi_pkt(instance);
2265
2266 init_frame = (struct megasas_init_frame *)cmd->frame;
2267 initq_info = (struct megasas_init_queue_info *)
2268 ((unsigned long)init_frame + 64);
2269
2270 (void) memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
2271 (void) memset(initq_info, 0, sizeof (struct megasas_init_queue_info));
2272
2273 initq_info->init_flags = 0;
2274
2275 initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
2276
2277 initq_info->producer_index_phys_addr_hi = 0;
2278 initq_info->producer_index_phys_addr_lo =
2279 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
2280
2281 initq_info->consumer_index_phys_addr_hi = 0;
2282 initq_info->consumer_index_phys_addr_lo =
2283 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4;
2284
2285 initq_info->reply_queue_start_phys_addr_hi = 0;
2286 initq_info->reply_queue_start_phys_addr_lo =
2287 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8;
2288
2289 init_frame->cmd = MFI_CMD_OP_INIT;
2290 init_frame->cmd_status = MFI_CMD_STATUS_POLL_MODE;
2291 init_frame->flags = 0;
2292 init_frame->queue_info_new_phys_addr_lo =
2293 cmd->frame_phys_addr + 64;
2294 init_frame->queue_info_new_phys_addr_hi = 0;
2295
2296 init_frame->data_xfer_len = sizeof (struct megasas_init_queue_info);
2297
2298 cmd->frame_count = 1;
2299
2300 /* issue the init frame in polled mode */
2301 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2302 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
2303 goto fail_fw_init;
2304 }
2305
2306 return_mfi_pkt(instance, cmd);
2307 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2308 goto fail_fw_init;
2309 }
2310
2311 /* gather misc FW related information */
2312 if (!get_ctrl_info(instance, &ctrl_info)) {
2313 instance->max_sectors_per_req = ctrl_info.max_request_size;
2314 con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d",
2315 ctrl_info.product_name, ctrl_info.ld_present_count));
2316 } else {
2317 instance->max_sectors_per_req = instance->max_num_sge *
2318 PAGESIZE / 512;
2319 }
2320
2321 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2322 goto fail_fw_init;
2323 }
2324
2325 return (0);
2326
2327 fail_fw_init:
2328 fail_alloc_fw_space:
2329
2330 free_space_for_mfi(instance);
2331
2332 fail_ready_state:
2333 ddi_regs_map_free(&instance->regmap_handle);
2334
2335 fail_mfi_reg_setup:
2336 return (DDI_FAILURE);
2337 }
2338
2339 /*
2340 * mfi_state_transition_to_ready : Move the FW to READY state
2341 *
2342 * @instance : Adapter soft state
2343 */
2344 static int
2345 mfi_state_transition_to_ready(struct megasas_instance *instance)
2346 {
2347 int i;
2348 uint8_t max_wait;
2349 uint32_t fw_ctrl;
2350 uint32_t fw_state;
2351 uint32_t cur_state;
2352
2353 fw_state =
2354 instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK;
2355 con_log(CL_ANN1, (CE_NOTE,
2356 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
2357
2358 while (fw_state != MFI_STATE_READY) {
2359 con_log(CL_ANN, (CE_NOTE,
2360 "mfi_state_transition_to_ready:FW state%x", fw_state));
2361
2362 switch (fw_state) {
2363 case MFI_STATE_FAULT:
2364 con_log(CL_ANN, (CE_NOTE,
2365 "megasas: FW in FAULT state!!"));
2366
2367 return (-ENODEV);
2368 case MFI_STATE_WAIT_HANDSHAKE:
2369 /* set the CLR bit in IMR0 */
2370 con_log(CL_ANN, (CE_NOTE,
2371 "megasas: FW waiting for HANDSHAKE"));
2372 /*
2373 * PCI_Hot Plug: MFI F/W requires
2374 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2375 * to be set
2376 */
2377 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
2378 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
2379 MFI_INIT_HOTPLUG, instance);
2380
2381 max_wait = 2;
2382 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2383 break;
2384 case MFI_STATE_BOOT_MESSAGE_PENDING:
2385 /* set the CLR bit in IMR0 */
2386 con_log(CL_ANN, (CE_NOTE,
2387 "megasas: FW state boot message pending"));
2388 /*
2389 * PCI_Hot Plug: MFI F/W requires
2390 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2391 * to be set
2392 */
2393 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
2394
2395 max_wait = 10;
2396 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2397 break;
2398 case MFI_STATE_OPERATIONAL:
2399 /* bring it to READY state; assuming max wait 10 secs */
2400 instance->func_ptr->disable_intr(instance);
2401 con_log(CL_ANN1, (CE_NOTE,
2402 "megasas: FW in OPERATIONAL state"));
2403 /*
2404 * PCI_Hot Plug: MFI F/W requires
2405 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
2406 * to be set
2407 */
2408 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
2409 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
2410
2411 max_wait = 10;
2412 cur_state = MFI_STATE_OPERATIONAL;
2413 break;
2414 case MFI_STATE_UNDEFINED:
2415 /* this state should not last for more than 2 seconds */
2416 con_log(CL_ANN, (CE_NOTE, "FW state undefined\n"));
2417
2418 max_wait = 2;
2419 cur_state = MFI_STATE_UNDEFINED;
2420 break;
2421 case MFI_STATE_BB_INIT:
2422 max_wait = 2;
2423 cur_state = MFI_STATE_BB_INIT;
2424 break;
2425 case MFI_STATE_FW_INIT:
2426 max_wait = 2;
2427 cur_state = MFI_STATE_FW_INIT;
2428 break;
2429 case MFI_STATE_DEVICE_SCAN:
2430 max_wait = 10;
2431 cur_state = MFI_STATE_DEVICE_SCAN;
2432 break;
2433 default:
2434 con_log(CL_ANN, (CE_NOTE,
2435 "megasas: Unknown state 0x%x\n", fw_state));
2436 return (-ENODEV);
2437 }
2438
2439 /* the cur_state should not last for more than max_wait secs */
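		/*
		 * In other words, each pass through this loop polls the
		 * status register once and then delays roughly one
		 * millisecond, so max_wait = 2 amounts to about two seconds
		 * of polling before the state change is declared a failure
		 * (an approximation, since delay() rounds up to clock ticks).
		 */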
2440 for (i = 0; i < (max_wait * MILLISEC); i++) {
2441 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
2442 fw_state =
2443 instance->func_ptr->read_fw_status_reg(instance) &
2444 MFI_STATE_MASK;
2445
2446 if (fw_state == cur_state) {
2447 delay(1 * drv_usectohz(MILLISEC));
2448 } else {
2449 break;
2450 }
2451 }
2452
2453 /* return error if fw_state hasn't changed after max_wait */
2454 if (fw_state == cur_state) {
2455 con_log(CL_ANN, (CE_NOTE,
2456 "FW state hasn't changed in %d secs\n", max_wait));
2457 return (-ENODEV);
2458 }
2459 }
2460
2461 fw_ctrl = RD_IB_DOORBELL(instance);
2462
2463 con_log(CL_ANN1, (CE_NOTE,
2464 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
2465
2466 /*
2467 * Write 0xF to the doorbell register to do the following.
2468 * - Abort all outstanding commands (bit 0).
2469 * - Transition from OPERATIONAL to READY state (bit 1).
2470 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
2471 * - Set to release FW to continue running (i.e. BIOS handshake
2472 * (bit 3).
2473 */
2474 WR_IB_DOORBELL(0xF, instance);
2475
2476 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2477 return (-ENODEV);
2478 }
2479 return (0);
2480 }
2481
2482 /*
2483 * get_seq_num
2484 */
2485 static int
2486 get_seq_num(struct megasas_instance *instance,
2487 struct megasas_evt_log_info *eli)
2488 {
2489 int ret = 0;
2490
2491 dma_obj_t dcmd_dma_obj;
2492 struct megasas_cmd *cmd;
2493 struct megasas_dcmd_frame *dcmd;
2494
2495 cmd = get_mfi_pkt(instance);
2496
2497 if (!cmd) {
2498 cmn_err(CE_WARN, "megasas: failed to get a cmd\n");
2499 return (-ENOMEM);
2500 }
2501
2502 dcmd = &cmd->frame->dcmd;
2503
2504 /* allocate the data transfer buffer */
2505 dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info);
2506 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
2507 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2508 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2509 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
2510 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
2511
2512 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
2513 con_log(CL_ANN, (CE_WARN,
2514 "get_seq_num: could not data transfer buffer alloc."));
2515 return (DDI_FAILURE);
2516 }
2517
2518 (void) memset(dcmd_dma_obj.buffer, 0,
2519 sizeof (struct megasas_evt_log_info));
2520
2521 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2522
2523 dcmd->cmd = MFI_CMD_OP_DCMD;
2524 dcmd->cmd_status = 0;
2525 dcmd->sge_count = 1;
2526 dcmd->flags = MFI_FRAME_DIR_READ;
2527 dcmd->timeout = 0;
2528 dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info);
2529 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
2530 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info);
2531 dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
2532
2533 cmd->sync_cmd = MEGASAS_TRUE;
2534 cmd->frame_count = 1;
2535
2536 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2537 cmn_err(CE_WARN, "get_seq_num: "
2538 "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n");
2539 ret = -1;
2540 } else {
2541 /* copy the data back into callers buffer */
2542 bcopy(dcmd_dma_obj.buffer, eli,
2543 sizeof (struct megasas_evt_log_info));
2544 ret = 0;
2545 }
2546
2547 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
2548 ret = -1;
2549
2550 return_mfi_pkt(instance, cmd);
2551 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2552 ret = -1;
2553 }
2554 return (ret);
2555 }
2556
2557 /*
2558 * start_mfi_aen
2559 */
2560 static int
2561 start_mfi_aen(struct megasas_instance *instance)
2562 {
2563 int ret = 0;
2564
2565 struct megasas_evt_log_info eli;
2566 union megasas_evt_class_locale class_locale;
2567
2568 /* get the latest sequence number from FW */
2569 (void) memset(&eli, 0, sizeof (struct megasas_evt_log_info));
2570
2571 if (get_seq_num(instance, &eli)) {
2572 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num\n");
2573 return (-1);
2574 }
2575
2576 /* register AEN with FW for latest sequence number plus 1 */
2577 class_locale.members.reserved = 0;
2578 class_locale.members.locale = MR_EVT_LOCALE_ALL;
2579 class_locale.members.class = MR_EVT_CLASS_CRITICAL;
2580
2581 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
2582 class_locale.word);
2583
2584 if (ret) {
2585 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed\n");
2586 return (-1);
2587 }
2588
2589 return (ret);
2590 }
2591
2592 /*
2593 * flush_cache
2594 */
2595 static void
2596 flush_cache(struct megasas_instance *instance)
2597 {
2598 struct megasas_cmd *cmd;
2599 struct megasas_dcmd_frame *dcmd;
2600
2601 if (!(cmd = get_mfi_pkt(instance)))
2602 return;
2603
2604 dcmd = &cmd->frame->dcmd;
2605
2606 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2607
2608 dcmd->cmd = MFI_CMD_OP_DCMD;
2609 dcmd->cmd_status = 0x0;
2610 dcmd->sge_count = 0;
2611 dcmd->flags = MFI_FRAME_DIR_NONE;
2612 dcmd->timeout = 0;
2613 dcmd->data_xfer_len = 0;
2614 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
2615 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
2616
2617 cmd->frame_count = 1;
2618
2619 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2620 cmn_err(CE_WARN,
2621 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH\n");
2622 }
2623 con_log(CL_DLEVEL1, (CE_NOTE, "done"));
2624 return_mfi_pkt(instance, cmd);
2625 (void) megasas_common_check(instance, cmd);
2626 }
2627
2628 /*
2629 * service_mfi_aen- Completes an AEN command
2630 * @instance: Adapter soft state
2631 * @cmd: Command to be completed
2632 *
2633 */
2634 static void
2635 service_mfi_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2636 {
2637 uint32_t seq_num;
2638 struct megasas_evt_detail *evt_detail =
2639 (struct megasas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
2640
2641 cmd->cmd_status = cmd->frame->io.cmd_status;
2642
2643 if (cmd->cmd_status == ENODATA) {
2644 cmd->cmd_status = 0;
2645 }
2646
2647 /*
2648 * log the MFI AEN event to the sysevent queue so that
2649 * applications get notified
2650 */
2651 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
2652 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
2653 int instance_no = ddi_get_instance(instance->dip);
2654 con_log(CL_ANN, (CE_WARN,
2655 "mega%d: Failed to log AEN event", instance_no));
2656 }
2657
2658 /* get copy of seq_num and class/locale for re-registration */
2659 seq_num = evt_detail->seq_num;
2660 seq_num++;
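	/*
	 * Re-registering with seq_num + 1 asks the firmware for the next
	 * event after the one just delivered, so events are neither replayed
	 * nor skipped (assuming the firmware numbers events consecutively,
	 * which is how this driver uses the field elsewhere).
	 */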
2661 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
2662 sizeof (struct megasas_evt_detail));
2663
2664 cmd->frame->dcmd.cmd_status = 0x0;
2665 cmd->frame->dcmd.mbox.w[0] = seq_num;
2666
2667 instance->aen_seq_num = seq_num;
2668
2669 cmd->frame_count = 1;
2670
2671 /* Issue the aen registration frame */
2672 instance->func_ptr->issue_cmd(cmd, instance);
2673 }
2674
2675 /*
2676 * complete_cmd_in_sync_mode - Completes an internal command
2677 * @instance: Adapter soft state
2678 * @cmd: Command to be completed
2679 *
2680 * The issue_cmd_in_sync_mode() function waits for a command to complete
2681 * after it issues a command. This function wakes up that waiting routine by
2682 * calling wake_up() on the wait queue.
2683 */
2684 static void
2685 complete_cmd_in_sync_mode(struct megasas_instance *instance,
2686 struct megasas_cmd *cmd)
2687 {
2688 cmd->cmd_status = cmd->frame->io.cmd_status;
2689
2690 cmd->sync_cmd = MEGASAS_FALSE;
2691
2692 if (cmd->cmd_status == ENODATA) {
2693 cmd->cmd_status = 0;
2694 }
2695
2696 cv_broadcast(&instance->int_cmd_cv);
2697 }
2698
2699 /*
2700 * megasas_softintr - The Software ISR
2701 * @param arg : HBA soft state
2702 *
2703 * called directly from the hardware interrupt handler when high-level
2704 * interrupts are not in use, otherwise triggered as a soft interrupt
2705 */
2706 static uint_t
2707 megasas_softintr(struct megasas_instance *instance)
2708 {
2709 struct scsi_pkt *pkt;
2710 struct scsa_cmd *acmd;
2711 struct megasas_cmd *cmd;
2712 struct mlist_head *pos, *next;
2713 mlist_t process_list;
2714 struct megasas_header *hdr;
2715 struct scsi_arq_status *arqstat;
2716
2717 con_log(CL_ANN1, (CE_CONT, "megasas_softintr called"));
2718
2719 ASSERT(instance);
2720 mutex_enter(&instance->completed_pool_mtx);
2721
2722 if (mlist_empty(&instance->completed_pool_list)) {
2723 mutex_exit(&instance->completed_pool_mtx);
2724 return (DDI_INTR_UNCLAIMED);
2725 }
2726
2727 instance->softint_running = 1;
2728
2729 INIT_LIST_HEAD(&process_list);
2730 mlist_splice(&instance->completed_pool_list, &process_list);
2731 INIT_LIST_HEAD(&instance->completed_pool_list);
2732
2733 mutex_exit(&instance->completed_pool_mtx);
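	/*
	 * The completed commands were spliced onto a private list above so
	 * that the per-command callbacks below run without holding
	 * completed_pool_mtx; the hardware ISR is free to keep appending new
	 * completions to completed_pool_list in the meantime.
	 */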
2734
2735 /* perform all callbacks first, before releasing the SCBs */
2736 mlist_for_each_safe(pos, next, &process_list) {
2737 cmd = mlist_entry(pos, struct megasas_cmd, list);
2738
2739 /* synchronize the cmd frame so the CPU sees the controller's updates */
2740 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
2741 0, 0, DDI_DMA_SYNC_FORCPU);
2742
2743 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
2744 DDI_SUCCESS) {
2745 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2746 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2747 return (DDI_INTR_UNCLAIMED);
2748 }
2749
2750 hdr = &cmd->frame->hdr;
2751
2752 /* remove the internal command from the process list */
2753 mlist_del_init(&cmd->list);
2754
2755 switch (hdr->cmd) {
2756 case MFI_CMD_OP_PD_SCSI:
2757 case MFI_CMD_OP_LD_SCSI:
2758 case MFI_CMD_OP_LD_READ:
2759 case MFI_CMD_OP_LD_WRITE:
2760 /*
2761 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
2762 * could have been issued either through an
2763 * IO path or an IOCTL path. If it was via IOCTL,
2764 * we will send it to internal completion.
2765 */
2766 if (cmd->sync_cmd == MEGASAS_TRUE) {
2767 complete_cmd_in_sync_mode(instance, cmd);
2768 break;
2769 }
2770
2771 /* regular commands */
2772 acmd = cmd->cmd;
2773 pkt = CMD2PKT(acmd);
2774
2775 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2776 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2777 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2778 acmd->cmd_dma_offset,
2779 acmd->cmd_dma_len,
2780 DDI_DMA_SYNC_FORCPU);
2781 }
2782 }
2783
2784 pkt->pkt_reason = CMD_CMPLT;
2785 pkt->pkt_statistics = 0;
2786 pkt->pkt_state = STATE_GOT_BUS
2787 | STATE_GOT_TARGET | STATE_SENT_CMD
2788 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2789
2790 con_log(CL_ANN1, (CE_CONT,
2791 "CDB[0] = %x completed for %s: size %lx context %x",
2792 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
2793 acmd->cmd_dmacount, hdr->context));
2794
2795 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2796 struct scsi_inquiry *inq;
2797
2798 if (acmd->cmd_dmacount != 0) {
2799 bp_mapin(acmd->cmd_buf);
2800 inq = (struct scsi_inquiry *)
2801 acmd->cmd_buf->b_un.b_addr;
2802
2803 /* don't expose physical drives to OS */
2804 if (acmd->islogical &&
2805 (hdr->cmd_status == MFI_STAT_OK)) {
2806 display_scsi_inquiry(
2807 (caddr_t)inq);
2808 } else if ((hdr->cmd_status ==
2809 MFI_STAT_OK) && inq->inq_dtype ==
2810 DTYPE_DIRECT) {
2811
2812 display_scsi_inquiry(
2813 (caddr_t)inq);
2814
2815 /* for physical disk */
2816 hdr->cmd_status =
2817 MFI_STAT_DEVICE_NOT_FOUND;
2818 }
2819 }
2820 }
2821
2822 switch (hdr->cmd_status) {
2823 case MFI_STAT_OK:
2824 pkt->pkt_scbp[0] = STATUS_GOOD;
2825 break;
2826 case MFI_STAT_LD_CC_IN_PROGRESS:
2827 case MFI_STAT_LD_RECON_IN_PROGRESS:
2828 /* SJ - this is not the correct way to report these */
2829 pkt->pkt_scbp[0] = STATUS_GOOD;
2830 break;
2831 case MFI_STAT_LD_INIT_IN_PROGRESS:
2832 con_log(CL_ANN,
2833 (CE_WARN, "Initialization in Progress"));
2834 pkt->pkt_reason = CMD_TRAN_ERR;
2835
2836 break;
2837 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2838 con_log(CL_ANN1, (CE_CONT, "scsi_done error"));
2839
2840 pkt->pkt_reason = CMD_CMPLT;
2841 ((struct scsi_status *)
2842 pkt->pkt_scbp)->sts_chk = 1;
2843
2844 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2845
2846 con_log(CL_ANN,
2847 (CE_WARN, "TEST_UNIT_READY fail"));
2848
2849 } else {
2850 pkt->pkt_state |= STATE_ARQ_DONE;
2851 arqstat = (void *)(pkt->pkt_scbp);
2852 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2853 arqstat->sts_rqpkt_resid = 0;
2854 arqstat->sts_rqpkt_state |=
2855 STATE_GOT_BUS | STATE_GOT_TARGET
2856 | STATE_SENT_CMD
2857 | STATE_XFERRED_DATA;
2858 *(uint8_t *)&arqstat->sts_rqpkt_status =
2859 STATUS_GOOD;
2860
2861 bcopy(cmd->sense,
2862 &(arqstat->sts_sensedata),
2863 acmd->cmd_scblen -
2864 offsetof(struct scsi_arq_status,
2865 sts_sensedata));
2866 }
2867 break;
2868 case MFI_STAT_LD_OFFLINE:
2869 case MFI_STAT_DEVICE_NOT_FOUND:
2870 con_log(CL_ANN1, (CE_CONT,
2871 "device not found error"));
2872 pkt->pkt_reason = CMD_DEV_GONE;
2873 pkt->pkt_statistics = STAT_DISCON;
2874 break;
2875 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2876 pkt->pkt_state |= STATE_ARQ_DONE;
2877 pkt->pkt_reason = CMD_CMPLT;
2878 ((struct scsi_status *)
2879 pkt->pkt_scbp)->sts_chk = 1;
2880
2881 arqstat = (void *)(pkt->pkt_scbp);
2882 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2883 arqstat->sts_rqpkt_resid = 0;
2884 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2885 | STATE_GOT_TARGET | STATE_SENT_CMD
2886 | STATE_XFERRED_DATA;
2887 *(uint8_t *)&arqstat->sts_rqpkt_status =
2888 STATUS_GOOD;
2889
2890 arqstat->sts_sensedata.es_valid = 1;
2891 arqstat->sts_sensedata.es_key =
2892 KEY_ILLEGAL_REQUEST;
2893 arqstat->sts_sensedata.es_class =
2894 CLASS_EXTENDED_SENSE;
2895
2896 /*
2897 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2898 * ASC: 0x21h; ASCQ: 0x00h;
2899 */
2900 arqstat->sts_sensedata.es_add_code = 0x21;
2901 arqstat->sts_sensedata.es_qual_code = 0x00;
2902
2903 break;
2904
2905 default:
2906 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
2907 pkt->pkt_reason = CMD_TRAN_ERR;
2908
2909 break;
2910 }
2911
2912 atomic_add_16(&instance->fw_outstanding, (-1));
2913
2914 return_mfi_pkt(instance, cmd);
2915
2916 (void) megasas_common_check(instance, cmd);
2917
2918 if (acmd->cmd_dmahandle) {
2919 if (megasas_check_dma_handle(
2920 acmd->cmd_dmahandle) != DDI_SUCCESS) {
2921 ddi_fm_service_impact(instance->dip,
2922 DDI_SERVICE_UNAFFECTED);
2923 pkt->pkt_reason = CMD_TRAN_ERR;
2924 pkt->pkt_statistics = 0;
2925 }
2926 }
2927
2928 /* Call the callback routine */
2929 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2930 scsi_hba_pkt_comp(pkt);
2931 }
2932
2933 break;
2934 case MFI_CMD_OP_SMP:
2935 case MFI_CMD_OP_STP:
2936 complete_cmd_in_sync_mode(instance, cmd);
2937 break;
2938 case MFI_CMD_OP_DCMD:
2939 /* see if got an event notification */
2940 if (cmd->frame->dcmd.opcode ==
2941 MR_DCMD_CTRL_EVENT_WAIT) {
2942 if ((instance->aen_cmd == cmd) &&
2943 (instance->aen_cmd->abort_aen)) {
2944 con_log(CL_ANN, (CE_WARN,
2945 "megasas_softintr: "
2946 "aborted_aen returned"));
2947 } else {
2948 service_mfi_aen(instance, cmd);
2949
2950 atomic_add_16(&instance->fw_outstanding,
2951 (-1));
2952 }
2953 } else {
2954 complete_cmd_in_sync_mode(instance, cmd);
2955 }
2956
2957 break;
2958 case MFI_CMD_OP_ABORT:
2959 con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
2960 /*
2961 * MFI_CMD_OP_ABORT successfully completed
2962 * in the synchronous mode
2963 */
2964 complete_cmd_in_sync_mode(instance, cmd);
2965 break;
2966 default:
2967 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2968 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2969
2970 if (cmd->pkt != NULL) {
2971 pkt = cmd->pkt;
2972 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2973 scsi_hba_pkt_comp(pkt);
2974 }
2975 }
2976 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !!"));
2977 break;
2978 }
2979 }
2980
2981 instance->softint_running = 0;
2982
2983 return (DDI_INTR_CLAIMED);
2984 }
2985
2986 /*
2987 * mega_alloc_dma_obj
2988 *
2989 * Allocate the memory and other resources for a DMA object.
2990 */
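/*
 * A sketch (not part of the driver) of the pattern callers in this file
 * follow, assuming a single-cookie binding is wanted:
 *
 *	dma_obj_t obj;
 *
 *	obj.size = len;
 *	obj.dma_attr = megasas_generic_dma_attr;
 *	obj.dma_attr.dma_attr_sgllen = 1;
 *	if (mega_alloc_dma_obj(instance, &obj) != 1)
 *		return (DDI_FAILURE);
 *	... use obj.buffer and obj.dma_cookie[0].dmac_address ...
 *	(void) mega_free_dma_obj(instance, obj);
 *
 * The return value is the cookie count from ddi_dma_addr_bind_handle(),
 * or -1 on failure; callers here treat anything other than 1 as an error.
 */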
2991 static int
2992 mega_alloc_dma_obj(struct megasas_instance *instance, dma_obj_t *obj)
2993 {
2994 int i;
2995 size_t alen = 0;
2996 uint_t cookie_cnt;
2997 struct ddi_device_acc_attr tmp_endian_attr;
2998
2999 tmp_endian_attr = endian_attr;
3000 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
3001 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
3002 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
3003 if (i != DDI_SUCCESS) {
3004
3005 switch (i) {
3006 case DDI_DMA_BADATTR :
3007 con_log(CL_ANN, (CE_WARN,
3008 "Failed ddi_dma_alloc_handle- Bad atrib"));
3009 break;
3010 case DDI_DMA_NORESOURCES :
3011 con_log(CL_ANN, (CE_WARN,
3012 "Failed ddi_dma_alloc_handle- No Resources"));
3013 break;
3014 default :
3015 con_log(CL_ANN, (CE_WARN,
3016 "Failed ddi_dma_alloc_handle :unknown %d", i));
3017 break;
3018 }
3019
3020 return (-1);
3021 }
3022
3023 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
3024 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
3025 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
3026 alen < obj->size) {
3027
3028 ddi_dma_free_handle(&obj->dma_handle);
3029
3030 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
3031
3032 return (-1);
3033 }
3034
3035 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
3036 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
3037 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
3038
3039 ddi_dma_mem_free(&obj->acc_handle);
3040 ddi_dma_free_handle(&obj->dma_handle);
3041
3042 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
3043
3044 return (-1);
3045 }
3046
3047 if (megasas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
3048 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3049 return (-1);
3050 }
3051
3052 if (megasas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
3053 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3054 return (-1);
3055 }
3056
3057 return (cookie_cnt);
3058 }
3059
3060 /*
3061 * mega_free_dma_obj(struct megasas_instance *, dma_obj_t)
3062 *
3063 * De-allocate the memory and other resources for a DMA object, which must
3064 * have been allocated by a previous call to mega_alloc_dma_obj()
3065 */
3066 static int
3067 mega_free_dma_obj(struct megasas_instance *instance, dma_obj_t obj)
3068 {
3069
3070 if (megasas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
3071 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3072 return (DDI_FAILURE);
3073 }
3074
3075 if (megasas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
3076 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3077 return (DDI_FAILURE);
3078 }
3079
3080 (void) ddi_dma_unbind_handle(obj.dma_handle);
3081 ddi_dma_mem_free(&obj.acc_handle);
3082 ddi_dma_free_handle(&obj.dma_handle);
3083
3084 return (DDI_SUCCESS);
3085 }
3086
3087 /*
3088 * megasas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
3089 * int, int (*)())
3090 *
3091 * Allocate dma resources for a new scsi command
3092 */
3093 static int
3094 megasas_dma_alloc(struct megasas_instance *instance, struct scsi_pkt *pkt,
3095 struct buf *bp, int flags, int (*callback)())
3096 {
3097 int dma_flags;
3098 int (*cb)(caddr_t);
3099 int i;
3100
3101 ddi_dma_attr_t tmp_dma_attr = megasas_generic_dma_attr;
3102 struct scsa_cmd *acmd = PKT2CMD(pkt);
3103
3104 acmd->cmd_buf = bp;
3105
3106 if (bp->b_flags & B_READ) {
3107 acmd->cmd_flags &= ~CFLAG_DMASEND;
3108 dma_flags = DDI_DMA_READ;
3109 } else {
3110 acmd->cmd_flags |= CFLAG_DMASEND;
3111 dma_flags = DDI_DMA_WRITE;
3112 }
3113
3114 if (flags & PKT_CONSISTENT) {
3115 acmd->cmd_flags |= CFLAG_CONSISTENT;
3116 dma_flags |= DDI_DMA_CONSISTENT;
3117 }
3118
3119 if (flags & PKT_DMA_PARTIAL) {
3120 dma_flags |= DDI_DMA_PARTIAL;
3121 }
3122
3123 dma_flags |= DDI_DMA_REDZONE;
3124
3125 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
3126
3127 tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
3128 tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
3129
3130 if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
3131 cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
3132 switch (i) {
3133 case DDI_DMA_BADATTR:
3134 bioerror(bp, EFAULT);
3135 return (-1);
3136
3137 case DDI_DMA_NORESOURCES:
3138 bioerror(bp, 0);
3139 return (-1);
3140
3141 default:
3142 con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
3143 "0x%x impossible\n", i));
3144 bioerror(bp, EFAULT);
3145 return (-1);
3146 }
3147 }
3148
3149 i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
3150 cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);
3151
3152 switch (i) {
3153 case DDI_DMA_PARTIAL_MAP:
3154 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
3155 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
3156 "DDI_DMA_PARTIAL_MAP impossible\n"));
3157 goto no_dma_cookies;
3158 }
3159
3160 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
3161 DDI_FAILURE) {
3162 con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed\n"));
3163 goto no_dma_cookies;
3164 }
3165
3166 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
3167 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
3168 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
3169 DDI_FAILURE) {
3170
3171 con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed\n"));
3172 goto no_dma_cookies;
3173 }
3174
3175 goto get_dma_cookies;
3176 case DDI_DMA_MAPPED:
3177 acmd->cmd_nwin = 1;
3178 acmd->cmd_dma_len = 0;
3179 acmd->cmd_dma_offset = 0;
3180
3181 get_dma_cookies:
3182 i = 0;
3183 acmd->cmd_dmacount = 0;
3184 for (;;) {
3185 acmd->cmd_dmacount +=
3186 acmd->cmd_dmacookies[i++].dmac_size;
3187
3188 if (i == instance->max_num_sge ||
3189 i == acmd->cmd_ncookies)
3190 break;
3191
3192 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3193 &acmd->cmd_dmacookies[i]);
3194 }
3195
3196 acmd->cmd_cookie = i;
3197 acmd->cmd_cookiecnt = i;
3198
3199 acmd->cmd_flags |= CFLAG_DMAVALID;
3200
3201 if (bp->b_bcount >= acmd->cmd_dmacount) {
3202 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
3203 } else {
3204 pkt->pkt_resid = 0;
3205 }
3206
3207 return (0);
3208 case DDI_DMA_NORESOURCES:
3209 bioerror(bp, 0);
3210 break;
3211 case DDI_DMA_NOMAPPING:
3212 bioerror(bp, EFAULT);
3213 break;
3214 case DDI_DMA_TOOBIG:
3215 bioerror(bp, EINVAL);
3216 break;
3217 case DDI_DMA_INUSE:
3218 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
3219 " DDI_DMA_INUSE impossible\n"));
3220 break;
3221 default:
3222 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
3223 "0x%x impossible\n", i));
3224 break;
3225 }
3226
3227 no_dma_cookies:
3228 ddi_dma_free_handle(&acmd->cmd_dmahandle);
3229 acmd->cmd_dmahandle = NULL;
3230 acmd->cmd_flags &= ~CFLAG_DMAVALID;
3231 return (-1);
3232 }
3233
3234 /*
3235 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *)
3236 *
3237 * move dma resources to next dma window
3238 *
3239 */
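/*
 * A rough picture of the DDI_DMA_PARTIAL bookkeeping used here (terms as
 * in the scsa_cmd fields, not quoted from a DDI reference): the buffer
 * binds into cmd_nwin windows, each window yields cmd_ncookies cookies,
 * and each built command consumes at most max_num_sge cookies;
 * cmd_curwin and cmd_cookie track how far the transfer has progressed.
 */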
3240 static int
3241 megasas_dma_move(struct megasas_instance *instance, struct scsi_pkt *pkt,
3242 struct buf *bp)
3243 {
3244 int i = 0;
3245
3246 struct scsa_cmd *acmd = PKT2CMD(pkt);
3247
3248 /*
3249 * If there are no more cookies remaining in this window,
3250 * must move to the next window first.
3251 */
3252 if (acmd->cmd_cookie == acmd->cmd_ncookies) {
3253 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
3254 return (0);
3255 }
3256
3257 /* at last window, cannot move */
3258 if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
3259 return (-1);
3260 }
3261
3262 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
3263 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
3264 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
3265 DDI_FAILURE) {
3266 return (-1);
3267 }
3268
3269 acmd->cmd_cookie = 0;
3270 } else {
3271 /* still more cookies in this window - get the next one */
3272 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3273 &acmd->cmd_dmacookies[0]);
3274 }
3275
3276 /* get remaining cookies in this window, up to our maximum */
3277 for (;;) {
3278 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
3279 acmd->cmd_cookie++;
3280
3281 if (i == instance->max_num_sge ||
3282 acmd->cmd_cookie == acmd->cmd_ncookies) {
3283 break;
3284 }
3285
3286 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3287 &acmd->cmd_dmacookies[i]);
3288 }
3289
3290 acmd->cmd_cookiecnt = i;
3291
3292 if (bp->b_bcount >= acmd->cmd_dmacount) {
3293 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
3294 } else {
3295 pkt->pkt_resid = 0;
3296 }
3297
3298 return (0);
3299 }
3300
3301 /*
3302 * build_cmd
3303 */
3304 static struct megasas_cmd *
3305 build_cmd(struct megasas_instance *instance, struct scsi_address *ap,
3306 struct scsi_pkt *pkt, uchar_t *cmd_done)
3307 {
3308 uint16_t flags = 0;
3309 uint32_t i;
3310 uint32_t sge_bytes;
3311
3312 struct megasas_cmd *cmd;
3313 struct megasas_sge64 *mfi_sgl;
3314 struct scsa_cmd *acmd = PKT2CMD(pkt);
3315 struct megasas_pthru_frame *pthru;
3316 struct megasas_io_frame *ldio;
3317
3318 /* find out if this is logical or physical drive command. */
3319 acmd->islogical = MEGADRV_IS_LOGICAL(ap);
3320 acmd->device_id = MAP_DEVICE_ID(instance, ap);
3321 *cmd_done = 0;
3322
3323 /* get the command packet */
3324 if (!(cmd = get_mfi_pkt(instance))) {
3325 return (NULL);
3326 }
3327
3328 cmd->pkt = pkt;
3329 cmd->cmd = acmd;
3330
3331 /* determine the command direction */
3332 if (acmd->cmd_flags & CFLAG_DMASEND) {
3333 flags = MFI_FRAME_DIR_WRITE;
3334
3335 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3336 (void) ddi_dma_sync(acmd->cmd_dmahandle,
3337 acmd->cmd_dma_offset, acmd->cmd_dma_len,
3338 DDI_DMA_SYNC_FORDEV);
3339 }
3340 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
3341 flags = MFI_FRAME_DIR_READ;
3342
3343 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3344 (void) ddi_dma_sync(acmd->cmd_dmahandle,
3345 acmd->cmd_dma_offset, acmd->cmd_dma_len,
3346 DDI_DMA_SYNC_FORCPU);
3347 }
3348 } else {
3349 flags = MFI_FRAME_DIR_NONE;
3350 }
3351
3352 flags |= MFI_FRAME_SGL64;
3353
3354 switch (pkt->pkt_cdbp[0]) {
3355
3356 /*
3357 * case SCMD_SYNCHRONIZE_CACHE:
3358 * flush_cache(instance);
3359 * return_mfi_pkt(instance, cmd);
3360 * *cmd_done = 1;
3361 *
3362 * return (NULL);
3363 */
3364
3365 case SCMD_READ:
3366 case SCMD_WRITE:
3367 case SCMD_READ_G1:
3368 case SCMD_WRITE_G1:
3369 if (acmd->islogical) {
3370 ldio = (struct megasas_io_frame *)cmd->frame;
3371
3372 /*
3373 * prepare the logical IO frame:
3374 * the 0x02 bit of the opcode is clear for all read cmds
3375 */
3376 ldio->cmd = (pkt->pkt_cdbp[0] & 0x02) ?
3377 MFI_CMD_OP_LD_WRITE : MFI_CMD_OP_LD_READ;
3378 ldio->cmd_status = 0x0;
3379 ldio->scsi_status = 0x0;
3380 ldio->target_id = acmd->device_id;
3381 ldio->timeout = 0;
3382 ldio->reserved_0 = 0;
3383 ldio->pad_0 = 0;
3384 ldio->flags = flags;
3385
3386 /* Initialize sense Information */
3387 bzero(cmd->sense, SENSE_LENGTH);
3388 ldio->sense_len = SENSE_LENGTH;
3389 ldio->sense_buf_phys_addr_hi = 0;
3390 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3391
3392 ldio->start_lba_hi = 0;
3393 ldio->access_byte = (acmd->cmd_cdblen != 6) ?
3394 pkt->pkt_cdbp[1] : 0;
3395 ldio->sge_count = acmd->cmd_cookiecnt;
3396 mfi_sgl = (struct megasas_sge64 *)&ldio->sgl;
3397
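			/*
			 * Illustration of the decode below for a 10-byte
			 * READ(10) CDB (CDB_GROUP1), e.g.
			 * 28 00 00 12 34 56 00 00 08 00: bytes 2-5 form the
			 * 32-bit LBA (0x00123456 -> start_lba_lo) and bytes
			 * 7-8 the transfer length in blocks
			 * (0x0008 -> lba_count). The values are made up for
			 * the example, not taken from any trace.
			 */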
3398 if (acmd->cmd_cdblen == CDB_GROUP0) {
3399 ldio->lba_count = host_to_le16(
3400 (uint16_t)(pkt->pkt_cdbp[4]));
3401
3402 ldio->start_lba_lo = host_to_le32(
3403 ((uint32_t)(pkt->pkt_cdbp[3])) |
3404 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
3405 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
3406 << 16));
3407 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
3408 ldio->lba_count = host_to_le16(
3409 ((uint16_t)(pkt->pkt_cdbp[8])) |
3410 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
3411
3412 ldio->start_lba_lo = host_to_le32(
3413 ((uint32_t)(pkt->pkt_cdbp[5])) |
3414 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3415 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3416 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3417 } else if (acmd->cmd_cdblen == CDB_GROUP2) {
3418 ldio->lba_count = host_to_le16(
3419 ((uint16_t)(pkt->pkt_cdbp[9])) |
3420 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) |
3421 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) |
3422 ((uint16_t)(pkt->pkt_cdbp[6]) << 24));
3423
3424 ldio->start_lba_lo = host_to_le32(
3425 ((uint32_t)(pkt->pkt_cdbp[5])) |
3426 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3427 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3428 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3429 } else if (acmd->cmd_cdblen == CDB_GROUP3) {
3430 ldio->lba_count = host_to_le16(
3431 ((uint16_t)(pkt->pkt_cdbp[13])) |
3432 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) |
3433 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) |
3434 ((uint16_t)(pkt->pkt_cdbp[10]) << 24));
3435
3436 ldio->start_lba_lo = host_to_le32(
3437 ((uint32_t)(pkt->pkt_cdbp[9])) |
3438 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
3439 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
3440 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
3441
3442 ldio->start_lba_hi = host_to_le32(
3443 ((uint32_t)(pkt->pkt_cdbp[5])) |
3444 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3445 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3446 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3447 }
3448
3449 break;
3450 }
3451 /* For all non-rd/wr cmds */
3452 /* FALLTHROUGH */
3453 default:
3454 pthru = (struct megasas_pthru_frame *)cmd->frame;
3455
3456 /* prepare the DCDB frame */
3457 pthru->cmd = (acmd->islogical) ?
3458 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI;
3459 pthru->cmd_status = 0x0;
3460 pthru->scsi_status = 0x0;
3461 pthru->target_id = acmd->device_id;
3462 pthru->lun = 0;
3463 pthru->cdb_len = acmd->cmd_cdblen;
3464 pthru->timeout = 0;
3465 pthru->flags = flags;
3466 pthru->data_xfer_len = acmd->cmd_dmacount;
3467 pthru->sge_count = acmd->cmd_cookiecnt;
3468 mfi_sgl = (struct megasas_sge64 *)&pthru->sgl;
3469
3470 bzero(cmd->sense, SENSE_LENGTH);
3471 pthru->sense_len = SENSE_LENGTH;
3472 pthru->sense_buf_phys_addr_hi = 0;
3473 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3474
3475 bcopy(pkt->pkt_cdbp, pthru->cdb, acmd->cmd_cdblen);
3476
3477 break;
3478 }
3479 /* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */
3480
3481 /* prepare the scatter-gather list for the firmware */
3482 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
3483 mfi_sgl->phys_addr = acmd->cmd_dmacookies[i].dmac_laddress;
3484 mfi_sgl->length = acmd->cmd_dmacookies[i].dmac_size;
3485 }
3486
3487 sge_bytes = sizeof (struct megasas_sge64)*acmd->cmd_cookiecnt;
3488
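	/*
	 * frame_count below is the number of 64-byte MFI frames the command
	 * occupies: one header frame plus however many extra frames the SGL
	 * spills into. For example, assuming a 16-byte megasas_sge64 and 10
	 * cookies, sge_bytes = 160, so frame_count = 160/64 + 1 + 1 = 4 (the
	 * SGE size here is an assumption for illustration; the code uses
	 * sizeof).
	 */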
3489 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
3490 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
3491
3492 if (cmd->frame_count >= 8) {
3493 cmd->frame_count = 8;
3494 }
3495
3496 return (cmd);
3497 }
3498
3499 /*
3500 * wait_for_outstanding - Wait for all outstanding cmds
3501 * @instance: Adapter soft state
3502 *
3503 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
3504 * complete all its outstanding commands. Returns error if one or more IOs
3505 * are pending after this time period.
3506 */
3507 static int
3508 wait_for_outstanding(struct megasas_instance *instance)
3509 {
3510 int i;
3511 uint32_t wait_time = 90;
3512
3513 for (i = 0; i < wait_time; i++) {
3514 if (!instance->fw_outstanding) {
3515 break;
3516 }
3517
3518 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3519 }
3520
3521 if (instance->fw_outstanding) {
3522 return (1);
3523 }
3524
3525 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VERSION);
3526
3527 return (0);
3528 }
3529
3530 /*
3531 * issue_mfi_pthru
3532 */
3533 static int
3534 issue_mfi_pthru(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3535 struct megasas_cmd *cmd, int mode)
3536 {
3537 void *ubuf;
3538 uint32_t kphys_addr = 0;
3539 uint32_t xferlen = 0;
3540 uint_t model;
3541
3542 dma_obj_t pthru_dma_obj;
3543 struct megasas_pthru_frame *kpthru;
3544 struct megasas_pthru_frame *pthru;
3545
3546 pthru = &cmd->frame->pthru;
3547 kpthru = (struct megasas_pthru_frame *)&ioctl->frame[0];
3548
3549 model = ddi_model_convert_from(mode & FMODELS);
3550 if (model == DDI_MODEL_ILP32) {
3551 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_ILP32"));
3552
3553 xferlen = kpthru->sgl.sge32[0].length;
3554
3555 /* SJ! - ubuf needs to be virtual address. */
3556 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3557 } else {
3558 #ifdef _ILP32
3559 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_ILP32"));
3560 xferlen = kpthru->sgl.sge32[0].length;
3561 /* SJ! - ubuf needs to be virtual address. */
3562 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3563 #else
3564 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64"));
3565 xferlen = kpthru->sgl.sge64[0].length;
3566 /* SJ! - ubuf needs to be virtual address. */
3567 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
3568 #endif
3569 }
3570
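	/*
	 * Data flow for the ioctl path, as implemented below: the SGE in the
	 * user-supplied frame carries a user virtual address and length (see
	 * the "SJ!" notes above), the driver substitutes its own DMA buffer,
	 * does ddi_copyin() before a DIR_WRITE command and ddi_copyout()
	 * after a DIR_READ command, and patches the kernel buffer's cookie
	 * address into the frame sent to the firmware.
	 */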
3571 if (xferlen) {
3572 /* means IOCTL requires DMA */
3573 /* allocate the data transfer buffer */
3574 pthru_dma_obj.size = xferlen;
3575 pthru_dma_obj.dma_attr = megasas_generic_dma_attr;
3576 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3577 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3578 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
3579 pthru_dma_obj.dma_attr.dma_attr_align = 1;
3580
3581 /* allocate kernel buffer for DMA */
3582 if (mega_alloc_dma_obj(instance, &pthru_dma_obj) != 1) {
3583 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3584 "could not data transfer buffer alloc."));
3585 return (DDI_FAILURE);
3586 }
3587
3588 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3589 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
3590 if (ddi_copyin(ubuf, (void *)pthru_dma_obj.buffer,
3591 xferlen, mode)) {
3592 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3593 "copy from user space failed\n"));
3594 return (1);
3595 }
3596 }
3597
3598 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
3599 }
3600
3601 pthru->cmd = kpthru->cmd;
3602 pthru->sense_len = kpthru->sense_len;
3603 pthru->cmd_status = kpthru->cmd_status;
3604 pthru->scsi_status = kpthru->scsi_status;
3605 pthru->target_id = kpthru->target_id;
3606 pthru->lun = kpthru->lun;
3607 pthru->cdb_len = kpthru->cdb_len;
3608 pthru->sge_count = kpthru->sge_count;
3609 pthru->timeout = kpthru->timeout;
3610 pthru->data_xfer_len = kpthru->data_xfer_len;
3611
3612 pthru->sense_buf_phys_addr_hi = 0;
3613 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
3614 pthru->sense_buf_phys_addr_lo = 0;
3615
3616 bcopy((void *)kpthru->cdb, (void *)pthru->cdb, pthru->cdb_len);
3617
3618 pthru->flags = kpthru->flags & ~MFI_FRAME_SGL64;
3619 pthru->sgl.sge32[0].length = xferlen;
3620 pthru->sgl.sge32[0].phys_addr = kphys_addr;
3621
3622 cmd->sync_cmd = MEGASAS_TRUE;
3623 cmd->frame_count = 1;
3624
3625 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3626 con_log(CL_ANN, (CE_WARN,
3627 "issue_mfi_pthru: fw_ioctl failed\n"));
3628 } else {
3629 if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) {
3630
3631 if (ddi_copyout(pthru_dma_obj.buffer, ubuf,
3632 xferlen, mode)) {
3633 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3634 "copy to user space failed\n"));
3635 return (1);
3636 }
3637 }
3638 }
3639
3640 kpthru->cmd_status = pthru->cmd_status;
3641 kpthru->scsi_status = pthru->scsi_status;
3642
3643 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, "
3644 "scsi_status %x\n", pthru->cmd_status, pthru->scsi_status));
3645
3646 if (xferlen) {
3647 /* free kernel buffer */
3648 if (mega_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
3649 return (1);
3650 }
3651
3652 return (0);
3653 }
3654
3655 /*
3656 * issue_mfi_dcmd
3657 */
3658 static int
3659 issue_mfi_dcmd(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3660 struct megasas_cmd *cmd, int mode)
3661 {
3662 void *ubuf;
3663 uint32_t kphys_addr = 0;
3664 uint32_t xferlen = 0;
3665 uint32_t model;
3666 dma_obj_t dcmd_dma_obj;
3667 struct megasas_dcmd_frame *kdcmd;
3668 struct megasas_dcmd_frame *dcmd;
3669
3670 dcmd = &cmd->frame->dcmd;
3671 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
3672
3673 model = ddi_model_convert_from(mode & FMODELS);
3674 if (model == DDI_MODEL_ILP32) {
3675 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3676
3677 xferlen = kdcmd->sgl.sge32[0].length;
3678
3679 /* SJ! - ubuf needs to be virtual address. */
3680 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3681 } else {
3684 #ifdef _ILP32
3685 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3686 xferlen = kdcmd->sgl.sge32[0].length;
3687 /* SJ! - ubuf needs to be virtual address. */
3688 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3689 #else
3690 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64"));
3691 xferlen = kdcmd->sgl.sge64[0].length;
3692 /* SJ! - ubuf needs to be virtual address. */
3693 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
3694 #endif
3695 }
3696 if (xferlen) {
3697 /* means IOCTL requires DMA */
3698 /* allocate the data transfer buffer */
3699 dcmd_dma_obj.size = xferlen;
3700 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
3701 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3702 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3703 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3704 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3705
3706 /* allocate kernel buffer for DMA */
3707 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
3708 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3709 "could not data transfer buffer alloc."));
3710 return (DDI_FAILURE);
3711 }
3712
3713 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3714 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
3715 if (ddi_copyin(ubuf, (void *)dcmd_dma_obj.buffer,
3716 xferlen, mode)) {
3717 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3718 "copy from user space failed\n"));
3719 return (1);
3720 }
3721 }
3722
3723 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
3724 }
3725
3726 dcmd->cmd = kdcmd->cmd;
3727 dcmd->cmd_status = kdcmd->cmd_status;
3728 dcmd->sge_count = kdcmd->sge_count;
3729 dcmd->timeout = kdcmd->timeout;
3730 dcmd->data_xfer_len = kdcmd->data_xfer_len;
3731 dcmd->opcode = kdcmd->opcode;
3732
3733 bcopy((void *)kdcmd->mbox.b, (void *)dcmd->mbox.b, DCMD_MBOX_SZ);
3734
3735 dcmd->flags = kdcmd->flags & ~MFI_FRAME_SGL64;
3736 dcmd->sgl.sge32[0].length = xferlen;
3737 dcmd->sgl.sge32[0].phys_addr = kphys_addr;
3738
3739 cmd->sync_cmd = MEGASAS_TRUE;
3740 cmd->frame_count = 1;
3741
3742 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3743 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed\n"));
3744 } else {
3745 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
3746
3747 if (ddi_copyout(dcmd_dma_obj.buffer, ubuf,
3748 xferlen, mode)) {
3749 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3750 "copy to user space failed\n"));
3751 return (1);
3752 }
3753 }
3754 }
3755
3756 kdcmd->cmd_status = dcmd->cmd_status;
3757
3758 if (xferlen) {
3759 /* free kernel buffer */
3760 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
3761 return (1);
3762 }
3763
3764 return (0);
3765 }
3766
3767 /*
3768 * issue_mfi_smp
3769 */
3770 static int
3771 issue_mfi_smp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3772 struct megasas_cmd *cmd, int mode)
3773 {
3774 void *request_ubuf;
3775 void *response_ubuf;
3776 uint32_t request_xferlen = 0;
3777 uint32_t response_xferlen = 0;
3778 uint_t model;
3779 dma_obj_t request_dma_obj;
3780 dma_obj_t response_dma_obj;
3781 struct megasas_smp_frame *ksmp;
3782 struct megasas_smp_frame *smp;
3783 struct megasas_sge32 *sge32;
3784 #ifndef _ILP32
3785 struct megasas_sge64 *sge64;
3786 #endif
3787
3788 smp = &cmd->frame->smp;
3789 ksmp = (struct megasas_smp_frame *)&ioctl->frame[0];
3790
3791 model = ddi_model_convert_from(mode & FMODELS);
3792 if (model == DDI_MODEL_ILP32) {
3793 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3794
3795 sge32 = &ksmp->sgl[0].sge32[0];
3796 response_xferlen = sge32[0].length;
3797 request_xferlen = sge32[1].length;
3798 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3799 "response_xferlen = %x, request_xferlen = %x",
3800 response_xferlen, request_xferlen));
3801
3802 /* SJ! - ubuf needs to be virtual address. */
3803
3804 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
3805 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
3806 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3807 "response_ubuf = %p, request_ubuf = %p",
3808 response_ubuf, request_ubuf));
3809 } else {
3810 #ifdef _ILP32
3811 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3812
3813 sge32 = &ksmp->sgl[0].sge32[0];
3814 response_xferlen = sge32[0].length;
3815 request_xferlen = sge32[1].length;
3816 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3817 "response_xferlen = %x, request_xferlen = %x",
3818 response_xferlen, request_xferlen));
3819
3820 /* SJ! - ubuf needs to be virtual address. */
3821
3822 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
3823 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
3824 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3825 "response_ubuf = %p, request_ubuf = %p",
3826 response_ubuf, request_ubuf));
3827 #else
3828 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64"));
3829
3830 sge64 = &ksmp->sgl[0].sge64[0];
3831 response_xferlen = sge64[0].length;
3832 request_xferlen = sge64[1].length;
3833
3834 /* SJ! - ubuf needs to be virtual address. */
3835 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
3836 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
3837 #endif
3838 }
3839 if (request_xferlen) {
3840 /* means IOCTL requires DMA */
3841 /* allocate the data transfer buffer */
3842 request_dma_obj.size = request_xferlen;
3843 request_dma_obj.dma_attr = megasas_generic_dma_attr;
3844 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3845 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3846 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
3847 request_dma_obj.dma_attr.dma_attr_align = 1;
3848
3849 /* allocate kernel buffer for DMA */
3850 if (mega_alloc_dma_obj(instance, &request_dma_obj) != 1) {
3851 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3852 "could not data transfer buffer alloc."));
3853 return (DDI_FAILURE);
3854 }
3855
3856 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3857 if (ddi_copyin(request_ubuf, (void *) request_dma_obj.buffer,
3858 request_xferlen, mode)) {
3859 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3860 "copy from user space failed\n"));
3861 return (1);
3862 }
3863 }
3864
3865 if (response_xferlen) {
3866 /* means IOCTL requires DMA */
3867 /* allocate the data transfer buffer */
3868 response_dma_obj.size = response_xferlen;
3869 response_dma_obj.dma_attr = megasas_generic_dma_attr;
3870 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3871 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3872 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
3873 response_dma_obj.dma_attr.dma_attr_align = 1;
3874
3875 /* allocate kernel buffer for DMA */
3876 if (mega_alloc_dma_obj(instance, &response_dma_obj) != 1) {
3877 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3878 "could not data transfer buffer alloc."));
3879 return (DDI_FAILURE);
3880 }
3881
3882 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3883 if (ddi_copyin(response_ubuf, (void *) response_dma_obj.buffer,
3884 response_xferlen, mode)) {
3885 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3886 "copy from user space failed\n"));
3887 return (1);
3888 }
3889 }
3890
3891 smp->cmd = ksmp->cmd;
3892 smp->cmd_status = ksmp->cmd_status;
3893 smp->connection_status = ksmp->connection_status;
3894 smp->sge_count = ksmp->sge_count;
3895 /* smp->context = ksmp->context; */
3896 smp->timeout = ksmp->timeout;
3897 smp->data_xfer_len = ksmp->data_xfer_len;
3898
3899 bcopy((void *)&ksmp->sas_addr, (void *)&smp->sas_addr,
3900 sizeof (uint64_t));
3901
3902 smp->flags = ksmp->flags & ~MFI_FRAME_SGL64;
3903
3904 model = ddi_model_convert_from(mode & FMODELS);
3905 if (model == DDI_MODEL_ILP32) {
3906 con_log(CL_ANN1, (CE_NOTE,
3907 "handle_drv_ioctl: DDI_MODEL_ILP32"));
3908
3909 sge32 = &smp->sgl[0].sge32[0];
3910 sge32[0].length = response_xferlen;
3911 sge32[0].phys_addr =
3912 response_dma_obj.dma_cookie[0].dmac_address;
3913 sge32[1].length = request_xferlen;
3914 sge32[1].phys_addr =
3915 request_dma_obj.dma_cookie[0].dmac_address;
3916 } else {
3917 #ifdef _ILP32
3918 con_log(CL_ANN1, (CE_NOTE,
3919 "handle_drv_ioctl: DDI_MODEL_ILP32"));
3920 sge32 = &smp->sgl[0].sge32[0];
3921 sge32[0].length = response_xferlen;
3922 sge32[0].phys_addr =
3923 response_dma_obj.dma_cookie[0].dmac_address;
3924 sge32[1].length = request_xferlen;
3925 sge32[1].phys_addr =
3926 request_dma_obj.dma_cookie[0].dmac_address;
3927 #else
3928 con_log(CL_ANN1, (CE_NOTE,
3929 "issue_mfi_smp: DDI_MODEL_LP64"));
3930 sge64 = &smp->sgl[0].sge64[0];
3931 sge64[0].length = response_xferlen;
3932 sge64[0].phys_addr =
3933 response_dma_obj.dma_cookie[0].dmac_address;
3934 sge64[1].length = request_xferlen;
3935 sge64[1].phys_addr =
3936 request_dma_obj.dma_cookie[0].dmac_address;
3937 #endif
3938 }
3939 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3940 "smp->response_xferlen = %d, smp->request_xferlen = %d "
3941 "smp->data_xfer_len = %d", sge32[0].length, sge32[1].length,
3942 smp->data_xfer_len));
3943
3944 cmd->sync_cmd = MEGASAS_TRUE;
3945 cmd->frame_count = 1;
3946
3947 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3948 con_log(CL_ANN, (CE_WARN,
3949 "issue_mfi_smp: fw_ioctl failed\n"));
3950 } else {
3951 con_log(CL_ANN1, (CE_NOTE,
3952 "issue_mfi_smp: copy to user space\n"));
3953
3954 if (request_xferlen) {
3955 if (ddi_copyout(request_dma_obj.buffer, request_ubuf,
3956 request_xferlen, mode)) {
3957 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3958 "copy to user space failed\n"));
3959 return (1);
3960 }
3961 }
3962
3963 if (response_xferlen) {
3964 if (ddi_copyout(response_dma_obj.buffer, response_ubuf,
3965 response_xferlen, mode)) {
3966 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3967 "copy to user space failed\n"));
3968 return (1);
3969 }
3970 }
3971 }
3972
3973 ksmp->cmd_status = smp->cmd_status;
3974 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
3975 smp->cmd_status));
3976
3977
3978 if (request_xferlen) {
3979 /* free kernel buffer */
3980 if (mega_free_dma_obj(instance, request_dma_obj) != DDI_SUCCESS)
3981 return (1);
3982 }
3983
3984 if (response_xferlen) {
3985 /* free kernel buffer */
3986 if (mega_free_dma_obj(instance, response_dma_obj) !=
3987 DDI_SUCCESS)
3988 return (1);
3989 }
3990
3991 return (0);
3992 }
3993
3994 /*
3995 * issue_mfi_stp
3996 */
3997 static int
3998 issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3999 struct megasas_cmd *cmd, int mode)
4000 {
4001 void *fis_ubuf;
4002 void *data_ubuf;
4003 uint32_t fis_xferlen = 0;
4004 uint32_t data_xferlen = 0;
4005 uint_t model;
4006 dma_obj_t fis_dma_obj;
4007 dma_obj_t data_dma_obj;
4008 struct megasas_stp_frame *kstp;
4009 struct megasas_stp_frame *stp;
4010
4011 stp = &cmd->frame->stp;
4012 kstp = (struct megasas_stp_frame *)&ioctl->frame[0];
4013
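	/*
	 * A STP passthrough ioctl also carries two SGEs: sge[0] describes
	 * the user FIS buffer and sge[1] the user data buffer; both are
	 * bounced through driver-allocated DMA buffers below.
	 */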
4014 model = ddi_model_convert_from(mode & FMODELS);
4015 if (model == DDI_MODEL_ILP32) {
4016 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4017
4018 fis_xferlen = kstp->sgl.sge32[0].length;
4019 data_xferlen = kstp->sgl.sge32[1].length;
4020
4021 /* SJ! - ubuf needs to be virtual address. */
4022 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4023 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4024 	} else {
4027 #ifdef _ILP32
4028 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4029
4030 fis_xferlen = kstp->sgl.sge32[0].length;
4031 data_xferlen = kstp->sgl.sge32[1].length;
4032
4033 /* SJ! - ubuf needs to be virtual address. */
4034 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4035 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4036 #else
4037 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));
4038
4039 fis_xferlen = kstp->sgl.sge64[0].length;
4040 data_xferlen = kstp->sgl.sge64[1].length;
4041
4042 /* SJ! - ubuf needs to be virtual address. */
4043 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
4044 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
4045 #endif
4046 }
4047
4048
4049 if (fis_xferlen) {
4050 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
4051 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
4052
4053 /* means IOCTL requires DMA */
4054 /* allocate the data transfer buffer */
4055 fis_dma_obj.size = fis_xferlen;
4056 fis_dma_obj.dma_attr = megasas_generic_dma_attr;
4057 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4058 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4059 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
4060 fis_dma_obj.dma_attr.dma_attr_align = 1;
4061
4062 /* allocate kernel buffer for DMA */
4063 if (mega_alloc_dma_obj(instance, &fis_dma_obj) != 1) {
4064 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4065 "could not data transfer buffer alloc."));
4066 return (DDI_FAILURE);
4067 }
4068
4069 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4070 if (ddi_copyin(fis_ubuf, (void *)fis_dma_obj.buffer,
4071 fis_xferlen, mode)) {
4072 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4073 "copy from user space failed\n"));
4074 return (1);
4075 }
4076 }
4077
4078 if (data_xferlen) {
4079 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
4080 "data_xferlen = %x", data_ubuf, data_xferlen));
4081
4082 /* means IOCTL requires DMA */
4083 /* allocate the data transfer buffer */
4084 data_dma_obj.size = data_xferlen;
4085 data_dma_obj.dma_attr = megasas_generic_dma_attr;
4086 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4087 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4088 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
4089 data_dma_obj.dma_attr.dma_attr_align = 1;
4090
4091 /* allocate kernel buffer for DMA */
4092 if (mega_alloc_dma_obj(instance, &data_dma_obj) != 1) {
4093 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4094 "could not data transfer buffer alloc."));
4095 return (DDI_FAILURE);
4096 }
4097
4098 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4099 if (ddi_copyin(data_ubuf, (void *) data_dma_obj.buffer,
4100 data_xferlen, mode)) {
4101 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4102 "copy from user space failed\n"));
4103 return (1);
4104 }
4105 }
4106
4107 stp->cmd = kstp->cmd;
4108 stp->cmd_status = kstp->cmd_status;
4109 stp->connection_status = kstp->connection_status;
4110 stp->target_id = kstp->target_id;
4111 stp->sge_count = kstp->sge_count;
4112 /* stp->context = kstp->context; */
4113 stp->timeout = kstp->timeout;
4114 stp->data_xfer_len = kstp->data_xfer_len;
4115
4116 bcopy((void *)kstp->fis, (void *)stp->fis, 10);
4117
4118 stp->flags = kstp->flags & ~MFI_FRAME_SGL64;
4119 stp->stp_flags = kstp->stp_flags;
4120 stp->sgl.sge32[0].length = fis_xferlen;
4121 stp->sgl.sge32[0].phys_addr = fis_dma_obj.dma_cookie[0].dmac_address;
4122 stp->sgl.sge32[1].length = data_xferlen;
4123 stp->sgl.sge32[1].phys_addr = data_dma_obj.dma_cookie[0].dmac_address;
4124
4125 cmd->sync_cmd = MEGASAS_TRUE;
4126 cmd->frame_count = 1;
4127
4128 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4129 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed\n"));
4130 } else {
4131
4132 if (fis_xferlen) {
4133 if (ddi_copyout(fis_dma_obj.buffer, fis_ubuf,
4134 fis_xferlen, mode)) {
4135 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4136 "copy to user space failed\n"));
4137 return (1);
4138 }
4139 }
4140
4141 if (data_xferlen) {
4142 if (ddi_copyout(data_dma_obj.buffer, data_ubuf,
4143 data_xferlen, mode)) {
4144 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4145 "copy to user space failed\n"));
4146 return (1);
4147 }
4148 }
4149 }
4150
4151 kstp->cmd_status = stp->cmd_status;
4152
4153 if (fis_xferlen) {
4154 /* free kernel buffer */
4155 if (mega_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
4156 return (1);
4157 }
4158
4159 if (data_xferlen) {
4160 /* free kernel buffer */
4161 if (mega_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
4162 return (1);
4163 }
4164
4165 return (0);
4166 }
4167
4168 /*
4169 * fill_up_drv_ver
4170 */
4171 static void
4172 fill_up_drv_ver(struct megasas_drv_ver *dv)
4173 {
4174 (void) memset(dv, 0, sizeof (struct megasas_drv_ver));
4175
4176 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
4177 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
4178 (void) memcpy(dv->drv_name, "megaraid_sas", strlen("megaraid_sas"));
4179 (void) memcpy(dv->drv_ver, MEGASAS_VERSION, strlen(MEGASAS_VERSION));
4180 (void) memcpy(dv->drv_rel_date, MEGASAS_RELDATE,
4181 strlen(MEGASAS_RELDATE));
4182 }
4183
4184 /*
4185 * handle_drv_ioctl
4186 */
4187 static int
4188 handle_drv_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4189 int mode)
4190 {
4191 int i;
4192 int rval = 0;
4193 int *props = NULL;
4194 void *ubuf;
4195
4196 uint8_t *pci_conf_buf;
4197 uint32_t xferlen;
4198 uint32_t num_props;
4199 uint_t model;
4200 struct megasas_dcmd_frame *kdcmd;
4201 struct megasas_drv_ver dv;
4202 struct megasas_pci_information pi;
4203
4204 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
4205
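	/*
	 * Driver-private ioctls are serviced entirely within the driver;
	 * no frame is issued to the firmware.  The SGL in the ioctl frame
	 * is used only to locate the user buffer for the copyout below.
	 */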
4206 model = ddi_model_convert_from(mode & FMODELS);
4207 if (model == DDI_MODEL_ILP32) {
4208 con_log(CL_ANN1, (CE_NOTE,
4209 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4210
4211 xferlen = kdcmd->sgl.sge32[0].length;
4212
4213 /* SJ! - ubuf needs to be virtual address. */
4214 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4215 } else {
4216 #ifdef _ILP32
4217 con_log(CL_ANN1, (CE_NOTE,
4218 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4219 xferlen = kdcmd->sgl.sge32[0].length;
4220 /* SJ! - ubuf needs to be virtual address. */
4221 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4222 #else
4223 con_log(CL_ANN1, (CE_NOTE,
4224 "handle_drv_ioctl: DDI_MODEL_LP64"));
4225 xferlen = kdcmd->sgl.sge64[0].length;
4226 /* SJ! - ubuf needs to be virtual address. */
4227 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
4228 #endif
4229 }
4230 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4231 "dataBuf=%p size=%d bytes", ubuf, xferlen));
4232
4233 switch (kdcmd->opcode) {
4234 case MR_DRIVER_IOCTL_DRIVER_VERSION:
4235 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4236 "MR_DRIVER_IOCTL_DRIVER_VERSION"));
4237
4238 fill_up_drv_ver(&dv);
4239
4240 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
4241 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4242 "MR_DRIVER_IOCTL_DRIVER_VERSION : "
4243 "copy to user space failed\n"));
4244 kdcmd->cmd_status = 1;
4245 rval = 1;
4246 } else {
4247 kdcmd->cmd_status = 0;
4248 }
4249 break;
4250 case MR_DRIVER_IOCTL_PCI_INFORMATION:
4251 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4252 "MR_DRIVER_IOCTL_PCI_INFORMAITON"));
4253
4254 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
4255 0, "reg", &props, &num_props)) {
4256 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4257 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4258 "ddi_prop_look_int_array failed\n"));
4259 rval = 1;
4260 } else {
4261
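			/*
			 * The first word of the "reg" property is in PCI
			 * config-address format: bus in bits 23:16, device
			 * in bits 15:11 and function in bits 10:8.
			 */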
4262 pi.busNumber = (props[0] >> 16) & 0xFF;
4263 pi.deviceNumber = (props[0] >> 11) & 0x1f;
4264 pi.functionNumber = (props[0] >> 8) & 0x7;
4265 ddi_prop_free((void *)props);
4266 }
4267
4268 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
4269
4270 for (i = 0; i < (sizeof (struct megasas_pci_information) -
4271 offsetof(struct megasas_pci_information, pciHeaderInfo));
4272 i++) {
4273 pci_conf_buf[i] =
4274 pci_config_get8(instance->pci_handle, i);
4275 }
4276
4277 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
4278 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4279 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4280 "copy to user space failed\n"));
4281 kdcmd->cmd_status = 1;
4282 rval = 1;
4283 } else {
4284 kdcmd->cmd_status = 0;
4285 }
4286 break;
4287 default:
4288 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4289 "invalid driver specific IOCTL opcode = 0x%x",
4290 kdcmd->opcode));
4291 kdcmd->cmd_status = 1;
4292 rval = 1;
4293 break;
4294 }
4295
4296 return (rval);
4297 }
4298
4299 /*
4300 * handle_mfi_ioctl
4301 */
4302 static int
4303 handle_mfi_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4304 int mode)
4305 {
4306 int rval = 0;
4307
4308 struct megasas_header *hdr;
4309 struct megasas_cmd *cmd;
4310
4311 cmd = get_mfi_pkt(instance);
4312
4313 if (!cmd) {
4314 con_log(CL_ANN, (CE_WARN, "megasas: "
4315 "failed to get a cmd packet\n"));
4316 return (1);
4317 }
4318
4319 hdr = (struct megasas_header *)&ioctl->frame[0];
4320
4321 switch (hdr->cmd) {
4322 case MFI_CMD_OP_DCMD:
4323 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
4324 break;
4325 case MFI_CMD_OP_SMP:
4326 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
4327 break;
4328 case MFI_CMD_OP_STP:
4329 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
4330 break;
4331 case MFI_CMD_OP_LD_SCSI:
4332 case MFI_CMD_OP_PD_SCSI:
4333 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
4334 break;
4335 default:
4336 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
4337 "invalid mfi ioctl hdr->cmd = %d\n", hdr->cmd));
4338 rval = 1;
4339 break;
4340 }
4341
4342
4343 return_mfi_pkt(instance, cmd);
4344 if (megasas_common_check(instance, cmd) != DDI_SUCCESS)
4345 rval = 1;
4346 return (rval);
4347 }
4348
4349 /*
4350 * AEN
4351 */
4352 static int
4353 handle_mfi_aen(struct megasas_instance *instance, struct megasas_aen *aen)
4354 {
4355 int rval = 0;
4356
4357 rval = register_mfi_aen(instance, instance->aen_seq_num,
4358 aen->class_locale_word);
4359
4360 aen->cmd_status = (uint8_t)rval;
4361
4362 return (rval);
4363 }
4364
4365 static int
4366 register_mfi_aen(struct megasas_instance *instance, uint32_t seq_num,
4367 uint32_t class_locale_word)
4368 {
4369 int ret_val;
4370
4371 struct megasas_cmd *cmd;
4372 struct megasas_dcmd_frame *dcmd;
4373 union megasas_evt_class_locale curr_aen;
4374 union megasas_evt_class_locale prev_aen;
4375
4376 /*
4377 	 * If there is already an AEN pending (aen_cmd), check whether the
4378 * class_locale of that pending AEN is inclusive of the new
4379 * AEN request we currently have. If it is, then we don't have
4380 * to do anything. In other words, whichever events the current
4381 * AEN request is subscribing to, have already been subscribed
4382 * to.
4383 *
4384 * If the old_cmd is _not_ inclusive, then we have to abort
4385 	 * that command, form a class_locale that is a superset of both
4386 	 * the old and the new one, and re-issue it to the FW.
4387 */
4388
4389 curr_aen.word = class_locale_word;
4390
4391 if (instance->aen_cmd) {
4392 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
4393
4394 /*
4395 * A class whose enum value is smaller is inclusive of all
4396 * higher values. If a PROGRESS (= -1) was previously
4397 		 * registered, then new registration requests for higher
4398 * classes need not be sent to FW. They are automatically
4399 * included.
4400 *
4401 * Locale numbers don't have such hierarchy. They are bitmap
4402 * values
4403 */
4404 if ((prev_aen.members.class <= curr_aen.members.class) &&
4405 !((prev_aen.members.locale & curr_aen.members.locale) ^
4406 curr_aen.members.locale)) {
4407 /*
4408 * Previously issued event registration includes
4409 * current request. Nothing to do.
4410 */
4411
4412 return (0);
4413 } else {
4414 curr_aen.members.locale |= prev_aen.members.locale;
4415
4416 if (prev_aen.members.class < curr_aen.members.class)
4417 curr_aen.members.class = prev_aen.members.class;
4418
4419 ret_val = abort_aen_cmd(instance, instance->aen_cmd);
4420
4421 if (ret_val) {
4422 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
4423 "failed to abort prevous AEN command\n"));
4424
4425 return (ret_val);
4426 }
4427 }
4428 } else {
4429 curr_aen.word = class_locale_word;
4430 }
4431
4432 cmd = get_mfi_pkt(instance);
4433
4434 if (!cmd)
4435 return (-ENOMEM);
4436
4437 dcmd = &cmd->frame->dcmd;
4438
4439 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
4440 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4441
4442 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
4443 sizeof (struct megasas_evt_detail));
4444
4445 /* Prepare DCMD for aen registration */
4446 dcmd->cmd = MFI_CMD_OP_DCMD;
4447 dcmd->cmd_status = 0x0;
4448 dcmd->sge_count = 1;
4449 dcmd->flags = MFI_FRAME_DIR_READ;
4450 dcmd->timeout = 0;
4451 dcmd->data_xfer_len = sizeof (struct megasas_evt_detail);
4452 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
4453 dcmd->mbox.w[0] = seq_num;
4454 dcmd->mbox.w[1] = curr_aen.word;
4455 dcmd->sgl.sge32[0].phys_addr =
4456 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address;
4457 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_detail);
4458
4459 instance->aen_seq_num = seq_num;
4460
4461 /*
4462 * Store reference to the cmd used to register for AEN. When an
4463 * application wants us to register for AEN, we have to abort this
4464 * cmd and re-register with a new EVENT LOCALE supplied by that app
4465 */
4466 instance->aen_cmd = cmd;
4467
4468 cmd->frame_count = 1;
4469
4470 /* Issue the aen registration frame */
4471 /* atomic_add_16 (&instance->fw_outstanding, 1); */
4472 instance->func_ptr->issue_cmd(cmd, instance);
4473
4474 return (0);
4475 }
4476
4477 static void
4478 display_scsi_inquiry(caddr_t scsi_inq)
4479 {
4480 #define MAX_SCSI_DEVICE_CODE 14
4481 int i;
4482 char inquiry_buf[256] = {0};
4483 int len;
4484 const char *const scsi_device_types[] = {
4485 "Direct-Access ",
4486 "Sequential-Access",
4487 "Printer ",
4488 "Processor ",
4489 "WORM ",
4490 "CD-ROM ",
4491 "Scanner ",
4492 "Optical Device ",
4493 "Medium Changer ",
4494 "Communications ",
4495 "Unknown ",
4496 "Unknown ",
4497 "Unknown ",
4498 "Enclosure ",
4499 };
4500
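	/*
	 * Standard INQUIRY data layout: vendor ID in bytes 8-15, product ID
	 * in bytes 16-31, product revision in bytes 32-35; the low five bits
	 * of byte 0 give the peripheral device type.
	 */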
4501 len = 0;
4502
4503 	len += snprintf(inquiry_buf + len, 256 - len, "  Vendor: ");
4504 	for (i = 8; i < 16; i++) {
4505 		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4506 scsi_inq[i]);
4507 }
4508
4509 	len += snprintf(inquiry_buf + len, 256 - len, "  Model: ");
4510 
4511 	for (i = 16; i < 32; i++) {
4512 		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4513 scsi_inq[i]);
4514 }
4515
4516 	len += snprintf(inquiry_buf + len, 256 - len, "  Rev: ");
4517 
4518 	for (i = 32; i < 36; i++) {
4519 		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4520 scsi_inq[i]);
4521 }
4522
4523 	len += snprintf(inquiry_buf + len, 256 - len, "\n");
4524
4525
4526 i = scsi_inq[0] & 0x1f;
4527
4528
4529 	len += snprintf(inquiry_buf + len, 256 - len, "  Type:   %s ",
4530 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
4531 "Unknown ");
4532
4533
4534 	len += snprintf(inquiry_buf + len, 256 - len,
4535 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
4536
4537 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
4538 		len += snprintf(inquiry_buf + len, 256 - len, " CCS\n");
4539 } else {
4540 		len += snprintf(inquiry_buf + len, 256 - len, "\n");
4541 }
4542
4543 	con_log(CL_ANN1, (CE_CONT, "%s", inquiry_buf));
4544 }
4545
4546 static int
4547 read_fw_status_reg_xscale(struct megasas_instance *instance)
4548 {
4549 return ((int)RD_OB_MSG_0(instance));
4550 }
4551
4552 static int
4553 read_fw_status_reg_ppc(struct megasas_instance *instance)
4554 {
4555 return ((int)RD_OB_SCRATCH_PAD_0(instance));
4556 }
4557
4558 static void
4559 issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance)
4560 {
4561 atomic_inc_16(&instance->fw_outstanding);
4562
4563 /* Issue the command to the FW */
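	/*
	 * xscale inbound queue port format: the frame physical address is
	 * shifted right by three and the count of extra frames
	 * (frame_count - 1) is packed into the freed low-order bits.
	 */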
4564 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4565 (cmd->frame_count - 1), instance);
4566 }
4567
4568 static void
4569 issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance)
4570 {
4571 atomic_inc_16(&instance->fw_outstanding);
4572
4573 /* Issue the command to the FW */
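	/*
	 * 1078 (ppc) inbound queue port format: the frame physical address
	 * is used unshifted, (frame_count - 1) occupies the next-higher
	 * bits and the lowest bit is set.
	 */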
4574 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4575 (((cmd->frame_count - 1) << 1) | 1), instance);
4576 }
4577
4578 /*
4579 * issue_cmd_in_sync_mode
4580 */
4581 static int
4582 issue_cmd_in_sync_mode_xscale(struct megasas_instance *instance,
4583 struct megasas_cmd *cmd)
4584 {
4585 int i;
4586 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
4587
4588 cmd->cmd_status = ENODATA;
4589
4590 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4591 (cmd->frame_count - 1), instance);
4592
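	/*
	 * Wait for the completion interrupt to signal int_cmd_cv and
	 * overwrite the ENODATA sentinel set above.
	 */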
4593 mutex_enter(&instance->int_cmd_mtx);
4594
4595 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
4596 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
4597 }
4598
4599 mutex_exit(&instance->int_cmd_mtx);
4600
4601 	if (i < (msecs - 1)) {
4602 return (0);
4603 } else {
4604 return (1);
4605 }
4606 }
4607
4608 static int
4609 issue_cmd_in_sync_mode_ppc(struct megasas_instance *instance,
4610 struct megasas_cmd *cmd)
4611 {
4612 int i;
4613 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
4614
4615 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called\n"));
4616
4617 cmd->cmd_status = ENODATA;
4618
4619 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4620 (((cmd->frame_count - 1) << 1) | 1), instance);
4621
4622 mutex_enter(&instance->int_cmd_mtx);
4623
4624 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
4625 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
4626 }
4627
4628 mutex_exit(&instance->int_cmd_mtx);
4629
4630 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done\n"));
4631
4632 	if (i < (msecs - 1)) {
4633 return (0);
4634 } else {
4635 return (1);
4636 }
4637 }
4638
4639 /*
4640 * issue_cmd_in_poll_mode
4641 */
4642 static int
4643 issue_cmd_in_poll_mode_xscale(struct megasas_instance *instance,
4644 struct megasas_cmd *cmd)
4645 {
4646 int i;
4647 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4648 struct megasas_header *frame_hdr;
4649
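	/*
	 * Seed cmd_status with the 0xFF poll-mode sentinel and ask the
	 * firmware not to post a reply-queue entry; completion is detected
	 * by polling cmd_status in the frame itself.
	 */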
4650 frame_hdr = (struct megasas_header *)cmd->frame;
4651 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
4652 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4653
4654 /* issue the frame using inbound queue port */
4655 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4656 (cmd->frame_count - 1), instance);
4657
4658 /* wait for cmd_status to change from 0xFF */
4659 for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4660 MFI_CMD_STATUS_POLL_MODE); i++) {
4661 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4662 }
4663
4664 if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4665 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4666 "cmd polling timed out"));
4667 return (DDI_FAILURE);
4668 }
4669
4670 return (DDI_SUCCESS);
4671 }
4672
4673 static int
4674 issue_cmd_in_poll_mode_ppc(struct megasas_instance *instance,
4675 struct megasas_cmd *cmd)
4676 {
4677 int i;
4678 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4679 struct megasas_header *frame_hdr;
4680
4681 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called\n"));
4682
4683 frame_hdr = (struct megasas_header *)cmd->frame;
4684 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
4685 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4686
4687 /* issue the frame using inbound queue port */
4688 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4689 (((cmd->frame_count - 1) << 1) | 1), instance);
4690
4691 /* wait for cmd_status to change from 0xFF */
4692 for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4693 MFI_CMD_STATUS_POLL_MODE); i++) {
4694 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4695 }
4696
4697 if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4698 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4699 "cmd polling timed out"));
4700 return (DDI_FAILURE);
4701 }
4702
4703 return (DDI_SUCCESS);
4704 }
4705
4706 static void
4707 enable_intr_xscale(struct megasas_instance *instance)
4708 {
4709 MFI_ENABLE_INTR(instance);
4710 }
4711
4712 static void
4713 enable_intr_ppc(struct megasas_instance *instance)
4714 {
4715 uint32_t mask;
4716
4717 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called\n"));
4718
4719 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
4720 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
4721
4722 /*
4723 	 * As the 1078DE is the same chip as the 1078, the interrupt mask
4724 * remains the same.
4725 */
4726 /* WR_OB_INTR_MASK(~0x80000000, instance); */
4727 WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR), instance);
4728
4729 /* dummy read to force PCI flush */
4730 mask = RD_OB_INTR_MASK(instance);
4731
4732 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
4733 "outbound_intr_mask = 0x%x\n", mask));
4734 }
4735
4736 static void
4737 disable_intr_xscale(struct megasas_instance *instance)
4738 {
4739 MFI_DISABLE_INTR(instance);
4740 }
4741
4742 static void
4743 disable_intr_ppc(struct megasas_instance *instance)
4744 {
4745 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called\n"));
4746
4747 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
4748 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4749
4750 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
4751 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
4752
4753 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
4754 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4755
4756 /* dummy read to force PCI flush */
4757 (void) RD_OB_INTR_MASK(instance);
4758 }
4759
4760 static int
4761 intr_ack_xscale(struct megasas_instance *instance)
4762 {
4763 uint32_t status;
4764
4765 /* check if it is our interrupt */
4766 status = RD_OB_INTR_STATUS(instance);
4767
4768 if (!(status & MFI_OB_INTR_STATUS_MASK)) {
4769 return (DDI_INTR_UNCLAIMED);
4770 }
4771
4772 /* clear the interrupt by writing back the same value */
4773 WR_OB_INTR_STATUS(status, instance);
4774
4775 return (DDI_INTR_CLAIMED);
4776 }
4777
4778 static int
4779 intr_ack_ppc(struct megasas_instance *instance)
4780 {
4781 uint32_t status;
4782
4783 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called\n"));
4784
4785 /* check if it is our interrupt */
4786 status = RD_OB_INTR_STATUS(instance);
4787
4788 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x\n", status));
4789
4790 /*
4791 	 * As the 1078DE is the same chip as the 1078, the status field
4792 * remains the same.
4793 */
4794 if (!(status & MFI_REPLY_1078_MESSAGE_INTR)) {
4795 return (DDI_INTR_UNCLAIMED);
4796 }
4797
4798 /* clear the interrupt by writing back the same value */
4799 WR_OB_DOORBELL_CLEAR(status, instance);
4800
4801 /* dummy READ */
4802 status = RD_OB_INTR_STATUS(instance);
4803
4804 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared\n"));
4805
4806 return (DDI_INTR_CLAIMED);
4807 }
4808
4809 static int
4810 megasas_common_check(struct megasas_instance *instance,
4811 struct megasas_cmd *cmd)
4812 {
4813 int ret = DDI_SUCCESS;
4814
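	/*
	 * Check every DMA and register-access handle this command may have
	 * used; on any FMA error, report the service impact and mark the
	 * packet as a transport error so the target driver can retry.
	 */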
4815 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4816 DDI_SUCCESS) {
4817 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4818 if (cmd->pkt != NULL) {
4819 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4820 cmd->pkt->pkt_statistics = 0;
4821 }
4822 ret = DDI_FAILURE;
4823 }
4824 if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
4825 != DDI_SUCCESS) {
4826 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4827 if (cmd->pkt != NULL) {
4828 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4829 cmd->pkt->pkt_statistics = 0;
4830 }
4831 ret = DDI_FAILURE;
4832 }
4833 if (megasas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
4834 DDI_SUCCESS) {
4835 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4836 if (cmd->pkt != NULL) {
4837 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4838 cmd->pkt->pkt_statistics = 0;
4839 }
4840 ret = DDI_FAILURE;
4841 }
4842 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4843 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4844 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
4845 if (cmd->pkt != NULL) {
4846 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4847 cmd->pkt->pkt_statistics = 0;
4848 }
4849 ret = DDI_FAILURE;
4850 }
4851
4852 return (ret);
4853 }
4854
4855 /*ARGSUSED*/
4856 static int
4857 megasas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4858 {
4859 /*
4860 * as the driver can always deal with an error in any dma or
4861 * access handle, we can just return the fme_status value.
4862 */
4863 pci_ereport_post(dip, err, NULL);
4864 return (err->fme_status);
4865 }
4866
4867 static void
4868 megasas_fm_init(struct megasas_instance *instance)
4869 {
4870 /* Need to change iblock to priority for new MSI intr */
4871 ddi_iblock_cookie_t fm_ibc;
4872
4873 /* Only register with IO Fault Services if we have some capability */
4874 if (instance->fm_capabilities) {
4875 /* Adjust access and dma attributes for FMA */
4876 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4877 megasas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
4878
4879 /*
4880 * Register capabilities with IO Fault Services.
4881 * fm_capabilities will be updated to indicate
4882 * capabilities actually supported (not requested.)
4883 */
4884
4885 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
4886
4887 /*
4888 * Initialize pci ereport capabilities if ereport
4889 * capable (should always be.)
4890 */
4891
4892 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4893 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4894 pci_ereport_setup(instance->dip);
4895 }
4896
4897 /*
4898 * Register error callback if error callback capable.
4899 */
4900 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4901 ddi_fm_handler_register(instance->dip,
4902 megasas_fm_error_cb, (void*) instance);
4903 }
4904 } else {
4905 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4906 megasas_generic_dma_attr.dma_attr_flags = 0;
4907 }
4908 }
4909
4910 static void
4911 megasas_fm_fini(struct megasas_instance *instance)
4912 {
4913 /* Only unregister FMA capabilities if registered */
4914 if (instance->fm_capabilities) {
4915 /*
4916 * Un-register error callback if error callback capable.
4917 */
4918 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4919 ddi_fm_handler_unregister(instance->dip);
4920 }
4921
4922 /*
4923 * Release any resources allocated by pci_ereport_setup()
4924 */
4925 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4926 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4927 pci_ereport_teardown(instance->dip);
4928 }
4929
4930 /* Unregister from IO Fault Services */
4931 ddi_fm_fini(instance->dip);
4932
4933 /* Adjust access and dma attributes for FMA */
4934 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4935 megasas_generic_dma_attr.dma_attr_flags = 0;
4936 }
4937 }
4938
4939 int
4940 megasas_check_acc_handle(ddi_acc_handle_t handle)
4941 {
4942 ddi_fm_error_t de;
4943
4944 if (handle == NULL) {
4945 return (DDI_FAILURE);
4946 }
4947
4948 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4949
4950 return (de.fme_status);
4951 }
4952
4953 int
4954 megasas_check_dma_handle(ddi_dma_handle_t handle)
4955 {
4956 ddi_fm_error_t de;
4957
4958 if (handle == NULL) {
4959 return (DDI_FAILURE);
4960 }
4961
4962 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4963
4964 return (de.fme_status);
4965 }
4966
4967 void
4968 megasas_fm_ereport(struct megasas_instance *instance, char *detail)
4969 {
4970 uint64_t ena;
4971 char buf[FM_MAX_CLASS];
4972
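	/*
	 * Build an ereport class of the form DDI_FM_DEVICE.<detail> and
	 * post it with a freshly generated ENA, but only if this instance
	 * registered ereport capability.
	 */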
4973 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4974 ena = fm_ena_generate(0, FM_ENA_FMT1);
4975 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
4976 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
4977 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
4978 }
4979 }
4980