xref: /freebsd/sys/dev/mrsas/mrsas.c (revision 93e779a26c651610ac6e7986d67ecc9ed2cadcbf)
1 /*
2  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4  * Support: freebsdraid@avagotech.com
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer. 2. Redistributions
12  * in binary form must reproduce the above copyright notice, this list of
13  * conditions and the following disclaimer in the documentation and/or other
14  * materials provided with the distribution. 3. Neither the name of the
15  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16  * promote products derived from this software without specific prior written
17  * permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing
33  * official policies,either expressed or implied, of the FreeBSD Project.
34  *
35  * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37  *
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
45 
46 #include <cam/cam.h>
47 #include <cam/cam_ccb.h>
48 
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/kthread.h>
52 #include <sys/taskqueue.h>
53 #include <sys/smp.h>
54 
55 
56 /*
57  * Function prototypes
58  */
59 static d_open_t mrsas_open;
60 static d_close_t mrsas_close;
61 static d_read_t mrsas_read;
62 static d_write_t mrsas_write;
63 static d_ioctl_t mrsas_ioctl;
64 static d_poll_t mrsas_poll;
65 
66 static struct mrsas_mgmt_info mrsas_mgmt_info;
67 static struct mrsas_ident *mrsas_find_ident(device_t);
68 static int mrsas_setup_msix(struct mrsas_softc *sc);
69 static int mrsas_allocate_msix(struct mrsas_softc *sc);
70 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
71 static void mrsas_flush_cache(struct mrsas_softc *sc);
72 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
73 static void mrsas_ocr_thread(void *arg);
74 static int mrsas_get_map_info(struct mrsas_softc *sc);
75 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
76 static int mrsas_sync_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_pd_list(struct mrsas_softc *sc);
78 static int mrsas_get_ld_list(struct mrsas_softc *sc);
79 static int mrsas_setup_irq(struct mrsas_softc *sc);
80 static int mrsas_alloc_mem(struct mrsas_softc *sc);
81 static int mrsas_init_fw(struct mrsas_softc *sc);
82 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
83 static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
84 static int mrsas_clear_intr(struct mrsas_softc *sc);
85 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
86 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
87 static int
88 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
89     struct mrsas_mfi_cmd *cmd_to_abort);
90 static struct mrsas_softc *
91 mrsas_get_softc_instance(struct cdev *dev,
92     u_long cmd, caddr_t arg);
93 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
94 u_int8_t
95 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
96     struct mrsas_mfi_cmd *mfi_cmd);
97 void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
98 int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
99 int	mrsas_init_adapter(struct mrsas_softc *sc);
100 int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
101 int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
102 int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
103 int	mrsas_ioc_init(struct mrsas_softc *sc);
104 int	mrsas_bus_scan(struct mrsas_softc *sc);
105 int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
106 int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
107 int	mrsas_reset_ctrl(struct mrsas_softc *sc);
108 int	mrsas_wait_for_outstanding(struct mrsas_softc *sc);
109 int
110 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
111     struct mrsas_mfi_cmd *cmd);
112 int
113 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
114     int size);
115 void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
116 void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
117 void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
118 void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
119 void	mrsas_disable_intr(struct mrsas_softc *sc);
120 void	mrsas_enable_intr(struct mrsas_softc *sc);
121 void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
122 void	mrsas_free_mem(struct mrsas_softc *sc);
123 void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
124 void	mrsas_isr(void *arg);
125 void	mrsas_teardown_intr(struct mrsas_softc *sc);
126 void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
127 void	mrsas_kill_hba(struct mrsas_softc *sc);
128 void	mrsas_aen_handler(struct mrsas_softc *sc);
129 void
130 mrsas_write_reg(struct mrsas_softc *sc, int offset,
131     u_int32_t value);
132 void
133 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
134     u_int32_t req_desc_hi);
135 void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
136 void
137 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
138     struct mrsas_mfi_cmd *cmd, u_int8_t status);
139 void
140 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
141     u_int8_t extStatus);
142 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
143 
144 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
145         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
146 
147 extern int mrsas_cam_attach(struct mrsas_softc *sc);
148 extern void mrsas_cam_detach(struct mrsas_softc *sc);
149 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
150 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
151 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
152 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
153 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
154 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
155 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
156 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
157 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
158 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
159 extern void mrsas_xpt_release(struct mrsas_softc *sc);
160 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
161 mrsas_get_request_desc(struct mrsas_softc *sc,
162     u_int16_t index);
163 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
164 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
165 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
166 
167 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
168 
169 /*
170  * PCI device struct and table
171  *
172  */
/*
 * PCI identity entry used to match supported controllers; see
 * mrsas_find_ident() for the matching rules.
 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* PCI subsystem vendor ID; 0xffff matches any */
	uint16_t subdevice;	/* PCI subsystem device ID; 0xffff matches any */
	const char *desc;	/* human-readable controller description */
}	MRSAS_CTLR_ID;
180 
/*
 * Controllers claimed by this driver.  The list is terminated by an
 * all-zero sentinel entry (vendor == 0); the 0xffff subsystem IDs act
 * as wildcards during matching.
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0, 0, 0, 0, NULL}
};
187 
188 /*
189  * Character device entry points
190  *
191  */
/*
 * Entry points for the /dev/mrsas%u management character device.  The
 * open/close/read/write handlers are no-ops; all management traffic
 * goes through mrsas_ioctl() and mrsas_poll().
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
202 
203 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
204 
205 /*
206  * In the cdevsw routines, we find our softc by using the si_drv1 member of
207  * struct cdev.  We set this variable to point to our softc in our attach
208  * routine when we create the /dev entry.
209  */
/*
 * mrsas_open:	Character device open entry point
 *
 * No per-open state is required; management commands are handled
 * entirely in mrsas_ioctl().  Always succeeds.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}
218 
/*
 * mrsas_close:	Character device close entry point
 *
 * Nothing to tear down per open; always succeeds.
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}
227 
/*
 * mrsas_read:	Character device read entry point
 *
 * Reads are not supported on the management node; returns success with
 * no data transferred.
 */
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
/*
 * mrsas_write:	Character device write entry point
 *
 * Writes are not supported on the management node; returns success with
 * no data transferred.
 */
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
244 
245 /*
246  * Register Read/Write Functions
247  *
248  */
249 void
250 mrsas_write_reg(struct mrsas_softc *sc, int offset,
251     u_int32_t value)
252 {
253 	bus_space_tag_t bus_tag = sc->bus_tag;
254 	bus_space_handle_t bus_handle = sc->bus_handle;
255 
256 	bus_space_write_4(bus_tag, bus_handle, offset, value);
257 }
258 
259 u_int32_t
260 mrsas_read_reg(struct mrsas_softc *sc, int offset)
261 {
262 	bus_space_tag_t bus_tag = sc->bus_tag;
263 	bus_space_handle_t bus_handle = sc->bus_handle;
264 
265 	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
266 }
267 
268 
269 /*
270  * Interrupt Disable/Enable/Clear Functions
271  *
272  */
273 void
274 mrsas_disable_intr(struct mrsas_softc *sc)
275 {
276 	u_int32_t mask = 0xFFFFFFFF;
277 	u_int32_t status;
278 
279 	sc->mask_interrupts = 1;
280 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
281 	/* Dummy read to force pci flush */
282 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
283 }
284 
285 void
286 mrsas_enable_intr(struct mrsas_softc *sc)
287 {
288 	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
289 	u_int32_t status;
290 
291 	sc->mask_interrupts = 0;
292 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
293 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
294 
295 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
296 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
297 }
298 
/*
 * mrsas_clear_intr:	Identify and acknowledge a controller interrupt
 * @sc:		adapter soft state
 *
 * Returns 1 if the interrupt belongs to this adapter (FW state change
 * or reply interrupt), 0 if it is not ours and should be passed on.
 * A FW state-change interrupt is acknowledged by writing the status
 * value back; reply interrupts are left for the reply-queue handler.
 */
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status, fw_status, fw_state;

	/* Read received interrupt */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/*
	 * If FW state change interrupt is received, write to it again to
	 * clear
	 */
	if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
		fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			/* Kick the OCR thread so it can attempt recovery. */
			device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
			if (sc->ocr_thread_active)
				wakeup(&sc->ocr_chan);
		}
		/* Write-1-to-clear, then read back to flush the posted write. */
		mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
		mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
		return (1);
	}
	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}
331 
332 /*
333  * PCI Support Functions
334  *
335  */
336 static struct mrsas_ident *
337 mrsas_find_ident(device_t dev)
338 {
339 	struct mrsas_ident *pci_device;
340 
341 	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
342 		if ((pci_device->vendor == pci_get_vendor(dev)) &&
343 		    (pci_device->device == pci_get_device(dev)) &&
344 		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
345 		    (pci_device->subvendor == 0xffff)) &&
346 		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
347 		    (pci_device->subdevice == 0xffff)))
348 			return (pci_device);
349 	}
350 	return (NULL);
351 }
352 
353 static int
354 mrsas_probe(device_t dev)
355 {
356 	static u_int8_t first_ctrl = 1;
357 	struct mrsas_ident *id;
358 
359 	if ((id = mrsas_find_ident(dev)) != NULL) {
360 		if (first_ctrl) {
361 			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
362 			    MRSAS_VERSION);
363 			first_ctrl = 0;
364 		}
365 		device_set_desc(dev, id->desc);
366 		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
367 		return (-30);
368 	}
369 	return (ENXIO);
370 }
371 
372 /*
373  * mrsas_setup_sysctl:	setup sysctl values for mrsas
374  * input:				Adapter instance soft state
375  *
376  * Setup sysctl entries for mrsas driver.
377  */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/*
	 * Prefer the context/tree newbus created for this device; fall
	 * back to a driver-private context under hw.mrsas.<unit> if the
	 * device tree is unavailable.
	 */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	/* Read/write knobs. */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	/* Read-only statistics exported from the softc. */
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

}
445 
446 /*
447  * mrsas_get_tunables:	get tunable parameters.
448  * input:				Adapter instance soft state
449  *
450  * Get tunable parameters. This will help to debug driver at boot time.
451  */
452 static void
453 mrsas_get_tunables(struct mrsas_softc *sc)
454 {
455 	char tmpstr[80];
456 
457 	/* XXX default to some debugging for now */
458 	sc->mrsas_debug = MRSAS_FAULT;
459 	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
460 	sc->mrsas_fw_fault_check_delay = 1;
461 	sc->reset_count = 0;
462 	sc->reset_in_progress = 0;
463 
464 	/*
465 	 * Grab the global variables.
466 	 */
467 	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
468 
469 	/*
470 	 * Grab the global variables.
471 	 */
472 	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
473 
474 	/* Grab the unit-instance variables */
475 	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
476 	    device_get_unit(sc->mrsas_dev));
477 	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
478 }
479 
480 /*
481  * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
482  * Used to get sequence number at driver load time.
483  * input:		Adapter soft state
484  *
485  * Allocates DMAable memory for the event log info internal command.
486  */
487 int
488 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
489 {
490 	int el_info_size;
491 
492 	/* Allocate get event log info command */
493 	el_info_size = sizeof(struct mrsas_evt_log_info);
494 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
495 	    1, 0,
496 	    BUS_SPACE_MAXADDR_32BIT,
497 	    BUS_SPACE_MAXADDR,
498 	    NULL, NULL,
499 	    el_info_size,
500 	    1,
501 	    el_info_size,
502 	    BUS_DMA_ALLOCNOW,
503 	    NULL, NULL,
504 	    &sc->el_info_tag)) {
505 		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
506 		return (ENOMEM);
507 	}
508 	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
509 	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
510 		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
511 		return (ENOMEM);
512 	}
513 	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
514 	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
515 	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
516 		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
517 		return (ENOMEM);
518 	}
519 	memset(sc->el_info_mem, 0, el_info_size);
520 	return (0);
521 }
522 
523 /*
524  * mrsas_free_evt_info_cmd:	Free memory for Event log info command
525  * input:					Adapter soft state
526  *
527  * Deallocates memory for the event log info internal command.
528  */
529 void
530 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
531 {
532 	if (sc->el_info_phys_addr)
533 		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
534 	if (sc->el_info_mem != NULL)
535 		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
536 	if (sc->el_info_tag != NULL)
537 		bus_dma_tag_destroy(sc->el_info_tag);
538 }
539 
540 /*
541  *  mrsas_get_seq_num:	Get latest event sequence number
542  *  @sc:				Adapter soft state
543  *  @eli:				Firmware event log sequence number information.
544  *
545  * Firmware maintains a log of all events in a non-volatile area.
546  * Driver get the sequence number using DCMD
547  * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
548  */
549 
550 static int
551 mrsas_get_seq_num(struct mrsas_softc *sc,
552     struct mrsas_evt_log_info *eli)
553 {
554 	struct mrsas_mfi_cmd *cmd;
555 	struct mrsas_dcmd_frame *dcmd;
556 
557 	cmd = mrsas_get_mfi_cmd(sc);
558 
559 	if (!cmd) {
560 		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
561 		return -ENOMEM;
562 	}
563 	dcmd = &cmd->frame->dcmd;
564 
565 	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
566 		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
567 		mrsas_release_mfi_cmd(cmd);
568 		return -ENOMEM;
569 	}
570 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
571 
572 	dcmd->cmd = MFI_CMD_DCMD;
573 	dcmd->cmd_status = 0x0;
574 	dcmd->sge_count = 1;
575 	dcmd->flags = MFI_FRAME_DIR_READ;
576 	dcmd->timeout = 0;
577 	dcmd->pad_0 = 0;
578 	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
579 	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
580 	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
581 	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
582 
583 	mrsas_issue_blocked_cmd(sc, cmd);
584 
585 	/*
586 	 * Copy the data back into callers buffer
587 	 */
588 	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
589 	mrsas_free_evt_log_info_cmd(sc);
590 	mrsas_release_mfi_cmd(cmd);
591 
592 	return 0;
593 }
594 
595 
596 /*
597  *  mrsas_register_aen:		Register for asynchronous event notification
598  *  @sc:			Adapter soft state
599  *  @seq_num:			Starting sequence number
600  *  @class_locale:		Class of the event
601  *
602  *  This function subscribes for events beyond the @seq_num
603  *  and type @class_locale.
604  *
605  */
/*
 * mrsas_register_aen:	Register for asynchronous event notification
 * @sc:			Adapter soft state
 * @seq_num:		Starting sequence number
 * @class_locale_word:	Packed class/locale of events to subscribe to
 *
 * Subscribes for FW events beyond @seq_num matching @class_locale_word.
 * If a previous AEN registration already covers the request, nothing is
 * done; otherwise the old registration is aborted and re-issued with a
 * superset class/locale.  Returns 0 on success, non-zero on failure.
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		/* mbox.w[1] holds the class/locale of the pending AEN. */
		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort "
				    "previous AEN command\n");
				return ret_val;
			}
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/*
	 * NOTE(review): presumably this guards against a racing
	 * registration that re-armed aen_cmd while we built the frame —
	 * confirm; the abort path above should have cleared it.
	 */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		/*
		 * NOTE(review): sc->aen_cmd is left pointing at the failed
		 * cmd here and the cmd is not released — verify whether the
		 * caller or a later abort path recovers this.
		 */
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
714 
715 /*
716  * mrsas_start_aen:	Subscribes to AEN during driver load time
717  * @instance:		Adapter soft state
718  */
719 static int
720 mrsas_start_aen(struct mrsas_softc *sc)
721 {
722 	struct mrsas_evt_log_info eli;
723 	union mrsas_evt_class_locale class_locale;
724 
725 
726 	/* Get the latest sequence number from FW */
727 
728 	memset(&eli, 0, sizeof(eli));
729 
730 	if (mrsas_get_seq_num(sc, &eli))
731 		return -1;
732 
733 	/* Register AEN with FW for latest sequence number plus 1 */
734 	class_locale.members.reserved = 0;
735 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
736 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
737 
738 	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
739 	    class_locale.word);
740 
741 }
742 
743 /*
744  * mrsas_setup_msix:	Allocate MSI-x vectors
745  * @sc:					adapter soft state
746  */
/*
 * mrsas_setup_msix:	Hook up the allocated MSI-x vectors
 * @sc:		adapter soft state
 *
 * For each of sc->msix_vectors, allocates the IRQ resource (rids are
 * 1-based for MSI-x) and installs mrsas_isr with a per-vector context.
 * On any failure, everything set up so far is torn down via
 * mrsas_teardown_intr().  Returns SUCCESS or FAIL.
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		/* MSI-x resource IDs start at 1, not 0. */
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	/* Release any vectors already allocated/hooked before the failure. */
	mrsas_teardown_intr(sc);
	return (FAIL);
}
779 
780 /*
781  * mrsas_allocate_msix:		Setup MSI-x vectors
782  * @sc:						adapter soft state
783  */
784 static int
785 mrsas_allocate_msix(struct mrsas_softc *sc)
786 {
787 	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
788 		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
789 		    " of vectors\n", sc->msix_vectors);
790 	} else {
791 		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
792 		goto irq_alloc_failed;
793 	}
794 	return SUCCESS;
795 
796 irq_alloc_failed:
797 	mrsas_teardown_intr(sc);
798 	return (FAIL);
799 }
800 
801 /*
802  * mrsas_attach:	PCI entry point
803  * input:			pointer to device struct
804  *
805  * Performs setup of PCI and registers, initializes mutexes and linked lists,
806  * registers interrupts and CAM, and initializes   the adapter/controller to
807  * its proper state.
808  */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, bar, error;
	struct cdev *linux_dev;

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* NOTE(review): 'bar' is read here but never used afterwards. */
	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

	/* Map the register window so mrsas_read_reg/write_reg work. */
	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Intialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);

	/*
	 * Intialize a counting Semaphore to take care no. of concurrent
	 * IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION);

	/* Intialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);

	sc->io_cmds_highwater = 0;

	/* Create a /dev entry for this device. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(dev));
	/* Linux-compat alias node, created on the first unit only. */
	if (device_get_unit(dev) == 0)
		make_dev_alias_p(MAKEDEV_CHECKNAME, &linux_dev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register SCSI mid-layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Start the online-controller-reset (OCR) watchdog thread. */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		printf("Error %d starting rescan thread\n", error);
		goto attach_fail_irq;
	}
	mrsas_setup_sysctl(sc);

	/* Initiate AEN (Asynchronous Event Notification) */

	if (mrsas_start_aen(sc)) {
		printf("Error: start aen failed\n");
		goto fail_start_aen;
	}
	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	return (0);

	/* Error unwinding: labels run in reverse order of acquisition. */
fail_start_aen:
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);
attach_fail:
	/*
	 * NOTE(review): this label is only reached before make_dev() runs,
	 * so sc->mrsas_cdev is still NULL (softc is zeroed) — confirm that
	 * destroy_dev() tolerates a NULL cdev on this code path.
	 */
	destroy_dev(sc->mrsas_cdev);
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
955 
956 /*
957  * mrsas_detach:	De-allocates and teardown resources
958  * input:			pointer to device struct
959  *
960  * This function is the entry point for device disconnect and detach.
961  * It performs memory de-allocations, shutdown of the controller and various
962  * teardown and destroy resource functions.
963  */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Block new ioctls/resets from starting while we tear down. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/*
	 * Nudge the OCR thread (it sleeps on ocr_chan) so it can observe
	 * remove_in_progress and exit, then wait out any reset already
	 * running before shutting the controller down.
	 */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for ocr to be finished\n", i);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Wait for the OCR kernel thread to actually terminate. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Flush controller cache and shut the firmware down cleanly. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	/* Interrupts off before CAM/IRQ/memory teardown. */
	mrsas_disable_intr(sc);
	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1040 
1041 /*
1042  * mrsas_free_mem:		Frees allocated memory
1043  * input:				Adapter instance soft state
1044  *
1045  * This function is called from mrsas_detach() to free previously allocated
1046  * memory.
1047  */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory
	 *
	 * Two copies (index 0/1) are kept so one can be updated while the
	 * other is in use.  For each DMA area the teardown order is:
	 * unload the map, free the memory, then destroy the tag.
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}

	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);


	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free MFI frames
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 *
	 * NOTE(review): entries are assumed fully populated (non-NULL with a
	 * created data_dmamap) whenever mpt_cmd_list is set — confirm against
	 * the allocation path.
	 */
	max_cmd = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_cmd; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}
1186 
1187 /*
1188  * mrsas_teardown_intr:	Teardown interrupt
1189  * input:				Adapter instance soft state
1190  *
1191  * This function is called from mrsas_detach() to teardown and release bus
 * interrupt resource.
1193  */
1194 void
1195 mrsas_teardown_intr(struct mrsas_softc *sc)
1196 {
1197 	int i;
1198 
1199 	if (!sc->msix_enable) {
1200 		if (sc->intr_handle[0])
1201 			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1202 		if (sc->mrsas_irq[0] != NULL)
1203 			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1204 			    sc->irq_id[0], sc->mrsas_irq[0]);
1205 		sc->intr_handle[0] = NULL;
1206 	} else {
1207 		for (i = 0; i < sc->msix_vectors; i++) {
1208 			if (sc->intr_handle[i])
1209 				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1210 				    sc->intr_handle[i]);
1211 
1212 			if (sc->mrsas_irq[i] != NULL)
1213 				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1214 				    sc->irq_id[i], sc->mrsas_irq[i]);
1215 
1216 			sc->intr_handle[i] = NULL;
1217 		}
1218 		pci_release_msi(sc->mrsas_dev);
1219 	}
1220 
1221 }
1222 
1223 /*
1224  * mrsas_suspend:	Suspend entry point
1225  * input:			Device struct pointer
1226  *
1227  * This function is the entry point for system suspend from the OS.
1228  */
1229 static int
1230 mrsas_suspend(device_t dev)
1231 {
1232 	struct mrsas_softc *sc;
1233 
1234 	sc = device_get_softc(dev);
1235 	return (0);
1236 }
1237 
1238 /*
1239  * mrsas_resume:	Resume entry point
1240  * input:			Device struct pointer
1241  *
1242  * This function is the entry point for system resume from the OS.
1243  */
1244 static int
1245 mrsas_resume(device_t dev)
1246 {
1247 	struct mrsas_softc *sc;
1248 
1249 	sc = device_get_softc(dev);
1250 	return (0);
1251 }
1252 
1253 /**
1254  * mrsas_get_softc_instance:    Find softc instance based on cmd type
1255  *
1256  * This function will return softc instance based on cmd type.
1257  * In some case, application fire ioctl on required management instance and
1258  * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1259  * case, else get the softc instance from host_no provided by application in
1260  * user data.
1261  */
1262 
1263 static struct mrsas_softc *
1264 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1265 {
1266 	struct mrsas_softc *sc = NULL;
1267 	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1268 
1269 	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1270 		sc = dev->si_drv1;
1271 	} else {
1272 		/*
1273 		 * get the Host number & the softc from data sent by the
1274 		 * Application
1275 		 */
1276 		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1277 		if ((user_ioc->host_no >= mrsas_mgmt_info.max_index) || (sc == NULL)) {
1278 			if (sc == NULL)
1279 				mrsas_dprint(sc, MRSAS_FAULT,
1280 				    "There is no Controller number %d .\n", user_ioc->host_no);
1281 			else
1282 				mrsas_dprint(sc, MRSAS_FAULT,
1283 				    "Invalid Controller number %d .\n", user_ioc->host_no);
1284 		}
1285 	}
1286 
1287 	return sc;
1288 }
1289 
1290 /*
1291  * mrsas_ioctl:	IOCtl commands entry point.
1292  *
1293  * This function is the entry point for IOCtls from the OS.  It calls the
1294  * appropriate function for processing depending on the command received.
1295  */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	/* Resolve the target controller from the cdev or the user packet. */
	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	/* Refuse new ioctls once detach/shutdown has started. */
	if (sc->remove_in_progress) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Driver remove or shutdown called.\n");
		return ENOENT;
	}
	/*
	 * Sample reset_in_progress under the spin lock; if no OCR is
	 * running we can proceed immediately, otherwise drop the lock and
	 * poll (sleeping) until the reset completes.
	 */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "OCR to be finished %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report this controller's PCI location to the caller. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1374 
1375 /*
1376  * mrsas_poll:	poll entry point for mrsas driver fd
1377  *
1378  * This function is the entry point for poll from the OS.  It waits for some AEN
1379  * events to be triggered from the controller and notifies back.
1380  */
1381 static int
1382 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1383 {
1384 	struct mrsas_softc *sc;
1385 	int revents = 0;
1386 
1387 	sc = dev->si_drv1;
1388 
1389 	if (poll_events & (POLLIN | POLLRDNORM)) {
1390 		if (sc->mrsas_aen_triggered) {
1391 			revents |= poll_events & (POLLIN | POLLRDNORM);
1392 		}
1393 	}
1394 	if (revents == 0) {
1395 		if (poll_events & (POLLIN | POLLRDNORM)) {
1396 			mtx_lock(&sc->aen_lock);
1397 			sc->mrsas_poll_waiting = 1;
1398 			selrecord(td, &sc->mrsas_select);
1399 			mtx_unlock(&sc->aen_lock);
1400 		}
1401 	}
1402 	return revents;
1403 }
1404 
1405 /*
1406  * mrsas_setup_irq:	Set up interrupt
1407  * input:			Adapter instance soft state
1408  *
1409  * This function sets up interrupts as a bus resource, with flags indicating
1410  * resource permitting contemporaneous sharing and for resource to activate
1411  * atomically.
1412  */
1413 static int
1414 mrsas_setup_irq(struct mrsas_softc *sc)
1415 {
1416 	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1417 		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1418 
1419 	else {
1420 		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1421 		sc->irq_context[0].sc = sc;
1422 		sc->irq_context[0].MSIxIndex = 0;
1423 		sc->irq_id[0] = 0;
1424 		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1425 		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1426 		if (sc->mrsas_irq[0] == NULL) {
1427 			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1428 			    "interrupt\n");
1429 			return (FAIL);
1430 		}
1431 		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1432 		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1433 		    &sc->irq_context[0], &sc->intr_handle[0])) {
1434 			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1435 			    "interrupt\n");
1436 			return (FAIL);
1437 		}
1438 	}
1439 	return (0);
1440 }
1441 
1442 /*
1443  * mrsas_isr:	ISR entry point
1444  * input:		argument pointer
1445  *
1446  * This function is the interrupt service routine entry point.  There are two
1447  * types of interrupts, state change interrupt and response interrupt.  If an
1448  * interrupt is not ours, we just return.
1449  */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	/* Interrupts are masked during OCR; ignore anything that slips in. */
	if (sc->mask_interrupts)
		return;

	/*
	 * With legacy INTx the line may be shared: check-and-clear the
	 * status register and bail if the interrupt is not ours.  MSI-x
	 * vectors are exclusive, so no such check is needed.
	 */
	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}
1477 
1478 /*
1479  * mrsas_complete_cmd:	Process reply request
1480  * input:				Adapter instance soft state
1481  *
1482  * This function is called from mrsas_isr() to process reply request and clear
1483  * response interrupt. Processing of the reply request entails walking
1484  * through the reply descriptor array for the command request  pended from
1485  * Firmware.  We look at the Function field to determine the command type and
1486  * perform the appropriate action.  Before we return, we clear the response
1487  * interrupt.
1488  */
static int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;


	/* If we have a hardware error, no need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/*
	 * Each MSI-x vector owns its own slice of the reply descriptor
	 * area; resume the walk at this vector's saved index.
	 */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Find our reply descriptor for the command and process */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		/* SMID is 1-based; 0 is reserved, hence the -1 below. */
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			/* Undo the load-balance accounting for this PD. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			/* Map FW status to CAM status and finish the CCB. */
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			mrsas_atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		/* Advance this vector's index, wrapping at queue depth. */
		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if ((sc->device_id == MRSAS_INVADER) ||
				    (sc->device_id == MRSAS_FURY))
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if ((sc->device_id == MRSAS_INVADER) ||
		    (sc->device_id == MRSAS_FURY)) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
1621 
1622 /*
1623  * mrsas_map_mpt_cmd_status:	Allocate DMAable memory.
1624  * input:						Adapter instance soft state
1625  *
1626  * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1627  * It checks the command status and maps the appropriate CAM status for the
1628  * CCB.
1629  */
1630 void
1631 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1632 {
1633 	struct mrsas_softc *sc = cmd->sc;
1634 	u_int8_t *sense_data;
1635 
1636 	switch (status) {
1637 	case MFI_STAT_OK:
1638 		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1639 		break;
1640 	case MFI_STAT_SCSI_IO_FAILED:
1641 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1642 		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1643 		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1644 		if (sense_data) {
1645 			/* For now just copy 18 bytes back */
1646 			memcpy(sense_data, cmd->sense, 18);
1647 			cmd->ccb_ptr->csio.sense_len = 18;
1648 			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1649 		}
1650 		break;
1651 	case MFI_STAT_LD_OFFLINE:
1652 	case MFI_STAT_DEVICE_NOT_FOUND:
1653 		if (cmd->ccb_ptr->ccb_h.target_lun)
1654 			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1655 		else
1656 			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1657 		break;
1658 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1659 		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1660 		break;
1661 	default:
1662 		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1663 		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1664 		cmd->ccb_ptr->csio.scsi_status = status;
1665 	}
1666 	return;
1667 }
1668 
1669 /*
1670  * mrsas_alloc_mem:	Allocate DMAable memory
1671  * input:			Adapter instance soft state
1672  *
1673  * This function creates the parent DMA tag and allocates DMAable memory. DMA
1674  * tag describes constraints of DMA mapping. Memory allocated is mapped into
1675  * Kernel virtual address. Callback argument is physical memory address.
1676  */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
	          chain_frame_size, evt_detail_size, count;

	/*
	 * Allocate parent DMA tag
	 *
	 * All per-structure tags below derive from this one.  On any
	 * failure we simply return ENOMEM; the caller is expected to run
	 * mrsas_free_mem() to release whatever was allocated so far.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MRSAS_MAX_IO_SIZE,		/* maxsize */
	    MRSAS_MAX_SGL,		/* nsegments */
	    MRSAS_MAX_IO_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames (16-byte aligned, per firmware spec)
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	/* One reply-descriptor slice per MSI-x vector (at least one). */
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array.  Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}
	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  This tag uses busdma_lock_mutex with
	 * io_lock since data maps are loaded/unloaded at I/O time.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MRSAS_MAX_IO_SIZE,
	    MRSAS_MAX_SGL,
	    MRSAS_MAX_IO_SIZE,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}
1903 
1904 /*
1905  * mrsas_addr_cb:	Callback function of bus_dmamap_load()
1906  * input:			callback argument, machine dependent type
1907  * 					that describes DMA segments, number of segments, error code
1908  *
1909  * This function is for the driver to receive mapping information resultant of
1910  * the bus_dmamap_load(). The information is actually not being used, but the
1911  * address is saved anyway.
1912  */
1913 void
1914 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1915 {
1916 	bus_addr_t *addr;
1917 
1918 	addr = arg;
1919 	*addr = segs[0].ds_addr;
1920 }
1921 
1922 /*
1923  * mrsas_setup_raidmap:	Set up RAID map.
1924  * input:				Adapter instance soft state
1925  *
1926  * Allocate DMA memory for the RAID maps and perform setup.
1927  */
1928 static int
1929 mrsas_setup_raidmap(struct mrsas_softc *sc)
1930 {
1931 	int i;
1932 
1933 	for (i = 0; i < 2; i++) {
1934 		sc->ld_drv_map[i] =
1935 		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1936 		/* Do Error handling */
1937 		if (!sc->ld_drv_map[i]) {
1938 			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
1939 
1940 			if (i == 1)
1941 				free(sc->ld_drv_map[0], M_MRSAS);
1942 			/* ABORT driver initialization */
1943 			goto ABORT;
1944 		}
1945 	}
1946 
1947 	for (int i = 0; i < 2; i++) {
1948 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
1949 		    4, 0,
1950 		    BUS_SPACE_MAXADDR_32BIT,
1951 		    BUS_SPACE_MAXADDR,
1952 		    NULL, NULL,
1953 		    sc->max_map_sz,
1954 		    1,
1955 		    sc->max_map_sz,
1956 		    BUS_DMA_ALLOCNOW,
1957 		    NULL, NULL,
1958 		    &sc->raidmap_tag[i])) {
1959 			device_printf(sc->mrsas_dev,
1960 			    "Cannot allocate raid map tag.\n");
1961 			return (ENOMEM);
1962 		}
1963 		if (bus_dmamem_alloc(sc->raidmap_tag[i],
1964 		    (void **)&sc->raidmap_mem[i],
1965 		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
1966 			device_printf(sc->mrsas_dev,
1967 			    "Cannot allocate raidmap memory.\n");
1968 			return (ENOMEM);
1969 		}
1970 		bzero(sc->raidmap_mem[i], sc->max_map_sz);
1971 
1972 		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
1973 		    sc->raidmap_mem[i], sc->max_map_sz,
1974 		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
1975 		    BUS_DMA_NOWAIT)) {
1976 			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
1977 			return (ENOMEM);
1978 		}
1979 		if (!sc->raidmap_mem[i]) {
1980 			device_printf(sc->mrsas_dev,
1981 			    "Cannot allocate memory for raid map.\n");
1982 			return (ENOMEM);
1983 		}
1984 	}
1985 
1986 	if (!mrsas_get_map_info(sc))
1987 		mrsas_sync_map_info(sc);
1988 
1989 	return (0);
1990 
1991 ABORT:
1992 	return (1);
1993 }
1994 
1995 /*
1996  * mrsas_init_fw:	Initialize Firmware
1997  * input:			Adapter soft state
1998  *
1999  * Calls transition_to_ready() to make sure Firmware is in operational state and
2000  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
2001  * issues internal commands to get the controller info after the IOC_INIT
2002  * command response is received by Firmware.  Note:  code relating to
2003  * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2004  * is left here as placeholder.
2005  */
static int
mrsas_init_fw(struct mrsas_softc *sc)
{

	int ret, loop, ocr = 0;
	u_int32_t max_sectors_1;
	u_int32_t max_sectors_2;
	u_int32_t tmp_sectors;
	u_int32_t scratch_pad_2;
	int msix_enable = 0;
	int fw_msix_count = 0;

	/* Make sure Firmware is ready */
	ret = mrsas_transition_to_ready(sc, ocr);
	if (ret != SUCCESS) {
		return (ret);
	}
	/* MSI-x index 0- reply post host index register */
	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
	/*
	 * Check if MSI-X is supported while in ready state: bit 26
	 * (0x4000000 >> 0x1a) of the outbound scratch pad advertises it.
	 */
	msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;

	if (msix_enable) {
		scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));

		/* Check max MSI-X vectors */
		if (sc->device_id == MRSAS_TBOLT) {
			sc->msix_vectors = (scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
			fw_msix_count = sc->msix_vectors;
		} else {
			/* Invader/Fury supports 96 MSI-X vectors */
			sc->msix_vectors = ((scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
			fw_msix_count = sc->msix_vectors;

			/*
			 * Reply post host index registers beyond the first
			 * live in the supplemental block, 0x10 bytes apart.
			 */
			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
			    loop++) {
				sc->msix_reg_offset[loop] =
				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
				    (loop * 0x10);
			}
		}

		/* Don't bother allocating more MSI-X vectors than cpus */
		sc->msix_vectors = min(sc->msix_vectors,
		    mp_ncpus);

		/* Allocate MSI-x vectors */
		if (mrsas_allocate_msix(sc) == SUCCESS)
			sc->msix_enable = 1;
		else
			sc->msix_enable = 0;

		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
		    "Online CPU %d Current MSIX <%d>\n",
		    fw_msix_count, mp_ncpus, sc->msix_vectors);
	}
	/* Size command pools and send IOC_INIT to the firmware. */
	if (mrsas_init_adapter(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
		return (1);
	}
	/* Allocate internal commands for pass-thru */
	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
		return (1);
	}
	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
	if (!sc->ctrl_info) {
		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
		return (1);
	}
	/*
	 * Get the controller info from FW, so that the MAX VD support
	 * availability can be decided.
	 */
	if (mrsas_get_ctrl_info(sc)) {
		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
		return (1);
	}
	sc->secure_jbod_support =
	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;

	if (sc->secure_jbod_support)
		device_printf(sc->mrsas_dev, "FW supports SED \n");

	if (mrsas_setup_raidmap(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
		return (1);
	}
	/* For pass-thru, get PD/LD list and controller info */
	memset(sc->pd_list, 0,
	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
	mrsas_get_pd_list(sc);

	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
	mrsas_get_ld_list(sc);

	/*
	 * Compute the max allowed sectors per IO: The controller info has
	 * two limits on max sectors. Driver should use the minimum of these
	 * two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares ( < FW ver 30) didn't report information to
	 * calculate max_sectors_1. So the number ended up as zero always.
	 */
	tmp_sectors = 0;
	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
	    sc->ctrl_info->max_strips_per_io;
	max_sectors_2 = sc->ctrl_info->max_request_size;
	tmp_sectors = min(max_sectors_1, max_sectors_2);
	/* SGE-derived limit: sectors addressable by a full scatter list. */
	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;

	/* If FW reported a usable limit, clamp to it. */
	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
		sc->max_sectors_per_req = tmp_sectors;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
	sc->UnevenSpanSupport =
	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
	if (sc->UnevenSpanSupport) {
		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
		    sc->UnevenSpanSupport);

		/* Fast-path IO is enabled only if the RAID map validates. */
		if (MR_ValidateMapInfo(sc))
			sc->fast_path_io = 1;
		else
			sc->fast_path_io = 0;
	}
	return (0);
}
2141 
2142 /*
2143  * mrsas_init_adapter:	Initializes the adapter/controller
2144  * input:				Adapter soft state
2145  *
2146  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2147  * ROC/controller.  The FW register is read to determined the number of
2148  * commands that is supported.  All memory allocations for IO is based on
2149  * max_cmd.  Appropriate calculations are performed in this function.
2150  */
2151 int
2152 mrsas_init_adapter(struct mrsas_softc *sc)
2153 {
2154 	uint32_t status;
2155 	u_int32_t max_cmd;
2156 	int ret;
2157 	int i = 0;
2158 
2159 	/* Read FW status register */
2160 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2161 
2162 	/* Get operational params from status register */
2163 	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2164 
2165 	/* Decrement the max supported by 1, to correlate with FW */
2166 	sc->max_fw_cmds = sc->max_fw_cmds - 1;
2167 	max_cmd = sc->max_fw_cmds;
2168 
2169 	/* Determine allocation size of command frames */
2170 	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
2171 	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2172 	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2173 	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2174 	sc->chain_frames_alloc_sz = 1024 * max_cmd;
2175 	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2176 	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2177 
2178 	sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
2179 	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2180 
2181 	/* Used for pass thru MFI frame (DCMD) */
2182 	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2183 
2184 	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2185 	    sizeof(MPI2_SGE_IO_UNION)) / 16;
2186 
2187 	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2188 
2189 	for (i = 0; i < count; i++)
2190 		sc->last_reply_idx[i] = 0;
2191 
2192 	ret = mrsas_alloc_mem(sc);
2193 	if (ret != SUCCESS)
2194 		return (ret);
2195 
2196 	ret = mrsas_alloc_mpt_cmds(sc);
2197 	if (ret != SUCCESS)
2198 		return (ret);
2199 
2200 	ret = mrsas_ioc_init(sc);
2201 	if (ret != SUCCESS)
2202 		return (ret);
2203 
2204 	return (0);
2205 }
2206 
2207 /*
2208  * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2209  * input:				Adapter soft state
2210  *
2211  * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2212  */
2213 int
2214 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2215 {
2216 	int ioc_init_size;
2217 
2218 	/* Allocate IOC INIT command */
2219 	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2220 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2221 	    1, 0,
2222 	    BUS_SPACE_MAXADDR_32BIT,
2223 	    BUS_SPACE_MAXADDR,
2224 	    NULL, NULL,
2225 	    ioc_init_size,
2226 	    1,
2227 	    ioc_init_size,
2228 	    BUS_DMA_ALLOCNOW,
2229 	    NULL, NULL,
2230 	    &sc->ioc_init_tag)) {
2231 		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2232 		return (ENOMEM);
2233 	}
2234 	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2235 	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2236 		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2237 		return (ENOMEM);
2238 	}
2239 	bzero(sc->ioc_init_mem, ioc_init_size);
2240 	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2241 	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2242 	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2243 		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2244 		return (ENOMEM);
2245 	}
2246 	return (0);
2247 }
2248 
2249 /*
 * mrsas_free_ioc_cmd:	Frees memory allocated for the IOC Init command
2251  * input:				Adapter soft state
2252  *
2253  * Deallocates memory of the IOC Init cmd.
2254  */
2255 void
2256 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2257 {
2258 	if (sc->ioc_init_phys_mem)
2259 		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2260 	if (sc->ioc_init_mem != NULL)
2261 		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2262 	if (sc->ioc_init_tag != NULL)
2263 		bus_dma_tag_destroy(sc->ioc_init_tag);
2264 }
2265 
2266 /*
2267  * mrsas_ioc_init:	Sends IOC Init command to FW
2268  * input:			Adapter soft state
2269  *
2270  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2271  */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}
	/*
	 * Layout: the MFI init frame occupies the first 1024 bytes of the
	 * allocation and points (via queue_info_new_phys_addr_lo below) at
	 * the MPI2 IOC INIT request that follows it.
	 */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	/* Frame size and queue depth as computed in mrsas_init_adapter(). */
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	/* 0xFF is the "no response yet" sentinel polled for below. */
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY)) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Hand the driver version string to FW, if the buffer exists. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	/* Advertise driver capabilities to the firmware. */
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	/* Post the init frame as an MFA-type request descriptor. */
	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	/* status 0 = success; 0xFF = timeout; anything else = FW error. */
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* The init frame is single-use; release it regardless of outcome. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2357 
2358 /*
2359  * mrsas_alloc_mpt_cmds:	Allocates the command packets
2360  * input:					Adapter instance soft state
2361  *
2362  * This function allocates the internal commands for IOs. Each command that is
2363  * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2364  * array is allocated with mrsas_mpt_cmd context.  The free commands are
2365  * maintained in a linked list (cmd pool). SMID value range is from 1 to
2366  * max_fw_cmds.
2367  */
int
mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd, count;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_cmd = sc->max_fw_cmds;

	/* Request descriptor array, one slot per FW command. */
	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return (ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			/* Unwind the partially built command array. */
			for (j = 0; j < i; j++)
				free(sc->mpt_cmd_list[j], M_MRSAS);
			free(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	/* IO frames start one frame past the base (frame 0 is reserved). */
	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t *)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	/* Carve each command's IO frame, chain frame and sense buffer. */
	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mpt_cmd_list[i];
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = 1024 * i;
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		/* SMID range is 1..max_fw_cmds, hence the +1. */
		cmd->index = i + 1;
		cmd->ccb_ptr = NULL;
		callout_init(&cmd->cm_callout, 0);
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		/*
		 * NOTE(review): on bus_dmamap_create() failure the pool
		 * built so far is not released here -- presumably the
		 * caller's failure path frees it; verify against attach
		 * teardown.
		 */
		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
			return (FAIL);
		}
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/* Initialize reply descriptor array to 0xFFFFFFFF */
	reply_desc = sc->reply_desc_mem;
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return (0);
}
2449 
2450 /*
2451  * mrsas_fire_cmd:	Sends command to FW
2452  * input:			Adapter softstate
2453  * 					request descriptor address low
2454  * 					request descriptor address high
2455  *
2456  * This functions fires the command to Firmware by writing to the
2457  * inbound_low_queue_port and inbound_high_queue_port.
2458  */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * Post the 64-bit request descriptor as two 32-bit register writes.
	 * The PCI lock keeps the low/high pair from interleaving with a
	 * descriptor being posted by another thread.
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}
2470 
2471 /*
 * mrsas_transition_to_ready:	Move FW to Ready state
 * input:						Adapter instance soft state
2474  *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake states,
 * the driver must take steps to bring it to the ready state. Otherwise, it has to
2478  * wait for the ready state.
2479  */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* FW state lives in the low bits of the outbound scratch pad. */
	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	/* Drive the FW state machine until it reports READY. */
	while (fw_state != MFI_STATE_READY) {
		/* Full register value, used below to detect any change. */
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* During OCR keep waiting; otherwise give up. */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Wait for the doorbell busy bit (bit 0) to clear. */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
2585 
2586 /*
2587  * mrsas_get_mfi_cmd:	Get a cmd from free command pool
2588  * input:				Adapter soft state
2589  *
2590  * This function removes an MFI command from the command list.
2591  */
2592 struct mrsas_mfi_cmd *
2593 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2594 {
2595 	struct mrsas_mfi_cmd *cmd = NULL;
2596 
2597 	mtx_lock(&sc->mfi_cmd_pool_lock);
2598 	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2599 		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2600 		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2601 	}
2602 	mtx_unlock(&sc->mfi_cmd_pool_lock);
2603 
2604 	return cmd;
2605 }
2606 
2607 /*
2608  * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
2609  * input:				Adapter Context.
2610  *
2611  * This function will check FW status register and flag do_timeout_reset flag.
2612  * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2613  * trigger reset.
2614  */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/*
		 * Sleep for the fault-poll interval
		 * (mrsas_fw_fault_check_delay seconds) or until woken via
		 * ocr_chan; the sim lock is dropped while asleep.
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to shutdown from %s\n", __func__);
			break;
		}
		/* Poll the FW state for a FAULT condition. */
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
			device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
			    sc->do_timedout_reset ? "IO Timeout" :
			    "FW fault detected");
			/* Flag the reset under the ioctl spin lock. */
			mtx_lock_spin(&sc->ioctl_lock);
			sc->reset_in_progress = 1;
			sc->reset_count++;
			mtx_unlock_spin(&sc->ioctl_lock);
			/* Freeze CAM traffic for the duration of the reset. */
			mrsas_xpt_freeze(sc);
			mrsas_reset_ctrl(sc);
			mrsas_xpt_release(sc);
			sc->reset_in_progress = 0;
			sc->do_timedout_reset = 0;
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
2658 
2659 /*
2660  * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
2661  * input:					Adapter Context.
2662  *
 * This function will clear the reply descriptors so that, after OCR, the
 * driver and FW will not act on stale history.
2665  */
2666 void
2667 mrsas_reset_reply_desc(struct mrsas_softc *sc)
2668 {
2669 	int i, count;
2670 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2671 
2672 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2673 	for (i = 0; i < count; i++)
2674 		sc->last_reply_idx[i] = 0;
2675 
2676 	reply_desc = sc->reply_desc_mem;
2677 	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2678 		reply_desc->Words = MRSAS_ULONG_MAX;
2679 	}
2680 }
2681 
2682 /*
2683  * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
2684  * input:				Adapter Context.
2685  *
2686  * This function will run from thread context so that it can sleep. 1. Do not
2687  * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2688  * to complete for 180 seconds. 3. If #2 does not find any outstanding
2689  * command Controller is in working state, so skip OCR. Otherwise, do
2690  * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2691  * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
 * OCR, Re-fire Management command and move Controller to Operation state.
2693  */
2694 int
2695 mrsas_reset_ctrl(struct mrsas_softc *sc)
2696 {
2697 	int retval = SUCCESS, i, j, retry = 0;
2698 	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2699 	union ccb *ccb;
2700 	struct mrsas_mfi_cmd *mfi_cmd;
2701 	struct mrsas_mpt_cmd *mpt_cmd;
2702 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2703 
2704 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2705 		device_printf(sc->mrsas_dev,
2706 		    "mrsas: Hardware critical error, returning FAIL.\n");
2707 		return FAIL;
2708 	}
2709 	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2710 	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2711 	mrsas_disable_intr(sc);
2712 	DELAY(1000 * 1000);
2713 
2714 	/* First try waiting for commands to complete */
2715 	if (mrsas_wait_for_outstanding(sc)) {
2716 		mrsas_dprint(sc, MRSAS_OCR,
2717 		    "resetting adapter from %s.\n",
2718 		    __func__);
2719 		/* Now return commands back to the CAM layer */
2720 		mtx_unlock(&sc->sim_lock);
2721 		for (i = 0; i < sc->max_fw_cmds; i++) {
2722 			mpt_cmd = sc->mpt_cmd_list[i];
2723 			if (mpt_cmd->ccb_ptr) {
2724 				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2725 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2726 				mrsas_cmd_done(sc, mpt_cmd);
2727 				mrsas_atomic_dec(&sc->fw_outstanding);
2728 			}
2729 		}
2730 		mtx_lock(&sc->sim_lock);
2731 
2732 		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2733 		    outbound_scratch_pad));
2734 		abs_state = status_reg & MFI_STATE_MASK;
2735 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
2736 		if (sc->disableOnlineCtrlReset ||
2737 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2738 			/* Reset not supported, kill adapter */
2739 			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
2740 			mrsas_kill_hba(sc);
2741 			retval = FAIL;
2742 			goto out;
2743 		}
2744 		/* Now try to reset the chip */
2745 		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
2746 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2747 			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
2748 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2749 			    MPI2_WRSEQ_1ST_KEY_VALUE);
2750 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2751 			    MPI2_WRSEQ_2ND_KEY_VALUE);
2752 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2753 			    MPI2_WRSEQ_3RD_KEY_VALUE);
2754 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2755 			    MPI2_WRSEQ_4TH_KEY_VALUE);
2756 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2757 			    MPI2_WRSEQ_5TH_KEY_VALUE);
2758 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2759 			    MPI2_WRSEQ_6TH_KEY_VALUE);
2760 
2761 			/* Check that the diag write enable (DRWE) bit is on */
2762 			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2763 			    fusion_host_diag));
2764 			retry = 0;
2765 			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2766 				DELAY(100 * 1000);
2767 				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2768 				    fusion_host_diag));
2769 				if (retry++ == 100) {
2770 					mrsas_dprint(sc, MRSAS_OCR,
2771 					    "Host diag unlock failed!\n");
2772 					break;
2773 				}
2774 			}
2775 			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2776 				continue;
2777 
2778 			/* Send chip reset command */
2779 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2780 			    host_diag | HOST_DIAG_RESET_ADAPTER);
2781 			DELAY(3000 * 1000);
2782 
2783 			/* Make sure reset adapter bit is cleared */
2784 			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2785 			    fusion_host_diag));
2786 			retry = 0;
2787 			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2788 				DELAY(100 * 1000);
2789 				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2790 				    fusion_host_diag));
2791 				if (retry++ == 1000) {
2792 					mrsas_dprint(sc, MRSAS_OCR,
2793 					    "Diag reset adapter never cleared!\n");
2794 					break;
2795 				}
2796 			}
2797 			if (host_diag & HOST_DIAG_RESET_ADAPTER)
2798 				continue;
2799 
2800 			abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2801 			    outbound_scratch_pad)) & MFI_STATE_MASK;
2802 			retry = 0;
2803 
2804 			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2805 				DELAY(100 * 1000);
2806 				abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2807 				    outbound_scratch_pad)) & MFI_STATE_MASK;
2808 			}
2809 			if (abs_state <= MFI_STATE_FW_INIT) {
2810 				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2811 				    " state = 0x%x\n", abs_state);
2812 				continue;
2813 			}
2814 			/* Wait for FW to become ready */
2815 			if (mrsas_transition_to_ready(sc, 1)) {
2816 				mrsas_dprint(sc, MRSAS_OCR,
2817 				    "mrsas: Failed to transition controller to ready.\n");
2818 				continue;
2819 			}
2820 			mrsas_reset_reply_desc(sc);
2821 			if (mrsas_ioc_init(sc)) {
2822 				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2823 				continue;
2824 			}
2825 			/* Re-fire management commands */
2826 			for (j = 0; j < sc->max_fw_cmds; j++) {
2827 				mpt_cmd = sc->mpt_cmd_list[j];
2828 				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2829 					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
2830 					if (mfi_cmd->frame->dcmd.opcode ==
2831 					    MR_DCMD_LD_MAP_GET_INFO) {
2832 						mrsas_release_mfi_cmd(mfi_cmd);
2833 						mrsas_release_mpt_cmd(mpt_cmd);
2834 					} else {
2835 						req_desc = mrsas_get_request_desc(sc,
2836 						    mfi_cmd->cmd_id.context.smid - 1);
2837 						mrsas_dprint(sc, MRSAS_OCR,
2838 						    "Re-fire command DCMD opcode 0x%x index %d\n ",
2839 						    mfi_cmd->frame->dcmd.opcode, j);
2840 						if (!req_desc)
2841 							device_printf(sc->mrsas_dev,
2842 							    "Cannot build MPT cmd.\n");
2843 						else
2844 							mrsas_fire_cmd(sc, req_desc->addr.u.low,
2845 							    req_desc->addr.u.high);
2846 					}
2847 				}
2848 			}
2849 
2850 			/* Reset load balance info */
2851 			memset(sc->load_balance_info, 0,
2852 			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
2853 
2854 			if (mrsas_get_ctrl_info(sc)) {
2855 				mrsas_kill_hba(sc);
2856 				retval = FAIL;
2857 				goto out;
2858 			}
2859 			if (!mrsas_get_map_info(sc))
2860 				mrsas_sync_map_info(sc);
2861 
2862 			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2863 			mrsas_enable_intr(sc);
2864 			sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2865 
2866 			/* Adapter reset completed successfully */
2867 			device_printf(sc->mrsas_dev, "Reset successful\n");
2868 			retval = SUCCESS;
2869 			goto out;
2870 		}
2871 		/* Reset failed, kill the adapter */
2872 		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
2873 		mrsas_kill_hba(sc);
2874 		retval = FAIL;
2875 	} else {
2876 		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2877 		mrsas_enable_intr(sc);
2878 		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2879 	}
2880 out:
2881 	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2882 	mrsas_dprint(sc, MRSAS_OCR,
2883 	    "Reset Exit with %d.\n", retval);
2884 	return retval;
2885 }
2886 
2887 /*
2888  * mrsas_kill_hba:	Kill HBA when OCR is not supported
2889  * input:			Adapter Context.
2890  *
2891  * This function will kill HBA when OCR is not supported.
2892  */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/*
	 * Mark the adapter dead first so that other code paths checking
	 * adprecovery stop issuing new commands.
	 */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* Give in-flight activity a moment to settle before stopping FW. */
	pause("mrsas_kill_hba", 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	/* Tell firmware to stop the adapter. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail back any IOCTLs that can never complete now. */
	mrsas_complete_outstanding_ioctls(sc);
}
2905 
2906 /**
2907  * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
2908  * input:			Controller softc
2909  *
2910  * Returns void
2911  */
2912 void
2913 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
2914 {
2915 	int i;
2916 	struct mrsas_mpt_cmd *cmd_mpt;
2917 	struct mrsas_mfi_cmd *cmd_mfi;
2918 	u_int32_t count, MSIxIndex;
2919 
2920 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2921 	for (i = 0; i < sc->max_fw_cmds; i++) {
2922 		cmd_mpt = sc->mpt_cmd_list[i];
2923 
2924 		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2925 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
2926 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
2927 				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
2928 					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
2929 					    cmd_mpt->io_request->RaidContext.status);
2930 			}
2931 		}
2932 	}
2933 }
2934 
2935 /*
2936  * mrsas_wait_for_outstanding:	Wait for outstanding commands
2937  * input:						Adapter Context.
2938  *
2939  * This function will wait for 180 seconds for outstanding commands to be
2940  * completed.
2941  */
2942 int
2943 mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2944 {
2945 	int i, outstanding, retval = 0;
2946 	u_int32_t fw_state, count, MSIxIndex;
2947 
2948 
2949 	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2950 		if (sc->remove_in_progress) {
2951 			mrsas_dprint(sc, MRSAS_OCR,
2952 			    "Driver remove or shutdown called.\n");
2953 			retval = 1;
2954 			goto out;
2955 		}
2956 		/* Check if firmware is in fault state */
2957 		fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2958 		    outbound_scratch_pad)) & MFI_STATE_MASK;
2959 		if (fw_state == MFI_STATE_FAULT) {
2960 			mrsas_dprint(sc, MRSAS_OCR,
2961 			    "Found FW in FAULT state, will reset adapter.\n");
2962 			retval = 1;
2963 			goto out;
2964 		}
2965 		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
2966 		if (!outstanding)
2967 			goto out;
2968 
2969 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2970 			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2971 			    "commands to complete\n", i, outstanding);
2972 			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2973 			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
2974 				mrsas_complete_cmd(sc, MSIxIndex);
2975 		}
2976 		DELAY(1000 * 1000);
2977 	}
2978 
2979 	if (mrsas_atomic_read(&sc->fw_outstanding)) {
2980 		mrsas_dprint(sc, MRSAS_OCR,
2981 		    " pending commands remain after waiting,"
2982 		    " will reset adapter.\n");
2983 		retval = 1;
2984 	}
2985 out:
2986 	return retval;
2987 }
2988 
2989 /*
2990  * mrsas_release_mfi_cmd:	Return a cmd to free command pool
2991  * input:					Command packet for return to free cmd pool
2992  *
2993  * This function returns the MFI command to the command list.
2994  */
2995 void
2996 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2997 {
2998 	struct mrsas_softc *sc = cmd->sc;
2999 
3000 	mtx_lock(&sc->mfi_cmd_pool_lock);
3001 	cmd->ccb_ptr = NULL;
3002 	cmd->cmd_id.frame_count = 0;
3003 	TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
3004 	mtx_unlock(&sc->mfi_cmd_pool_lock);
3005 
3006 	return;
3007 }
3008 
3009 /*
3010  * mrsas_get_controller_info:	Returns FW's controller structure
3011  * input:						Adapter soft state
3012  * 								Controller information structure
3013  *
3014  * Issues an internal command (DCMD) to get the FW's controller structure. This
3015  * information is mainly used to find out the maximum IO transfer per command
3016  * supported by the FW.
3017  */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA-able buffer the firmware writes the controller info into. */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* 0xFF = pending; FW overwrites on completion */
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;	/* data flows device -> host */
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

	/* Poll for completion; copy the result out of the DMA buffer. */
	if (!mrsas_issue_polled(sc, cmd))
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
	else
		retcode = 1;

	/*
	 * NOTE(review): this runs even when the DCMD failed, recomputing
	 * VD/map-size limits from whatever sc->ctrl_info currently holds —
	 * confirm that is intended for the failure path.
	 */
	mrsas_update_ext_vd_details(sc);

	mrsas_free_ctlr_info_cmd(sc);
	mrsas_release_mfi_cmd(cmd);
	return (retcode);
}
3062 
3063 /*
3064  * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3065  * input:
3066  *	sc - Controller's softc
3067 */
3068 static void
3069 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3070 {
3071 	sc->max256vdSupport =
3072 	sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3073 	/* Below is additional check to address future FW enhancement */
3074 	if (sc->ctrl_info->max_lds > 64)
3075 		sc->max256vdSupport = 1;
3076 
3077 	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3078 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3079 	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3080 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3081 	if (sc->max256vdSupport) {
3082 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3083 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3084 	} else {
3085 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3086 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3087 	}
3088 
3089 	sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3090 	    (sizeof(MR_LD_SPAN_MAP) *
3091 	    (sc->fw_supported_vd_count - 1));
3092 	sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3093 	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3094 	    (sizeof(MR_LD_SPAN_MAP) *
3095 	    (sc->drv_supported_vd_count - 1));
3096 
3097 	sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3098 
3099 	if (sc->max256vdSupport)
3100 		sc->current_map_sz = sc->new_map_sz;
3101 	else
3102 		sc->current_map_sz = sc->old_map_sz;
3103 }
3104 
3105 /*
3106  * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3107  * input:						Adapter soft state
3108  *
3109  * Allocates DMAable memory for the controller info internal command.
3110  */
3111 int
3112 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3113 {
3114 	int ctlr_info_size;
3115 
3116 	/* Allocate get controller info command */
3117 	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3118 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3119 	    1, 0,
3120 	    BUS_SPACE_MAXADDR_32BIT,
3121 	    BUS_SPACE_MAXADDR,
3122 	    NULL, NULL,
3123 	    ctlr_info_size,
3124 	    1,
3125 	    ctlr_info_size,
3126 	    BUS_DMA_ALLOCNOW,
3127 	    NULL, NULL,
3128 	    &sc->ctlr_info_tag)) {
3129 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3130 		return (ENOMEM);
3131 	}
3132 	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3133 	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3134 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3135 		return (ENOMEM);
3136 	}
3137 	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3138 	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3139 	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3140 		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3141 		return (ENOMEM);
3142 	}
3143 	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3144 	return (0);
3145 }
3146 
3147 /*
3148  * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3149  * input:						Adapter soft state
3150  *
3151  * Deallocates memory of the get controller info cmd.
3152  */
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
	/* Tear down in reverse order of setup: unload, free, destroy. */
	if (sc->ctlr_info_phys_addr)
		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_mem != NULL)
		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_tag != NULL)
		bus_dma_tag_destroy(sc->ctlr_info_tag);
}
3163 
3164 /*
3165  * mrsas_issue_polled:	Issues a polling command
3166  * inputs:				Adapter soft state
3167  * 						Command packet to be issued
3168  *
3169  * This function is for posting of internal commands to Firmware.  MFI requires
3170  * the cmd_status to be set to 0xFF before posting.  The maximun wait time of
3171  * the poll response timer is 180 seconds.
3172  */
3173 int
3174 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3175 {
3176 	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3177 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3178 	int i, retcode = 0;
3179 
3180 	frame_hdr->cmd_status = 0xFF;
3181 	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3182 
3183 	/* Issue the frame using inbound queue port */
3184 	if (mrsas_issue_dcmd(sc, cmd)) {
3185 		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3186 		return (1);
3187 	}
3188 	/*
3189 	 * Poll response timer to wait for Firmware response.  While this
3190 	 * timer with the DELAY call could block CPU, the time interval for
3191 	 * this is only 1 millisecond.
3192 	 */
3193 	if (frame_hdr->cmd_status == 0xFF) {
3194 		for (i = 0; i < (max_wait * 1000); i++) {
3195 			if (frame_hdr->cmd_status == 0xFF)
3196 				DELAY(1000);
3197 			else
3198 				break;
3199 		}
3200 	}
3201 	if (frame_hdr->cmd_status != 0) {
3202 		if (frame_hdr->cmd_status == 0xFF)
3203 			device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
3204 		else
3205 			device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
3206 		retcode = 1;
3207 	}
3208 	return (retcode);
3209 }
3210 
3211 /*
3212  * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3213  * input:				Adapter soft state mfi cmd pointer
3214  *
 * This function is called by mrsas_issue_blocked_cmd() and
 * mrsas_issue_polled(), to build the MPT command and then fire the command
3217  * to Firmware.
3218  */
3219 int
3220 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3221 {
3222 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3223 
3224 	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3225 	if (!req_desc) {
3226 		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3227 		return (1);
3228 	}
3229 	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3230 
3231 	return (0);
3232 }
3233 
3234 /*
3235  * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3236  * input:				Adapter soft state mfi cmd to build
3237  *
3238  * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3239  * command and prepares the MPT command to send to Firmware.
3240  */
3241 MRSAS_REQUEST_DESCRIPTOR_UNION *
3242 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3243 {
3244 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3245 	u_int16_t index;
3246 
3247 	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3248 		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3249 		return NULL;
3250 	}
3251 	index = cmd->cmd_id.context.smid;
3252 
3253 	req_desc = mrsas_get_request_desc(sc, index - 1);
3254 	if (!req_desc)
3255 		return NULL;
3256 
3257 	req_desc->addr.Words = 0;
3258 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3259 
3260 	req_desc->SCSIIO.SMID = index;
3261 
3262 	return (req_desc);
3263 }
3264 
3265 /*
3266  * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3267  * input:						Adapter soft state mfi cmd pointer
3268  *
3269  * The MPT command and the io_request are setup as a passthru command. The SGE
3270  * chain address is set to frame_phys_addr of the MFI command.
3271  */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);	/* no free MPT command available */

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link the MPT command back to its MFI originator. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
		/* Invader/Fury: clear the flags of the last SGE in the main frame. */
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;	/* in 32-bit words */
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* The chain element points at the original MFI frame in host memory. */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;

	return (0);
}
3321 
3322 /*
3323  * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3324  * input:					Adapter soft state Command to be issued
3325  *
3326  * This function waits on an event for the command to be returned from the ISR.
3327  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3328  * internal and ioctl commands.
3329  */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = 0;

	/* ECONNREFUSED is the "not yet completed" sentinel (see mrsas_wakeup). */
	cmd->cmd_status = ECONNREFUSED;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): the stored value is the address of the local `cmd`
	 * parameter, but the actual sleep channel used below (and by
	 * mrsas_wakeup()) is &sc->chan itself, so the stored pointer is
	 * never dereferenced — confirm this assignment is intentional.
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in 1-second slices until the ISR completes the command. */
	while (1) {
		if (cmd->cmd_status == ECONNREFUSED) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;
		total_time++;
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev,
			    "Internal command timed out after %d seconds.\n", max_wait);
			retcode = 1;
			break;
		}
	}
	return (retcode);
}
3362 
3363 /*
3364  * mrsas_complete_mptmfi_passthru:	Completes a command
3365  * input:	@sc:					Adapter soft state
3366  * 			@cmd:					Command to be completed
3367  * 			@status:				cmd completion status
3368  *
3369  * This function is called from mrsas_complete_cmd() after an interrupt is
3370  * received from Firmware, and io_request->Function is
3371  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3372  */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	/* Completion status written into the MFI frame by firmware. */
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH -- non-IOCTL SCSI IO handled like a DCMD below. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			/* Disable fast path until the new map is validated. */
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* FW aborted the pending map-sync; just recycle the cmd. */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-arm the async map-sync command for the next update. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
3448 
3449 /*
3450  * mrsas_wakeup:	Completes an internal command
3451  * input:			Adapter soft state
3452  * 					Command to be completed
3453  *
3454  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3455  * timer is started.  This function is called from
3456  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3457  * from the command wait.
3458  */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* ECONNREFUSED is the "still pending" sentinel; map it to success. */
	if (cmd->cmd_status == ECONNREFUSED)
		cmd->cmd_status = 0;

	/*
	 * NOTE(review): stores the address of the local `cmd` parameter,
	 * but the wakeup channel is &sc->chan itself, matching the tsleep
	 * in mrsas_issue_blocked_cmd(); the stored value is never read.
	 */
	sc->chan = (void *)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}
3471 
3472 /*
3473  * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
3474  * Adapter soft state Shutdown/Hibernate
3475  *
3476  * This function issues a DCMD internal command to Firmware to initiate shutdown
3477  * of the controller.
3478  */
3479 static void
3480 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3481 {
3482 	struct mrsas_mfi_cmd *cmd;
3483 	struct mrsas_dcmd_frame *dcmd;
3484 
3485 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3486 		return;
3487 
3488 	cmd = mrsas_get_mfi_cmd(sc);
3489 	if (!cmd) {
3490 		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
3491 		return;
3492 	}
3493 	if (sc->aen_cmd)
3494 		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3495 
3496 	if (sc->map_update_cmd)
3497 		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3498 
3499 	dcmd = &cmd->frame->dcmd;
3500 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3501 
3502 	dcmd->cmd = MFI_CMD_DCMD;
3503 	dcmd->cmd_status = 0x0;
3504 	dcmd->sge_count = 0;
3505 	dcmd->flags = MFI_FRAME_DIR_NONE;
3506 	dcmd->timeout = 0;
3507 	dcmd->pad_0 = 0;
3508 	dcmd->data_xfer_len = 0;
3509 	dcmd->opcode = opcode;
3510 
3511 	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3512 
3513 	mrsas_issue_blocked_cmd(sc, cmd);
3514 	mrsas_release_mfi_cmd(cmd);
3515 
3516 	return;
3517 }
3518 
3519 /*
3520  * mrsas_flush_cache:         Requests FW to flush all its caches input:
3521  * Adapter soft state
3522  *
 * This function issues a DCMD internal command to Firmware to initiate
3524  * flushing of all caches.
3525  */
3526 static void
3527 mrsas_flush_cache(struct mrsas_softc *sc)
3528 {
3529 	struct mrsas_mfi_cmd *cmd;
3530 	struct mrsas_dcmd_frame *dcmd;
3531 
3532 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3533 		return;
3534 
3535 	cmd = mrsas_get_mfi_cmd(sc);
3536 	if (!cmd) {
3537 		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3538 		return;
3539 	}
3540 	dcmd = &cmd->frame->dcmd;
3541 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3542 
3543 	dcmd->cmd = MFI_CMD_DCMD;
3544 	dcmd->cmd_status = 0x0;
3545 	dcmd->sge_count = 0;
3546 	dcmd->flags = MFI_FRAME_DIR_NONE;
3547 	dcmd->timeout = 0;
3548 	dcmd->pad_0 = 0;
3549 	dcmd->data_xfer_len = 0;
3550 	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3551 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3552 
3553 	mrsas_issue_blocked_cmd(sc, cmd);
3554 	mrsas_release_mfi_cmd(cmd);
3555 
3556 	return;
3557 }
3558 
3559 /*
3560  * mrsas_get_map_info:        Load and validate RAID map input:
3561  * Adapter instance soft state
3562  *
3563  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3564  * and validate RAID map.  It returns 0 if successful, 1 other- wise.
3565  */
3566 static int
3567 mrsas_get_map_info(struct mrsas_softc *sc)
3568 {
3569 	uint8_t retcode = 0;
3570 
3571 	sc->fast_path_io = 0;
3572 	if (!mrsas_get_ld_map_info(sc)) {
3573 		retcode = MR_ValidateMapInfo(sc);
3574 		if (retcode == 0) {
3575 			sc->fast_path_io = 1;
3576 			return 0;
3577 		}
3578 	}
3579 	return 1;
3580 }
3581 
3582 /*
3583  * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
3584  * Adapter instance soft state
3585  *
 * Issues an internal command (DCMD) to get the FW's LD map
 * structure.
3588  */
3589 static int
3590 mrsas_get_ld_map_info(struct mrsas_softc *sc)
3591 {
3592 	int retcode = 0;
3593 	struct mrsas_mfi_cmd *cmd;
3594 	struct mrsas_dcmd_frame *dcmd;
3595 	void *map;
3596 	bus_addr_t map_phys_addr = 0;
3597 
3598 	cmd = mrsas_get_mfi_cmd(sc);
3599 	if (!cmd) {
3600 		device_printf(sc->mrsas_dev,
3601 		    "Cannot alloc for ld map info cmd.\n");
3602 		return 1;
3603 	}
3604 	dcmd = &cmd->frame->dcmd;
3605 
3606 	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3607 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3608 	if (!map) {
3609 		device_printf(sc->mrsas_dev,
3610 		    "Failed to alloc mem for ld map info.\n");
3611 		mrsas_release_mfi_cmd(cmd);
3612 		return (ENOMEM);
3613 	}
3614 	memset(map, 0, sizeof(sc->max_map_sz));
3615 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3616 
3617 	dcmd->cmd = MFI_CMD_DCMD;
3618 	dcmd->cmd_status = 0xFF;
3619 	dcmd->sge_count = 1;
3620 	dcmd->flags = MFI_FRAME_DIR_READ;
3621 	dcmd->timeout = 0;
3622 	dcmd->pad_0 = 0;
3623 	dcmd->data_xfer_len = sc->current_map_sz;
3624 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3625 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3626 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
3627 
3628 	if (!mrsas_issue_polled(sc, cmd))
3629 		retcode = 0;
3630 	else {
3631 		device_printf(sc->mrsas_dev,
3632 		    "Fail to send get LD map info cmd.\n");
3633 		retcode = 1;
3634 	}
3635 	mrsas_release_mfi_cmd(cmd);
3636 
3637 	return (retcode);
3638 }
3639 
3640 /*
3641  * mrsas_sync_map_info:        Get FW's ld_map structure input:
3642  * Adapter instance soft state
3643  *
3644  * Issues an internal command (DCMD) to get the FW's controller PD list
3645  * structure.
3646  */
3647 static int
3648 mrsas_sync_map_info(struct mrsas_softc *sc)
3649 {
3650 	int retcode = 0, i;
3651 	struct mrsas_mfi_cmd *cmd;
3652 	struct mrsas_dcmd_frame *dcmd;
3653 	uint32_t size_sync_info, num_lds;
3654 	MR_LD_TARGET_SYNC *target_map = NULL;
3655 	MR_DRV_RAID_MAP_ALL *map;
3656 	MR_LD_RAID *raid;
3657 	MR_LD_TARGET_SYNC *ld_sync;
3658 	bus_addr_t map_phys_addr = 0;
3659 
3660 	cmd = mrsas_get_mfi_cmd(sc);
3661 	if (!cmd) {
3662 		device_printf(sc->mrsas_dev,
3663 		    "Cannot alloc for sync map info cmd\n");
3664 		return 1;
3665 	}
3666 	map = sc->ld_drv_map[sc->map_id & 1];
3667 	num_lds = map->raidMap.ldCount;
3668 
3669 	dcmd = &cmd->frame->dcmd;
3670 	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3671 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3672 
3673 	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
3674 	memset(target_map, 0, sc->max_map_sz);
3675 
3676 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3677 
3678 	ld_sync = (MR_LD_TARGET_SYNC *) target_map;
3679 
3680 	for (i = 0; i < num_lds; i++, ld_sync++) {
3681 		raid = MR_LdRaidGet(i, map);
3682 		ld_sync->targetId = MR_GetLDTgtId(i, map);
3683 		ld_sync->seqNum = raid->seqNum;
3684 	}
3685 
3686 	dcmd->cmd = MFI_CMD_DCMD;
3687 	dcmd->cmd_status = 0xFF;
3688 	dcmd->sge_count = 1;
3689 	dcmd->flags = MFI_FRAME_DIR_WRITE;
3690 	dcmd->timeout = 0;
3691 	dcmd->pad_0 = 0;
3692 	dcmd->data_xfer_len = sc->current_map_sz;
3693 	dcmd->mbox.b[0] = num_lds;
3694 	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3695 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3696 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3697 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
3698 
3699 	sc->map_update_cmd = cmd;
3700 	if (mrsas_issue_dcmd(sc, cmd)) {
3701 		device_printf(sc->mrsas_dev,
3702 		    "Fail to send sync map info command.\n");
3703 		return (1);
3704 	}
3705 	return (retcode);
3706 }
3707 
3708 /*
3709  * mrsas_get_pd_list:           Returns FW's PD list structure input:
3710  * Adapter soft state
3711  *
3712  * Issues an internal command (DCMD) to get the FW's controller PD list
3713  * structure.  This information is mainly used to find out about system
3714  * supported by Firmware.
3715  */
3716 static int
3717 mrsas_get_pd_list(struct mrsas_softc *sc)
3718 {
3719 	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
3720 	struct mrsas_mfi_cmd *cmd;
3721 	struct mrsas_dcmd_frame *dcmd;
3722 	struct MR_PD_LIST *pd_list_mem;
3723 	struct MR_PD_ADDRESS *pd_addr;
3724 	bus_addr_t pd_list_phys_addr = 0;
3725 	struct mrsas_tmp_dcmd *tcmd;
3726 
3727 	cmd = mrsas_get_mfi_cmd(sc);
3728 	if (!cmd) {
3729 		device_printf(sc->mrsas_dev,
3730 		    "Cannot alloc for get PD list cmd\n");
3731 		return 1;
3732 	}
3733 	dcmd = &cmd->frame->dcmd;
3734 
3735 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3736 	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3737 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3738 		device_printf(sc->mrsas_dev,
3739 		    "Cannot alloc dmamap for get PD list cmd\n");
3740 		mrsas_release_mfi_cmd(cmd);
3741 		return (ENOMEM);
3742 	} else {
3743 		pd_list_mem = tcmd->tmp_dcmd_mem;
3744 		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3745 	}
3746 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3747 
3748 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3749 	dcmd->mbox.b[1] = 0;
3750 	dcmd->cmd = MFI_CMD_DCMD;
3751 	dcmd->cmd_status = 0xFF;
3752 	dcmd->sge_count = 1;
3753 	dcmd->flags = MFI_FRAME_DIR_READ;
3754 	dcmd->timeout = 0;
3755 	dcmd->pad_0 = 0;
3756 	dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3757 	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3758 	dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3759 	dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3760 
3761 	if (!mrsas_issue_polled(sc, cmd))
3762 		retcode = 0;
3763 	else
3764 		retcode = 1;
3765 
3766 	/* Get the instance PD list */
3767 	pd_count = MRSAS_MAX_PD;
3768 	pd_addr = pd_list_mem->addr;
3769 	if (retcode == 0 && pd_list_mem->count < pd_count) {
3770 		memset(sc->local_pd_list, 0,
3771 		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3772 		for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3773 			sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3774 			sc->local_pd_list[pd_addr->deviceId].driveType =
3775 			    pd_addr->scsiDevType;
3776 			sc->local_pd_list[pd_addr->deviceId].driveState =
3777 			    MR_PD_STATE_SYSTEM;
3778 			pd_addr++;
3779 		}
3780 	}
3781 	/*
3782 	 * Use mutext/spinlock if pd_list component size increase more than
3783 	 * 32 bit.
3784 	 */
3785 	memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3786 	mrsas_free_tmp_dcmd(tcmd);
3787 	mrsas_release_mfi_cmd(cmd);
3788 	free(tcmd, M_MRSAS);
3789 	return (retcode);
3790 }
3791 
/*
 * mrsas_get_ld_list:           Returns FW's LD list structure input:
 * Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller LD list
 * structure.  This information is mainly used to find out the logical
 * drives (LDs) supported by the FW.
 */
3800 static int
3801 mrsas_get_ld_list(struct mrsas_softc *sc)
3802 {
3803 	int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3804 	struct mrsas_mfi_cmd *cmd;
3805 	struct mrsas_dcmd_frame *dcmd;
3806 	struct MR_LD_LIST *ld_list_mem;
3807 	bus_addr_t ld_list_phys_addr = 0;
3808 	struct mrsas_tmp_dcmd *tcmd;
3809 
3810 	cmd = mrsas_get_mfi_cmd(sc);
3811 	if (!cmd) {
3812 		device_printf(sc->mrsas_dev,
3813 		    "Cannot alloc for get LD list cmd\n");
3814 		return 1;
3815 	}
3816 	dcmd = &cmd->frame->dcmd;
3817 
3818 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3819 	ld_list_size = sizeof(struct MR_LD_LIST);
3820 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3821 		device_printf(sc->mrsas_dev,
3822 		    "Cannot alloc dmamap for get LD list cmd\n");
3823 		mrsas_release_mfi_cmd(cmd);
3824 		return (ENOMEM);
3825 	} else {
3826 		ld_list_mem = tcmd->tmp_dcmd_mem;
3827 		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3828 	}
3829 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3830 
3831 	if (sc->max256vdSupport)
3832 		dcmd->mbox.b[0] = 1;
3833 
3834 	dcmd->cmd = MFI_CMD_DCMD;
3835 	dcmd->cmd_status = 0xFF;
3836 	dcmd->sge_count = 1;
3837 	dcmd->flags = MFI_FRAME_DIR_READ;
3838 	dcmd->timeout = 0;
3839 	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3840 	dcmd->opcode = MR_DCMD_LD_GET_LIST;
3841 	dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3842 	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3843 	dcmd->pad_0 = 0;
3844 
3845 	if (!mrsas_issue_polled(sc, cmd))
3846 		retcode = 0;
3847 	else
3848 		retcode = 1;
3849 
3850 #if VD_EXT_DEBUG
3851 	printf("Number of LDs %d\n", ld_list_mem->ldCount);
3852 #endif
3853 
3854 	/* Get the instance LD list */
3855 	if ((retcode == 0) &&
3856 	    (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) {
3857 		sc->CurLdCount = ld_list_mem->ldCount;
3858 		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
3859 		for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3860 			if (ld_list_mem->ldList[ld_index].state != 0) {
3861 				ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3862 				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3863 			}
3864 		}
3865 	}
3866 	mrsas_free_tmp_dcmd(tcmd);
3867 	mrsas_release_mfi_cmd(cmd);
3868 	free(tcmd, M_MRSAS);
3869 	return (retcode);
3870 }
3871 
3872 /*
3873  * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
3874  * Adapter soft state Temp command Size of alloction
3875  *
3876  * Allocates DMAable memory for a temporary internal command. The allocated
3877  * memory is initialized to all zeros upon successful loading of the dma
3878  * mapped memory.
3879  */
3880 int
3881 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
3882     struct mrsas_tmp_dcmd *tcmd, int size)
3883 {
3884 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3885 	    1, 0,
3886 	    BUS_SPACE_MAXADDR_32BIT,
3887 	    BUS_SPACE_MAXADDR,
3888 	    NULL, NULL,
3889 	    size,
3890 	    1,
3891 	    size,
3892 	    BUS_DMA_ALLOCNOW,
3893 	    NULL, NULL,
3894 	    &tcmd->tmp_dcmd_tag)) {
3895 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3896 		return (ENOMEM);
3897 	}
3898 	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3899 	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3900 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
3901 		return (ENOMEM);
3902 	}
3903 	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3904 	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3905 	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3906 		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
3907 		return (ENOMEM);
3908 	}
3909 	memset(tcmd->tmp_dcmd_mem, 0, size);
3910 	return (0);
3911 }
3912 
3913 /*
3914  * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
3915  * temporary dcmd pointer
3916  *
3917  * Deallocates memory of the temporary command for use in the construction of
3918  * the internal DCMD.
3919  */
3920 void
3921 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
3922 {
3923 	if (tmp->tmp_dcmd_phys_addr)
3924 		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3925 	if (tmp->tmp_dcmd_mem != NULL)
3926 		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3927 	if (tmp->tmp_dcmd_tag != NULL)
3928 		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3929 }
3930 
3931 /*
3932  * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
3933  * Adapter soft state Previously issued cmd to be aborted
3934  *
3935  * This function is used to abort previously issued commands, such as AEN and
3936  * RAID map sync map commands.  The abort command is sent as a DCMD internal
3937  * command and subsequently the driver will wait for a return status.  The
3938  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3939  */
3940 static int
3941 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3942     struct mrsas_mfi_cmd *cmd_to_abort)
3943 {
3944 	struct mrsas_mfi_cmd *cmd;
3945 	struct mrsas_abort_frame *abort_fr;
3946 	u_int8_t retcode = 0;
3947 	unsigned long total_time = 0;
3948 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3949 
3950 	cmd = mrsas_get_mfi_cmd(sc);
3951 	if (!cmd) {
3952 		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3953 		return (1);
3954 	}
3955 	abort_fr = &cmd->frame->abort;
3956 
3957 	/* Prepare and issue the abort frame */
3958 	abort_fr->cmd = MFI_CMD_ABORT;
3959 	abort_fr->cmd_status = 0xFF;
3960 	abort_fr->flags = 0;
3961 	abort_fr->abort_context = cmd_to_abort->index;
3962 	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3963 	abort_fr->abort_mfi_phys_addr_hi = 0;
3964 
3965 	cmd->sync_cmd = 1;
3966 	cmd->cmd_status = 0xFF;
3967 
3968 	if (mrsas_issue_dcmd(sc, cmd)) {
3969 		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3970 		return (1);
3971 	}
3972 	/* Wait for this cmd to complete */
3973 	sc->chan = (void *)&cmd;
3974 	while (1) {
3975 		if (cmd->cmd_status == 0xFF) {
3976 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3977 		} else
3978 			break;
3979 		total_time++;
3980 		if (total_time >= max_wait) {
3981 			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3982 			retcode = 1;
3983 			break;
3984 		}
3985 	}
3986 
3987 	cmd->sync_cmd = 0;
3988 	mrsas_release_mfi_cmd(cmd);
3989 	return (retcode);
3990 }
3991 
3992 /*
3993  * mrsas_complete_abort:      Completes aborting a command input:
3994  * Adapter soft state Cmd that was issued to abort another cmd
3995  *
3996  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
3997  * change after sending the command.  This function is called from
3998  * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
3999  */
4000 void
4001 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4002 {
4003 	if (cmd->sync_cmd) {
4004 		cmd->sync_cmd = 0;
4005 		cmd->cmd_status = 0;
4006 		sc->chan = (void *)&cmd;
4007 		wakeup_one((void *)&sc->chan);
4008 	}
4009 	return;
4010 }
4011 
4012 /*
4013  * mrsas_aen_handler:	AEN processing callback function from thread context
4014  * input:				Adapter soft state
4015  *
4016  * Asynchronous event handler
4017  */
4018 void
4019 mrsas_aen_handler(struct mrsas_softc *sc)
4020 {
4021 	union mrsas_evt_class_locale class_locale;
4022 	int doscan = 0;
4023 	u_int32_t seq_num;
4024 	int error;
4025 
4026 	if (!sc) {
4027 		device_printf(sc->mrsas_dev, "invalid instance!\n");
4028 		return;
4029 	}
4030 	if (sc->evt_detail_mem) {
4031 		switch (sc->evt_detail_mem->code) {
4032 		case MR_EVT_PD_INSERTED:
4033 			mrsas_get_pd_list(sc);
4034 			mrsas_bus_scan_sim(sc, sc->sim_1);
4035 			doscan = 0;
4036 			break;
4037 		case MR_EVT_PD_REMOVED:
4038 			mrsas_get_pd_list(sc);
4039 			mrsas_bus_scan_sim(sc, sc->sim_1);
4040 			doscan = 0;
4041 			break;
4042 		case MR_EVT_LD_OFFLINE:
4043 		case MR_EVT_CFG_CLEARED:
4044 		case MR_EVT_LD_DELETED:
4045 			mrsas_bus_scan_sim(sc, sc->sim_0);
4046 			doscan = 0;
4047 			break;
4048 		case MR_EVT_LD_CREATED:
4049 			mrsas_get_ld_list(sc);
4050 			mrsas_bus_scan_sim(sc, sc->sim_0);
4051 			doscan = 0;
4052 			break;
4053 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4054 		case MR_EVT_FOREIGN_CFG_IMPORTED:
4055 		case MR_EVT_LD_STATE_CHANGE:
4056 			doscan = 1;
4057 			break;
4058 		default:
4059 			doscan = 0;
4060 			break;
4061 		}
4062 	} else {
4063 		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
4064 		return;
4065 	}
4066 	if (doscan) {
4067 		mrsas_get_pd_list(sc);
4068 		mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4069 		mrsas_bus_scan_sim(sc, sc->sim_1);
4070 		mrsas_get_ld_list(sc);
4071 		mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4072 		mrsas_bus_scan_sim(sc, sc->sim_0);
4073 	}
4074 	seq_num = sc->evt_detail_mem->seq_num + 1;
4075 
4076 	/* Register AEN with FW for latest sequence number plus 1 */
4077 	class_locale.members.reserved = 0;
4078 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
4079 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
4080 
4081 	if (sc->aen_cmd != NULL)
4082 		return;
4083 
4084 	mtx_lock(&sc->aen_lock);
4085 	error = mrsas_register_aen(sc, seq_num,
4086 	    class_locale.word);
4087 	mtx_unlock(&sc->aen_lock);
4088 
4089 	if (error)
4090 		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
4091 
4092 }
4093 
4094 
4095 /*
4096  * mrsas_complete_aen:	Completes AEN command
4097  * input:				Adapter soft state
4098  * 						Cmd that was issued to abort another cmd
4099  *
4100  * This function will be called from ISR and will continue event processing from
4101  * thread context by enqueuing task in ev_tq (callback function
4102  * "mrsas_aen_handler").
4103  */
4104 void
4105 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4106 {
4107 	/*
4108 	 * Don't signal app if it is just an aborted previously registered
4109 	 * aen
4110 	 */
4111 	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4112 		sc->mrsas_aen_triggered = 1;
4113 		mtx_lock(&sc->aen_lock);
4114 		if (sc->mrsas_poll_waiting) {
4115 			sc->mrsas_poll_waiting = 0;
4116 			selwakeup(&sc->mrsas_select);
4117 		}
4118 		mtx_unlock(&sc->aen_lock);
4119 	} else
4120 		cmd->abort_aen = 0;
4121 
4122 	sc->aen_cmd = NULL;
4123 	mrsas_release_mfi_cmd(cmd);
4124 
4125 	if (!sc->remove_in_progress)
4126 		taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
4127 
4128 	return;
4129 }
4130 
/* Newbus method table: maps generic device operations to mrsas handlers. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}	/* terminator entry */
};

/* Driver declaration: name, method table, and per-instance softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;

/* Register the driver on the PCI bus and declare the CAM dependency. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
4152