xref: /freebsd/sys/dev/mrsas/mrsas.c (revision 3823d5e198425b4f5e5a80267d195769d1063773)
1 /*
2  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
3  * Support: freebsdraid@lsi.com
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer. 2. Redistributions
11  * in binary form must reproduce the above copyright notice, this list of
12  * conditions and the following disclaimer in the documentation and/or other
13  * materials provided with the distribution. 3. Neither the name of the
14  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
15  * promote products derived from this software without specific prior written
16  * permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  *
30  * The views and conclusions contained in the software and documentation are
31  * those of the authors and should not be interpreted as representing
32  * official policies,either expressed or implied, of the FreeBSD Project.
33  *
34  * Send feedback to: <megaraidfbsd@lsi.com> Mail to: LSI Corporation, 1621
35  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
36  *
37  */
38 
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41 
42 #include <dev/mrsas/mrsas.h>
43 #include <dev/mrsas/mrsas_ioctl.h>
44 
45 #include <cam/cam.h>
46 #include <cam/cam_ccb.h>
47 
48 #include <sys/sysctl.h>
49 #include <sys/types.h>
50 #include <sys/kthread.h>
51 #include <sys/taskqueue.h>
52 #include <sys/smp.h>
53 
54 
55 /*
56  * Function prototypes
57  */
58 static d_open_t mrsas_open;
59 static d_close_t mrsas_close;
60 static d_read_t mrsas_read;
61 static d_write_t mrsas_write;
62 static d_ioctl_t mrsas_ioctl;
63 static d_poll_t mrsas_poll;
64 
65 static struct mrsas_mgmt_info mrsas_mgmt_info;
66 static struct mrsas_ident *mrsas_find_ident(device_t);
67 static int mrsas_setup_msix(struct mrsas_softc *sc);
68 static int mrsas_allocate_msix(struct mrsas_softc *sc);
69 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
70 static void mrsas_flush_cache(struct mrsas_softc *sc);
71 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
72 static void mrsas_ocr_thread(void *arg);
73 static int mrsas_get_map_info(struct mrsas_softc *sc);
74 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
75 static int mrsas_sync_map_info(struct mrsas_softc *sc);
76 static int mrsas_get_pd_list(struct mrsas_softc *sc);
77 static int mrsas_get_ld_list(struct mrsas_softc *sc);
78 static int mrsas_setup_irq(struct mrsas_softc *sc);
79 static int mrsas_alloc_mem(struct mrsas_softc *sc);
80 static int mrsas_init_fw(struct mrsas_softc *sc);
81 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
82 static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
83 static int mrsas_clear_intr(struct mrsas_softc *sc);
84 static int
85 mrsas_get_ctrl_info(struct mrsas_softc *sc,
86     struct mrsas_ctrl_info *ctrl_info);
87 static int
88 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
89     struct mrsas_mfi_cmd *cmd_to_abort);
90 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
91 u_int8_t
92 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
93     struct mrsas_mfi_cmd *mfi_cmd);
94 int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
95 int	mrsas_init_adapter(struct mrsas_softc *sc);
96 int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
97 int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
98 int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
99 int	mrsas_ioc_init(struct mrsas_softc *sc);
100 int	mrsas_bus_scan(struct mrsas_softc *sc);
101 int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
102 int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
103 int	mrsas_reset_ctrl(struct mrsas_softc *sc);
104 int	mrsas_wait_for_outstanding(struct mrsas_softc *sc);
105 int
106 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
107     struct mrsas_mfi_cmd *cmd);
108 int
109 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
110     int size);
111 void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
112 void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
113 void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
114 void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
115 void	mrsas_disable_intr(struct mrsas_softc *sc);
116 void	mrsas_enable_intr(struct mrsas_softc *sc);
117 void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
118 void	mrsas_free_mem(struct mrsas_softc *sc);
119 void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
120 void	mrsas_isr(void *arg);
121 void	mrsas_teardown_intr(struct mrsas_softc *sc);
122 void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
123 void	mrsas_kill_hba(struct mrsas_softc *sc);
124 void	mrsas_aen_handler(struct mrsas_softc *sc);
125 void
126 mrsas_write_reg(struct mrsas_softc *sc, int offset,
127     u_int32_t value);
128 void
129 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
130     u_int32_t req_desc_hi);
131 void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
132 void
133 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
134     struct mrsas_mfi_cmd *cmd, u_int8_t status);
135 void
136 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
137     u_int8_t extStatus);
138 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
139 
140 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
141         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
142 
143 extern int mrsas_cam_attach(struct mrsas_softc *sc);
144 extern void mrsas_cam_detach(struct mrsas_softc *sc);
145 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
146 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
147 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
148 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
149 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
150 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
151 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
152 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
153 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
154 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
155 extern void mrsas_xpt_release(struct mrsas_softc *sc);
156 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
157 mrsas_get_request_desc(struct mrsas_softc *sc,
158     u_int16_t index);
159 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
160 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
161 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
162 
163 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
164 
165 /*
166  * PCI device struct and table
167  *
168  */
/* PCI identity entry used to match supported MegaRAID SAS controllers. */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* PCI subsystem vendor ID (0xffff = wildcard) */
	uint16_t subdevice;	/* PCI subsystem device ID (0xffff = wildcard) */
	const char *desc;	/* human-readable controller description */
}	MRSAS_CTLR_ID;
176 
/* Controllers supported by this driver; terminated by an all-zero entry. */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
	{0, 0, 0, 0, NULL}
};
183 
184 /*
185  * Character device entry points
186  *
187  */
/* Character device switch: entry points for the /dev/mrsas<unit> node. */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
198 
199 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
200 
201 /*
202  * In the cdevsw routines, we find our softc by using the si_drv1 member of
203  * struct cdev.  We set this variable to point to our softc in our attach
204  * routine when we create the /dev entry.
205  */
206 int
207 mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
208 {
209 	struct mrsas_softc *sc;
210 
211 	sc = dev->si_drv1;
212 	return (0);
213 }
214 
215 int
216 mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
217 {
218 	struct mrsas_softc *sc;
219 
220 	sc = dev->si_drv1;
221 	return (0);
222 }
223 
/*
 * mrsas_read:	character device read entry point.
 *
 * Reads are not supported on this node; return 0 (EOF).  The unused
 * softc lookup was removed (dead store).
 */
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	return (0);
}
/*
 * mrsas_write:	character device write entry point.
 *
 * Writes are not supported on this node; accept and discard.  The unused
 * softc lookup was removed (dead store).
 */
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	return (0);
}
240 
241 /*
242  * Register Read/Write Functions
243  *
244  */
245 void
246 mrsas_write_reg(struct mrsas_softc *sc, int offset,
247     u_int32_t value)
248 {
249 	bus_space_tag_t bus_tag = sc->bus_tag;
250 	bus_space_handle_t bus_handle = sc->bus_handle;
251 
252 	bus_space_write_4(bus_tag, bus_handle, offset, value);
253 }
254 
255 u_int32_t
256 mrsas_read_reg(struct mrsas_softc *sc, int offset)
257 {
258 	bus_space_tag_t bus_tag = sc->bus_tag;
259 	bus_space_handle_t bus_handle = sc->bus_handle;
260 
261 	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
262 }
263 
264 
265 /*
266  * Interrupt Disable/Enable/Clear Functions
267  *
268  */
269 void
270 mrsas_disable_intr(struct mrsas_softc *sc)
271 {
272 	u_int32_t mask = 0xFFFFFFFF;
273 	u_int32_t status;
274 
275 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
276 	/* Dummy read to force pci flush */
277 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
278 }
279 
280 void
281 mrsas_enable_intr(struct mrsas_softc *sc)
282 {
283 	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
284 	u_int32_t status;
285 
286 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
287 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
288 
289 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
290 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
291 }
292 
/*
 * mrsas_clear_intr:	Identify and acknowledge a pending controller
 *			interrupt.
 * input:		Adapter soft state
 * return:		1 if the interrupt belongs to this adapter (FW state
 *			change or reply interrupt), 0 if it is not ours
 */
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status, fw_status, fw_state;

	/* Read received interrupt */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/*
	 * If FW state change interrupt is received, write to it again to
	 * clear
	 */
	if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
		fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
			/* Wake the OCR thread so it can attempt recovery */
			if (sc->ocr_thread_active)
				wakeup(&sc->ocr_chan);
		}
		/* Ack by writing the status back; read-back flushes the write */
		mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
		mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
		return (1);
	}
	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}
325 
326 /*
327  * PCI Support Functions
328  *
329  */
330 static struct mrsas_ident *
331 mrsas_find_ident(device_t dev)
332 {
333 	struct mrsas_ident *pci_device;
334 
335 	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
336 		if ((pci_device->vendor == pci_get_vendor(dev)) &&
337 		    (pci_device->device == pci_get_device(dev)) &&
338 		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
339 		    (pci_device->subvendor == 0xffff)) &&
340 		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
341 		    (pci_device->subdevice == 0xffff)))
342 			return (pci_device);
343 	}
344 	return (NULL);
345 }
346 
347 static int
348 mrsas_probe(device_t dev)
349 {
350 	static u_int8_t first_ctrl = 1;
351 	struct mrsas_ident *id;
352 
353 	if ((id = mrsas_find_ident(dev)) != NULL) {
354 		if (first_ctrl) {
355 			printf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n",
356 			    MRSAS_VERSION);
357 			first_ctrl = 0;
358 		}
359 		device_set_desc(dev, id->desc);
360 		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
361 		return (-30);
362 	}
363 	return (ENXIO);
364 }
365 
/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:				Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.  Tunables and read-only state
 * are exported under hw.mrsas.<unit>.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* No device-provided tree: create our own per-unit node */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	/* Writable tunables (CTLFLAG_RW) and read-only counters (CTLFLAG_RD) */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

}
439 
/*
 * mrsas_get_tunables:	get tunable parameters.
 * input:				Adapter instance soft state
 *
 * Get tunable parameters. This will help to debug driver at boot time.
 * Defaults are set first, then overridden by the global loader tunable
 * and finally by the per-unit tunable.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug = MRSAS_FAULT;
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/* Grab the unit-instance variables (override the global setting) */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
468 
469 /*
470  * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
471  * Used to get sequence number at driver load time.
472  * input:		Adapter soft state
473  *
474  * Allocates DMAable memory for the event log info internal command.
475  */
476 int
477 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
478 {
479 	int el_info_size;
480 
481 	/* Allocate get event log info command */
482 	el_info_size = sizeof(struct mrsas_evt_log_info);
483 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
484 	    1, 0,
485 	    BUS_SPACE_MAXADDR_32BIT,
486 	    BUS_SPACE_MAXADDR,
487 	    NULL, NULL,
488 	    el_info_size,
489 	    1,
490 	    el_info_size,
491 	    BUS_DMA_ALLOCNOW,
492 	    NULL, NULL,
493 	    &sc->el_info_tag)) {
494 		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
495 		return (ENOMEM);
496 	}
497 	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
498 	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
499 		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
500 		return (ENOMEM);
501 	}
502 	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
503 	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
504 	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
505 		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
506 		return (ENOMEM);
507 	}
508 	memset(sc->el_info_mem, 0, el_info_size);
509 	return (0);
510 }
511 
/*
 * mrsas_free_evt_info_cmd:	Free memory for Event log info command
 * input:					Adapter soft state
 *
 * Deallocates memory for the event log info internal command.  Each step
 * is guarded so the function is safe to call after a partial allocation.
 */
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
	/* Unload only if the DMA map was actually loaded (phys addr set) */
	if (sc->el_info_phys_addr)
		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
	if (sc->el_info_mem != NULL)
		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
	if (sc->el_info_tag != NULL)
		bus_dma_tag_destroy(sc->el_info_tag);
}
528 
/*
 *  mrsas_get_seq_num:	Get latest event sequence number
 *  @sc:				Adapter soft state
 *  @eli:				Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * Driver get the sequence number using DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 * Returns 0 on success, -ENOMEM if a command frame or the DMA buffer
 * cannot be obtained.
 */

static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build a single-SGE READ DCMD targeting the event-log-info buffer */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	/*
	 * NOTE(review): the return value of mrsas_issue_blocked_cmd() is
	 * ignored here; on a command timeout/failure the copy below may read
	 * a zeroed buffer.  Confirm whether the status should be checked.
	 */
	mrsas_issue_blocked_cmd(sc, cmd);

	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);
	mrsas_release_mfi_cmd(cmd);

	return 0;
}
583 
584 
/*
 *  mrsas_register_aen:		Register for asynchronous event notification
 *  @sc:			Adapter soft state
 *  @seq_num:			Starting sequence number
 *  @class_locale:		Class of the event
 *
 *  This function subscribes for events beyond the @seq_num
 *  and type @class_locale.
 *
 *  Returns 0 on success (or when an existing registration already covers
 *  the request), -ENOMEM when no command frame is free, or a non-zero
 *  error from the abort/issue path.
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		/* mbox.w[1] of the pending cmd holds its class/locale word */
		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset of old and new class/locale */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort "
				    "previous AEN command\n");
				return ret_val;
			}
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/* Re-check: another registration may have raced in; back out if so */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
703 
704 /*
705  * mrsas_start_aen:	Subscribes to AEN during driver load time
706  * @instance:		Adapter soft state
707  */
708 static int
709 mrsas_start_aen(struct mrsas_softc *sc)
710 {
711 	struct mrsas_evt_log_info eli;
712 	union mrsas_evt_class_locale class_locale;
713 
714 
715 	/* Get the latest sequence number from FW */
716 
717 	memset(&eli, 0, sizeof(eli));
718 
719 	if (mrsas_get_seq_num(sc, &eli))
720 		return -1;
721 
722 	/* Register AEN with FW for latest sequence number plus 1 */
723 	class_locale.members.reserved = 0;
724 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
725 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
726 
727 	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
728 	    class_locale.word);
729 
730 }
731 
/*
 * mrsas_setup_msix:	Allocate MSI-x vectors
 * @sc:					adapter soft state
 *
 * Allocates one IRQ resource and installs the mrsas_isr handler for every
 * configured MSI-x vector.  On any failure, everything set up so far is
 * released via mrsas_teardown_intr().
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		/* MSI-x resource IDs are 1-based */
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	/* Release any vectors that were successfully set up above */
	mrsas_teardown_intr(sc);
	return (FAIL);
}
768 
769 /*
770  * mrsas_allocate_msix:		Setup MSI-x vectors
771  * @sc:						adapter soft state
772  */
773 static int
774 mrsas_allocate_msix(struct mrsas_softc *sc)
775 {
776 	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
777 		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
778 		    " of vectors\n", sc->msix_vectors);
779 	} else {
780 		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
781 		goto irq_alloc_failed;
782 	}
783 	return SUCCESS;
784 
785 irq_alloc_failed:
786 	mrsas_teardown_intr(sc);
787 	return (FAIL);
788 }
789 
/*
 * mrsas_attach:	PCI entry point
 * input:			pointer to device struct
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes   the adapter/controller to
 * its proper state.  On any failure the goto ladder at the bottom unwinds
 * exactly the resources acquired up to that point.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, bar, error;

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* NOTE(review): 'bar' is read but never used afterwards — confirm and remove */
	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Intialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);

	/*
	 * Intialize a counting Semaphore to take care no. of concurrent
	 * IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION);

	/* Intialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);

	sc->io_cmds_highwater = 0;

	/* Create a /dev entry for this device. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(dev));
	if (device_get_unit(dev) == 0)
		make_dev_alias(sc->mrsas_cdev, "megaraid_sas_ioctl_node");
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register SCSI mid-layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Start the online-controller-reset (OCR) watcher thread */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		printf("Error %d starting rescan thread\n", error);
		goto attach_fail_irq;
	}
	mrsas_setup_sysctl(sc);

	/* Initiate AEN (Asynchronous Event Notification) */

	if (mrsas_start_aen(sc)) {
		printf("Error: start aen failed\n");
		goto fail_start_aen;
	}
	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	return (0);

	/* Error-unwind ladder: each label releases what was set up above it */
fail_start_aen:
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);
attach_fail:
	destroy_dev(sc->mrsas_cdev);
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
942 
943 /*
944  * mrsas_detach:	De-allocates and teardown resources
945  * input:			pointer to device struct
946  *
947  * This function is the entry point for device disconnect and detach.
948  * It performs memory de-allocations, shutdown of the controller and various
949  * teardown and destroy resource functions.
950  */
951 static int
952 mrsas_detach(device_t dev)
953 {
954 	struct mrsas_softc *sc;
955 	int i = 0;
956 
957 	sc = device_get_softc(dev);
958 	sc->remove_in_progress = 1;
959 
960 	/* Destroy the character device so no other IOCTL will be handled */
961 	destroy_dev(sc->mrsas_cdev);
962 
963 	/*
964 	 * Take the instance off the instance array. Note that we will not
965 	 * decrement the max_index. We let this array be sparse array
966 	 */
967 	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
968 		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
969 			mrsas_mgmt_info.count--;
970 			mrsas_mgmt_info.sc_ptr[i] = NULL;
971 			break;
972 		}
973 	}
974 
975 	if (sc->ocr_thread_active)
976 		wakeup(&sc->ocr_chan);
977 	while (sc->reset_in_progress) {
978 		i++;
979 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
980 			mrsas_dprint(sc, MRSAS_INFO,
981 			    "[%2d]waiting for ocr to be finished\n", i);
982 		}
983 		pause("mr_shutdown", hz);
984 	}
985 	i = 0;
986 	while (sc->ocr_thread_active) {
987 		i++;
988 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
989 			mrsas_dprint(sc, MRSAS_INFO,
990 			    "[%2d]waiting for "
991 			    "mrsas_ocr thread to quit ocr %d\n", i,
992 			    sc->ocr_thread_active);
993 		}
994 		pause("mr_shutdown", hz);
995 	}
996 	mrsas_flush_cache(sc);
997 	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
998 	mrsas_disable_intr(sc);
999 	mrsas_cam_detach(sc);
1000 	mrsas_teardown_intr(sc);
1001 	mrsas_free_mem(sc);
1002 	mtx_destroy(&sc->sim_lock);
1003 	mtx_destroy(&sc->aen_lock);
1004 	mtx_destroy(&sc->pci_lock);
1005 	mtx_destroy(&sc->io_lock);
1006 	mtx_destroy(&sc->ioctl_lock);
1007 	mtx_destroy(&sc->mpt_cmd_pool_lock);
1008 	mtx_destroy(&sc->mfi_cmd_pool_lock);
1009 	mtx_destroy(&sc->raidmap_lock);
1010 
1011 	/* Wait for all the semaphores to be released */
1012 	while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
1013 		pause("mr_shutdown", hz);
1014 
1015 	/* Destroy the counting semaphore created for Ioctl */
1016 	sema_destroy(&sc->ioctl_count_sema);
1017 
1018 	if (sc->reg_res) {
1019 		bus_release_resource(sc->mrsas_dev,
1020 		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1021 	}
1022 	if (sc->sysctl_tree != NULL)
1023 		sysctl_ctx_free(&sc->sysctl_ctx);
1024 
1025 	return (0);
1026 }
1027 
1028 /*
1029  * mrsas_free_mem:		Frees allocated memory
1030  * input:				Adapter instance soft state
1031  *
1032  * This function is called from mrsas_detach() to free previously allocated
1033  * memory.
1034  */
1035 void
1036 mrsas_free_mem(struct mrsas_softc *sc)
1037 {
1038 	int i;
1039 	u_int32_t max_cmd;
1040 	struct mrsas_mfi_cmd *mfi_cmd;
1041 	struct mrsas_mpt_cmd *mpt_cmd;
1042 
1043 	/*
1044 	 * Free RAID map memory
1045 	 */
1046 	for (i = 0; i < 2; i++) {
1047 		if (sc->raidmap_phys_addr[i])
1048 			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1049 		if (sc->raidmap_mem[i] != NULL)
1050 			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1051 		if (sc->raidmap_tag[i] != NULL)
1052 			bus_dma_tag_destroy(sc->raidmap_tag[i]);
1053 
1054 		if (sc->ld_drv_map[i] != NULL)
1055 			free(sc->ld_drv_map[i], M_MRSAS);
1056 	}
1057 
1058 	/*
1059 	 * Free version buffer memroy
1060 	 */
1061 	if (sc->verbuf_phys_addr)
1062 		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1063 	if (sc->verbuf_mem != NULL)
1064 		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1065 	if (sc->verbuf_tag != NULL)
1066 		bus_dma_tag_destroy(sc->verbuf_tag);
1067 
1068 
1069 	/*
1070 	 * Free sense buffer memory
1071 	 */
1072 	if (sc->sense_phys_addr)
1073 		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1074 	if (sc->sense_mem != NULL)
1075 		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1076 	if (sc->sense_tag != NULL)
1077 		bus_dma_tag_destroy(sc->sense_tag);
1078 
1079 	/*
1080 	 * Free chain frame memory
1081 	 */
1082 	if (sc->chain_frame_phys_addr)
1083 		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1084 	if (sc->chain_frame_mem != NULL)
1085 		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1086 	if (sc->chain_frame_tag != NULL)
1087 		bus_dma_tag_destroy(sc->chain_frame_tag);
1088 
1089 	/*
1090 	 * Free IO Request memory
1091 	 */
1092 	if (sc->io_request_phys_addr)
1093 		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1094 	if (sc->io_request_mem != NULL)
1095 		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1096 	if (sc->io_request_tag != NULL)
1097 		bus_dma_tag_destroy(sc->io_request_tag);
1098 
1099 	/*
1100 	 * Free Reply Descriptor memory
1101 	 */
1102 	if (sc->reply_desc_phys_addr)
1103 		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1104 	if (sc->reply_desc_mem != NULL)
1105 		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1106 	if (sc->reply_desc_tag != NULL)
1107 		bus_dma_tag_destroy(sc->reply_desc_tag);
1108 
1109 	/*
1110 	 * Free event detail memory
1111 	 */
1112 	if (sc->evt_detail_phys_addr)
1113 		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1114 	if (sc->evt_detail_mem != NULL)
1115 		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1116 	if (sc->evt_detail_tag != NULL)
1117 		bus_dma_tag_destroy(sc->evt_detail_tag);
1118 
1119 	/*
1120 	 * Free MFI frames
1121 	 */
1122 	if (sc->mfi_cmd_list) {
1123 		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1124 			mfi_cmd = sc->mfi_cmd_list[i];
1125 			mrsas_free_frame(sc, mfi_cmd);
1126 		}
1127 	}
1128 	if (sc->mficmd_frame_tag != NULL)
1129 		bus_dma_tag_destroy(sc->mficmd_frame_tag);
1130 
1131 	/*
1132 	 * Free MPT internal command list
1133 	 */
1134 	max_cmd = sc->max_fw_cmds;
1135 	if (sc->mpt_cmd_list) {
1136 		for (i = 0; i < max_cmd; i++) {
1137 			mpt_cmd = sc->mpt_cmd_list[i];
1138 			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1139 			free(sc->mpt_cmd_list[i], M_MRSAS);
1140 		}
1141 		free(sc->mpt_cmd_list, M_MRSAS);
1142 		sc->mpt_cmd_list = NULL;
1143 	}
1144 	/*
1145 	 * Free MFI internal command list
1146 	 */
1147 
1148 	if (sc->mfi_cmd_list) {
1149 		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1150 			free(sc->mfi_cmd_list[i], M_MRSAS);
1151 		}
1152 		free(sc->mfi_cmd_list, M_MRSAS);
1153 		sc->mfi_cmd_list = NULL;
1154 	}
1155 	/*
1156 	 * Free request descriptor memory
1157 	 */
1158 	free(sc->req_desc, M_MRSAS);
1159 	sc->req_desc = NULL;
1160 
1161 	/*
1162 	 * Destroy parent tag
1163 	 */
1164 	if (sc->mrsas_parent_tag != NULL)
1165 		bus_dma_tag_destroy(sc->mrsas_parent_tag);
1166 }
1167 
1168 /*
1169  * mrsas_teardown_intr:	Teardown interrupt
1170  * input:				Adapter instance soft state
1171  *
1172  * This function is called from mrsas_detach() to teardown and release bus
1173  * interrupt resourse.
1174  */
1175 void
1176 mrsas_teardown_intr(struct mrsas_softc *sc)
1177 {
1178 	int i;
1179 
1180 	if (!sc->msix_enable) {
1181 		if (sc->intr_handle[0])
1182 			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1183 		if (sc->mrsas_irq[0] != NULL)
1184 			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1185 			    sc->irq_id[0], sc->mrsas_irq[0]);
1186 		sc->intr_handle[0] = NULL;
1187 	} else {
1188 		for (i = 0; i < sc->msix_vectors; i++) {
1189 			if (sc->intr_handle[i])
1190 				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1191 				    sc->intr_handle[i]);
1192 
1193 			if (sc->mrsas_irq[i] != NULL)
1194 				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1195 				    sc->irq_id[i], sc->mrsas_irq[i]);
1196 
1197 			sc->intr_handle[i] = NULL;
1198 		}
1199 		pci_release_msi(sc->mrsas_dev);
1200 	}
1201 
1202 }
1203 
1204 /*
1205  * mrsas_suspend:	Suspend entry point
1206  * input:			Device struct pointer
1207  *
1208  * This function is the entry point for system suspend from the OS.
1209  */
1210 static int
1211 mrsas_suspend(device_t dev)
1212 {
1213 	struct mrsas_softc *sc;
1214 
1215 	sc = device_get_softc(dev);
1216 	return (0);
1217 }
1218 
1219 /*
1220  * mrsas_resume:	Resume entry point
1221  * input:			Device struct pointer
1222  *
1223  * This function is the entry point for system resume from the OS.
1224  */
1225 static int
1226 mrsas_resume(device_t dev)
1227 {
1228 	struct mrsas_softc *sc;
1229 
1230 	sc = device_get_softc(dev);
1231 	return (0);
1232 }
1233 
1234 /*
1235  * mrsas_ioctl:	IOCtl commands entry point.
1236  *
1237  * This function is the entry point for IOCtls from the OS.  It calls the
1238  * appropriate function for processing depending on the command received.
1239  */
1240 static int
1241 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1242 {
1243 	struct mrsas_softc *sc;
1244 	int ret = 0, i = 0;
1245 
1246 	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1247 
1248 	/* get the Host number & the softc from data sent by the Application */
1249 	sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1250 
1251 	if ((mrsas_mgmt_info.max_index == user_ioc->host_no) || (sc == NULL)) {
1252 		printf("Please check the controller number\n");
1253 		if (sc == NULL)
1254 			printf("There is NO such Host no. %d\n", user_ioc->host_no);
1255 
1256 		return ENOENT;
1257 	}
1258 	if (sc->remove_in_progress) {
1259 		mrsas_dprint(sc, MRSAS_INFO,
1260 		    "Driver remove or shutdown called.\n");
1261 		return ENOENT;
1262 	}
1263 	mtx_lock_spin(&sc->ioctl_lock);
1264 	if (!sc->reset_in_progress) {
1265 		mtx_unlock_spin(&sc->ioctl_lock);
1266 		goto do_ioctl;
1267 	}
1268 	mtx_unlock_spin(&sc->ioctl_lock);
1269 	while (sc->reset_in_progress) {
1270 		i++;
1271 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1272 			mrsas_dprint(sc, MRSAS_INFO,
1273 			    "[%2d]waiting for "
1274 			    "OCR to be finished %d\n", i,
1275 			    sc->ocr_thread_active);
1276 		}
1277 		pause("mr_ioctl", hz);
1278 	}
1279 
1280 do_ioctl:
1281 	switch (cmd) {
1282 	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1283 #ifdef COMPAT_FREEBSD32
1284 	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1285 #endif
1286 		/*
1287 		 * Decrement the Ioctl counting Semaphore before getting an
1288 		 * mfi command
1289 		 */
1290 		sema_wait(&sc->ioctl_count_sema);
1291 
1292 		ret = mrsas_passthru(sc, (void *)arg, cmd);
1293 
1294 		/* Increment the Ioctl counting semaphore value */
1295 		sema_post(&sc->ioctl_count_sema);
1296 
1297 		break;
1298 	case MRSAS_IOC_SCAN_BUS:
1299 		ret = mrsas_bus_scan(sc);
1300 		break;
1301 	default:
1302 		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1303 		ret = ENOENT;
1304 	}
1305 
1306 	return (ret);
1307 }
1308 
1309 /*
1310  * mrsas_poll:	poll entry point for mrsas driver fd
1311  *
1312  * This function is the entry point for poll from the OS.  It waits for some AEN
1313  * events to be triggered from the controller and notifies back.
1314  */
1315 static int
1316 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1317 {
1318 	struct mrsas_softc *sc;
1319 	int revents = 0;
1320 
1321 	sc = dev->si_drv1;
1322 
1323 	if (poll_events & (POLLIN | POLLRDNORM)) {
1324 		if (sc->mrsas_aen_triggered) {
1325 			revents |= poll_events & (POLLIN | POLLRDNORM);
1326 		}
1327 	}
1328 	if (revents == 0) {
1329 		if (poll_events & (POLLIN | POLLRDNORM)) {
1330 			sc->mrsas_poll_waiting = 1;
1331 			selrecord(td, &sc->mrsas_select);
1332 		}
1333 	}
1334 	return revents;
1335 }
1336 
1337 /*
1338  * mrsas_setup_irq:	Set up interrupt
1339  * input:			Adapter instance soft state
1340  *
1341  * This function sets up interrupts as a bus resource, with flags indicating
1342  * resource permitting contemporaneous sharing and for resource to activate
1343  * atomically.
1344  */
1345 static int
1346 mrsas_setup_irq(struct mrsas_softc *sc)
1347 {
1348 	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1349 		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1350 
1351 	else {
1352 		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1353 		sc->irq_context[0].sc = sc;
1354 		sc->irq_context[0].MSIxIndex = 0;
1355 		sc->irq_id[0] = 0;
1356 		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1357 		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1358 		if (sc->mrsas_irq[0] == NULL) {
1359 			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1360 			    "interrupt\n");
1361 			return (FAIL);
1362 		}
1363 		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1364 		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1365 		    &sc->irq_context[0], &sc->intr_handle[0])) {
1366 			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1367 			    "interrupt\n");
1368 			return (FAIL);
1369 		}
1370 	}
1371 	return (0);
1372 }
1373 
1374 /*
1375  * mrsas_isr:	ISR entry point
1376  * input:		argument pointer
1377  *
1378  * This function is the interrupt service routine entry point.  There are two
1379  * types of interrupts, state change interrupt and response interrupt.  If an
1380  * interrupt is not ours, we just return.
1381  */
1382 void
1383 mrsas_isr(void *arg)
1384 {
1385 	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1386 	struct mrsas_softc *sc = irq_context->sc;
1387 	int status = 0;
1388 
1389 	if (!sc->msix_vectors) {
1390 		status = mrsas_clear_intr(sc);
1391 		if (!status)
1392 			return;
1393 	}
1394 	/* If we are resetting, bail */
1395 	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1396 		printf(" Entered into ISR when OCR is going active. \n");
1397 		mrsas_clear_intr(sc);
1398 		return;
1399 	}
1400 	/* Process for reply request and clear response interrupt */
1401 	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1402 		mrsas_clear_intr(sc);
1403 
1404 	return;
1405 }
1406 
1407 /*
1408  * mrsas_complete_cmd:	Process reply request
1409  * input:				Adapter instance soft state
1410  *
1411  * This function is called from mrsas_isr() to process reply request and clear
1412  * response interrupt. Processing of the reply request entails walking
1413  * through the reply descriptor array for the command request  pended from
1414  * Firmware.  We look at the Function field to determine the command type and
1415  * perform the appropriate action.  Before we return, we clear the response
1416  * interrupt.
1417  */
1418 static int
1419 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1420 {
1421 	Mpi2ReplyDescriptorsUnion_t *desc;
1422 	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1423 	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1424 	struct mrsas_mpt_cmd *cmd_mpt;
1425 	struct mrsas_mfi_cmd *cmd_mfi;
1426 	u_int8_t arm, reply_descript_type;
1427 	u_int16_t smid, num_completed;
1428 	u_int8_t status, extStatus;
1429 	union desc_value desc_val;
1430 	PLD_LOAD_BALANCE_INFO lbinfo;
1431 	u_int32_t device_id;
1432 	int threshold_reply_count = 0;
1433 
1434 
1435 	/* If we have a hardware error, not need to continue */
1436 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
1437 		return (DONE);
1438 
1439 	desc = sc->reply_desc_mem;
1440 	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1441 	    + sc->last_reply_idx[MSIxIndex];
1442 
1443 	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1444 
1445 	desc_val.word = desc->Words;
1446 	num_completed = 0;
1447 
1448 	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1449 
1450 	/* Find our reply descriptor for the command and process */
1451 	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
1452 		smid = reply_desc->SMID;
1453 		cmd_mpt = sc->mpt_cmd_list[smid - 1];
1454 		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1455 
1456 		status = scsi_io_req->RaidContext.status;
1457 		extStatus = scsi_io_req->RaidContext.exStatus;
1458 
1459 		switch (scsi_io_req->Function) {
1460 		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
1461 			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1462 			lbinfo = &sc->load_balance_info[device_id];
1463 			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1464 				arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
1465 				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
1466 				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1467 			}
1468 			/* Fall thru and complete IO */
1469 		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1470 			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1471 			mrsas_cmd_done(sc, cmd_mpt);
1472 			scsi_io_req->RaidContext.status = 0;
1473 			scsi_io_req->RaidContext.exStatus = 0;
1474 			mrsas_atomic_dec(&sc->fw_outstanding);
1475 			break;
1476 		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
1477 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1478 			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1479 			cmd_mpt->flags = 0;
1480 			mrsas_release_mpt_cmd(cmd_mpt);
1481 			break;
1482 		}
1483 
1484 		sc->last_reply_idx[MSIxIndex]++;
1485 		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1486 			sc->last_reply_idx[MSIxIndex] = 0;
1487 
1488 		desc->Words = ~((uint64_t)0x00);	/* set it back to all
1489 							 * 0xFFFFFFFFs */
1490 		num_completed++;
1491 		threshold_reply_count++;
1492 
1493 		/* Get the next reply descriptor */
1494 		if (!sc->last_reply_idx[MSIxIndex]) {
1495 			desc = sc->reply_desc_mem;
1496 			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1497 		} else
1498 			desc++;
1499 
1500 		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1501 		desc_val.word = desc->Words;
1502 
1503 		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1504 
1505 		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1506 			break;
1507 
1508 		/*
1509 		 * Write to reply post index after completing threshold reply
1510 		 * count and still there are more replies in reply queue
1511 		 * pending to be completed.
1512 		 */
1513 		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1514 			if (sc->msix_enable) {
1515 				if ((sc->device_id == MRSAS_INVADER) ||
1516 				    (sc->device_id == MRSAS_FURY))
1517 					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1518 					    ((MSIxIndex & 0x7) << 24) |
1519 					    sc->last_reply_idx[MSIxIndex]);
1520 				else
1521 					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1522 					    sc->last_reply_idx[MSIxIndex]);
1523 			} else
1524 				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1525 				    reply_post_host_index), sc->last_reply_idx[0]);
1526 
1527 			threshold_reply_count = 0;
1528 		}
1529 	}
1530 
1531 	/* No match, just return */
1532 	if (num_completed == 0)
1533 		return (DONE);
1534 
1535 	/* Clear response interrupt */
1536 	if (sc->msix_enable) {
1537 		if ((sc->device_id == MRSAS_INVADER) ||
1538 		    (sc->device_id == MRSAS_FURY)) {
1539 			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1540 			    ((MSIxIndex & 0x7) << 24) |
1541 			    sc->last_reply_idx[MSIxIndex]);
1542 		} else
1543 			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1544 			    sc->last_reply_idx[MSIxIndex]);
1545 	} else
1546 		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1547 		    reply_post_host_index), sc->last_reply_idx[0]);
1548 
1549 	return (0);
1550 }
1551 
1552 /*
1553  * mrsas_map_mpt_cmd_status:	Allocate DMAable memory.
1554  * input:						Adapter instance soft state
1555  *
1556  * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1557  * It checks the command status and maps the appropriate CAM status for the
1558  * CCB.
1559  */
1560 void
1561 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1562 {
1563 	struct mrsas_softc *sc = cmd->sc;
1564 	u_int8_t *sense_data;
1565 
1566 	switch (status) {
1567 	case MFI_STAT_OK:
1568 		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1569 		break;
1570 	case MFI_STAT_SCSI_IO_FAILED:
1571 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1572 		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1573 		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1574 		if (sense_data) {
1575 			/* For now just copy 18 bytes back */
1576 			memcpy(sense_data, cmd->sense, 18);
1577 			cmd->ccb_ptr->csio.sense_len = 18;
1578 			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1579 		}
1580 		break;
1581 	case MFI_STAT_LD_OFFLINE:
1582 	case MFI_STAT_DEVICE_NOT_FOUND:
1583 		if (cmd->ccb_ptr->ccb_h.target_lun)
1584 			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1585 		else
1586 			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1587 		break;
1588 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1589 		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1590 		break;
1591 	default:
1592 		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1593 		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1594 		cmd->ccb_ptr->csio.scsi_status = status;
1595 	}
1596 	return;
1597 }
1598 
1599 /*
1600  * mrsas_alloc_mem:	Allocate DMAable memory
1601  * input:			Adapter instance soft state
1602  *
1603  * This function creates the parent DMA tag and allocates DMAable memory. DMA
1604  * tag describes constraints of DMA mapping. Memory allocated is mapped into
1605  * Kernel virtual address. Callback argument is physical memory address.
1606  */
1607 static int
1608 mrsas_alloc_mem(struct mrsas_softc *sc)
1609 {
1610 	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
1611 	          evt_detail_size, count;
1612 
1613 	/*
1614 	 * Allocate parent DMA tag
1615 	 */
1616 	if (bus_dma_tag_create(NULL,	/* parent */
1617 	    1,				/* alignment */
1618 	    0,				/* boundary */
1619 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1620 	    BUS_SPACE_MAXADDR,		/* highaddr */
1621 	    NULL, NULL,			/* filter, filterarg */
1622 	    MRSAS_MAX_IO_SIZE,		/* maxsize */
1623 	    MRSAS_MAX_SGL,		/* nsegments */
1624 	    MRSAS_MAX_IO_SIZE,		/* maxsegsize */
1625 	    0,				/* flags */
1626 	    NULL, NULL,			/* lockfunc, lockarg */
1627 	    &sc->mrsas_parent_tag	/* tag */
1628 	    )) {
1629 		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1630 		return (ENOMEM);
1631 	}
1632 	/*
1633 	 * Allocate for version buffer
1634 	 */
1635 	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1636 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1637 	    1, 0,
1638 	    BUS_SPACE_MAXADDR_32BIT,
1639 	    BUS_SPACE_MAXADDR,
1640 	    NULL, NULL,
1641 	    verbuf_size,
1642 	    1,
1643 	    verbuf_size,
1644 	    BUS_DMA_ALLOCNOW,
1645 	    NULL, NULL,
1646 	    &sc->verbuf_tag)) {
1647 		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1648 		return (ENOMEM);
1649 	}
1650 	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1651 	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1652 		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1653 		return (ENOMEM);
1654 	}
1655 	bzero(sc->verbuf_mem, verbuf_size);
1656 	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1657 	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1658 	    BUS_DMA_NOWAIT)) {
1659 		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1660 		return (ENOMEM);
1661 	}
1662 	/*
1663 	 * Allocate IO Request Frames
1664 	 */
1665 	io_req_size = sc->io_frames_alloc_sz;
1666 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1667 	    16, 0,
1668 	    BUS_SPACE_MAXADDR_32BIT,
1669 	    BUS_SPACE_MAXADDR,
1670 	    NULL, NULL,
1671 	    io_req_size,
1672 	    1,
1673 	    io_req_size,
1674 	    BUS_DMA_ALLOCNOW,
1675 	    NULL, NULL,
1676 	    &sc->io_request_tag)) {
1677 		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1678 		return (ENOMEM);
1679 	}
1680 	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1681 	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1682 		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1683 		return (ENOMEM);
1684 	}
1685 	bzero(sc->io_request_mem, io_req_size);
1686 	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1687 	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
1688 	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1689 		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1690 		return (ENOMEM);
1691 	}
1692 	/*
1693 	 * Allocate Chain Frames
1694 	 */
1695 	chain_frame_size = sc->chain_frames_alloc_sz;
1696 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1697 	    4, 0,
1698 	    BUS_SPACE_MAXADDR_32BIT,
1699 	    BUS_SPACE_MAXADDR,
1700 	    NULL, NULL,
1701 	    chain_frame_size,
1702 	    1,
1703 	    chain_frame_size,
1704 	    BUS_DMA_ALLOCNOW,
1705 	    NULL, NULL,
1706 	    &sc->chain_frame_tag)) {
1707 		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1708 		return (ENOMEM);
1709 	}
1710 	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1711 	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1712 		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1713 		return (ENOMEM);
1714 	}
1715 	bzero(sc->chain_frame_mem, chain_frame_size);
1716 	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1717 	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1718 	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1719 		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1720 		return (ENOMEM);
1721 	}
1722 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
1723 	/*
1724 	 * Allocate Reply Descriptor Array
1725 	 */
1726 	reply_desc_size = sc->reply_alloc_sz * count;
1727 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1728 	    16, 0,
1729 	    BUS_SPACE_MAXADDR_32BIT,
1730 	    BUS_SPACE_MAXADDR,
1731 	    NULL, NULL,
1732 	    reply_desc_size,
1733 	    1,
1734 	    reply_desc_size,
1735 	    BUS_DMA_ALLOCNOW,
1736 	    NULL, NULL,
1737 	    &sc->reply_desc_tag)) {
1738 		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1739 		return (ENOMEM);
1740 	}
1741 	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1742 	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1743 		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1744 		return (ENOMEM);
1745 	}
1746 	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1747 	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1748 	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1749 		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1750 		return (ENOMEM);
1751 	}
1752 	/*
1753 	 * Allocate Sense Buffer Array.  Keep in lower 4GB
1754 	 */
1755 	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1756 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1757 	    64, 0,
1758 	    BUS_SPACE_MAXADDR_32BIT,
1759 	    BUS_SPACE_MAXADDR,
1760 	    NULL, NULL,
1761 	    sense_size,
1762 	    1,
1763 	    sense_size,
1764 	    BUS_DMA_ALLOCNOW,
1765 	    NULL, NULL,
1766 	    &sc->sense_tag)) {
1767 		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1768 		return (ENOMEM);
1769 	}
1770 	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1771 	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1772 		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1773 		return (ENOMEM);
1774 	}
1775 	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1776 	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1777 	    BUS_DMA_NOWAIT)) {
1778 		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1779 		return (ENOMEM);
1780 	}
1781 	/*
1782 	 * Allocate for Event detail structure
1783 	 */
1784 	evt_detail_size = sizeof(struct mrsas_evt_detail);
1785 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1786 	    1, 0,
1787 	    BUS_SPACE_MAXADDR_32BIT,
1788 	    BUS_SPACE_MAXADDR,
1789 	    NULL, NULL,
1790 	    evt_detail_size,
1791 	    1,
1792 	    evt_detail_size,
1793 	    BUS_DMA_ALLOCNOW,
1794 	    NULL, NULL,
1795 	    &sc->evt_detail_tag)) {
1796 		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1797 		return (ENOMEM);
1798 	}
1799 	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1800 	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1801 		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1802 		return (ENOMEM);
1803 	}
1804 	bzero(sc->evt_detail_mem, evt_detail_size);
1805 	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1806 	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1807 	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1808 		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1809 		return (ENOMEM);
1810 	}
1811 	/*
1812 	 * Create a dma tag for data buffers; size will be the maximum
1813 	 * possible I/O size (280kB).
1814 	 */
1815 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1816 	    1,
1817 	    0,
1818 	    BUS_SPACE_MAXADDR,
1819 	    BUS_SPACE_MAXADDR,
1820 	    NULL, NULL,
1821 	    MRSAS_MAX_IO_SIZE,
1822 	    MRSAS_MAX_SGL,
1823 	    MRSAS_MAX_IO_SIZE,
1824 	    BUS_DMA_ALLOCNOW,
1825 	    busdma_lock_mutex,
1826 	    &sc->io_lock,
1827 	    &sc->data_tag)) {
1828 		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1829 		return (ENOMEM);
1830 	}
1831 	return (0);
1832 }
1833 
1834 /*
1835  * mrsas_addr_cb:	Callback function of bus_dmamap_load()
1836  * input:			callback argument, machine dependent type
1837  * 					that describes DMA segments, number of segments, error code
1838  *
1839  * This function is for the driver to receive mapping information resultant of
1840  * the bus_dmamap_load(). The information is actually not being used, but the
1841  * address is saved anyway.
1842  */
1843 void
1844 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1845 {
1846 	bus_addr_t *addr;
1847 
1848 	addr = arg;
1849 	*addr = segs[0].ds_addr;
1850 }
1851 
1852 /*
1853  * mrsas_setup_raidmap:	Set up RAID map.
1854  * input:				Adapter instance soft state
1855  *
1856  * Allocate DMA memory for the RAID maps and perform setup.
1857  */
1858 static int
1859 mrsas_setup_raidmap(struct mrsas_softc *sc)
1860 {
1861 	int i;
1862 
1863 	sc->drv_supported_vd_count =
1864 	    MRSAS_MAX_LD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL;
1865 	sc->drv_supported_pd_count =
1866 	    MRSAS_MAX_PD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL;
1867 
1868 	if (sc->max256vdSupport) {
1869 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
1870 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
1871 	} else {
1872 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
1873 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
1874 	}
1875 
1876 #if VD_EXT_DEBUG
1877 	device_printf(sc->mrsas_dev, "FW supports: max256vdSupport = %s\n",
1878 	    sc->max256vdSupport ? "YES" : "NO");
1879 	device_printf(sc->mrsas_dev, "FW supports %dVDs %dPDs\n"
1880 	    "DRIVER supports %dVDs  %dPDs \n",
1881 	    sc->fw_supported_vd_count, sc->fw_supported_pd_count,
1882 	    sc->drv_supported_vd_count, sc->drv_supported_pd_count);
1883 #endif
1884 
1885 	sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
1886 	    (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
1887 	sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
1888 	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
1889 	    (sizeof(MR_LD_SPAN_MAP) * (sc->drv_supported_vd_count - 1));
1890 
1891 	for (i = 0; i < 2; i++) {
1892 		sc->ld_drv_map[i] =
1893 		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1894 		/* Do Error handling */
1895 		if (!sc->ld_drv_map[i]) {
1896 			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
1897 
1898 			if (i == 1)
1899 				free(sc->ld_drv_map[0], M_MRSAS);
1900 			/* ABORT driver initialization */
1901 			goto ABORT;
1902 		}
1903 	}
1904 
1905 	sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
1906 
1907 	if (sc->max256vdSupport)
1908 		sc->current_map_sz = sc->new_map_sz;
1909 	else
1910 		sc->current_map_sz = sc->old_map_sz;
1911 
1912 
1913 	for (int i = 0; i < 2; i++) {
1914 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
1915 		    4, 0,
1916 		    BUS_SPACE_MAXADDR_32BIT,
1917 		    BUS_SPACE_MAXADDR,
1918 		    NULL, NULL,
1919 		    sc->max_map_sz,
1920 		    1,
1921 		    sc->max_map_sz,
1922 		    BUS_DMA_ALLOCNOW,
1923 		    NULL, NULL,
1924 		    &sc->raidmap_tag[i])) {
1925 			device_printf(sc->mrsas_dev,
1926 			    "Cannot allocate raid map tag.\n");
1927 			return (ENOMEM);
1928 		}
1929 		if (bus_dmamem_alloc(sc->raidmap_tag[i],
1930 		    (void **)&sc->raidmap_mem[i],
1931 		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
1932 			device_printf(sc->mrsas_dev,
1933 			    "Cannot allocate raidmap memory.\n");
1934 			return (ENOMEM);
1935 		}
1936 		bzero(sc->raidmap_mem[i], sc->max_map_sz);
1937 
1938 		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
1939 		    sc->raidmap_mem[i], sc->max_map_sz,
1940 		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
1941 		    BUS_DMA_NOWAIT)) {
1942 			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
1943 			return (ENOMEM);
1944 		}
1945 		if (!sc->raidmap_mem[i]) {
1946 			device_printf(sc->mrsas_dev,
1947 			    "Cannot allocate memory for raid map.\n");
1948 			return (ENOMEM);
1949 		}
1950 	}
1951 
1952 	if (!mrsas_get_map_info(sc))
1953 		mrsas_sync_map_info(sc);
1954 
1955 	return (0);
1956 
1957 ABORT:
1958 	return (1);
1959 }
1960 
1961 /*
1962  * mrsas_init_fw:	Initialize Firmware
1963  * input:			Adapter soft state
1964  *
1965  * Calls transition_to_ready() to make sure Firmware is in operational state and
1966  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
1967  * issues internal commands to get the controller info after the IOC_INIT
1968  * command response is received by Firmware.  Note:  code relating to
1969  * get_pdlist, get_ld_list and max_sectors are currently not being used, it
1970  * is left here as placeholder.
1971  */
1972 static int
1973 mrsas_init_fw(struct mrsas_softc *sc)
1974 {
1975 
1976 	int ret, loop, ocr = 0;
1977 	u_int32_t max_sectors_1;
1978 	u_int32_t max_sectors_2;
1979 	u_int32_t tmp_sectors;
1980 	struct mrsas_ctrl_info *ctrl_info;
1981 	u_int32_t scratch_pad_2;
1982 	int msix_enable = 0;
1983 	int fw_msix_count = 0;
1984 
1985 	/* Make sure Firmware is ready */
1986 	ret = mrsas_transition_to_ready(sc, ocr);
1987 	if (ret != SUCCESS) {
1988 		return (ret);
1989 	}
1990 	/* MSI-x index 0- reply post host index register */
1991 	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
1992 	/* Check if MSI-X is supported while in ready state */
1993 	msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
1994 
1995 	if (msix_enable) {
1996 		scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
1997 		    outbound_scratch_pad_2));
1998 
1999 		/* Check max MSI-X vectors */
2000 		if (sc->device_id == MRSAS_TBOLT) {
2001 			sc->msix_vectors = (scratch_pad_2
2002 			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2003 			fw_msix_count = sc->msix_vectors;
2004 		} else {
2005 			/* Invader/Fury supports 96 MSI-X vectors */
2006 			sc->msix_vectors = ((scratch_pad_2
2007 			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2008 			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2009 			fw_msix_count = sc->msix_vectors;
2010 
2011 			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2012 			    loop++) {
2013 				sc->msix_reg_offset[loop] =
2014 				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2015 				    (loop * 0x10);
2016 			}
2017 		}
2018 
2019 		/* Don't bother allocating more MSI-X vectors than cpus */
2020 		sc->msix_vectors = min(sc->msix_vectors,
2021 		    mp_ncpus);
2022 
2023 		/* Allocate MSI-x vectors */
2024 		if (mrsas_allocate_msix(sc) == SUCCESS)
2025 			sc->msix_enable = 1;
2026 		else
2027 			sc->msix_enable = 0;
2028 
2029 		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2030 		    "Online CPU %d Current MSIX <%d>\n",
2031 		    fw_msix_count, mp_ncpus, sc->msix_vectors);
2032 	}
2033 	if (mrsas_init_adapter(sc) != SUCCESS) {
2034 		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2035 		return (1);
2036 	}
2037 	/* Allocate internal commands for pass-thru */
2038 	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2039 		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2040 		return (1);
2041 	}
2042 	/*
2043 	 * Get the controller info from FW, so that the MAX VD support
2044 	 * availability can be decided.
2045 	 */
2046 	ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2047 	if (!ctrl_info)
2048 		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2049 
2050 	if (mrsas_get_ctrl_info(sc, ctrl_info)) {
2051 		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2052 	}
2053 	sc->max256vdSupport =
2054 	    (u_int8_t)ctrl_info->adapterOperations3.supportMaxExtLDs;
2055 
2056 	if (ctrl_info->max_lds > 64) {
2057 		sc->max256vdSupport = 1;
2058 	}
2059 	if (mrsas_setup_raidmap(sc) != SUCCESS) {
2060 		device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
2061 		return (1);
2062 	}
2063 	/* For pass-thru, get PD/LD list and controller info */
2064 	memset(sc->pd_list, 0,
2065 	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2066 	mrsas_get_pd_list(sc);
2067 
2068 	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2069 	mrsas_get_ld_list(sc);
2070 
2071 	/*
2072 	 * Compute the max allowed sectors per IO: The controller info has
2073 	 * two limits on max sectors. Driver should use the minimum of these
2074 	 * two.
2075 	 *
2076 	 * 1 << stripe_sz_ops.min = max sectors per strip
2077 	 *
2078 	 * Note that older firmwares ( < FW ver 30) didn't report information to
2079 	 * calculate max_sectors_1. So the number ended up as zero always.
2080 	 */
2081 	tmp_sectors = 0;
2082 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
2083 	    ctrl_info->max_strips_per_io;
2084 	max_sectors_2 = ctrl_info->max_request_size;
2085 	tmp_sectors = min(max_sectors_1, max_sectors_2);
2086 	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2087 
2088 	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2089 		sc->max_sectors_per_req = tmp_sectors;
2090 
2091 	sc->disableOnlineCtrlReset =
2092 	    ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2093 	sc->UnevenSpanSupport =
2094 	    ctrl_info->adapterOperations2.supportUnevenSpans;
2095 	if (sc->UnevenSpanSupport) {
2096 		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2097 		    sc->UnevenSpanSupport);
2098 
2099 		if (MR_ValidateMapInfo(sc))
2100 			sc->fast_path_io = 1;
2101 		else
2102 			sc->fast_path_io = 0;
2103 	}
2104 	if (ctrl_info)
2105 		free(ctrl_info, M_MRSAS);
2106 
2107 	return (0);
2108 }
2109 
2110 /*
2111  * mrsas_init_adapter:	Initializes the adapter/controller
2112  * input:				Adapter soft state
2113  *
2114  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2115  * ROC/controller.  The FW register is read to determined the number of
2116  * commands that is supported.  All memory allocations for IO is based on
2117  * max_cmd.  Appropriate calculations are performed in this function.
2118  */
2119 int
2120 mrsas_init_adapter(struct mrsas_softc *sc)
2121 {
2122 	uint32_t status;
2123 	u_int32_t max_cmd;
2124 	int ret;
2125 	int i = 0;
2126 
2127 	/* Read FW status register */
2128 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2129 
2130 	/* Get operational params from status register */
2131 	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2132 
2133 	/* Decrement the max supported by 1, to correlate with FW */
2134 	sc->max_fw_cmds = sc->max_fw_cmds - 1;
2135 	max_cmd = sc->max_fw_cmds;
2136 
2137 	/* Determine allocation size of command frames */
2138 	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16);
2139 	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2140 	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2141 	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2142 	sc->chain_frames_alloc_sz = 1024 * max_cmd;
2143 	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2144 	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2145 
2146 	sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
2147 	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2148 
2149 	/* Used for pass thru MFI frame (DCMD) */
2150 	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2151 
2152 	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2153 	    sizeof(MPI2_SGE_IO_UNION)) / 16;
2154 
2155 	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2156 
2157 	for (i = 0; i < count; i++)
2158 		sc->last_reply_idx[i] = 0;
2159 
2160 	ret = mrsas_alloc_mem(sc);
2161 	if (ret != SUCCESS)
2162 		return (ret);
2163 
2164 	ret = mrsas_alloc_mpt_cmds(sc);
2165 	if (ret != SUCCESS)
2166 		return (ret);
2167 
2168 	ret = mrsas_ioc_init(sc);
2169 	if (ret != SUCCESS)
2170 		return (ret);
2171 
2172 	return (0);
2173 }
2174 
2175 /*
2176  * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2177  * input:				Adapter soft state
2178  *
2179  * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2180  */
2181 int
2182 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2183 {
2184 	int ioc_init_size;
2185 
2186 	/* Allocate IOC INIT command */
2187 	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2188 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2189 	    1, 0,
2190 	    BUS_SPACE_MAXADDR_32BIT,
2191 	    BUS_SPACE_MAXADDR,
2192 	    NULL, NULL,
2193 	    ioc_init_size,
2194 	    1,
2195 	    ioc_init_size,
2196 	    BUS_DMA_ALLOCNOW,
2197 	    NULL, NULL,
2198 	    &sc->ioc_init_tag)) {
2199 		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2200 		return (ENOMEM);
2201 	}
2202 	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2203 	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2204 		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2205 		return (ENOMEM);
2206 	}
2207 	bzero(sc->ioc_init_mem, ioc_init_size);
2208 	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2209 	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2210 	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2211 		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2212 		return (ENOMEM);
2213 	}
2214 	return (0);
2215 }
2216 
2217 /*
 * mrsas_free_ioc_cmd:	Frees memory of the IOC Init command
2219  * input:				Adapter soft state
2220  *
2221  * Deallocates memory of the IOC Init cmd.
2222  */
2223 void
2224 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2225 {
2226 	if (sc->ioc_init_phys_mem)
2227 		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2228 	if (sc->ioc_init_mem != NULL)
2229 		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2230 	if (sc->ioc_init_tag != NULL)
2231 		bus_dma_tag_destroy(sc->ioc_init_tag);
2232 }
2233 
2234 /*
2235  * mrsas_ioc_init:	Sends IOC Init command to FW
2236  * input:			Adapter soft state
2237  *
2238  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2239  */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}
	/*
	 * The MPI2 IOC INIT message lives 1024 bytes into the buffer, after
	 * the MFI init frame (see mrsas_alloc_ioc_cmd() for the layout).
	 */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	/* MFI init frame at the start of the buffer wraps the MPI2 message. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	/* 0xFF = "in progress"; FW overwrites this when it completes. */
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY)) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Advertise the driver version string to FW, if the buffer exists. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	/* Physical address of the embedded MPI2 IOC INIT message. */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	/* Build an MFA request descriptor pointing at the init frame. */
	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	/* Interrupts stay off during init; completion is polled below. */
	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		/* Up to max_wait seconds in 1 ms steps. */
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		/* 0xFF still pending = timeout; anything else = FW error. */
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* The init buffer is one-shot; release it regardless of outcome. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2323 
2324 /*
2325  * mrsas_alloc_mpt_cmds:	Allocates the command packets
2326  * input:					Adapter instance soft state
2327  *
2328  * This function allocates the internal commands for IOs. Each command that is
2329  * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2330  * array is allocated with mrsas_mpt_cmd context.  The free commands are
2331  * maintained in a linked list (cmd pool). SMID value range is from 1 to
2332  * max_fw_cmds.
2333  */
2334 int
2335 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2336 {
2337 	int i, j;
2338 	u_int32_t max_cmd, count;
2339 	struct mrsas_mpt_cmd *cmd;
2340 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2341 	u_int32_t offset, chain_offset, sense_offset;
2342 	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2343 	u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2344 
2345 	max_cmd = sc->max_fw_cmds;
2346 
2347 	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2348 	if (!sc->req_desc) {
2349 		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2350 		return (ENOMEM);
2351 	}
2352 	memset(sc->req_desc, 0, sc->request_alloc_sz);
2353 
2354 	/*
2355 	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2356 	 * Allocate the dynamic array first and then allocate individual
2357 	 * commands.
2358 	 */
2359 	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
2360 	if (!sc->mpt_cmd_list) {
2361 		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2362 		return (ENOMEM);
2363 	}
2364 	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
2365 	for (i = 0; i < max_cmd; i++) {
2366 		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2367 		    M_MRSAS, M_NOWAIT);
2368 		if (!sc->mpt_cmd_list[i]) {
2369 			for (j = 0; j < i; j++)
2370 				free(sc->mpt_cmd_list[j], M_MRSAS);
2371 			free(sc->mpt_cmd_list, M_MRSAS);
2372 			sc->mpt_cmd_list = NULL;
2373 			return (ENOMEM);
2374 		}
2375 	}
2376 
2377 	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2378 	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2379 	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2380 	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2381 	sense_base = (u_int8_t *)sc->sense_mem;
2382 	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2383 	for (i = 0; i < max_cmd; i++) {
2384 		cmd = sc->mpt_cmd_list[i];
2385 		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2386 		chain_offset = 1024 * i;
2387 		sense_offset = MRSAS_SENSE_LEN * i;
2388 		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2389 		cmd->index = i + 1;
2390 		cmd->ccb_ptr = NULL;
2391 		callout_init(&cmd->cm_callout, 0);
2392 		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2393 		cmd->sc = sc;
2394 		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2395 		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2396 		cmd->io_request_phys_addr = io_req_base_phys + offset;
2397 		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2398 		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2399 		cmd->sense = sense_base + sense_offset;
2400 		cmd->sense_phys_addr = sense_base_phys + sense_offset;
2401 		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2402 			return (FAIL);
2403 		}
2404 		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2405 	}
2406 
2407 	/* Initialize reply descriptor array to 0xFFFFFFFF */
2408 	reply_desc = sc->reply_desc_mem;
2409 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2410 	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2411 		reply_desc->Words = MRSAS_ULONG_MAX;
2412 	}
2413 	return (0);
2414 }
2415 
2416 /*
2417  * mrsas_fire_cmd:	Sends command to FW
2418  * input:			Adapter softstate
2419  * 					request descriptor address low
2420  * 					request descriptor address high
2421  *
2422  * This functions fires the command to Firmware by writing to the
2423  * inbound_low_queue_port and inbound_high_queue_port.
2424  */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * The low/high queue port writes must be paired atomically (low
	 * first), so both happen under pci_lock.
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}
2436 
2437 /*
2438  * mrsas_transition_to_ready:  Move FW to Ready state input:
2439  * Adapter instance soft state
2440  *
 * During initialization, the FW can potentially be in any one of several
 * possible states. If the FW is in the operational or waiting-for-handshake
 * state, the driver must take steps to bring it to the ready state.
 * Otherwise, it has to wait for the ready state.
2445  */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* Current FW state lives in the low bits of the scratch pad. */
	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Full (unmasked) register value, used to detect any change. */
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* During OCR a fault is expected; otherwise fatal. */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Wait for the doorbell busy bit to clear (1 ms steps). */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			/* Any change in the raw register value means progress. */
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
2551 
2552 /*
2553  * mrsas_get_mfi_cmd:	Get a cmd from free command pool
2554  * input:				Adapter soft state
2555  *
2556  * This function removes an MFI command from the command list.
2557  */
2558 struct mrsas_mfi_cmd *
2559 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2560 {
2561 	struct mrsas_mfi_cmd *cmd = NULL;
2562 
2563 	mtx_lock(&sc->mfi_cmd_pool_lock);
2564 	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2565 		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2566 		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2567 	}
2568 	mtx_unlock(&sc->mfi_cmd_pool_lock);
2569 
2570 	return cmd;
2571 }
2572 
2573 /*
2574  * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
2575  * input:				Adapter Context.
2576  *
2577  * This function will check FW status register and flag do_timeout_reset flag.
2578  * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2579  * trigger reset.
2580  */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	/* sim_lock is held across the loop; msleep drops it while sleeping. */
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to shutdown from %s\n", __func__);
			break;
		}
		/* Poll the FW state; trigger OCR on FAULT or IO timeout. */
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
			device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
			    sc->do_timedout_reset ? "IO Timeout" :
			    "FW fault detected");
			/* ioctl_lock guards the reset bookkeeping vs ioctls. */
			mtx_lock_spin(&sc->ioctl_lock);
			sc->reset_in_progress = 1;
			sc->reset_count++;
			mtx_unlock_spin(&sc->ioctl_lock);
			/* Freeze CAM while the controller is being reset. */
			mrsas_xpt_freeze(sc);
			mrsas_reset_ctrl(sc);
			mrsas_xpt_release(sc);
			sc->reset_in_progress = 0;
			sc->do_timedout_reset = 0;
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
2624 
2625 /*
2626  * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
2627  * input:					Adapter Context.
2628  *
 * This function clears the reply descriptors so that, after OCR, neither the
 * driver nor the FW sees stale history.
2631  */
2632 void
2633 mrsas_reset_reply_desc(struct mrsas_softc *sc)
2634 {
2635 	int i, count;
2636 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2637 
2638 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2639 	for (i = 0; i < count; i++)
2640 		sc->last_reply_idx[i] = 0;
2641 
2642 	reply_desc = sc->reply_desc_mem;
2643 	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2644 		reply_desc->Words = MRSAS_ULONG_MAX;
2645 	}
2646 }
2647 
2648 /*
2649  * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
2650  * input:				Adapter Context.
2651  *
2652  * This function will run from thread context so that it can sleep. 1. Do not
2653  * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2654  * to complete for 180 seconds. 3. If #2 does not find any outstanding
2655  * command Controller is in working state, so skip OCR. Otherwise, do
2656  * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2657  * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2658  * OCR, Re-fire Managment command and move Controller to Operation state.
2659  */
2660 int
2661 mrsas_reset_ctrl(struct mrsas_softc *sc)
2662 {
2663 	int retval = SUCCESS, i, j, retry = 0;
2664 	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2665 	union ccb *ccb;
2666 	struct mrsas_mfi_cmd *mfi_cmd;
2667 	struct mrsas_mpt_cmd *mpt_cmd;
2668 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2669 
2670 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2671 		device_printf(sc->mrsas_dev,
2672 		    "mrsas: Hardware critical error, returning FAIL.\n");
2673 		return FAIL;
2674 	}
2675 	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2676 	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2677 	mrsas_disable_intr(sc);
2678 	DELAY(1000 * 1000);
2679 
2680 	/* First try waiting for commands to complete */
2681 	if (mrsas_wait_for_outstanding(sc)) {
2682 		mrsas_dprint(sc, MRSAS_OCR,
2683 		    "resetting adapter from %s.\n",
2684 		    __func__);
2685 		/* Now return commands back to the CAM layer */
2686 		for (i = 0; i < sc->max_fw_cmds; i++) {
2687 			mpt_cmd = sc->mpt_cmd_list[i];
2688 			if (mpt_cmd->ccb_ptr) {
2689 				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2690 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2691 				mrsas_cmd_done(sc, mpt_cmd);
2692 				mrsas_atomic_dec(&sc->fw_outstanding);
2693 			}
2694 		}
2695 
2696 		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2697 		    outbound_scratch_pad));
2698 		abs_state = status_reg & MFI_STATE_MASK;
2699 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
2700 		if (sc->disableOnlineCtrlReset ||
2701 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2702 			/* Reset not supported, kill adapter */
2703 			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
2704 			mrsas_kill_hba(sc);
2705 			sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
2706 			retval = FAIL;
2707 			goto out;
2708 		}
2709 		/* Now try to reset the chip */
2710 		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
2711 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2712 			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
2713 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2714 			    MPI2_WRSEQ_1ST_KEY_VALUE);
2715 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2716 			    MPI2_WRSEQ_2ND_KEY_VALUE);
2717 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2718 			    MPI2_WRSEQ_3RD_KEY_VALUE);
2719 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2720 			    MPI2_WRSEQ_4TH_KEY_VALUE);
2721 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2722 			    MPI2_WRSEQ_5TH_KEY_VALUE);
2723 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2724 			    MPI2_WRSEQ_6TH_KEY_VALUE);
2725 
2726 			/* Check that the diag write enable (DRWE) bit is on */
2727 			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2728 			    fusion_host_diag));
2729 			retry = 0;
2730 			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2731 				DELAY(100 * 1000);
2732 				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2733 				    fusion_host_diag));
2734 				if (retry++ == 100) {
2735 					mrsas_dprint(sc, MRSAS_OCR,
2736 					    "Host diag unlock failed!\n");
2737 					break;
2738 				}
2739 			}
2740 			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2741 				continue;
2742 
2743 			/* Send chip reset command */
2744 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2745 			    host_diag | HOST_DIAG_RESET_ADAPTER);
2746 			DELAY(3000 * 1000);
2747 
2748 			/* Make sure reset adapter bit is cleared */
2749 			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2750 			    fusion_host_diag));
2751 			retry = 0;
2752 			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2753 				DELAY(100 * 1000);
2754 				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2755 				    fusion_host_diag));
2756 				if (retry++ == 1000) {
2757 					mrsas_dprint(sc, MRSAS_OCR,
2758 					    "Diag reset adapter never cleared!\n");
2759 					break;
2760 				}
2761 			}
2762 			if (host_diag & HOST_DIAG_RESET_ADAPTER)
2763 				continue;
2764 
2765 			abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2766 			    outbound_scratch_pad)) & MFI_STATE_MASK;
2767 			retry = 0;
2768 
2769 			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2770 				DELAY(100 * 1000);
2771 				abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2772 				    outbound_scratch_pad)) & MFI_STATE_MASK;
2773 			}
2774 			if (abs_state <= MFI_STATE_FW_INIT) {
2775 				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2776 				    " state = 0x%x\n", abs_state);
2777 				continue;
2778 			}
2779 			/* Wait for FW to become ready */
2780 			if (mrsas_transition_to_ready(sc, 1)) {
2781 				mrsas_dprint(sc, MRSAS_OCR,
2782 				    "mrsas: Failed to transition controller to ready.\n");
2783 				continue;
2784 			}
2785 			mrsas_reset_reply_desc(sc);
2786 			if (mrsas_ioc_init(sc)) {
2787 				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2788 				continue;
2789 			}
2790 			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2791 			mrsas_enable_intr(sc);
2792 			sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2793 
2794 			/* Re-fire management commands */
2795 			for (j = 0; j < sc->max_fw_cmds; j++) {
2796 				mpt_cmd = sc->mpt_cmd_list[j];
2797 				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2798 					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
2799 					if (mfi_cmd->frame->dcmd.opcode ==
2800 					    MR_DCMD_LD_MAP_GET_INFO) {
2801 						mrsas_release_mfi_cmd(mfi_cmd);
2802 						mrsas_release_mpt_cmd(mpt_cmd);
2803 					} else {
2804 						req_desc = mrsas_get_request_desc(sc,
2805 						    mfi_cmd->cmd_id.context.smid - 1);
2806 						mrsas_dprint(sc, MRSAS_OCR,
2807 						    "Re-fire command DCMD opcode 0x%x index %d\n ",
2808 						    mfi_cmd->frame->dcmd.opcode, j);
2809 						if (!req_desc)
2810 							device_printf(sc->mrsas_dev,
2811 							    "Cannot build MPT cmd.\n");
2812 						else
2813 							mrsas_fire_cmd(sc, req_desc->addr.u.low,
2814 							    req_desc->addr.u.high);
2815 					}
2816 				}
2817 			}
2818 
2819 			/* Reset load balance info */
2820 			memset(sc->load_balance_info, 0,
2821 			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
2822 
2823 			if (!mrsas_get_map_info(sc))
2824 				mrsas_sync_map_info(sc);
2825 
2826 			/* Adapter reset completed successfully */
2827 			device_printf(sc->mrsas_dev, "Reset successful\n");
2828 			retval = SUCCESS;
2829 			goto out;
2830 		}
2831 		/* Reset failed, kill the adapter */
2832 		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
2833 		mrsas_kill_hba(sc);
2834 		retval = FAIL;
2835 	} else {
2836 		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2837 		mrsas_enable_intr(sc);
2838 		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2839 	}
2840 out:
2841 	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2842 	mrsas_dprint(sc, MRSAS_OCR,
2843 	    "Reset Exit with %d.\n", retval);
2844 	return retval;
2845 }
2846 
2847 /*
2848  * mrsas_kill_hba:	Kill HBA when OCR is not supported
2849  * input:			Adapter Context.
2850  *
2851  * This function will kill HBA when OCR is not supported.
2852  */
2853 void
2854 mrsas_kill_hba(struct mrsas_softc *sc)
2855 {
2856 	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
2857 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2858 	    MFI_STOP_ADP);
2859 	/* Flush */
2860 	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
2861 }
2862 
2863 /*
2864  * mrsas_wait_for_outstanding:	Wait for outstanding commands
2865  * input:						Adapter Context.
2866  *
2867  * This function will wait for 180 seconds for outstanding commands to be
2868  * completed.
2869  */
2870 int
2871 mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2872 {
2873 	int i, outstanding, retval = 0;
2874 	u_int32_t fw_state, count, MSIxIndex;
2875 
2876 
2877 	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2878 		if (sc->remove_in_progress) {
2879 			mrsas_dprint(sc, MRSAS_OCR,
2880 			    "Driver remove or shutdown called.\n");
2881 			retval = 1;
2882 			goto out;
2883 		}
2884 		/* Check if firmware is in fault state */
2885 		fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2886 		    outbound_scratch_pad)) & MFI_STATE_MASK;
2887 		if (fw_state == MFI_STATE_FAULT) {
2888 			mrsas_dprint(sc, MRSAS_OCR,
2889 			    "Found FW in FAULT state, will reset adapter.\n");
2890 			retval = 1;
2891 			goto out;
2892 		}
2893 		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
2894 		if (!outstanding)
2895 			goto out;
2896 
2897 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2898 			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2899 			    "commands to complete\n", i, outstanding);
2900 			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2901 			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
2902 				mrsas_complete_cmd(sc, MSIxIndex);
2903 		}
2904 		DELAY(1000 * 1000);
2905 	}
2906 
2907 	if (mrsas_atomic_read(&sc->fw_outstanding)) {
2908 		mrsas_dprint(sc, MRSAS_OCR,
2909 		    " pending commands remain after waiting,"
2910 		    " will reset adapter.\n");
2911 		retval = 1;
2912 	}
2913 out:
2914 	return retval;
2915 }
2916 
2917 /*
2918  * mrsas_release_mfi_cmd:	Return a cmd to free command pool
2919  * input:					Command packet for return to free cmd pool
2920  *
2921  * This function returns the MFI command to the command list.
2922  */
2923 void
2924 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2925 {
2926 	struct mrsas_softc *sc = cmd->sc;
2927 
2928 	mtx_lock(&sc->mfi_cmd_pool_lock);
2929 	cmd->ccb_ptr = NULL;
2930 	cmd->cmd_id.frame_count = 0;
2931 	TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
2932 	mtx_unlock(&sc->mfi_cmd_pool_lock);
2933 
2934 	return;
2935 }
2936 
2937 /*
2938  * mrsas_get_controller_info:	Returns FW's controller structure
2939  * input:						Adapter soft state
2940  * 								Controller information structure
2941  *
2942  * Issues an internal command (DCMD) to get the FW's controller structure. This
2943  * information is mainly used to find out the maximum IO transfer per command
2944  * supported by the FW.
2945  */
2946 static int
2947 mrsas_get_ctrl_info(struct mrsas_softc *sc,
2948     struct mrsas_ctrl_info *ctrl_info)
2949 {
2950 	int retcode = 0;
2951 	struct mrsas_mfi_cmd *cmd;
2952 	struct mrsas_dcmd_frame *dcmd;
2953 
2954 	cmd = mrsas_get_mfi_cmd(sc);
2955 
2956 	if (!cmd) {
2957 		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
2958 		return -ENOMEM;
2959 	}
2960 	dcmd = &cmd->frame->dcmd;
2961 
2962 	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
2963 		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
2964 		mrsas_release_mfi_cmd(cmd);
2965 		return -ENOMEM;
2966 	}
2967 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2968 
2969 	dcmd->cmd = MFI_CMD_DCMD;
2970 	dcmd->cmd_status = 0xFF;
2971 	dcmd->sge_count = 1;
2972 	dcmd->flags = MFI_FRAME_DIR_READ;
2973 	dcmd->timeout = 0;
2974 	dcmd->pad_0 = 0;
2975 	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
2976 	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2977 	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
2978 	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
2979 
2980 	if (!mrsas_issue_polled(sc, cmd))
2981 		memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
2982 	else
2983 		retcode = 1;
2984 
2985 	mrsas_free_ctlr_info_cmd(sc);
2986 	mrsas_release_mfi_cmd(cmd);
2987 	return (retcode);
2988 }
2989 
2990 /*
2991  * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
2992  * input:						Adapter soft state
2993  *
2994  * Allocates DMAable memory for the controller info internal command.
2995  */
2996 int
2997 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
2998 {
2999 	int ctlr_info_size;
3000 
3001 	/* Allocate get controller info command */
3002 	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3003 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3004 	    1, 0,
3005 	    BUS_SPACE_MAXADDR_32BIT,
3006 	    BUS_SPACE_MAXADDR,
3007 	    NULL, NULL,
3008 	    ctlr_info_size,
3009 	    1,
3010 	    ctlr_info_size,
3011 	    BUS_DMA_ALLOCNOW,
3012 	    NULL, NULL,
3013 	    &sc->ctlr_info_tag)) {
3014 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3015 		return (ENOMEM);
3016 	}
3017 	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3018 	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3019 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3020 		return (ENOMEM);
3021 	}
3022 	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3023 	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3024 	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3025 		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3026 		return (ENOMEM);
3027 	}
3028 	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3029 	return (0);
3030 }
3031 
3032 /*
3033  * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3034  * input:						Adapter soft state
3035  *
3036  * Deallocates memory of the get controller info cmd.
3037  */
3038 void
3039 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3040 {
3041 	if (sc->ctlr_info_phys_addr)
3042 		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3043 	if (sc->ctlr_info_mem != NULL)
3044 		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3045 	if (sc->ctlr_info_tag != NULL)
3046 		bus_dma_tag_destroy(sc->ctlr_info_tag);
3047 }
3048 
3049 /*
3050  * mrsas_issue_polled:	Issues a polling command
3051  * inputs:				Adapter soft state
3052  * 						Command packet to be issued
3053  *
3054  * This function is for posting of internal commands to Firmware.  MFI requires
3055  * the cmd_status to be set to 0xFF before posting.  The maximun wait time of
3056  * the poll response timer is 180 seconds.
3057  */
3058 int
3059 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3060 {
3061 	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3062 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3063 	int i, retcode = 0;
3064 
3065 	frame_hdr->cmd_status = 0xFF;
3066 	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3067 
3068 	/* Issue the frame using inbound queue port */
3069 	if (mrsas_issue_dcmd(sc, cmd)) {
3070 		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3071 		return (1);
3072 	}
3073 	/*
3074 	 * Poll response timer to wait for Firmware response.  While this
3075 	 * timer with the DELAY call could block CPU, the time interval for
3076 	 * this is only 1 millisecond.
3077 	 */
3078 	if (frame_hdr->cmd_status == 0xFF) {
3079 		for (i = 0; i < (max_wait * 1000); i++) {
3080 			if (frame_hdr->cmd_status == 0xFF)
3081 				DELAY(1000);
3082 			else
3083 				break;
3084 		}
3085 	}
3086 	if (frame_hdr->cmd_status != 0) {
3087 		if (frame_hdr->cmd_status == 0xFF)
3088 			device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
3089 		else
3090 			device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
3091 		retcode = 1;
3092 	}
3093 	return (retcode);
3094 }
3095 
3096 /*
3097  * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3098  * input:				Adapter soft state mfi cmd pointer
3099  *
3100  * This function is called by mrsas_issued_blocked_cmd() and
3101  * mrsas_issued_polled(), to build the MPT command and then fire the command
3102  * to Firmware.
3103  */
3104 int
3105 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3106 {
3107 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3108 
3109 	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3110 	if (!req_desc) {
3111 		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3112 		return (1);
3113 	}
3114 	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3115 
3116 	return (0);
3117 }
3118 
3119 /*
3120  * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3121  * input:				Adapter soft state mfi cmd to build
3122  *
3123  * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3124  * command and prepares the MPT command to send to Firmware.
3125  */
3126 MRSAS_REQUEST_DESCRIPTOR_UNION *
3127 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3128 {
3129 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3130 	u_int16_t index;
3131 
3132 	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3133 		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3134 		return NULL;
3135 	}
3136 	index = cmd->cmd_id.context.smid;
3137 
3138 	req_desc = mrsas_get_request_desc(sc, index - 1);
3139 	if (!req_desc)
3140 		return NULL;
3141 
3142 	req_desc->addr.Words = 0;
3143 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3144 
3145 	req_desc->SCSIIO.SMID = index;
3146 
3147 	return (req_desc);
3148 }
3149 
3150 /*
3151  * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3152  * input:						Adapter soft state mfi cmd pointer
3153  *
3154  * The MPT command and the io_request are setup as a passthru command. The SGE
3155  * chain address is set to frame_phys_addr of the MFI command.
3156  */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	/* An MPT command carries the MFI frame to the firmware; returns 1 if
	 * no MPT command is available. */
	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link the pair so completion can find the MFI command again. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	/* On Invader/Fury the last SGE of the main message must have its
	 * flags cleared before chaining. */
	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	/* Mark the request as an MFI passthru and point its SGL/chain
	 * offsets at the IEEE chain element set up below. */
	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* The chain element points at the DMA address of the MFI frame. */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;

	return (0);
}
3206 
3207 /*
3208  * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3209  * input:					Adapter soft state Command to be issued
3210  *
3211  * This function waits on an event for the command to be returned from the ISR.
3212  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3213  * internal and ioctl commands.
3214  */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = 0;

	/* Initialize cmd_status */
	/* ECONNREFUSED marks "still pending"; mrsas_wakeup() overwrites it. */
	cmd->cmd_status = ECONNREFUSED;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): this stores the address of the local `cmd` parameter,
	 * but the sleep below and the wakeup in mrsas_wakeup() both use
	 * &sc->chan itself (a stable address), not this stored value — the
	 * assignment looks vestigial; confirm before relying on sc->chan.
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in 1-second (hz) slices until completion or max_wait secs. */
	while (1) {
		if (cmd->cmd_status == ECONNREFUSED) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;
		total_time++;
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev,
			    "Internal command timed out after %d seconds.\n", max_wait);
			retcode = 1;
			break;
		}
	}
	return (retcode);
}
3247 
3248 /*
3249  * mrsas_complete_mptmfi_passthru:	Completes a command
3250  * input:	@sc:					Adapter soft state
3251  * 			@cmd:					Command to be completed
3252  * 			@status:				cmd completion status
3253  *
3254  * This function is called from mrsas_complete_cmd() after an interrupt is
3255  * received from Firmware, and io_request->Function is
3256  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3257  */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	/* Status written into the frame by firmware at completion time. */
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	/* Dispatch on the original MFI command type. */
	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH: non-sync SCSI passthru completes like a DCMD. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		/* mbox.b[1] == 1 marks the pended map-sync variant of the DCMD. */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			/* Disable fast path until the new map is validated. */
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				/* Success: flip to the newly delivered map. */
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-arm the pended map-sync command with the FW. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
3333 
3334 /*
3335  * mrsas_wakeup:	Completes an internal command
3336  * input:			Adapter soft state
3337  * 					Command to be completed
3338  *
3339  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3340  * timer is started.  This function is called from
3341  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3342  * from the command wait.
3343  */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/* Propagate the FW status; ECONNREFUSED is the "still pending" marker,
	 * so map it to success here. */
	cmd->cmd_status = cmd->frame->io.cmd_status;
	if (cmd->cmd_status == ECONNREFUSED)
		cmd->cmd_status = 0;

	sc->chan = (void *)&cmd;
	/* Rouse the thread sleeping in mrsas_issue_blocked_cmd(). */
	wakeup_one((void *)&sc->chan);
}
3356 
3357 /*
3358  * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
3359  * Adapter soft state Shutdown/Hibernate
3360  *
3361  * This function issues a DCMD internal command to Firmware to initiate shutdown
3362  * of the controller.
3363  */
3364 static void
3365 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3366 {
3367 	struct mrsas_mfi_cmd *cmd;
3368 	struct mrsas_dcmd_frame *dcmd;
3369 
3370 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3371 		return;
3372 
3373 	cmd = mrsas_get_mfi_cmd(sc);
3374 	if (!cmd) {
3375 		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
3376 		return;
3377 	}
3378 	if (sc->aen_cmd)
3379 		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3380 
3381 	if (sc->map_update_cmd)
3382 		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3383 
3384 	dcmd = &cmd->frame->dcmd;
3385 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3386 
3387 	dcmd->cmd = MFI_CMD_DCMD;
3388 	dcmd->cmd_status = 0x0;
3389 	dcmd->sge_count = 0;
3390 	dcmd->flags = MFI_FRAME_DIR_NONE;
3391 	dcmd->timeout = 0;
3392 	dcmd->pad_0 = 0;
3393 	dcmd->data_xfer_len = 0;
3394 	dcmd->opcode = opcode;
3395 
3396 	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3397 
3398 	mrsas_issue_blocked_cmd(sc, cmd);
3399 	mrsas_release_mfi_cmd(cmd);
3400 
3401 	return;
3402 }
3403 
3404 /*
3405  * mrsas_flush_cache:         Requests FW to flush all its caches input:
3406  * Adapter soft state
3407  *
3408  * This function is issues a DCMD internal command to Firmware to initiate
3409  * flushing of all caches.
3410  */
3411 static void
3412 mrsas_flush_cache(struct mrsas_softc *sc)
3413 {
3414 	struct mrsas_mfi_cmd *cmd;
3415 	struct mrsas_dcmd_frame *dcmd;
3416 
3417 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3418 		return;
3419 
3420 	cmd = mrsas_get_mfi_cmd(sc);
3421 	if (!cmd) {
3422 		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3423 		return;
3424 	}
3425 	dcmd = &cmd->frame->dcmd;
3426 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3427 
3428 	dcmd->cmd = MFI_CMD_DCMD;
3429 	dcmd->cmd_status = 0x0;
3430 	dcmd->sge_count = 0;
3431 	dcmd->flags = MFI_FRAME_DIR_NONE;
3432 	dcmd->timeout = 0;
3433 	dcmd->pad_0 = 0;
3434 	dcmd->data_xfer_len = 0;
3435 	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3436 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3437 
3438 	mrsas_issue_blocked_cmd(sc, cmd);
3439 	mrsas_release_mfi_cmd(cmd);
3440 
3441 	return;
3442 }
3443 
3444 /*
3445  * mrsas_get_map_info:        Load and validate RAID map input:
3446  * Adapter instance soft state
3447  *
3448  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3449  * and validate RAID map.  It returns 0 if successful, 1 other- wise.
3450  */
3451 static int
3452 mrsas_get_map_info(struct mrsas_softc *sc)
3453 {
3454 	uint8_t retcode = 0;
3455 
3456 	sc->fast_path_io = 0;
3457 	if (!mrsas_get_ld_map_info(sc)) {
3458 		retcode = MR_ValidateMapInfo(sc);
3459 		if (retcode == 0) {
3460 			sc->fast_path_io = 1;
3461 			return 0;
3462 		}
3463 	}
3464 	return 1;
3465 }
3466 
3467 /*
3468  * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
3469  * Adapter instance soft state
3470  *
3471  * Issues an internal command (DCMD) to get the FW's controller PD list
3472  * structure.
3473  */
3474 static int
3475 mrsas_get_ld_map_info(struct mrsas_softc *sc)
3476 {
3477 	int retcode = 0;
3478 	struct mrsas_mfi_cmd *cmd;
3479 	struct mrsas_dcmd_frame *dcmd;
3480 	void *map;
3481 	bus_addr_t map_phys_addr = 0;
3482 
3483 	cmd = mrsas_get_mfi_cmd(sc);
3484 	if (!cmd) {
3485 		device_printf(sc->mrsas_dev,
3486 		    "Cannot alloc for ld map info cmd.\n");
3487 		return 1;
3488 	}
3489 	dcmd = &cmd->frame->dcmd;
3490 
3491 	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3492 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3493 	if (!map) {
3494 		device_printf(sc->mrsas_dev,
3495 		    "Failed to alloc mem for ld map info.\n");
3496 		mrsas_release_mfi_cmd(cmd);
3497 		return (ENOMEM);
3498 	}
3499 	memset(map, 0, sizeof(sc->max_map_sz));
3500 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3501 
3502 	dcmd->cmd = MFI_CMD_DCMD;
3503 	dcmd->cmd_status = 0xFF;
3504 	dcmd->sge_count = 1;
3505 	dcmd->flags = MFI_FRAME_DIR_READ;
3506 	dcmd->timeout = 0;
3507 	dcmd->pad_0 = 0;
3508 	dcmd->data_xfer_len = sc->current_map_sz;
3509 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3510 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3511 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
3512 
3513 	if (!mrsas_issue_polled(sc, cmd))
3514 		retcode = 0;
3515 	else {
3516 		device_printf(sc->mrsas_dev,
3517 		    "Fail to send get LD map info cmd.\n");
3518 		retcode = 1;
3519 	}
3520 	mrsas_release_mfi_cmd(cmd);
3521 
3522 	return (retcode);
3523 }
3524 
3525 /*
3526  * mrsas_sync_map_info:        Get FW's ld_map structure input:
3527  * Adapter instance soft state
3528  *
3529  * Issues an internal command (DCMD) to get the FW's controller PD list
3530  * structure.
3531  */
3532 static int
3533 mrsas_sync_map_info(struct mrsas_softc *sc)
3534 {
3535 	int retcode = 0, i;
3536 	struct mrsas_mfi_cmd *cmd;
3537 	struct mrsas_dcmd_frame *dcmd;
3538 	uint32_t size_sync_info, num_lds;
3539 	MR_LD_TARGET_SYNC *target_map = NULL;
3540 	MR_DRV_RAID_MAP_ALL *map;
3541 	MR_LD_RAID *raid;
3542 	MR_LD_TARGET_SYNC *ld_sync;
3543 	bus_addr_t map_phys_addr = 0;
3544 
3545 	cmd = mrsas_get_mfi_cmd(sc);
3546 	if (!cmd) {
3547 		device_printf(sc->mrsas_dev,
3548 		    "Cannot alloc for sync map info cmd\n");
3549 		return 1;
3550 	}
3551 	map = sc->ld_drv_map[sc->map_id & 1];
3552 	num_lds = map->raidMap.ldCount;
3553 
3554 	dcmd = &cmd->frame->dcmd;
3555 	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3556 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3557 
3558 	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
3559 	memset(target_map, 0, sc->max_map_sz);
3560 
3561 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3562 
3563 	ld_sync = (MR_LD_TARGET_SYNC *) target_map;
3564 
3565 	for (i = 0; i < num_lds; i++, ld_sync++) {
3566 		raid = MR_LdRaidGet(i, map);
3567 		ld_sync->targetId = MR_GetLDTgtId(i, map);
3568 		ld_sync->seqNum = raid->seqNum;
3569 	}
3570 
3571 	dcmd->cmd = MFI_CMD_DCMD;
3572 	dcmd->cmd_status = 0xFF;
3573 	dcmd->sge_count = 1;
3574 	dcmd->flags = MFI_FRAME_DIR_WRITE;
3575 	dcmd->timeout = 0;
3576 	dcmd->pad_0 = 0;
3577 	dcmd->data_xfer_len = sc->current_map_sz;
3578 	dcmd->mbox.b[0] = num_lds;
3579 	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3580 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3581 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3582 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
3583 
3584 	sc->map_update_cmd = cmd;
3585 	if (mrsas_issue_dcmd(sc, cmd)) {
3586 		device_printf(sc->mrsas_dev,
3587 		    "Fail to send sync map info command.\n");
3588 		return (1);
3589 	}
3590 	return (retcode);
3591 }
3592 
3593 /*
3594  * mrsas_get_pd_list:           Returns FW's PD list structure input:
3595  * Adapter soft state
3596  *
3597  * Issues an internal command (DCMD) to get the FW's controller PD list
3598  * structure.  This information is mainly used to find out about system
3599  * supported by Firmware.
3600  */
3601 static int
3602 mrsas_get_pd_list(struct mrsas_softc *sc)
3603 {
3604 	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
3605 	struct mrsas_mfi_cmd *cmd;
3606 	struct mrsas_dcmd_frame *dcmd;
3607 	struct MR_PD_LIST *pd_list_mem;
3608 	struct MR_PD_ADDRESS *pd_addr;
3609 	bus_addr_t pd_list_phys_addr = 0;
3610 	struct mrsas_tmp_dcmd *tcmd;
3611 
3612 	cmd = mrsas_get_mfi_cmd(sc);
3613 	if (!cmd) {
3614 		device_printf(sc->mrsas_dev,
3615 		    "Cannot alloc for get PD list cmd\n");
3616 		return 1;
3617 	}
3618 	dcmd = &cmd->frame->dcmd;
3619 
3620 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3621 	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3622 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3623 		device_printf(sc->mrsas_dev,
3624 		    "Cannot alloc dmamap for get PD list cmd\n");
3625 		mrsas_release_mfi_cmd(cmd);
3626 		return (ENOMEM);
3627 	} else {
3628 		pd_list_mem = tcmd->tmp_dcmd_mem;
3629 		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3630 	}
3631 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3632 
3633 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3634 	dcmd->mbox.b[1] = 0;
3635 	dcmd->cmd = MFI_CMD_DCMD;
3636 	dcmd->cmd_status = 0xFF;
3637 	dcmd->sge_count = 1;
3638 	dcmd->flags = MFI_FRAME_DIR_READ;
3639 	dcmd->timeout = 0;
3640 	dcmd->pad_0 = 0;
3641 	dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3642 	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3643 	dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3644 	dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3645 
3646 	if (!mrsas_issue_polled(sc, cmd))
3647 		retcode = 0;
3648 	else
3649 		retcode = 1;
3650 
3651 	/* Get the instance PD list */
3652 	pd_count = MRSAS_MAX_PD;
3653 	pd_addr = pd_list_mem->addr;
3654 	if (retcode == 0 && pd_list_mem->count < pd_count) {
3655 		memset(sc->local_pd_list, 0,
3656 		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3657 		for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3658 			sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3659 			sc->local_pd_list[pd_addr->deviceId].driveType =
3660 			    pd_addr->scsiDevType;
3661 			sc->local_pd_list[pd_addr->deviceId].driveState =
3662 			    MR_PD_STATE_SYSTEM;
3663 			pd_addr++;
3664 		}
3665 	}
3666 	/*
3667 	 * Use mutext/spinlock if pd_list component size increase more than
3668 	 * 32 bit.
3669 	 */
3670 	memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3671 	mrsas_free_tmp_dcmd(tcmd);
3672 	mrsas_release_mfi_cmd(cmd);
3673 	free(tcmd, M_MRSAS);
3674 	return (retcode);
3675 }
3676 
3677 /*
3678  * mrsas_get_ld_list:           Returns FW's LD list structure input:
3679  * Adapter soft state
3680  *
3681  * Issues an internal command (DCMD) to get the FW's controller PD list
3682  * structure.  This information is mainly used to find out about supported by
3683  * the FW.
3684  */
3685 static int
3686 mrsas_get_ld_list(struct mrsas_softc *sc)
3687 {
3688 	int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3689 	struct mrsas_mfi_cmd *cmd;
3690 	struct mrsas_dcmd_frame *dcmd;
3691 	struct MR_LD_LIST *ld_list_mem;
3692 	bus_addr_t ld_list_phys_addr = 0;
3693 	struct mrsas_tmp_dcmd *tcmd;
3694 
3695 	cmd = mrsas_get_mfi_cmd(sc);
3696 	if (!cmd) {
3697 		device_printf(sc->mrsas_dev,
3698 		    "Cannot alloc for get LD list cmd\n");
3699 		return 1;
3700 	}
3701 	dcmd = &cmd->frame->dcmd;
3702 
3703 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3704 	ld_list_size = sizeof(struct MR_LD_LIST);
3705 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3706 		device_printf(sc->mrsas_dev,
3707 		    "Cannot alloc dmamap for get LD list cmd\n");
3708 		mrsas_release_mfi_cmd(cmd);
3709 		return (ENOMEM);
3710 	} else {
3711 		ld_list_mem = tcmd->tmp_dcmd_mem;
3712 		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3713 	}
3714 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3715 
3716 	if (sc->max256vdSupport)
3717 		dcmd->mbox.b[0] = 1;
3718 
3719 	dcmd->cmd = MFI_CMD_DCMD;
3720 	dcmd->cmd_status = 0xFF;
3721 	dcmd->sge_count = 1;
3722 	dcmd->flags = MFI_FRAME_DIR_READ;
3723 	dcmd->timeout = 0;
3724 	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3725 	dcmd->opcode = MR_DCMD_LD_GET_LIST;
3726 	dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3727 	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3728 	dcmd->pad_0 = 0;
3729 
3730 	if (!mrsas_issue_polled(sc, cmd))
3731 		retcode = 0;
3732 	else
3733 		retcode = 1;
3734 
3735 #if VD_EXT_DEBUG
3736 	printf("Number of LDs %d\n", ld_list_mem->ldCount);
3737 #endif
3738 
3739 	/* Get the instance LD list */
3740 	if ((retcode == 0) &&
3741 	    (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) {
3742 		sc->CurLdCount = ld_list_mem->ldCount;
3743 		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
3744 		for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3745 			if (ld_list_mem->ldList[ld_index].state != 0) {
3746 				ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3747 				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3748 			}
3749 		}
3750 	}
3751 	mrsas_free_tmp_dcmd(tcmd);
3752 	mrsas_release_mfi_cmd(cmd);
3753 	free(tcmd, M_MRSAS);
3754 	return (retcode);
3755 }
3756 
3757 /*
3758  * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
3759  * Adapter soft state Temp command Size of alloction
3760  *
3761  * Allocates DMAable memory for a temporary internal command. The allocated
3762  * memory is initialized to all zeros upon successful loading of the dma
3763  * mapped memory.
3764  */
3765 int
3766 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
3767     struct mrsas_tmp_dcmd *tcmd, int size)
3768 {
3769 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3770 	    1, 0,
3771 	    BUS_SPACE_MAXADDR_32BIT,
3772 	    BUS_SPACE_MAXADDR,
3773 	    NULL, NULL,
3774 	    size,
3775 	    1,
3776 	    size,
3777 	    BUS_DMA_ALLOCNOW,
3778 	    NULL, NULL,
3779 	    &tcmd->tmp_dcmd_tag)) {
3780 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3781 		return (ENOMEM);
3782 	}
3783 	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3784 	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3785 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
3786 		return (ENOMEM);
3787 	}
3788 	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3789 	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3790 	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3791 		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
3792 		return (ENOMEM);
3793 	}
3794 	memset(tcmd->tmp_dcmd_mem, 0, size);
3795 	return (0);
3796 }
3797 
3798 /*
3799  * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
3800  * temporary dcmd pointer
3801  *
3802  * Deallocates memory of the temporary command for use in the construction of
3803  * the internal DCMD.
3804  */
3805 void
3806 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
3807 {
3808 	if (tmp->tmp_dcmd_phys_addr)
3809 		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3810 	if (tmp->tmp_dcmd_mem != NULL)
3811 		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3812 	if (tmp->tmp_dcmd_tag != NULL)
3813 		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3814 }
3815 
3816 /*
3817  * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
3818  * Adapter soft state Previously issued cmd to be aborted
3819  *
3820  * This function is used to abort previously issued commands, such as AEN and
3821  * RAID map sync map commands.  The abort command is sent as a DCMD internal
3822  * command and subsequently the driver will wait for a return status.  The
3823  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3824  */
3825 static int
3826 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3827     struct mrsas_mfi_cmd *cmd_to_abort)
3828 {
3829 	struct mrsas_mfi_cmd *cmd;
3830 	struct mrsas_abort_frame *abort_fr;
3831 	u_int8_t retcode = 0;
3832 	unsigned long total_time = 0;
3833 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3834 
3835 	cmd = mrsas_get_mfi_cmd(sc);
3836 	if (!cmd) {
3837 		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3838 		return (1);
3839 	}
3840 	abort_fr = &cmd->frame->abort;
3841 
3842 	/* Prepare and issue the abort frame */
3843 	abort_fr->cmd = MFI_CMD_ABORT;
3844 	abort_fr->cmd_status = 0xFF;
3845 	abort_fr->flags = 0;
3846 	abort_fr->abort_context = cmd_to_abort->index;
3847 	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3848 	abort_fr->abort_mfi_phys_addr_hi = 0;
3849 
3850 	cmd->sync_cmd = 1;
3851 	cmd->cmd_status = 0xFF;
3852 
3853 	if (mrsas_issue_dcmd(sc, cmd)) {
3854 		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3855 		return (1);
3856 	}
3857 	/* Wait for this cmd to complete */
3858 	sc->chan = (void *)&cmd;
3859 	while (1) {
3860 		if (cmd->cmd_status == 0xFF) {
3861 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3862 		} else
3863 			break;
3864 		total_time++;
3865 		if (total_time >= max_wait) {
3866 			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3867 			retcode = 1;
3868 			break;
3869 		}
3870 	}
3871 
3872 	cmd->sync_cmd = 0;
3873 	mrsas_release_mfi_cmd(cmd);
3874 	return (retcode);
3875 }
3876 
3877 /*
3878  * mrsas_complete_abort:      Completes aborting a command input:
3879  * Adapter soft state Cmd that was issued to abort another cmd
3880  *
3881  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
3882  * change after sending the command.  This function is called from
3883  * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
3884  */
3885 void
3886 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3887 {
3888 	if (cmd->sync_cmd) {
3889 		cmd->sync_cmd = 0;
3890 		cmd->cmd_status = 0;
3891 		sc->chan = (void *)&cmd;
3892 		wakeup_one((void *)&sc->chan);
3893 	}
3894 	return;
3895 }
3896 
3897 /*
3898  * mrsas_aen_handler:	AEN processing callback function from thread context
3899  * input:				Adapter soft state
3900  *
3901  * Asynchronous event handler
3902  */
3903 void
3904 mrsas_aen_handler(struct mrsas_softc *sc)
3905 {
3906 	union mrsas_evt_class_locale class_locale;
3907 	int doscan = 0;
3908 	u_int32_t seq_num;
3909 	int error;
3910 
3911 	if (!sc) {
3912 		device_printf(sc->mrsas_dev, "invalid instance!\n");
3913 		return;
3914 	}
3915 	if (sc->evt_detail_mem) {
3916 		switch (sc->evt_detail_mem->code) {
3917 		case MR_EVT_PD_INSERTED:
3918 			mrsas_get_pd_list(sc);
3919 			mrsas_bus_scan_sim(sc, sc->sim_1);
3920 			doscan = 0;
3921 			break;
3922 		case MR_EVT_PD_REMOVED:
3923 			mrsas_get_pd_list(sc);
3924 			mrsas_bus_scan_sim(sc, sc->sim_1);
3925 			doscan = 0;
3926 			break;
3927 		case MR_EVT_LD_OFFLINE:
3928 		case MR_EVT_CFG_CLEARED:
3929 		case MR_EVT_LD_DELETED:
3930 			mrsas_bus_scan_sim(sc, sc->sim_0);
3931 			doscan = 0;
3932 			break;
3933 		case MR_EVT_LD_CREATED:
3934 			mrsas_get_ld_list(sc);
3935 			mrsas_bus_scan_sim(sc, sc->sim_0);
3936 			doscan = 0;
3937 			break;
3938 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
3939 		case MR_EVT_FOREIGN_CFG_IMPORTED:
3940 		case MR_EVT_LD_STATE_CHANGE:
3941 			doscan = 1;
3942 			break;
3943 		default:
3944 			doscan = 0;
3945 			break;
3946 		}
3947 	} else {
3948 		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
3949 		return;
3950 	}
3951 	if (doscan) {
3952 		mrsas_get_pd_list(sc);
3953 		mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
3954 		mrsas_bus_scan_sim(sc, sc->sim_1);
3955 		mrsas_get_ld_list(sc);
3956 		mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
3957 		mrsas_bus_scan_sim(sc, sc->sim_0);
3958 	}
3959 	seq_num = sc->evt_detail_mem->seq_num + 1;
3960 
3961 	/* Register AEN with FW for latest sequence number plus 1 */
3962 	class_locale.members.reserved = 0;
3963 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3964 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3965 
3966 	if (sc->aen_cmd != NULL)
3967 		return;
3968 
3969 	mtx_lock(&sc->aen_lock);
3970 	error = mrsas_register_aen(sc, seq_num,
3971 	    class_locale.word);
3972 	mtx_unlock(&sc->aen_lock);
3973 
3974 	if (error)
3975 		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
3976 
3977 }
3978 
3979 
3980 /*
3981  * mrsas_complete_aen:	Completes AEN command
3982  * input:				Adapter soft state
3983  * 						Cmd that was issued to abort another cmd
3984  *
3985  * This function will be called from ISR and will continue event processing from
3986  * thread context by enqueuing task in ev_tq (callback function
3987  * "mrsas_aen_handler").
3988  */
3989 void
3990 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3991 {
3992 	/*
3993 	 * Don't signal app if it is just an aborted previously registered
3994 	 * aen
3995 	 */
3996 	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
3997 		sc->mrsas_aen_triggered = 1;
3998 		if (sc->mrsas_poll_waiting) {
3999 			sc->mrsas_poll_waiting = 0;
4000 			selwakeup(&sc->mrsas_select);
4001 		}
4002 	} else
4003 		cmd->abort_aen = 0;
4004 
4005 	sc->aen_cmd = NULL;
4006 	mrsas_release_mfi_cmd(cmd);
4007 
4008 	if (!sc->remove_in_progress)
4009 		taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
4010 
4011 	return;
4012 }
4013 
/* Newbus method table wiring the driver entry points into the framework. */
static device_method_t mrsas_methods[] = {
	/* Device interface: probe/attach/detach and power management. */
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	/* Bus interface: delegate child handling to the generic helpers. */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}	/* terminator */
};
4024 
/* Driver descriptor: name, method table, and per-instance softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};
4030 
static devclass_t mrsas_devclass;

/* Register the driver on the PCI bus and declare the CAM dependency. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
4035