xref: /freebsd/sys/dev/mrsas/mrsas.c (revision eb69d1f144a6fcc765d1b9d44a5ae8082353e70b)
1 /*
2  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4  * Support: freebsdraid@avagotech.com
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer. 2. Redistributions
12  * in binary form must reproduce the above copyright notice, this list of
13  * conditions and the following disclaimer in the documentation and/or other
14  * materials provided with the distribution. 3. Neither the name of the
15  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16  * promote products derived from this software without specific prior written
17  * permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing
33  * official policies,either expressed or implied, of the FreeBSD Project.
34  *
35  * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37  *
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
45 
46 #include <cam/cam.h>
47 #include <cam/cam_ccb.h>
48 
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/smp.h>
55 
56 
57 /*
58  * Function prototypes
59  */
60 static d_open_t mrsas_open;
61 static d_close_t mrsas_close;
62 static d_read_t mrsas_read;
63 static d_write_t mrsas_write;
64 static d_ioctl_t mrsas_ioctl;
65 static d_poll_t mrsas_poll;
66 
67 static void mrsas_ich_startup(void *arg);
68 static struct mrsas_mgmt_info mrsas_mgmt_info;
69 static struct mrsas_ident *mrsas_find_ident(device_t);
70 static int mrsas_setup_msix(struct mrsas_softc *sc);
71 static int mrsas_allocate_msix(struct mrsas_softc *sc);
72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73 static void mrsas_flush_cache(struct mrsas_softc *sc);
74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75 static void mrsas_ocr_thread(void *arg);
76 static int mrsas_get_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78 static int mrsas_sync_map_info(struct mrsas_softc *sc);
79 static int mrsas_get_pd_list(struct mrsas_softc *sc);
80 static int mrsas_get_ld_list(struct mrsas_softc *sc);
81 static int mrsas_setup_irq(struct mrsas_softc *sc);
82 static int mrsas_alloc_mem(struct mrsas_softc *sc);
83 static int mrsas_init_fw(struct mrsas_softc *sc);
84 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87 static int mrsas_clear_intr(struct mrsas_softc *sc);
88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
90 static int
91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
92     struct mrsas_mfi_cmd *cmd_to_abort);
93 static struct mrsas_softc *
94 mrsas_get_softc_instance(struct cdev *dev,
95     u_long cmd, caddr_t arg);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
97 u_int8_t
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99     struct mrsas_mfi_cmd *mfi_cmd);
100 void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int	mrsas_init_adapter(struct mrsas_softc *sc);
103 int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int	mrsas_ioc_init(struct mrsas_softc *sc);
107 int	mrsas_bus_scan(struct mrsas_softc *sc);
108 int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
113 int mrsas_reset_targets(struct mrsas_softc *sc);
114 int
115 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
116     struct mrsas_mfi_cmd *cmd);
117 int
118 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
119     int size);
120 void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
121 void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
124 void	mrsas_disable_intr(struct mrsas_softc *sc);
125 void	mrsas_enable_intr(struct mrsas_softc *sc);
126 void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
127 void	mrsas_free_mem(struct mrsas_softc *sc);
128 void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
129 void	mrsas_isr(void *arg);
130 void	mrsas_teardown_intr(struct mrsas_softc *sc);
131 void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
132 void	mrsas_kill_hba(struct mrsas_softc *sc);
133 void	mrsas_aen_handler(struct mrsas_softc *sc);
134 void
135 mrsas_write_reg(struct mrsas_softc *sc, int offset,
136     u_int32_t value);
137 void
138 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
139     u_int32_t req_desc_hi);
140 void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
141 void
142 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
143     struct mrsas_mfi_cmd *cmd, u_int8_t status);
144 void
145 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
146     u_int8_t extStatus);
147 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
148 
149 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
150         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
151 
152 extern int mrsas_cam_attach(struct mrsas_softc *sc);
153 extern void mrsas_cam_detach(struct mrsas_softc *sc);
154 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
155 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
156 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
157 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
158 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
159 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
160 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
163 extern void mrsas_xpt_release(struct mrsas_softc *sc);
164 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
165 mrsas_get_request_desc(struct mrsas_softc *sc,
166     u_int16_t index);
167 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
168 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
169 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
170 
/* Root of the hw.mrsas sysctl tree; per-controller nodes hang below it. */
SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
172 
173 /*
174  * PCI device struct and table
175  *
176  */
/*
 * PCI identification record: vendor/device/subvendor/subdevice IDs plus a
 * human-readable controller description.  A sub-ID of 0xffff acts as a
 * wildcard during matching (see mrsas_find_ident()).
 */
typedef struct mrsas_ident {
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;
	const char *desc;
}	MRSAS_CTLR_ID;

/* Controllers supported by this driver; terminated by an all-zero entry. */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0, 0, 0, 0, NULL}
};
195 
196 /*
197  * Character device entry points
198  *
199  */
/* Character-device switch; the ioctl entry carries the management traffic. */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
210 
/* malloc(9) type used for all driver-private allocations. */
MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
212 
213 /*
214  * In the cdevsw routines, we find our softc by using the si_drv1 member of
215  * struct cdev.  We set this variable to point to our softc in our attach
216  * routine when we create the /dev entry.
217  */
/*
 * mrsas_open:	character-device open entry point.
 *
 * The softc is hung off dev->si_drv1 at attach time, but open needs no
 * per-open state, so simply allow the open.  (The original fetched the
 * softc into an unused local; dropped to silence set-but-unused warnings.)
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}
226 
/*
 * mrsas_close:	character-device close entry point.
 *
 * No per-open state to tear down; always succeeds.  (Unused softc local
 * removed.)
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}
235 
/*
 * mrsas_read:	character-device read entry point.
 *
 * The device transfers no data through read(2); return success without
 * consuming the uio.  (Unused softc local removed.)
 */
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
/*
 * mrsas_write:	character-device write entry point.
 *
 * The device transfers no data through write(2); return success without
 * consuming the uio.  (Unused softc local removed.)
 */
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
252 
253 /*
254  * Register Read/Write Functions
255  *
256  */
257 void
258 mrsas_write_reg(struct mrsas_softc *sc, int offset,
259     u_int32_t value)
260 {
261 	bus_space_tag_t bus_tag = sc->bus_tag;
262 	bus_space_handle_t bus_handle = sc->bus_handle;
263 
264 	bus_space_write_4(bus_tag, bus_handle, offset, value);
265 }
266 
267 u_int32_t
268 mrsas_read_reg(struct mrsas_softc *sc, int offset)
269 {
270 	bus_space_tag_t bus_tag = sc->bus_tag;
271 	bus_space_handle_t bus_handle = sc->bus_handle;
272 
273 	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
274 }
275 
276 
277 /*
278  * Interrupt Disable/Enable/Clear Functions
279  *
280  */
281 void
282 mrsas_disable_intr(struct mrsas_softc *sc)
283 {
284 	u_int32_t mask = 0xFFFFFFFF;
285 	u_int32_t status;
286 
287 	sc->mask_interrupts = 1;
288 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
289 	/* Dummy read to force pci flush */
290 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
291 }
292 
293 void
294 mrsas_enable_intr(struct mrsas_softc *sc)
295 {
296 	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
297 	u_int32_t status;
298 
299 	sc->mask_interrupts = 0;
300 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
301 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
302 
303 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
304 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
305 }
306 
307 static int
308 mrsas_clear_intr(struct mrsas_softc *sc)
309 {
310 	u_int32_t status;
311 
312 	/* Read received interrupt */
313 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
314 
315 	/* Not our interrupt, so just return */
316 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
317 		return (0);
318 
319 	/* We got a reply interrupt */
320 	return (1);
321 }
322 
323 /*
324  * PCI Support Functions
325  *
326  */
327 static struct mrsas_ident *
328 mrsas_find_ident(device_t dev)
329 {
330 	struct mrsas_ident *pci_device;
331 
332 	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
333 		if ((pci_device->vendor == pci_get_vendor(dev)) &&
334 		    (pci_device->device == pci_get_device(dev)) &&
335 		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
336 		    (pci_device->subvendor == 0xffff)) &&
337 		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
338 		    (pci_device->subdevice == 0xffff)))
339 			return (pci_device);
340 	}
341 	return (NULL);
342 }
343 
344 static int
345 mrsas_probe(device_t dev)
346 {
347 	static u_int8_t first_ctrl = 1;
348 	struct mrsas_ident *id;
349 
350 	if ((id = mrsas_find_ident(dev)) != NULL) {
351 		if (first_ctrl) {
352 			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
353 			    MRSAS_VERSION);
354 			first_ctrl = 0;
355 		}
356 		device_set_desc(dev, id->desc);
357 		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
358 		return (-30);
359 	}
360 	return (ENXIO);
361 }
362 
363 /*
364  * mrsas_setup_sysctl:	setup sysctl values for mrsas
365  * input:				Adapter instance soft state
366  *
367  * Setup sysctl entries for mrsas driver.
368  */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the newbus-provided per-device sysctl context/tree ... */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/*
	 * ... otherwise fall back to a driver-private context rooted at
	 * hw.mrsas.<unit>.  The order matters: the context must be
	 * initialized before the node is added under it.
	 */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}

	/* Per-controller knobs; RW entries may be changed at runtime. */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");

}
441 
442 /*
443  * mrsas_get_tunables:	get tunable parameters.
444  * input:				Adapter instance soft state
445  *
446  * Get tunable parameters. This will help to debug driver at boot time.
447  */
448 static void
449 mrsas_get_tunables(struct mrsas_softc *sc)
450 {
451 	char tmpstr[80];
452 
453 	/* XXX default to some debugging for now */
454 	sc->mrsas_debug = MRSAS_FAULT;
455 	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
456 	sc->mrsas_fw_fault_check_delay = 1;
457 	sc->reset_count = 0;
458 	sc->reset_in_progress = 0;
459 	sc->block_sync_cache = 0;
460 
461 	/*
462 	 * Grab the global variables.
463 	 */
464 	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
465 
466 	/*
467 	 * Grab the global variables.
468 	 */
469 	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
470 
471 	/* Grab the unit-instance variables */
472 	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
473 	    device_get_unit(sc->mrsas_dev));
474 	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
475 }
476 
477 /*
478  * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
479  * Used to get sequence number at driver load time.
480  * input:		Adapter soft state
481  *
482  * Allocates DMAable memory for the event log info internal command.
483  */
484 int
485 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
486 {
487 	int el_info_size;
488 
489 	/* Allocate get event log info command */
490 	el_info_size = sizeof(struct mrsas_evt_log_info);
491 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
492 	    1, 0,
493 	    BUS_SPACE_MAXADDR_32BIT,
494 	    BUS_SPACE_MAXADDR,
495 	    NULL, NULL,
496 	    el_info_size,
497 	    1,
498 	    el_info_size,
499 	    BUS_DMA_ALLOCNOW,
500 	    NULL, NULL,
501 	    &sc->el_info_tag)) {
502 		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
503 		return (ENOMEM);
504 	}
505 	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
506 	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
507 		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
508 		return (ENOMEM);
509 	}
510 	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
511 	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
512 	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
513 		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
514 		return (ENOMEM);
515 	}
516 	memset(sc->el_info_mem, 0, el_info_size);
517 	return (0);
518 }
519 
520 /*
521  * mrsas_free_evt_info_cmd:	Free memory for Event log info command
522  * input:					Adapter soft state
523  *
524  * Deallocates memory for the event log info internal command.
525  */
526 void
527 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
528 {
529 	if (sc->el_info_phys_addr)
530 		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
531 	if (sc->el_info_mem != NULL)
532 		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
533 	if (sc->el_info_tag != NULL)
534 		bus_dma_tag_destroy(sc->el_info_tag);
535 }
536 
537 /*
538  *  mrsas_get_seq_num:	Get latest event sequence number
539  *  @sc:				Adapter soft state
540  *  @eli:				Firmware event log sequence number information.
541  *
542  * Firmware maintains a log of all events in a non-volatile area.
543  * Driver get the sequence number using DCMD
544  * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
545  */
546 
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	/* do_ocr stays set unless the DCMD completes; see dcmd_timeout. */
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer that the FW fills with the event log info. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the MR_DCMD_CTRL_EVENT_GET_INFO frame (FW -> host read). */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/*
	 * On timeout the FW still owns the frame: schedule an OCR instead
	 * of releasing the cmd (releasing it here could let the FW write
	 * into a recycled frame).  On success the cmd is released normally.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
600 
601 
602 /*
603  *  mrsas_register_aen:		Register for asynchronous event notification
604  *  @sc:			Adapter soft state
605  *  @seq_num:			Starting sequence number
606  *  @class_locale:		Class of the event
607  *
608  *  This function subscribes for events beyond the @seq_num
609  *  and type @class_locale.
610  *
611  */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		/* mbox.w[1] holds the class/locale the pending AEN used. */
		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset: union of locales, lower class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			/* Abort the pending AEN before re-registering. */
			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	/* Scrub the buffer the FW will fill with the next event detail. */
	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;	/* first sequence number of interest */
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;	/* class/locale filter */
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/* Another registration raced in while we were building ours; drop it. */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
719 
720 /*
721  * mrsas_start_aen:	Subscribes to AEN during driver load time
722  * @instance:		Adapter soft state
723  */
724 static int
725 mrsas_start_aen(struct mrsas_softc *sc)
726 {
727 	struct mrsas_evt_log_info eli;
728 	union mrsas_evt_class_locale class_locale;
729 
730 
731 	/* Get the latest sequence number from FW */
732 
733 	memset(&eli, 0, sizeof(eli));
734 
735 	if (mrsas_get_seq_num(sc, &eli))
736 		return -1;
737 
738 	/* Register AEN with FW for latest sequence number plus 1 */
739 	class_locale.members.reserved = 0;
740 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
741 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
742 
743 	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
744 	    class_locale.word);
745 
746 }
747 
748 /*
749  * mrsas_setup_msix:	Allocate MSI-x vectors
750  * @sc:					adapter soft state
751  */
752 static int
753 mrsas_setup_msix(struct mrsas_softc *sc)
754 {
755 	int i;
756 
757 	for (i = 0; i < sc->msix_vectors; i++) {
758 		sc->irq_context[i].sc = sc;
759 		sc->irq_context[i].MSIxIndex = i;
760 		sc->irq_id[i] = i + 1;
761 		sc->mrsas_irq[i] = bus_alloc_resource_any
762 		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
763 		    ,RF_ACTIVE);
764 		if (sc->mrsas_irq[i] == NULL) {
765 			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
766 			goto irq_alloc_failed;
767 		}
768 		if (bus_setup_intr(sc->mrsas_dev,
769 		    sc->mrsas_irq[i],
770 		    INTR_MPSAFE | INTR_TYPE_CAM,
771 		    NULL, mrsas_isr, &sc->irq_context[i],
772 		    &sc->intr_handle[i])) {
773 			device_printf(sc->mrsas_dev,
774 			    "Cannot set up MSI-x interrupt handler\n");
775 			goto irq_alloc_failed;
776 		}
777 	}
778 	return SUCCESS;
779 
780 irq_alloc_failed:
781 	mrsas_teardown_intr(sc);
782 	return (FAIL);
783 }
784 
785 /*
786  * mrsas_allocate_msix:		Setup MSI-x vectors
787  * @sc:						adapter soft state
788  */
789 static int
790 mrsas_allocate_msix(struct mrsas_softc *sc)
791 {
792 	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
793 		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
794 		    " of vectors\n", sc->msix_vectors);
795 	} else {
796 		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
797 		goto irq_alloc_failed;
798 	}
799 	return SUCCESS;
800 
801 irq_alloc_failed:
802 	mrsas_teardown_intr(sc);
803 	return (FAIL);
804 }
805 
806 /*
807  * mrsas_attach:	PCI entry point
808  * input:			pointer to device struct
809  *
810  * Performs setup of PCI and registers, initializes mutexes and linked lists,
811  * registers interrupts and CAM, and initializes   the adapter/controller to
812  * its proper state.
813  */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, bar, error;

	/* Start from a zeroed softc; the error unwinding below relies on it. */
	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* These device IDs identify gen-3 controllers. */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY) ||
	    (sc->device_id == MRSAS_INTRUDER) ||
	    (sc->device_id == MRSAS_INTRUDER_24) ||
	    (sc->device_id == MRSAS_CUTLASS_52) ||
	    (sc->device_id == MRSAS_CUTLASS_53)) {
		sc->mrsas_gen3_ctrl = 1;
    }

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/*
	 * NOTE(review): this tests PCIM_CMD_PORTEN (I/O space enable) and
	 * refuses to attach if it is clear — confirm whether the memory
	 * space enable bit was intended instead, since the register window
	 * below is a SYS_RES_MEMORY resource.
	 */
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* BAR1 is read here but the value is otherwise unused. */
	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Intialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	/* ioctl_lock is a spin mutex, unlike the others. */
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);

	/* Intialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Spawn the online-controller-reset watcher thread. */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Error unwinding: each label releases what was set up before it. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
942 
943 /*
944  * Interrupt config hook
945  */
946 static void
947 mrsas_ich_startup(void *arg)
948 {
949 	struct mrsas_softc *sc = (struct mrsas_softc *)arg;
950 
951 	/*
952 	 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
953 	 */
954 	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
955 	    IOCTL_SEMA_DESCRIPTION);
956 
957 	/* Create a /dev entry for mrsas controller. */
958 	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
959 	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
960 	    device_get_unit(sc->mrsas_dev));
961 
962 	if (device_get_unit(sc->mrsas_dev) == 0) {
963 		make_dev_alias_p(MAKEDEV_CHECKNAME,
964 		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
965 		    "megaraid_sas_ioctl_node");
966 	}
967 	if (sc->mrsas_cdev)
968 		sc->mrsas_cdev->si_drv1 = sc;
969 
970 	/*
971 	 * Add this controller to mrsas_mgmt_info structure so that it can be
972 	 * exported to management applications
973 	 */
974 	if (device_get_unit(sc->mrsas_dev) == 0)
975 		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
976 
977 	mrsas_mgmt_info.count++;
978 	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
979 	mrsas_mgmt_info.max_index++;
980 
981 	/* Enable Interrupts */
982 	mrsas_enable_intr(sc);
983 
984 	/* Initiate AEN (Asynchronous Event Notification) */
985 	if (mrsas_start_aen(sc)) {
986 		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
987 		    "Further events from the controller will not be communicated.\n"
988 		    "Either there is some problem in the controller"
989 		    "or the controller does not support AEN.\n"
990 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
991 	}
992 	if (sc->mrsas_ich.ich_arg != NULL) {
993 		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
994 		config_intrhook_disestablish(&sc->mrsas_ich);
995 		sc->mrsas_ich.ich_arg = NULL;
996 	}
997 }
998 
999 /*
1000  * mrsas_detach:	De-allocates and teardown resources
1001  * input:			pointer to device struct
1002  *
1003  * This function is the entry point for device disconnect and detach.
1004  * It performs memory de-allocations, shutdown of the controller and various
1005  * teardown and destroy resource functions.
1006  */
1007 static int
1008 mrsas_detach(device_t dev)
1009 {
1010 	struct mrsas_softc *sc;
1011 	int i = 0;
1012 
1013 	sc = device_get_softc(dev);
1014 	sc->remove_in_progress = 1;
1015 
1016 	/* Destroy the character device so no other IOCTL will be handled */
1017 	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
1018 		destroy_dev(sc->mrsas_linux_emulator_cdev);
1019 	destroy_dev(sc->mrsas_cdev);
1020 
1021 	/*
1022 	 * Take the instance off the instance array. Note that we will not
1023 	 * decrement the max_index. We let this array be sparse array
1024 	 */
1025 	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
1026 		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
1027 			mrsas_mgmt_info.count--;
1028 			mrsas_mgmt_info.sc_ptr[i] = NULL;
1029 			break;
1030 		}
1031 	}
1032 
1033 	if (sc->ocr_thread_active)
1034 		wakeup(&sc->ocr_chan);
1035 	while (sc->reset_in_progress) {
1036 		i++;
1037 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1038 			mrsas_dprint(sc, MRSAS_INFO,
1039 			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1040 		}
1041 		pause("mr_shutdown", hz);
1042 	}
1043 	i = 0;
1044 	while (sc->ocr_thread_active) {
1045 		i++;
1046 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1047 			mrsas_dprint(sc, MRSAS_INFO,
1048 			    "[%2d]waiting for "
1049 			    "mrsas_ocr thread to quit ocr %d\n", i,
1050 			    sc->ocr_thread_active);
1051 		}
1052 		pause("mr_shutdown", hz);
1053 	}
1054 	mrsas_flush_cache(sc);
1055 	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1056 	mrsas_disable_intr(sc);
1057 	mrsas_cam_detach(sc);
1058 	mrsas_teardown_intr(sc);
1059 	mrsas_free_mem(sc);
1060 	mtx_destroy(&sc->sim_lock);
1061 	mtx_destroy(&sc->aen_lock);
1062 	mtx_destroy(&sc->pci_lock);
1063 	mtx_destroy(&sc->io_lock);
1064 	mtx_destroy(&sc->ioctl_lock);
1065 	mtx_destroy(&sc->mpt_cmd_pool_lock);
1066 	mtx_destroy(&sc->mfi_cmd_pool_lock);
1067 	mtx_destroy(&sc->raidmap_lock);
1068 
1069 	/* Wait for all the semaphores to be released */
1070 	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
1071 		pause("mr_shutdown", hz);
1072 
1073 	/* Destroy the counting semaphore created for Ioctl */
1074 	sema_destroy(&sc->ioctl_count_sema);
1075 
1076 	if (sc->reg_res) {
1077 		bus_release_resource(sc->mrsas_dev,
1078 		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1079 	}
1080 	if (sc->sysctl_tree != NULL)
1081 		sysctl_ctx_free(&sc->sysctl_ctx);
1082 
1083 	return (0);
1084 }
1085 
1086 /*
1087  * mrsas_free_mem:		Frees allocated memory
1088  * input:				Adapter instance soft state
1089  *
1090  * This function is called from mrsas_detach() to free previously allocated
1091  * memory.
1092  */
1093 void
1094 mrsas_free_mem(struct mrsas_softc *sc)
1095 {
1096 	int i;
1097 	u_int32_t max_cmd;
1098 	struct mrsas_mfi_cmd *mfi_cmd;
1099 	struct mrsas_mpt_cmd *mpt_cmd;
1100 
1101 	/*
1102 	 * Free RAID map memory
1103 	 */
1104 	for (i = 0; i < 2; i++) {
1105 		if (sc->raidmap_phys_addr[i])
1106 			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1107 		if (sc->raidmap_mem[i] != NULL)
1108 			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1109 		if (sc->raidmap_tag[i] != NULL)
1110 			bus_dma_tag_destroy(sc->raidmap_tag[i]);
1111 
1112 		if (sc->ld_drv_map[i] != NULL)
1113 			free(sc->ld_drv_map[i], M_MRSAS);
1114 	}
1115 	for (i = 0; i < 2; i++) {
1116 		if (sc->jbodmap_phys_addr[i])
1117 			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1118 		if (sc->jbodmap_mem[i] != NULL)
1119 			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1120 		if (sc->jbodmap_tag[i] != NULL)
1121 			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1122 	}
1123 	/*
1124 	 * Free version buffer memory
1125 	 */
1126 	if (sc->verbuf_phys_addr)
1127 		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1128 	if (sc->verbuf_mem != NULL)
1129 		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1130 	if (sc->verbuf_tag != NULL)
1131 		bus_dma_tag_destroy(sc->verbuf_tag);
1132 
1133 
1134 	/*
1135 	 * Free sense buffer memory
1136 	 */
1137 	if (sc->sense_phys_addr)
1138 		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1139 	if (sc->sense_mem != NULL)
1140 		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1141 	if (sc->sense_tag != NULL)
1142 		bus_dma_tag_destroy(sc->sense_tag);
1143 
1144 	/*
1145 	 * Free chain frame memory
1146 	 */
1147 	if (sc->chain_frame_phys_addr)
1148 		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1149 	if (sc->chain_frame_mem != NULL)
1150 		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1151 	if (sc->chain_frame_tag != NULL)
1152 		bus_dma_tag_destroy(sc->chain_frame_tag);
1153 
1154 	/*
1155 	 * Free IO Request memory
1156 	 */
1157 	if (sc->io_request_phys_addr)
1158 		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1159 	if (sc->io_request_mem != NULL)
1160 		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1161 	if (sc->io_request_tag != NULL)
1162 		bus_dma_tag_destroy(sc->io_request_tag);
1163 
1164 	/*
1165 	 * Free Reply Descriptor memory
1166 	 */
1167 	if (sc->reply_desc_phys_addr)
1168 		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1169 	if (sc->reply_desc_mem != NULL)
1170 		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1171 	if (sc->reply_desc_tag != NULL)
1172 		bus_dma_tag_destroy(sc->reply_desc_tag);
1173 
1174 	/*
1175 	 * Free event detail memory
1176 	 */
1177 	if (sc->evt_detail_phys_addr)
1178 		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1179 	if (sc->evt_detail_mem != NULL)
1180 		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1181 	if (sc->evt_detail_tag != NULL)
1182 		bus_dma_tag_destroy(sc->evt_detail_tag);
1183 
1184 	/*
1185 	 * Free MFI frames
1186 	 */
1187 	if (sc->mfi_cmd_list) {
1188 		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1189 			mfi_cmd = sc->mfi_cmd_list[i];
1190 			mrsas_free_frame(sc, mfi_cmd);
1191 		}
1192 	}
1193 	if (sc->mficmd_frame_tag != NULL)
1194 		bus_dma_tag_destroy(sc->mficmd_frame_tag);
1195 
1196 	/*
1197 	 * Free MPT internal command list
1198 	 */
1199 	max_cmd = sc->max_fw_cmds;
1200 	if (sc->mpt_cmd_list) {
1201 		for (i = 0; i < max_cmd; i++) {
1202 			mpt_cmd = sc->mpt_cmd_list[i];
1203 			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1204 			free(sc->mpt_cmd_list[i], M_MRSAS);
1205 		}
1206 		free(sc->mpt_cmd_list, M_MRSAS);
1207 		sc->mpt_cmd_list = NULL;
1208 	}
1209 	/*
1210 	 * Free MFI internal command list
1211 	 */
1212 
1213 	if (sc->mfi_cmd_list) {
1214 		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1215 			free(sc->mfi_cmd_list[i], M_MRSAS);
1216 		}
1217 		free(sc->mfi_cmd_list, M_MRSAS);
1218 		sc->mfi_cmd_list = NULL;
1219 	}
1220 	/*
1221 	 * Free request descriptor memory
1222 	 */
1223 	free(sc->req_desc, M_MRSAS);
1224 	sc->req_desc = NULL;
1225 
1226 	/*
1227 	 * Destroy parent tag
1228 	 */
1229 	if (sc->mrsas_parent_tag != NULL)
1230 		bus_dma_tag_destroy(sc->mrsas_parent_tag);
1231 
1232 	/*
1233 	 * Free ctrl_info memory
1234 	 */
1235 	if (sc->ctrl_info != NULL)
1236 		free(sc->ctrl_info, M_MRSAS);
1237 }
1238 
1239 /*
1240  * mrsas_teardown_intr:	Teardown interrupt
1241  * input:				Adapter instance soft state
1242  *
1243  * This function is called from mrsas_detach() to teardown and release bus
1244  * interrupt resourse.
1245  */
1246 void
1247 mrsas_teardown_intr(struct mrsas_softc *sc)
1248 {
1249 	int i;
1250 
1251 	if (!sc->msix_enable) {
1252 		if (sc->intr_handle[0])
1253 			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1254 		if (sc->mrsas_irq[0] != NULL)
1255 			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1256 			    sc->irq_id[0], sc->mrsas_irq[0]);
1257 		sc->intr_handle[0] = NULL;
1258 	} else {
1259 		for (i = 0; i < sc->msix_vectors; i++) {
1260 			if (sc->intr_handle[i])
1261 				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1262 				    sc->intr_handle[i]);
1263 
1264 			if (sc->mrsas_irq[i] != NULL)
1265 				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1266 				    sc->irq_id[i], sc->mrsas_irq[i]);
1267 
1268 			sc->intr_handle[i] = NULL;
1269 		}
1270 		pci_release_msi(sc->mrsas_dev);
1271 	}
1272 
1273 }
1274 
1275 /*
1276  * mrsas_suspend:	Suspend entry point
1277  * input:			Device struct pointer
1278  *
1279  * This function is the entry point for system suspend from the OS.
1280  */
1281 static int
1282 mrsas_suspend(device_t dev)
1283 {
1284 	/* This will be filled when the driver will have hibernation support */
1285 	return (0);
1286 }
1287 
1288 /*
1289  * mrsas_resume:	Resume entry point
1290  * input:			Device struct pointer
1291  *
1292  * This function is the entry point for system resume from the OS.
1293  */
1294 static int
1295 mrsas_resume(device_t dev)
1296 {
1297 	/* This will be filled when the driver will have hibernation support */
1298 	return (0);
1299 }
1300 
1301 /**
1302  * mrsas_get_softc_instance:    Find softc instance based on cmd type
1303  *
1304  * This function will return softc instance based on cmd type.
1305  * In some case, application fire ioctl on required management instance and
1306  * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1307  * case, else get the softc instance from host_no provided by application in
1308  * user data.
1309  */
1310 
1311 static struct mrsas_softc *
1312 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1313 {
1314 	struct mrsas_softc *sc = NULL;
1315 	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1316 
1317 	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1318 		sc = dev->si_drv1;
1319 	} else {
1320 		/*
1321 		 * get the Host number & the softc from data sent by the
1322 		 * Application
1323 		 */
1324 		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1325 		if (sc == NULL)
1326 			printf("There is no Controller number %d\n",
1327 			    user_ioc->host_no);
1328 		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1329 			mrsas_dprint(sc, MRSAS_FAULT,
1330 			    "Invalid Controller number %d\n", user_ioc->host_no);
1331 	}
1332 
1333 	return sc;
1334 }
1335 
1336 /*
1337  * mrsas_ioctl:	IOCtl commands entry point.
1338  *
1339  * This function is the entry point for IOCtls from the OS.  It calls the
1340  * appropriate function for processing depending on the command received.
1341  */
1342 static int
1343 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
1344     struct thread *td)
1345 {
1346 	struct mrsas_softc *sc;
1347 	int ret = 0, i = 0;
1348 	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
1349 
1350 	sc = mrsas_get_softc_instance(dev, cmd, arg);
1351 	if (!sc)
1352 		return ENOENT;
1353 
1354 	if (sc->remove_in_progress ||
1355 		(sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
1356 		mrsas_dprint(sc, MRSAS_INFO,
1357 		    "Either driver remove or shutdown called or "
1358 			"HW is in unrecoverable critical error state.\n");
1359 		return ENOENT;
1360 	}
1361 	mtx_lock_spin(&sc->ioctl_lock);
1362 	if (!sc->reset_in_progress) {
1363 		mtx_unlock_spin(&sc->ioctl_lock);
1364 		goto do_ioctl;
1365 	}
1366 	mtx_unlock_spin(&sc->ioctl_lock);
1367 	while (sc->reset_in_progress) {
1368 		i++;
1369 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1370 			mrsas_dprint(sc, MRSAS_INFO,
1371 			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1372 		}
1373 		pause("mr_ioctl", hz);
1374 	}
1375 
1376 do_ioctl:
1377 	switch (cmd) {
1378 	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1379 #ifdef COMPAT_FREEBSD32
1380 	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1381 #endif
1382 		/*
1383 		 * Decrement the Ioctl counting Semaphore before getting an
1384 		 * mfi command
1385 		 */
1386 		sema_wait(&sc->ioctl_count_sema);
1387 
1388 		ret = mrsas_passthru(sc, (void *)arg, cmd);
1389 
1390 		/* Increment the Ioctl counting semaphore value */
1391 		sema_post(&sc->ioctl_count_sema);
1392 
1393 		break;
1394 	case MRSAS_IOC_SCAN_BUS:
1395 		ret = mrsas_bus_scan(sc);
1396 		break;
1397 
1398 	case MRSAS_IOC_GET_PCI_INFO:
1399 		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
1400 		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
1401 		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
1402 		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
1403 		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
1404 		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
1405 		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
1406 		    "pci device no: %d, pci function no: %d,"
1407 		    "pci domain ID: %d\n",
1408 		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
1409 		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
1410 		ret = 0;
1411 		break;
1412 
1413 	default:
1414 		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1415 		ret = ENOENT;
1416 	}
1417 
1418 	return (ret);
1419 }
1420 
1421 /*
1422  * mrsas_poll:	poll entry point for mrsas driver fd
1423  *
1424  * This function is the entry point for poll from the OS.  It waits for some AEN
1425  * events to be triggered from the controller and notifies back.
1426  */
1427 static int
1428 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1429 {
1430 	struct mrsas_softc *sc;
1431 	int revents = 0;
1432 
1433 	sc = dev->si_drv1;
1434 
1435 	if (poll_events & (POLLIN | POLLRDNORM)) {
1436 		if (sc->mrsas_aen_triggered) {
1437 			revents |= poll_events & (POLLIN | POLLRDNORM);
1438 		}
1439 	}
1440 	if (revents == 0) {
1441 		if (poll_events & (POLLIN | POLLRDNORM)) {
1442 			mtx_lock(&sc->aen_lock);
1443 			sc->mrsas_poll_waiting = 1;
1444 			selrecord(td, &sc->mrsas_select);
1445 			mtx_unlock(&sc->aen_lock);
1446 		}
1447 	}
1448 	return revents;
1449 }
1450 
1451 /*
1452  * mrsas_setup_irq:	Set up interrupt
1453  * input:			Adapter instance soft state
1454  *
1455  * This function sets up interrupts as a bus resource, with flags indicating
1456  * resource permitting contemporaneous sharing and for resource to activate
1457  * atomically.
1458  */
1459 static int
1460 mrsas_setup_irq(struct mrsas_softc *sc)
1461 {
1462 	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1463 		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1464 
1465 	else {
1466 		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1467 		sc->irq_context[0].sc = sc;
1468 		sc->irq_context[0].MSIxIndex = 0;
1469 		sc->irq_id[0] = 0;
1470 		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1471 		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1472 		if (sc->mrsas_irq[0] == NULL) {
1473 			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1474 			    "interrupt\n");
1475 			return (FAIL);
1476 		}
1477 		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1478 		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1479 		    &sc->irq_context[0], &sc->intr_handle[0])) {
1480 			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1481 			    "interrupt\n");
1482 			return (FAIL);
1483 		}
1484 	}
1485 	return (0);
1486 }
1487 
1488 /*
1489  * mrsas_isr:	ISR entry point
1490  * input:		argument pointer
1491  *
1492  * This function is the interrupt service routine entry point.  There are two
1493  * types of interrupts, state change interrupt and response interrupt.  If an
1494  * interrupt is not ours, we just return.
1495  */
1496 void
1497 mrsas_isr(void *arg)
1498 {
1499 	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1500 	struct mrsas_softc *sc = irq_context->sc;
1501 	int status = 0;
1502 
1503 	if (sc->mask_interrupts)
1504 		return;
1505 
1506 	if (!sc->msix_vectors) {
1507 		status = mrsas_clear_intr(sc);
1508 		if (!status)
1509 			return;
1510 	}
1511 	/* If we are resetting, bail */
1512 	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1513 		printf(" Entered into ISR when OCR is going active. \n");
1514 		mrsas_clear_intr(sc);
1515 		return;
1516 	}
1517 	/* Process for reply request and clear response interrupt */
1518 	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1519 		mrsas_clear_intr(sc);
1520 
1521 	return;
1522 }
1523 
1524 /*
1525  * mrsas_complete_cmd:	Process reply request
1526  * input:				Adapter instance soft state
1527  *
1528  * This function is called from mrsas_isr() to process reply request and clear
1529  * response interrupt. Processing of the reply request entails walking
1530  * through the reply descriptor array for the command request  pended from
1531  * Firmware.  We look at the Function field to determine the command type and
1532  * perform the appropriate action.  Before we return, we clear the response
1533  * interrupt.
1534  */
1535 int
1536 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1537 {
1538 	Mpi2ReplyDescriptorsUnion_t *desc;
1539 	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1540 	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1541 	struct mrsas_mpt_cmd *cmd_mpt;
1542 	struct mrsas_mfi_cmd *cmd_mfi;
1543 	u_int8_t reply_descript_type;
1544 	u_int16_t smid, num_completed;
1545 	u_int8_t status, extStatus;
1546 	union desc_value desc_val;
1547 	PLD_LOAD_BALANCE_INFO lbinfo;
1548 	u_int32_t device_id;
1549 	int threshold_reply_count = 0;
1550 #if TM_DEBUG
1551 	MR_TASK_MANAGE_REQUEST *mr_tm_req;
1552 	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
1553 #endif
1554 
1555 	/* If we have a hardware error, not need to continue */
1556 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
1557 		return (DONE);
1558 
1559 	desc = sc->reply_desc_mem;
1560 	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1561 	    + sc->last_reply_idx[MSIxIndex];
1562 
1563 	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1564 
1565 	desc_val.word = desc->Words;
1566 	num_completed = 0;
1567 
1568 	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1569 
1570 	/* Find our reply descriptor for the command and process */
1571 	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
1572 		smid = reply_desc->SMID;
1573 		cmd_mpt = sc->mpt_cmd_list[smid - 1];
1574 		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1575 
1576 		status = scsi_io_req->RaidContext.status;
1577 		extStatus = scsi_io_req->RaidContext.exStatus;
1578 
1579 		switch (scsi_io_req->Function) {
1580 		case MPI2_FUNCTION_SCSI_TASK_MGMT:
1581 #if TM_DEBUG
1582 			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
1583 			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
1584 			    &mr_tm_req->TmRequest;
1585 			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
1586 			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
1587 #endif
1588             wakeup_one((void *)&sc->ocr_chan);
1589             break;
1590 		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
1591 			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1592 			lbinfo = &sc->load_balance_info[device_id];
1593 			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1594 				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1595 				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1596 			}
1597 			/* Fall thru and complete IO */
1598 		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1599 			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1600 			mrsas_cmd_done(sc, cmd_mpt);
1601 			scsi_io_req->RaidContext.status = 0;
1602 			scsi_io_req->RaidContext.exStatus = 0;
1603 			mrsas_atomic_dec(&sc->fw_outstanding);
1604 			break;
1605 		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
1606 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1607 			/*
1608 			 * Make sure NOT TO release the mfi command from the called
1609 			 * function's context if it is fired with issue_polled call.
1610 			 * And also make sure that the issue_polled call should only be
1611 			 * used if INTERRUPT IS DISABLED.
1612 			 */
1613 			if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
1614 				mrsas_release_mfi_cmd(cmd_mfi);
1615 			else
1616 				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1617 			break;
1618 		}
1619 
1620 		sc->last_reply_idx[MSIxIndex]++;
1621 		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1622 			sc->last_reply_idx[MSIxIndex] = 0;
1623 
1624 		desc->Words = ~((uint64_t)0x00);	/* set it back to all
1625 							 * 0xFFFFFFFFs */
1626 		num_completed++;
1627 		threshold_reply_count++;
1628 
1629 		/* Get the next reply descriptor */
1630 		if (!sc->last_reply_idx[MSIxIndex]) {
1631 			desc = sc->reply_desc_mem;
1632 			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1633 		} else
1634 			desc++;
1635 
1636 		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1637 		desc_val.word = desc->Words;
1638 
1639 		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1640 
1641 		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1642 			break;
1643 
1644 		/*
1645 		 * Write to reply post index after completing threshold reply
1646 		 * count and still there are more replies in reply queue
1647 		 * pending to be completed.
1648 		 */
1649 		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1650 			if (sc->msix_enable) {
1651 				if (sc->mrsas_gen3_ctrl)
1652 					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1653 					    ((MSIxIndex & 0x7) << 24) |
1654 					    sc->last_reply_idx[MSIxIndex]);
1655 				else
1656 					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1657 					    sc->last_reply_idx[MSIxIndex]);
1658 			} else
1659 				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1660 				    reply_post_host_index), sc->last_reply_idx[0]);
1661 
1662 			threshold_reply_count = 0;
1663 		}
1664 	}
1665 
1666 	/* No match, just return */
1667 	if (num_completed == 0)
1668 		return (DONE);
1669 
1670 	/* Clear response interrupt */
1671 	if (sc->msix_enable) {
1672 			if (sc->mrsas_gen3_ctrl) {
1673 			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1674 			    ((MSIxIndex & 0x7) << 24) |
1675 			    sc->last_reply_idx[MSIxIndex]);
1676 		} else
1677 			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1678 			    sc->last_reply_idx[MSIxIndex]);
1679 	} else
1680 		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1681 		    reply_post_host_index), sc->last_reply_idx[0]);
1682 
1683 	return (0);
1684 }
1685 
1686 /*
1687  * mrsas_map_mpt_cmd_status:	Allocate DMAable memory.
1688  * input:						Adapter instance soft state
1689  *
1690  * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1691  * It checks the command status and maps the appropriate CAM status for the
1692  * CCB.
1693  */
1694 void
1695 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1696 {
1697 	struct mrsas_softc *sc = cmd->sc;
1698 	u_int8_t *sense_data;
1699 
1700 	switch (status) {
1701 	case MFI_STAT_OK:
1702 		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1703 		break;
1704 	case MFI_STAT_SCSI_IO_FAILED:
1705 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1706 		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1707 		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1708 		if (sense_data) {
1709 			/* For now just copy 18 bytes back */
1710 			memcpy(sense_data, cmd->sense, 18);
1711 			cmd->ccb_ptr->csio.sense_len = 18;
1712 			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1713 		}
1714 		break;
1715 	case MFI_STAT_LD_OFFLINE:
1716 	case MFI_STAT_DEVICE_NOT_FOUND:
1717 		if (cmd->ccb_ptr->ccb_h.target_lun)
1718 			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1719 		else
1720 			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1721 		break;
1722 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1723 		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1724 		break;
1725 	default:
1726 		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1727 		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1728 		cmd->ccb_ptr->csio.scsi_status = status;
1729 	}
1730 	return;
1731 }
1732 
1733 /*
1734  * mrsas_alloc_mem:	Allocate DMAable memory
1735  * input:			Adapter instance soft state
1736  *
1737  * This function creates the parent DMA tag and allocates DMAable memory. DMA
1738  * tag describes constraints of DMA mapping. Memory allocated is mapped into
1739  * Kernel virtual address. Callback argument is physical memory address.
1740  */
1741 static int
1742 mrsas_alloc_mem(struct mrsas_softc *sc)
1743 {
1744 	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1745 	          chain_frame_size, evt_detail_size, count;
1746 
1747 	/*
1748 	 * Allocate parent DMA tag
1749 	 */
1750 	if (bus_dma_tag_create(NULL,	/* parent */
1751 	    1,				/* alignment */
1752 	    0,				/* boundary */
1753 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1754 	    BUS_SPACE_MAXADDR,		/* highaddr */
1755 	    NULL, NULL,			/* filter, filterarg */
1756 	    MAXPHYS,			/* maxsize */
1757 	    sc->max_num_sge,		/* nsegments */
1758 	    MAXPHYS,			/* maxsegsize */
1759 	    0,				/* flags */
1760 	    NULL, NULL,			/* lockfunc, lockarg */
1761 	    &sc->mrsas_parent_tag	/* tag */
1762 	    )) {
1763 		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1764 		return (ENOMEM);
1765 	}
1766 	/*
1767 	 * Allocate for version buffer
1768 	 */
1769 	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1770 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1771 	    1, 0,
1772 	    BUS_SPACE_MAXADDR_32BIT,
1773 	    BUS_SPACE_MAXADDR,
1774 	    NULL, NULL,
1775 	    verbuf_size,
1776 	    1,
1777 	    verbuf_size,
1778 	    BUS_DMA_ALLOCNOW,
1779 	    NULL, NULL,
1780 	    &sc->verbuf_tag)) {
1781 		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1782 		return (ENOMEM);
1783 	}
1784 	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1785 	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1786 		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1787 		return (ENOMEM);
1788 	}
1789 	bzero(sc->verbuf_mem, verbuf_size);
1790 	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1791 	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1792 	    BUS_DMA_NOWAIT)) {
1793 		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1794 		return (ENOMEM);
1795 	}
1796 	/*
1797 	 * Allocate IO Request Frames
1798 	 */
1799 	io_req_size = sc->io_frames_alloc_sz;
1800 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1801 	    16, 0,
1802 	    BUS_SPACE_MAXADDR_32BIT,
1803 	    BUS_SPACE_MAXADDR,
1804 	    NULL, NULL,
1805 	    io_req_size,
1806 	    1,
1807 	    io_req_size,
1808 	    BUS_DMA_ALLOCNOW,
1809 	    NULL, NULL,
1810 	    &sc->io_request_tag)) {
1811 		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1812 		return (ENOMEM);
1813 	}
1814 	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1815 	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1816 		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1817 		return (ENOMEM);
1818 	}
1819 	bzero(sc->io_request_mem, io_req_size);
1820 	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1821 	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
1822 	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1823 		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1824 		return (ENOMEM);
1825 	}
1826 	/*
1827 	 * Allocate Chain Frames
1828 	 */
1829 	chain_frame_size = sc->chain_frames_alloc_sz;
1830 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1831 	    4, 0,
1832 	    BUS_SPACE_MAXADDR_32BIT,
1833 	    BUS_SPACE_MAXADDR,
1834 	    NULL, NULL,
1835 	    chain_frame_size,
1836 	    1,
1837 	    chain_frame_size,
1838 	    BUS_DMA_ALLOCNOW,
1839 	    NULL, NULL,
1840 	    &sc->chain_frame_tag)) {
1841 		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1842 		return (ENOMEM);
1843 	}
1844 	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1845 	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1846 		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1847 		return (ENOMEM);
1848 	}
1849 	bzero(sc->chain_frame_mem, chain_frame_size);
1850 	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1851 	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1852 	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1853 		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1854 		return (ENOMEM);
1855 	}
1856 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
1857 	/*
1858 	 * Allocate Reply Descriptor Array
1859 	 */
1860 	reply_desc_size = sc->reply_alloc_sz * count;
1861 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1862 	    16, 0,
1863 	    BUS_SPACE_MAXADDR_32BIT,
1864 	    BUS_SPACE_MAXADDR,
1865 	    NULL, NULL,
1866 	    reply_desc_size,
1867 	    1,
1868 	    reply_desc_size,
1869 	    BUS_DMA_ALLOCNOW,
1870 	    NULL, NULL,
1871 	    &sc->reply_desc_tag)) {
1872 		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1873 		return (ENOMEM);
1874 	}
1875 	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1876 	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1877 		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1878 		return (ENOMEM);
1879 	}
1880 	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1881 	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1882 	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1883 		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1884 		return (ENOMEM);
1885 	}
1886 	/*
1887 	 * Allocate Sense Buffer Array.  Keep in lower 4GB
1888 	 */
1889 	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1890 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1891 	    64, 0,
1892 	    BUS_SPACE_MAXADDR_32BIT,
1893 	    BUS_SPACE_MAXADDR,
1894 	    NULL, NULL,
1895 	    sense_size,
1896 	    1,
1897 	    sense_size,
1898 	    BUS_DMA_ALLOCNOW,
1899 	    NULL, NULL,
1900 	    &sc->sense_tag)) {
1901 		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1902 		return (ENOMEM);
1903 	}
1904 	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1905 	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1906 		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1907 		return (ENOMEM);
1908 	}
1909 	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1910 	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1911 	    BUS_DMA_NOWAIT)) {
1912 		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1913 		return (ENOMEM);
1914 	}
1915 	/*
1916 	 * Allocate for Event detail structure
1917 	 */
1918 	evt_detail_size = sizeof(struct mrsas_evt_detail);
1919 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1920 	    1, 0,
1921 	    BUS_SPACE_MAXADDR_32BIT,
1922 	    BUS_SPACE_MAXADDR,
1923 	    NULL, NULL,
1924 	    evt_detail_size,
1925 	    1,
1926 	    evt_detail_size,
1927 	    BUS_DMA_ALLOCNOW,
1928 	    NULL, NULL,
1929 	    &sc->evt_detail_tag)) {
1930 		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1931 		return (ENOMEM);
1932 	}
1933 	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1934 	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1935 		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1936 		return (ENOMEM);
1937 	}
1938 	bzero(sc->evt_detail_mem, evt_detail_size);
1939 	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1940 	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1941 	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1942 		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1943 		return (ENOMEM);
1944 	}
1945 	/*
1946 	 * Create a dma tag for data buffers; size will be the maximum
1947 	 * possible I/O size (280kB).
1948 	 */
1949 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1950 	    1,
1951 	    0,
1952 	    BUS_SPACE_MAXADDR,
1953 	    BUS_SPACE_MAXADDR,
1954 	    NULL, NULL,
1955 	    MAXPHYS,
1956 	    sc->max_num_sge,		/* nsegments */
1957 	    MAXPHYS,
1958 	    BUS_DMA_ALLOCNOW,
1959 	    busdma_lock_mutex,
1960 	    &sc->io_lock,
1961 	    &sc->data_tag)) {
1962 		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1963 		return (ENOMEM);
1964 	}
1965 	return (0);
1966 }
1967 
1968 /*
1969  * mrsas_addr_cb:	Callback function of bus_dmamap_load()
1970  * input:			callback argument, machine dependent type
1971  * 					that describes DMA segments, number of segments, error code
1972  *
1973  * This function is for the driver to receive mapping information resultant of
1974  * the bus_dmamap_load(). The information is actually not being used, but the
1975  * address is saved anyway.
1976  */
1977 void
1978 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1979 {
1980 	bus_addr_t *addr;
1981 
1982 	addr = arg;
1983 	*addr = segs[0].ds_addr;
1984 }
1985 
1986 /*
1987  * mrsas_setup_raidmap:	Set up RAID map.
1988  * input:				Adapter instance soft state
1989  *
1990  * Allocate DMA memory for the RAID maps and perform setup.
1991  */
1992 static int
1993 mrsas_setup_raidmap(struct mrsas_softc *sc)
1994 {
1995 	int i;
1996 
1997 	for (i = 0; i < 2; i++) {
1998 		sc->ld_drv_map[i] =
1999 		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2000 		/* Do Error handling */
2001 		if (!sc->ld_drv_map[i]) {
2002 			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
2003 
2004 			if (i == 1)
2005 				free(sc->ld_drv_map[0], M_MRSAS);
2006 			/* ABORT driver initialization */
2007 			goto ABORT;
2008 		}
2009 	}
2010 
2011 	for (int i = 0; i < 2; i++) {
2012 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2013 		    4, 0,
2014 		    BUS_SPACE_MAXADDR_32BIT,
2015 		    BUS_SPACE_MAXADDR,
2016 		    NULL, NULL,
2017 		    sc->max_map_sz,
2018 		    1,
2019 		    sc->max_map_sz,
2020 		    BUS_DMA_ALLOCNOW,
2021 		    NULL, NULL,
2022 		    &sc->raidmap_tag[i])) {
2023 			device_printf(sc->mrsas_dev,
2024 			    "Cannot allocate raid map tag.\n");
2025 			return (ENOMEM);
2026 		}
2027 		if (bus_dmamem_alloc(sc->raidmap_tag[i],
2028 		    (void **)&sc->raidmap_mem[i],
2029 		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2030 			device_printf(sc->mrsas_dev,
2031 			    "Cannot allocate raidmap memory.\n");
2032 			return (ENOMEM);
2033 		}
2034 		bzero(sc->raidmap_mem[i], sc->max_map_sz);
2035 
2036 		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2037 		    sc->raidmap_mem[i], sc->max_map_sz,
2038 		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2039 		    BUS_DMA_NOWAIT)) {
2040 			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2041 			return (ENOMEM);
2042 		}
2043 		if (!sc->raidmap_mem[i]) {
2044 			device_printf(sc->mrsas_dev,
2045 			    "Cannot allocate memory for raid map.\n");
2046 			return (ENOMEM);
2047 		}
2048 	}
2049 
2050 	if (!mrsas_get_map_info(sc))
2051 		mrsas_sync_map_info(sc);
2052 
2053 	return (0);
2054 
2055 ABORT:
2056 	return (1);
2057 }
2058 
2059 /**
2060  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
2061  * @sc:				Adapter soft state
2062  *
 * No return value; on failure, the seq-number JBOD fast path is simply
 * not enabled.
2064  */
2065 void
2066 megasas_setup_jbod_map(struct mrsas_softc *sc)
2067 {
2068 	int i;
2069 	uint32_t pd_seq_map_sz;
2070 
2071 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2072 	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2073 
2074 	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2075 		sc->use_seqnum_jbod_fp = 0;
2076 		return;
2077 	}
2078 	if (sc->jbodmap_mem[0])
2079 		goto skip_alloc;
2080 
2081 	for (i = 0; i < 2; i++) {
2082 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2083 		    4, 0,
2084 		    BUS_SPACE_MAXADDR_32BIT,
2085 		    BUS_SPACE_MAXADDR,
2086 		    NULL, NULL,
2087 		    pd_seq_map_sz,
2088 		    1,
2089 		    pd_seq_map_sz,
2090 		    BUS_DMA_ALLOCNOW,
2091 		    NULL, NULL,
2092 		    &sc->jbodmap_tag[i])) {
2093 			device_printf(sc->mrsas_dev,
2094 			    "Cannot allocate jbod map tag.\n");
2095 			return;
2096 		}
2097 		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2098 		    (void **)&sc->jbodmap_mem[i],
2099 		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2100 			device_printf(sc->mrsas_dev,
2101 			    "Cannot allocate jbod map memory.\n");
2102 			return;
2103 		}
2104 		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2105 
2106 		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2107 		    sc->jbodmap_mem[i], pd_seq_map_sz,
2108 		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2109 		    BUS_DMA_NOWAIT)) {
2110 			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2111 			return;
2112 		}
2113 		if (!sc->jbodmap_mem[i]) {
2114 			device_printf(sc->mrsas_dev,
2115 			    "Cannot allocate memory for jbod map.\n");
2116 			sc->use_seqnum_jbod_fp = 0;
2117 			return;
2118 		}
2119 	}
2120 
2121 skip_alloc:
2122 	if (!megasas_sync_pd_seq_num(sc, false) &&
2123 	    !megasas_sync_pd_seq_num(sc, true))
2124 		sc->use_seqnum_jbod_fp = 1;
2125 	else
2126 		sc->use_seqnum_jbod_fp = 0;
2127 
2128 	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2129 }
2130 
2131 /*
2132  * mrsas_init_fw:	Initialize Firmware
2133  * input:			Adapter soft state
2134  *
2135  * Calls transition_to_ready() to make sure Firmware is in operational state and
2136  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
2137  * issues internal commands to get the controller info after the IOC_INIT
 * command response is received by Firmware.  It then retrieves the PD and
 * LD lists and computes the maximum allowed sectors per I/O from the
 * controller-reported limits.
2141  */
2142 static int
2143 mrsas_init_fw(struct mrsas_softc *sc)
2144 {
2145 
2146 	int ret, loop, ocr = 0;
2147 	u_int32_t max_sectors_1;
2148 	u_int32_t max_sectors_2;
2149 	u_int32_t tmp_sectors;
2150 	u_int32_t scratch_pad_2;
2151 	int msix_enable = 0;
2152 	int fw_msix_count = 0;
2153 
2154 	/* Make sure Firmware is ready */
2155 	ret = mrsas_transition_to_ready(sc, ocr);
2156 	if (ret != SUCCESS) {
2157 		return (ret);
2158 	}
2159 	/* MSI-x index 0- reply post host index register */
2160 	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2161 	/* Check if MSI-X is supported while in ready state */
2162 	msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2163 
2164 	if (msix_enable) {
2165 		scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2166 		    outbound_scratch_pad_2));
2167 
2168 		/* Check max MSI-X vectors */
2169 		if (sc->device_id == MRSAS_TBOLT) {
2170 			sc->msix_vectors = (scratch_pad_2
2171 			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2172 			fw_msix_count = sc->msix_vectors;
2173 		} else {
2174 			/* Invader/Fury supports 96 MSI-X vectors */
2175 			sc->msix_vectors = ((scratch_pad_2
2176 			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2177 			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2178 			fw_msix_count = sc->msix_vectors;
2179 
2180 			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2181 			    loop++) {
2182 				sc->msix_reg_offset[loop] =
2183 				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2184 				    (loop * 0x10);
2185 			}
2186 		}
2187 
2188 		/* Don't bother allocating more MSI-X vectors than cpus */
2189 		sc->msix_vectors = min(sc->msix_vectors,
2190 		    mp_ncpus);
2191 
2192 		/* Allocate MSI-x vectors */
2193 		if (mrsas_allocate_msix(sc) == SUCCESS)
2194 			sc->msix_enable = 1;
2195 		else
2196 			sc->msix_enable = 0;
2197 
2198 		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2199 		    "Online CPU %d Current MSIX <%d>\n",
2200 		    fw_msix_count, mp_ncpus, sc->msix_vectors);
2201 	}
2202 	if (mrsas_init_adapter(sc) != SUCCESS) {
2203 		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2204 		return (1);
2205 	}
2206 	/* Allocate internal commands for pass-thru */
2207 	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2208 		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2209 		return (1);
2210 	}
2211 	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2212 	if (!sc->ctrl_info) {
2213 		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2214 		return (1);
2215 	}
2216 	/*
2217 	 * Get the controller info from FW, so that the MAX VD support
2218 	 * availability can be decided.
2219 	 */
2220 	if (mrsas_get_ctrl_info(sc)) {
2221 		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2222 		return (1);
2223 	}
2224 	sc->secure_jbod_support =
2225 	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2226 
2227 	if (sc->secure_jbod_support)
2228 		device_printf(sc->mrsas_dev, "FW supports SED \n");
2229 
2230 	if (sc->use_seqnum_jbod_fp)
2231 		device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2232 
2233 	if (mrsas_setup_raidmap(sc) != SUCCESS) {
2234 		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2235 		    "There seems to be some problem in the controller\n"
2236 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
2237 	}
2238 	megasas_setup_jbod_map(sc);
2239 
2240 	/* For pass-thru, get PD/LD list and controller info */
2241 	memset(sc->pd_list, 0,
2242 	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2243 	if (mrsas_get_pd_list(sc) != SUCCESS) {
2244 		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2245 		return (1);
2246 	}
2247 	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2248 	if (mrsas_get_ld_list(sc) != SUCCESS) {
2249 		device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2250 		return (1);
2251 	}
2252 	/*
2253 	 * Compute the max allowed sectors per IO: The controller info has
2254 	 * two limits on max sectors. Driver should use the minimum of these
2255 	 * two.
2256 	 *
2257 	 * 1 << stripe_sz_ops.min = max sectors per strip
2258 	 *
2259 	 * Note that older firmwares ( < FW ver 30) didn't report information to
2260 	 * calculate max_sectors_1. So the number ended up as zero always.
2261 	 */
2262 	tmp_sectors = 0;
2263 	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2264 	    sc->ctrl_info->max_strips_per_io;
2265 	max_sectors_2 = sc->ctrl_info->max_request_size;
2266 	tmp_sectors = min(max_sectors_1, max_sectors_2);
2267 	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2268 
2269 	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2270 		sc->max_sectors_per_req = tmp_sectors;
2271 
2272 	sc->disableOnlineCtrlReset =
2273 	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2274 	sc->UnevenSpanSupport =
2275 	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2276 	if (sc->UnevenSpanSupport) {
2277 		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2278 		    sc->UnevenSpanSupport);
2279 
2280 		if (MR_ValidateMapInfo(sc))
2281 			sc->fast_path_io = 1;
2282 		else
2283 			sc->fast_path_io = 0;
2284 	}
2285 	return (0);
2286 }
2287 
2288 /*
2289  * mrsas_init_adapter:	Initializes the adapter/controller
2290  * input:				Adapter soft state
2291  *
2292  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2293  * ROC/controller.  The FW register is read to determined the number of
2294  * commands that is supported.  All memory allocations for IO is based on
2295  * max_cmd.  Appropriate calculations are performed in this function.
2296  */
2297 int
2298 mrsas_init_adapter(struct mrsas_softc *sc)
2299 {
2300 	uint32_t status;
2301 	u_int32_t max_cmd, scratch_pad_2;
2302 	int ret;
2303 	int i = 0;
2304 
2305 	/* Read FW status register */
2306 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2307 
2308 	/* Get operational params from status register */
2309 	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2310 
2311 	/* Decrement the max supported by 1, to correlate with FW */
2312 	sc->max_fw_cmds = sc->max_fw_cmds - 1;
2313 	max_cmd = sc->max_fw_cmds;
2314 
2315 	/* Determine allocation size of command frames */
2316 	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
2317 	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2318 	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2319 	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2320 	scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2321 	    outbound_scratch_pad_2));
2322 	/*
2323 	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2324 	 * Firmware support extended IO chain frame which is 4 time more
2325 	 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2326 	 * 1K 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
2327 	 */
2328 	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2329 		sc->max_chain_frame_sz =
2330 		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2331 		    * MEGASAS_1MB_IO;
2332 	else
2333 		sc->max_chain_frame_sz =
2334 		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2335 		    * MEGASAS_256K_IO;
2336 
2337 	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
2338 	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2339 	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2340 
2341 	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
2342 	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2343 
2344 	mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
2345 	    sc->max_num_sge, sc->max_chain_frame_sz);
2346 
2347 	/* Used for pass thru MFI frame (DCMD) */
2348 	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2349 
2350 	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2351 	    sizeof(MPI2_SGE_IO_UNION)) / 16;
2352 
2353 	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2354 
2355 	for (i = 0; i < count; i++)
2356 		sc->last_reply_idx[i] = 0;
2357 
2358 	ret = mrsas_alloc_mem(sc);
2359 	if (ret != SUCCESS)
2360 		return (ret);
2361 
2362 	ret = mrsas_alloc_mpt_cmds(sc);
2363 	if (ret != SUCCESS)
2364 		return (ret);
2365 
2366 	ret = mrsas_ioc_init(sc);
2367 	if (ret != SUCCESS)
2368 		return (ret);
2369 
2370 	return (0);
2371 }
2372 
2373 /*
2374  * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2375  * input:				Adapter soft state
2376  *
2377  * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2378  */
2379 int
2380 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2381 {
2382 	int ioc_init_size;
2383 
2384 	/* Allocate IOC INIT command */
2385 	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2386 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2387 	    1, 0,
2388 	    BUS_SPACE_MAXADDR_32BIT,
2389 	    BUS_SPACE_MAXADDR,
2390 	    NULL, NULL,
2391 	    ioc_init_size,
2392 	    1,
2393 	    ioc_init_size,
2394 	    BUS_DMA_ALLOCNOW,
2395 	    NULL, NULL,
2396 	    &sc->ioc_init_tag)) {
2397 		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2398 		return (ENOMEM);
2399 	}
2400 	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2401 	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2402 		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2403 		return (ENOMEM);
2404 	}
2405 	bzero(sc->ioc_init_mem, ioc_init_size);
2406 	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2407 	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2408 	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2409 		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2410 		return (ENOMEM);
2411 	}
2412 	return (0);
2413 }
2414 
2415 /*
 * mrsas_free_ioc_cmd:	Frees memory of the IOC Init command
2417  * input:				Adapter soft state
2418  *
2419  * Deallocates memory of the IOC Init cmd.
2420  */
2421 void
2422 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2423 {
2424 	if (sc->ioc_init_phys_mem)
2425 		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2426 	if (sc->ioc_init_mem != NULL)
2427 		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2428 	if (sc->ioc_init_tag != NULL)
2429 		bus_dma_tag_destroy(sc->ioc_init_tag);
2430 }
2431 
2432 /*
2433  * mrsas_ioc_init:	Sends IOC Init command to FW
2434  * input:			Adapter soft state
2435  *
2436  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2437  */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	/* Probe scratch pad 2 for SYNC_CACHE support unless it is blocked. */
	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	/* The MPI2 IOC INIT request lives 1K into the DMA buffer. */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	/* The MFI init frame sits at the start of the same DMA buffer. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	/* 0xFF marks "no response yet"; FW overwrites it on completion. */
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
		if (sc->mrsas_gen3_ctrl) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Advertise the driver version string to firmware, if available. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	/* Capabilities the driver reports to firmware. */
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
	/* Point the init frame at the MPI2 request placed 1K in. */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	/* Build an MFA request descriptor addressing the init frame. */
	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	/* Interrupts stay off; completion is detected by polling below. */
	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		/* Still 0xFF means FW never answered; anything else is an error. */
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* The init command buffer is single-use; release it either way. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2533 
2534 /*
2535  * mrsas_alloc_mpt_cmds:	Allocates the command packets
2536  * input:					Adapter instance soft state
2537  *
2538  * This function allocates the internal commands for IOs. Each command that is
2539  * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2540  * array is allocated with mrsas_mpt_cmd context.  The free commands are
2541  * maintained in a linked list (cmd pool). SMID value range is from 1 to
2542  * max_fw_cmds.
2543  */
int
mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd, count;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_cmd = sc->max_fw_cmds;

	/* Array of request descriptors, one slot per command. */
	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return (ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			/* Unwind every command allocated so far. */
			for (j = 0; j < i; j++)
				free(sc->mpt_cmd_list[j], M_MRSAS);
			free(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * Carve per-command slices out of the shared DMA pools.  The IO
	 * request base skips the first frame (command indices start at 1,
	 * see cmd->index below); chain and sense pools start at offset 0.
	 */
	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t *)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mpt_cmd_list[i];
		/* Per-command offsets within each pool. */
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = sc->max_chain_frame_sz * i;
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		/* SMID range is 1..max_fw_cmds, hence i + 1. */
		cmd->index = i + 1;
		cmd->ccb_ptr = NULL;
		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
			return (FAIL);
		}
		/* Command starts life on the free pool. */
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/* Initialize reply descriptor array to 0xFFFFFFFF */
	reply_desc = sc->reply_desc_mem;
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return (0);
}
2625 
2626 /*
2627  * mrsas_fire_cmd:	Sends command to FW
2628  * input:			Adapter softstate
2629  * 					request descriptor address low
2630  * 					request descriptor address high
2631  *
2632  * This functions fires the command to Firmware by writing to the
2633  * inbound_low_queue_port and inbound_high_queue_port.
2634  */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * The 64-bit request descriptor is posted as two 32-bit register
	 * writes (low then high); pci_lock keeps concurrent submitters
	 * from interleaving their halves.
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}
2646 
2647 /*
2648  * mrsas_transition_to_ready:  Move FW to Ready state input:
2649  * Adapter instance soft state
2650  *
2651  * During the initialization, FW passes can potentially be in any one of several
2652  * possible states. If the FW in operational, waiting-for-handshake states,
2653  * driver must take steps to bring it to ready state. Otherwise, it has to
2654  * wait for the ready state.
2655  */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* Current FW state lives in the low bits of the scratch pad. */
	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Full register value, used below to detect any progress. */
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			/* FAULT is fatal unless this is an OCR attempt. */
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* Acknowledge the pending boot message via doorbell. */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Poll until the doorbell's busy bit (bit 0) clears. */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			/* Any change in the full register counts as progress. */
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
2761 
2762 /*
2763  * mrsas_get_mfi_cmd:	Get a cmd from free command pool
2764  * input:				Adapter soft state
2765  *
2766  * This function removes an MFI command from the command list.
2767  */
2768 struct mrsas_mfi_cmd *
2769 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2770 {
2771 	struct mrsas_mfi_cmd *cmd = NULL;
2772 
2773 	mtx_lock(&sc->mfi_cmd_pool_lock);
2774 	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2775 		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2776 		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2777 	}
2778 	mtx_unlock(&sc->mfi_cmd_pool_lock);
2779 
2780 	return cmd;
2781 }
2782 
2783 /*
2784  * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
2785  * input:				Adapter Context.
2786  *
2787  * This function will check FW status register and flag do_timeout_reset flag.
2788  * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2789  * trigger reset.
2790  */
2791 static void
2792 mrsas_ocr_thread(void *arg)
2793 {
2794 	struct mrsas_softc *sc;
2795 	u_int32_t fw_status, fw_state;
2796 	u_int8_t tm_target_reset_failed = 0;
2797 
2798 	sc = (struct mrsas_softc *)arg;
2799 
2800 	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2801 
2802 	sc->ocr_thread_active = 1;
2803 	mtx_lock(&sc->sim_lock);
2804 	for (;;) {
2805 		/* Sleep for 1 second and check the queue status */
2806 		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2807 		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2808 		if (sc->remove_in_progress ||
2809 		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2810 			mrsas_dprint(sc, MRSAS_OCR,
2811 			    "Exit due to %s from %s\n",
2812 			    sc->remove_in_progress ? "Shutdown" :
2813 			    "Hardware critical error", __func__);
2814 			break;
2815 		}
2816 		fw_status = mrsas_read_reg(sc,
2817 		    offsetof(mrsas_reg_set, outbound_scratch_pad));
2818 		fw_state = fw_status & MFI_STATE_MASK;
2819 		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
2820 			mrsas_atomic_read(&sc->target_reset_outstanding)) {
2821 
2822 			/* First, freeze further IOs to come to the SIM */
2823 			mrsas_xpt_freeze(sc);
2824 
2825 			/* If this is an IO timeout then go for target reset */
2826 			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
2827 				device_printf(sc->mrsas_dev, "Initiating Target RESET "
2828 				    "because of SCSI IO timeout!\n");
2829 
2830 				/* Let the remaining IOs to complete */
2831 				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2832 				      "mrsas_reset_targets", 5 * hz);
2833 
2834 				/* Try to reset the target device */
2835 				if (mrsas_reset_targets(sc) == FAIL)
2836 					tm_target_reset_failed = 1;
2837 			}
2838 
2839 			/* If this is a DCMD timeout or FW fault,
2840 			 * then go for controller reset
2841 			 */
2842 			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
2843 			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
2844 				if (tm_target_reset_failed)
2845 					device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
2846 					    "TM FAILURE!\n");
2847 				else
2848 					device_printf(sc->mrsas_dev, "Initiaiting OCR "
2849 						"because of %s!\n", sc->do_timedout_reset ?
2850 						"DCMD IO Timeout" : "FW fault");
2851 
2852 				mtx_lock_spin(&sc->ioctl_lock);
2853 				sc->reset_in_progress = 1;
2854 				mtx_unlock_spin(&sc->ioctl_lock);
2855 				sc->reset_count++;
2856 
2857 				/*
2858 				 * Wait for the AEN task to be completed if it is running.
2859 				 */
2860 				mtx_unlock(&sc->sim_lock);
2861 				taskqueue_drain(sc->ev_tq, &sc->ev_task);
2862 				mtx_lock(&sc->sim_lock);
2863 
2864 				taskqueue_block(sc->ev_tq);
2865 				/* Try to reset the controller */
2866 				mrsas_reset_ctrl(sc, sc->do_timedout_reset);
2867 
2868 				sc->do_timedout_reset = 0;
2869 				sc->reset_in_progress = 0;
2870 				tm_target_reset_failed = 0;
2871 				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
2872 				memset(sc->target_reset_pool, 0,
2873 				    sizeof(sc->target_reset_pool));
2874 				taskqueue_unblock(sc->ev_tq);
2875 			}
2876 
2877 			/* Now allow IOs to come to the SIM */
2878 			 mrsas_xpt_release(sc);
2879 		}
2880 	}
2881 	mtx_unlock(&sc->sim_lock);
2882 	sc->ocr_thread_active = 0;
2883 	mrsas_kproc_exit(0);
2884 }
2885 
2886 /*
2887  * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
2888  * input:					Adapter Context.
2889  *
2890  * This function will clear reply descriptor so that post OCR driver and FW will
2891  * lost old history.
2892  */
2893 void
2894 mrsas_reset_reply_desc(struct mrsas_softc *sc)
2895 {
2896 	int i, count;
2897 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2898 
2899 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2900 	for (i = 0; i < count; i++)
2901 		sc->last_reply_idx[i] = 0;
2902 
2903 	reply_desc = sc->reply_desc_mem;
2904 	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2905 		reply_desc->Words = MRSAS_ULONG_MAX;
2906 	}
2907 }
2908 
2909 /*
2910  * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
2911  * input:				Adapter Context.
2912  *
2913  * This function will run from thread context so that it can sleep. 1. Do not
2914  * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2915  * to complete for 180 seconds. 3. If #2 does not find any outstanding
2916  * command Controller is in working state, so skip OCR. Otherwise, do
2917  * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2918  * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2919  * OCR, Re-fire Management command and move Controller to Operation state.
2920  */
2921 int
2922 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
2923 {
2924 	int retval = SUCCESS, i, j, retry = 0;
2925 	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2926 	union ccb *ccb;
2927 	struct mrsas_mfi_cmd *mfi_cmd;
2928 	struct mrsas_mpt_cmd *mpt_cmd;
2929 	union mrsas_evt_class_locale class_locale;
2930 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2931 
2932 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2933 		device_printf(sc->mrsas_dev,
2934 		    "mrsas: Hardware critical error, returning FAIL.\n");
2935 		return FAIL;
2936 	}
2937 	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2938 	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2939 	mrsas_disable_intr(sc);
2940 	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
2941 	    sc->mrsas_fw_fault_check_delay * hz);
2942 
2943 	/* First try waiting for commands to complete */
2944 	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
2945 		mrsas_dprint(sc, MRSAS_OCR,
2946 		    "resetting adapter from %s.\n",
2947 		    __func__);
2948 		/* Now return commands back to the CAM layer */
2949 		mtx_unlock(&sc->sim_lock);
2950 		for (i = 0; i < sc->max_fw_cmds; i++) {
2951 			mpt_cmd = sc->mpt_cmd_list[i];
2952 			if (mpt_cmd->ccb_ptr) {
2953 				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2954 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2955 				mrsas_cmd_done(sc, mpt_cmd);
2956 				mrsas_atomic_dec(&sc->fw_outstanding);
2957 			}
2958 		}
2959 		mtx_lock(&sc->sim_lock);
2960 
2961 		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2962 		    outbound_scratch_pad));
2963 		abs_state = status_reg & MFI_STATE_MASK;
2964 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
2965 		if (sc->disableOnlineCtrlReset ||
2966 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2967 			/* Reset not supported, kill adapter */
2968 			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
2969 			mrsas_kill_hba(sc);
2970 			retval = FAIL;
2971 			goto out;
2972 		}
2973 		/* Now try to reset the chip */
2974 		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
2975 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2976 			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
2977 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2978 			    MPI2_WRSEQ_1ST_KEY_VALUE);
2979 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2980 			    MPI2_WRSEQ_2ND_KEY_VALUE);
2981 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2982 			    MPI2_WRSEQ_3RD_KEY_VALUE);
2983 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2984 			    MPI2_WRSEQ_4TH_KEY_VALUE);
2985 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2986 			    MPI2_WRSEQ_5TH_KEY_VALUE);
2987 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2988 			    MPI2_WRSEQ_6TH_KEY_VALUE);
2989 
2990 			/* Check that the diag write enable (DRWE) bit is on */
2991 			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2992 			    fusion_host_diag));
2993 			retry = 0;
2994 			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2995 				DELAY(100 * 1000);
2996 				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2997 				    fusion_host_diag));
2998 				if (retry++ == 100) {
2999 					mrsas_dprint(sc, MRSAS_OCR,
3000 					    "Host diag unlock failed!\n");
3001 					break;
3002 				}
3003 			}
3004 			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3005 				continue;
3006 
3007 			/* Send chip reset command */
3008 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3009 			    host_diag | HOST_DIAG_RESET_ADAPTER);
3010 			DELAY(3000 * 1000);
3011 
3012 			/* Make sure reset adapter bit is cleared */
3013 			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3014 			    fusion_host_diag));
3015 			retry = 0;
3016 			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3017 				DELAY(100 * 1000);
3018 				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3019 				    fusion_host_diag));
3020 				if (retry++ == 1000) {
3021 					mrsas_dprint(sc, MRSAS_OCR,
3022 					    "Diag reset adapter never cleared!\n");
3023 					break;
3024 				}
3025 			}
3026 			if (host_diag & HOST_DIAG_RESET_ADAPTER)
3027 				continue;
3028 
3029 			abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3030 			    outbound_scratch_pad)) & MFI_STATE_MASK;
3031 			retry = 0;
3032 
3033 			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3034 				DELAY(100 * 1000);
3035 				abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3036 				    outbound_scratch_pad)) & MFI_STATE_MASK;
3037 			}
3038 			if (abs_state <= MFI_STATE_FW_INIT) {
3039 				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3040 				    " state = 0x%x\n", abs_state);
3041 				continue;
3042 			}
3043 			/* Wait for FW to become ready */
3044 			if (mrsas_transition_to_ready(sc, 1)) {
3045 				mrsas_dprint(sc, MRSAS_OCR,
3046 				    "mrsas: Failed to transition controller to ready.\n");
3047 				continue;
3048 			}
3049 			mrsas_reset_reply_desc(sc);
3050 			if (mrsas_ioc_init(sc)) {
3051 				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
3052 				continue;
3053 			}
3054 			for (j = 0; j < sc->max_fw_cmds; j++) {
3055 				mpt_cmd = sc->mpt_cmd_list[j];
3056 				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3057 					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3058 					/* If not an IOCTL then release the command else re-fire */
3059 					if (!mfi_cmd->sync_cmd) {
3060 						mrsas_release_mfi_cmd(mfi_cmd);
3061 					} else {
3062 						req_desc = mrsas_get_request_desc(sc,
3063 						    mfi_cmd->cmd_id.context.smid - 1);
3064 						mrsas_dprint(sc, MRSAS_OCR,
3065 						    "Re-fire command DCMD opcode 0x%x index %d\n ",
3066 						    mfi_cmd->frame->dcmd.opcode, j);
3067 						if (!req_desc)
3068 							device_printf(sc->mrsas_dev,
3069 							    "Cannot build MPT cmd.\n");
3070 						else
3071 							mrsas_fire_cmd(sc, req_desc->addr.u.low,
3072 							    req_desc->addr.u.high);
3073 					}
3074 				}
3075 			}
3076 
3077 			/* Reset load balance info */
3078 			memset(sc->load_balance_info, 0,
3079 			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3080 
3081 			if (mrsas_get_ctrl_info(sc)) {
3082 				mrsas_kill_hba(sc);
3083 				retval = FAIL;
3084 				goto out;
3085 			}
3086 			if (!mrsas_get_map_info(sc))
3087 				mrsas_sync_map_info(sc);
3088 
3089 			megasas_setup_jbod_map(sc);
3090 
3091 			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3092 			mrsas_enable_intr(sc);
3093 			sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3094 
3095 			/* Register AEN with FW for last sequence number */
3096 			class_locale.members.reserved = 0;
3097 			class_locale.members.locale = MR_EVT_LOCALE_ALL;
3098 			class_locale.members.class = MR_EVT_CLASS_DEBUG;
3099 
3100 			mtx_unlock(&sc->sim_lock);
3101 			if (mrsas_register_aen(sc, sc->last_seq_num,
3102 			    class_locale.word)) {
3103 				device_printf(sc->mrsas_dev,
3104 				    "ERROR: AEN registration FAILED from OCR !!! "
3105 				    "Further events from the controller cannot be notified."
3106 				    "Either there is some problem in the controller"
3107 				    "or the controller does not support AEN.\n"
3108 				    "Please contact to the SUPPORT TEAM if the problem persists\n");
3109 			}
3110 			mtx_lock(&sc->sim_lock);
3111 
3112 			/* Adapter reset completed successfully */
3113 			device_printf(sc->mrsas_dev, "Reset successful\n");
3114 			retval = SUCCESS;
3115 			goto out;
3116 		}
3117 		/* Reset failed, kill the adapter */
3118 		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3119 		mrsas_kill_hba(sc);
3120 		retval = FAIL;
3121 	} else {
3122 		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3123 		mrsas_enable_intr(sc);
3124 		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3125 	}
3126 out:
3127 	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3128 	mrsas_dprint(sc, MRSAS_OCR,
3129 	    "Reset Exit with %d.\n", retval);
3130 	return retval;
3131 }
3132 
3133 /*
3134  * mrsas_kill_hba:	Kill HBA when OCR is not supported
3135  * input:			Adapter Context.
3136  *
3137  * This function will kill HBA when OCR is not supported.
3138  */
3139 void
3140 mrsas_kill_hba(struct mrsas_softc *sc)
3141 {
3142 	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
3143 	DELAY(1000 * 1000);
3144 	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
3145 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
3146 	    MFI_STOP_ADP);
3147 	/* Flush */
3148 	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
3149 	mrsas_complete_outstanding_ioctls(sc);
3150 }
3151 
3152 /**
3153  * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
3154  * input:			Controller softc
3155  *
3156  * Returns void
3157  */
3158 void
3159 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3160 {
3161 	int i;
3162 	struct mrsas_mpt_cmd *cmd_mpt;
3163 	struct mrsas_mfi_cmd *cmd_mfi;
3164 	u_int32_t count, MSIxIndex;
3165 
3166 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3167 	for (i = 0; i < sc->max_fw_cmds; i++) {
3168 		cmd_mpt = sc->mpt_cmd_list[i];
3169 
3170 		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3171 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3172 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3173 				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3174 					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3175 					    cmd_mpt->io_request->RaidContext.status);
3176 			}
3177 		}
3178 	}
3179 }
3180 
3181 /*
3182  * mrsas_wait_for_outstanding:	Wait for outstanding commands
3183  * input:						Adapter Context.
3184  *
3185  * This function will wait for 180 seconds for outstanding commands to be
3186  * completed.
3187  */
3188 int
3189 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
3190 {
3191 	int i, outstanding, retval = 0;
3192 	u_int32_t fw_state, count, MSIxIndex;
3193 
3194 
3195 	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
3196 		if (sc->remove_in_progress) {
3197 			mrsas_dprint(sc, MRSAS_OCR,
3198 			    "Driver remove or shutdown called.\n");
3199 			retval = 1;
3200 			goto out;
3201 		}
3202 		/* Check if firmware is in fault state */
3203 		fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3204 		    outbound_scratch_pad)) & MFI_STATE_MASK;
3205 		if (fw_state == MFI_STATE_FAULT) {
3206 			mrsas_dprint(sc, MRSAS_OCR,
3207 			    "Found FW in FAULT state, will reset adapter.\n");
3208 			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3209 			mtx_unlock(&sc->sim_lock);
3210 			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3211 				mrsas_complete_cmd(sc, MSIxIndex);
3212 			mtx_lock(&sc->sim_lock);
3213 			retval = 1;
3214 			goto out;
3215 		}
3216 		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
3217 			mrsas_dprint(sc, MRSAS_OCR,
3218 			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
3219 			retval = 1;
3220 			goto out;
3221 		}
3222 		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
3223 		if (!outstanding)
3224 			goto out;
3225 
3226 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
3227 			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
3228 			    "commands to complete\n", i, outstanding);
3229 			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3230 			mtx_unlock(&sc->sim_lock);
3231 			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3232 				mrsas_complete_cmd(sc, MSIxIndex);
3233 			mtx_lock(&sc->sim_lock);
3234 		}
3235 		DELAY(1000 * 1000);
3236 	}
3237 
3238 	if (mrsas_atomic_read(&sc->fw_outstanding)) {
3239 		mrsas_dprint(sc, MRSAS_OCR,
3240 		    " pending commands remain after waiting,"
3241 		    " will reset adapter.\n");
3242 		retval = 1;
3243 	}
3244 out:
3245 	return retval;
3246 }
3247 
3248 /*
3249  * mrsas_release_mfi_cmd:	Return a cmd to free command pool
3250  * input:					Command packet for return to free cmd pool
3251  *
3252  * This function returns the MFI & MPT command to the command list.
3253  */
3254 void
3255 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
3256 {
3257 	struct mrsas_softc *sc = cmd_mfi->sc;
3258 	struct mrsas_mpt_cmd *cmd_mpt;
3259 
3260 
3261 	mtx_lock(&sc->mfi_cmd_pool_lock);
3262 	/*
3263 	 * Release the mpt command (if at all it is allocated
3264 	 * associated with the mfi command
3265 	 */
3266 	if (cmd_mfi->cmd_id.context.smid) {
3267 		mtx_lock(&sc->mpt_cmd_pool_lock);
3268 		/* Get the mpt cmd from mfi cmd frame's smid value */
3269 		cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
3270 		cmd_mpt->flags = 0;
3271 		cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
3272 		TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
3273 		mtx_unlock(&sc->mpt_cmd_pool_lock);
3274 	}
3275 	/* Release the mfi command */
3276 	cmd_mfi->ccb_ptr = NULL;
3277 	cmd_mfi->cmd_id.frame_count = 0;
3278 	TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
3279 	mtx_unlock(&sc->mfi_cmd_pool_lock);
3280 
3281 	return;
3282 }
3283 
3284 /*
3285  * mrsas_get_controller_info:	Returns FW's controller structure
3286  * input:						Adapter soft state
3287  * 								Controller information structure
3288  *
3289  * Issues an internal command (DCMD) to get the FW's controller structure. This
3290  * information is mainly used to find out the maximum IO transfer per command
3291  * supported by the FW.
3292  */
3293 static int
3294 mrsas_get_ctrl_info(struct mrsas_softc *sc)
3295 {
3296 	int retcode = 0;
3297 	u_int8_t do_ocr = 1;
3298 	struct mrsas_mfi_cmd *cmd;
3299 	struct mrsas_dcmd_frame *dcmd;
3300 
3301 	cmd = mrsas_get_mfi_cmd(sc);
3302 
3303 	if (!cmd) {
3304 		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3305 		return -ENOMEM;
3306 	}
3307 	dcmd = &cmd->frame->dcmd;
3308 
3309 	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3310 		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3311 		mrsas_release_mfi_cmd(cmd);
3312 		return -ENOMEM;
3313 	}
3314 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3315 
3316 	dcmd->cmd = MFI_CMD_DCMD;
3317 	dcmd->cmd_status = 0xFF;
3318 	dcmd->sge_count = 1;
3319 	dcmd->flags = MFI_FRAME_DIR_READ;
3320 	dcmd->timeout = 0;
3321 	dcmd->pad_0 = 0;
3322 	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
3323 	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
3324 	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
3325 	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
3326 
3327 	if (!sc->mask_interrupts)
3328 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
3329 	else
3330 		retcode = mrsas_issue_polled(sc, cmd);
3331 
3332 	if (retcode == ETIMEDOUT)
3333 		goto dcmd_timeout;
3334 	else
3335 		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3336 
3337 	do_ocr = 0;
3338 	mrsas_update_ext_vd_details(sc);
3339 
3340 	sc->use_seqnum_jbod_fp =
3341 	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
3342 	sc->disableOnlineCtrlReset =
3343 	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
3344 
3345 dcmd_timeout:
3346 	mrsas_free_ctlr_info_cmd(sc);
3347 
3348 	if (do_ocr)
3349 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3350 
3351 	if (!sc->mask_interrupts)
3352 		mrsas_release_mfi_cmd(cmd);
3353 
3354 	return (retcode);
3355 }
3356 
3357 /*
3358  * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3359  * input:
3360  *	sc - Controller's softc
3361 */
3362 static void
3363 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3364 {
3365 	sc->max256vdSupport =
3366 	sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3367 	/* Below is additional check to address future FW enhancement */
3368 	if (sc->ctrl_info->max_lds > 64)
3369 		sc->max256vdSupport = 1;
3370 
3371 	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3372 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3373 	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3374 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3375 	if (sc->max256vdSupport) {
3376 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3377 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3378 	} else {
3379 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3380 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3381 	}
3382 
3383 	sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3384 	    (sizeof(MR_LD_SPAN_MAP) *
3385 	    (sc->fw_supported_vd_count - 1));
3386 	sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3387 	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3388 	    (sizeof(MR_LD_SPAN_MAP) *
3389 	    (sc->drv_supported_vd_count - 1));
3390 
3391 	sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3392 
3393 	if (sc->max256vdSupport)
3394 		sc->current_map_sz = sc->new_map_sz;
3395 	else
3396 		sc->current_map_sz = sc->old_map_sz;
3397 }
3398 
3399 /*
3400  * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3401  * input:						Adapter soft state
3402  *
3403  * Allocates DMAable memory for the controller info internal command.
3404  */
3405 int
3406 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3407 {
3408 	int ctlr_info_size;
3409 
3410 	/* Allocate get controller info command */
3411 	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3412 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3413 	    1, 0,
3414 	    BUS_SPACE_MAXADDR_32BIT,
3415 	    BUS_SPACE_MAXADDR,
3416 	    NULL, NULL,
3417 	    ctlr_info_size,
3418 	    1,
3419 	    ctlr_info_size,
3420 	    BUS_DMA_ALLOCNOW,
3421 	    NULL, NULL,
3422 	    &sc->ctlr_info_tag)) {
3423 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3424 		return (ENOMEM);
3425 	}
3426 	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3427 	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3428 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3429 		return (ENOMEM);
3430 	}
3431 	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3432 	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3433 	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3434 		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3435 		return (ENOMEM);
3436 	}
3437 	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3438 	return (0);
3439 }
3440 
3441 /*
3442  * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3443  * input:						Adapter soft state
3444  *
3445  * Deallocates memory of the get controller info cmd.
3446  */
3447 void
3448 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3449 {
3450 	if (sc->ctlr_info_phys_addr)
3451 		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3452 	if (sc->ctlr_info_mem != NULL)
3453 		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3454 	if (sc->ctlr_info_tag != NULL)
3455 		bus_dma_tag_destroy(sc->ctlr_info_tag);
3456 }
3457 
3458 /*
3459  * mrsas_issue_polled:	Issues a polling command
3460  * inputs:				Adapter soft state
3461  * 						Command packet to be issued
3462  *
3463  * This function is for posting of internal commands to Firmware.  MFI requires
3464  * the cmd_status to be set to 0xFF before posting.  The maximun wait time of
3465  * the poll response timer is 180 seconds.
3466  */
3467 int
3468 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3469 {
3470 	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3471 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3472 	int i, retcode = SUCCESS;
3473 
3474 	frame_hdr->cmd_status = 0xFF;
3475 	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3476 
3477 	/* Issue the frame using inbound queue port */
3478 	if (mrsas_issue_dcmd(sc, cmd)) {
3479 		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3480 		return (1);
3481 	}
3482 	/*
3483 	 * Poll response timer to wait for Firmware response.  While this
3484 	 * timer with the DELAY call could block CPU, the time interval for
3485 	 * this is only 1 millisecond.
3486 	 */
3487 	if (frame_hdr->cmd_status == 0xFF) {
3488 		for (i = 0; i < (max_wait * 1000); i++) {
3489 			if (frame_hdr->cmd_status == 0xFF)
3490 				DELAY(1000);
3491 			else
3492 				break;
3493 		}
3494 	}
3495 	if (frame_hdr->cmd_status == 0xFF) {
3496 		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3497 		    "seconds from %s\n", max_wait, __func__);
3498 		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3499 		    cmd->frame->dcmd.opcode);
3500 		retcode = ETIMEDOUT;
3501 	}
3502 	return (retcode);
3503 }
3504 
3505 /*
3506  * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3507  * input:				Adapter soft state mfi cmd pointer
3508  *
3509  * This function is called by mrsas_issued_blocked_cmd() and
3510  * mrsas_issued_polled(), to build the MPT command and then fire the command
3511  * to Firmware.
3512  */
3513 int
3514 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3515 {
3516 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3517 
3518 	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3519 	if (!req_desc) {
3520 		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3521 		return (1);
3522 	}
3523 	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3524 
3525 	return (0);
3526 }
3527 
3528 /*
3529  * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3530  * input:				Adapter soft state mfi cmd to build
3531  *
3532  * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3533  * command and prepares the MPT command to send to Firmware.
3534  */
3535 MRSAS_REQUEST_DESCRIPTOR_UNION *
3536 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3537 {
3538 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3539 	u_int16_t index;
3540 
3541 	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3542 		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3543 		return NULL;
3544 	}
3545 	index = cmd->cmd_id.context.smid;
3546 
3547 	req_desc = mrsas_get_request_desc(sc, index - 1);
3548 	if (!req_desc)
3549 		return NULL;
3550 
3551 	req_desc->addr.Words = 0;
3552 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3553 
3554 	req_desc->SCSIIO.SMID = index;
3555 
3556 	return (req_desc);
3557 }
3558 
3559 /*
3560  * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3561  * input:						Adapter soft state mfi cmd pointer
3562  *
3563  * The MPT command and the io_request are setup as a passthru command. The SGE
3564  * chain address is set to frame_phys_addr of the MFI command.
3565  */
3566 u_int8_t
3567 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
3568 {
3569 	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3570 	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
3571 	struct mrsas_mpt_cmd *mpt_cmd;
3572 	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
3573 
3574 	mpt_cmd = mrsas_get_mpt_cmd(sc);
3575 	if (!mpt_cmd)
3576 		return (1);
3577 
3578 	/* Save the smid. To be used for returning the cmd */
3579 	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
3580 
3581 	mpt_cmd->sync_cmd_idx = mfi_cmd->index;
3582 
3583 	/*
3584 	 * For cmds where the flag is set, store the flag and check on
3585 	 * completion. For cmds with this flag, don't call
3586 	 * mrsas_complete_cmd.
3587 	 */
3588 
3589 	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
3590 		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3591 
3592 	io_req = mpt_cmd->io_request;
3593 
3594 		if (sc->mrsas_gen3_ctrl) {
3595 		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
3596 
3597 		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
3598 		sgl_ptr_end->Flags = 0;
3599 	}
3600 	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
3601 
3602 	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3603 	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
3604 	io_req->ChainOffset = sc->chain_offset_mfi_pthru;
3605 
3606 	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
3607 
3608 	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3609 	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3610 
3611 	mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
3612 
3613 	return (0);
3614 }
3615 
3616 /*
3617  * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3618  * input:					Adapter soft state Command to be issued
3619  *
3620  * This function waits on an event for the command to be returned from the ISR.
3621  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3622  * internal and ioctl commands.
3623  */
3624 int
3625 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3626 {
3627 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3628 	unsigned long total_time = 0;
3629 	int retcode = SUCCESS;
3630 
3631 	/* Initialize cmd_status */
3632 	cmd->cmd_status = 0xFF;
3633 
3634 	/* Build MPT-MFI command for issue to FW */
3635 	if (mrsas_issue_dcmd(sc, cmd)) {
3636 		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3637 		return (1);
3638 	}
3639 	sc->chan = (void *)&cmd;
3640 
3641 	while (1) {
3642 		if (cmd->cmd_status == 0xFF) {
3643 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3644 		} else
3645 			break;
3646 
3647 		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
3648 					 * command */
3649 			total_time++;
3650 			if (total_time >= max_wait) {
3651 				device_printf(sc->mrsas_dev,
3652 				    "Internal command timed out after %d seconds.\n", max_wait);
3653 				retcode = 1;
3654 				break;
3655 			}
3656 		}
3657 	}
3658 
3659 	if (cmd->cmd_status == 0xFF) {
3660 		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3661 		    "seconds from %s\n", max_wait, __func__);
3662 		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3663 		    cmd->frame->dcmd.opcode);
3664 		retcode = ETIMEDOUT;
3665 	}
3666 	return (retcode);
3667 }
3668 
3669 /*
3670  * mrsas_complete_mptmfi_passthru:	Completes a command
3671  * input:	@sc:					Adapter soft state
3672  * 			@cmd:					Command to be completed
3673  * 			@status:				cmd completion status
3674  *
3675  * This function is called from mrsas_complete_cmd() after an interrupt is
3676  * received from Firmware, and io_request->Function is
3677  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3678  */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	/* Snapshot the FW-reported status; the frame may be released below. */
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	/* Completed commands no longer own a CAM CCB reference. */
	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH: non-IOCTL SCSI cmds share the handling below */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			/* Disable fast path until the new map is validated. */
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* MFI_STAT_NOT_FOUND: no new map; stop here. */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-arm map-change notification with FW. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {

			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
3777 
3778 /*
3779  * mrsas_wakeup:	Completes an internal command
3780  * input:			Adapter soft state
3781  * 					Command to be completed
3782  *
3783  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3784  * timer is started.  This function is called from
3785  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3786  * from the command wait.
3787  */
3788 void
3789 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3790 {
3791 	cmd->cmd_status = cmd->frame->io.cmd_status;
3792 
3793 	if (cmd->cmd_status == 0xFF)
3794 		cmd->cmd_status = 0;
3795 
3796 	sc->chan = (void *)&cmd;
3797 	wakeup_one((void *)&sc->chan);
3798 	return;
3799 }
3800 
3801 /*
3802  * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
3803  * Adapter soft state Shutdown/Hibernate
3804  *
3805  * This function issues a DCMD internal command to Firmware to initiate shutdown
3806  * of the controller.
3807  */
3808 static void
3809 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3810 {
3811 	struct mrsas_mfi_cmd *cmd;
3812 	struct mrsas_dcmd_frame *dcmd;
3813 
3814 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3815 		return;
3816 
3817 	cmd = mrsas_get_mfi_cmd(sc);
3818 	if (!cmd) {
3819 		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
3820 		return;
3821 	}
3822 	if (sc->aen_cmd)
3823 		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3824 	if (sc->map_update_cmd)
3825 		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3826 	if (sc->jbod_seq_cmd)
3827 		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
3828 
3829 	dcmd = &cmd->frame->dcmd;
3830 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3831 
3832 	dcmd->cmd = MFI_CMD_DCMD;
3833 	dcmd->cmd_status = 0x0;
3834 	dcmd->sge_count = 0;
3835 	dcmd->flags = MFI_FRAME_DIR_NONE;
3836 	dcmd->timeout = 0;
3837 	dcmd->pad_0 = 0;
3838 	dcmd->data_xfer_len = 0;
3839 	dcmd->opcode = opcode;
3840 
3841 	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3842 
3843 	mrsas_issue_blocked_cmd(sc, cmd);
3844 	mrsas_release_mfi_cmd(cmd);
3845 
3846 	return;
3847 }
3848 
3849 /*
3850  * mrsas_flush_cache:         Requests FW to flush all its caches input:
3851  * Adapter soft state
3852  *
3853  * This function is issues a DCMD internal command to Firmware to initiate
3854  * flushing of all caches.
3855  */
3856 static void
3857 mrsas_flush_cache(struct mrsas_softc *sc)
3858 {
3859 	struct mrsas_mfi_cmd *cmd;
3860 	struct mrsas_dcmd_frame *dcmd;
3861 
3862 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3863 		return;
3864 
3865 	cmd = mrsas_get_mfi_cmd(sc);
3866 	if (!cmd) {
3867 		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3868 		return;
3869 	}
3870 	dcmd = &cmd->frame->dcmd;
3871 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3872 
3873 	dcmd->cmd = MFI_CMD_DCMD;
3874 	dcmd->cmd_status = 0x0;
3875 	dcmd->sge_count = 0;
3876 	dcmd->flags = MFI_FRAME_DIR_NONE;
3877 	dcmd->timeout = 0;
3878 	dcmd->pad_0 = 0;
3879 	dcmd->data_xfer_len = 0;
3880 	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3881 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3882 
3883 	mrsas_issue_blocked_cmd(sc, cmd);
3884 	mrsas_release_mfi_cmd(cmd);
3885 
3886 	return;
3887 }
3888 
/*
 * megasas_sync_pd_seq_num:	Fetch or subscribe to the FW JBOD (system PD)
 *				sequence-number map.
 * input:	@sc:	Adapter soft state
 * 		@pend:	true  = register an async "notify me on change" DCMD
 *			false = synchronously read the current map
 *
 * Returns 0 on success; 1 / ENOMEM / -EINVAL / ETIMEDOUT on failure.
 * A polled timeout schedules an online controller reset (OCR).
 */
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/*
	 * Buffer is the sync header plus one MR_PD_CFG_SEQ per physical
	 * device; the struct already embeds the first element, hence -1.
	 */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Double-buffered jbod map: select the slot by map-id parity. */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = (pd_seq_map_sz);
	dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
	dcmd->sgl.sge32[0].length = (pd_seq_map_sz);

	if (pend) {
		/*
		 * Async subscription: FW completes this DCMD only when the
		 * PD sequence numbers change; the cmd stays outstanding and
		 * is tracked via sc->jbod_seq_cmd until then.
		 * NOTE(review): on mrsas_issue_dcmd() failure the cmd is
		 * neither released nor cleared from sc->jbod_seq_cmd —
		 * looks like a leak; confirm against later revisions.
		 */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = (MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = MFI_FRAME_DIR_READ;

	/* Synchronous read of the current PD sequence-number map. */
	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	/* A timed-out DCMD schedules an online controller reset. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}
3965 
3966 /*
3967  * mrsas_get_map_info:        Load and validate RAID map input:
3968  * Adapter instance soft state
3969  *
3970  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3971  * and validate RAID map.  It returns 0 if successful, 1 other- wise.
3972  */
3973 static int
3974 mrsas_get_map_info(struct mrsas_softc *sc)
3975 {
3976 	uint8_t retcode = 0;
3977 
3978 	sc->fast_path_io = 0;
3979 	if (!mrsas_get_ld_map_info(sc)) {
3980 		retcode = MR_ValidateMapInfo(sc);
3981 		if (retcode == 0) {
3982 			sc->fast_path_io = 1;
3983 			return 0;
3984 		}
3985 	}
3986 	return 1;
3987 }
3988 
3989 /*
3990  * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
3991  * Adapter instance soft state
3992  *
3993  * Issues an internal command (DCMD) to get the FW's controller PD list
3994  * structure.
3995  */
3996 static int
3997 mrsas_get_ld_map_info(struct mrsas_softc *sc)
3998 {
3999 	int retcode = 0;
4000 	struct mrsas_mfi_cmd *cmd;
4001 	struct mrsas_dcmd_frame *dcmd;
4002 	void *map;
4003 	bus_addr_t map_phys_addr = 0;
4004 
4005 	cmd = mrsas_get_mfi_cmd(sc);
4006 	if (!cmd) {
4007 		device_printf(sc->mrsas_dev,
4008 		    "Cannot alloc for ld map info cmd.\n");
4009 		return 1;
4010 	}
4011 	dcmd = &cmd->frame->dcmd;
4012 
4013 	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4014 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4015 	if (!map) {
4016 		device_printf(sc->mrsas_dev,
4017 		    "Failed to alloc mem for ld map info.\n");
4018 		mrsas_release_mfi_cmd(cmd);
4019 		return (ENOMEM);
4020 	}
4021 	memset(map, 0, sizeof(sc->max_map_sz));
4022 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4023 
4024 	dcmd->cmd = MFI_CMD_DCMD;
4025 	dcmd->cmd_status = 0xFF;
4026 	dcmd->sge_count = 1;
4027 	dcmd->flags = MFI_FRAME_DIR_READ;
4028 	dcmd->timeout = 0;
4029 	dcmd->pad_0 = 0;
4030 	dcmd->data_xfer_len = sc->current_map_sz;
4031 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4032 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4033 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
4034 
4035 	retcode = mrsas_issue_polled(sc, cmd);
4036 	if (retcode == ETIMEDOUT)
4037 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4038 
4039 	return (retcode);
4040 }
4041 
4042 /*
4043  * mrsas_sync_map_info:        Get FW's ld_map structure input:
4044  * Adapter instance soft state
4045  *
4046  * Issues an internal command (DCMD) to get the FW's controller PD list
4047  * structure.
4048  */
4049 static int
4050 mrsas_sync_map_info(struct mrsas_softc *sc)
4051 {
4052 	int retcode = 0, i;
4053 	struct mrsas_mfi_cmd *cmd;
4054 	struct mrsas_dcmd_frame *dcmd;
4055 	uint32_t size_sync_info, num_lds;
4056 	MR_LD_TARGET_SYNC *target_map = NULL;
4057 	MR_DRV_RAID_MAP_ALL *map;
4058 	MR_LD_RAID *raid;
4059 	MR_LD_TARGET_SYNC *ld_sync;
4060 	bus_addr_t map_phys_addr = 0;
4061 
4062 	cmd = mrsas_get_mfi_cmd(sc);
4063 	if (!cmd) {
4064 		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
4065 		return ENOMEM;
4066 	}
4067 	map = sc->ld_drv_map[sc->map_id & 1];
4068 	num_lds = map->raidMap.ldCount;
4069 
4070 	dcmd = &cmd->frame->dcmd;
4071 	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
4072 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4073 
4074 	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
4075 	memset(target_map, 0, sc->max_map_sz);
4076 
4077 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
4078 
4079 	ld_sync = (MR_LD_TARGET_SYNC *) target_map;
4080 
4081 	for (i = 0; i < num_lds; i++, ld_sync++) {
4082 		raid = MR_LdRaidGet(i, map);
4083 		ld_sync->targetId = MR_GetLDTgtId(i, map);
4084 		ld_sync->seqNum = raid->seqNum;
4085 	}
4086 
4087 	dcmd->cmd = MFI_CMD_DCMD;
4088 	dcmd->cmd_status = 0xFF;
4089 	dcmd->sge_count = 1;
4090 	dcmd->flags = MFI_FRAME_DIR_WRITE;
4091 	dcmd->timeout = 0;
4092 	dcmd->pad_0 = 0;
4093 	dcmd->data_xfer_len = sc->current_map_sz;
4094 	dcmd->mbox.b[0] = num_lds;
4095 	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
4096 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4097 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4098 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
4099 
4100 	sc->map_update_cmd = cmd;
4101 	if (mrsas_issue_dcmd(sc, cmd)) {
4102 		device_printf(sc->mrsas_dev,
4103 		    "Fail to send sync map info command.\n");
4104 		return (1);
4105 	}
4106 	return (retcode);
4107 }
4108 
4109 /*
4110  * mrsas_get_pd_list:           Returns FW's PD list structure input:
4111  * Adapter soft state
4112  *
4113  * Issues an internal command (DCMD) to get the FW's controller PD list
4114  * structure.  This information is mainly used to find out about system
4115  * supported by Firmware.
4116  */
4117 static int
4118 mrsas_get_pd_list(struct mrsas_softc *sc)
4119 {
4120 	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4121 	u_int8_t do_ocr = 1;
4122 	struct mrsas_mfi_cmd *cmd;
4123 	struct mrsas_dcmd_frame *dcmd;
4124 	struct MR_PD_LIST *pd_list_mem;
4125 	struct MR_PD_ADDRESS *pd_addr;
4126 	bus_addr_t pd_list_phys_addr = 0;
4127 	struct mrsas_tmp_dcmd *tcmd;
4128 
4129 	cmd = mrsas_get_mfi_cmd(sc);
4130 	if (!cmd) {
4131 		device_printf(sc->mrsas_dev,
4132 		    "Cannot alloc for get PD list cmd\n");
4133 		return 1;
4134 	}
4135 	dcmd = &cmd->frame->dcmd;
4136 
4137 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4138 	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4139 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4140 		device_printf(sc->mrsas_dev,
4141 		    "Cannot alloc dmamap for get PD list cmd\n");
4142 		mrsas_release_mfi_cmd(cmd);
4143 		mrsas_free_tmp_dcmd(tcmd);
4144 		free(tcmd, M_MRSAS);
4145 		return (ENOMEM);
4146 	} else {
4147 		pd_list_mem = tcmd->tmp_dcmd_mem;
4148 		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4149 	}
4150 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4151 
4152 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4153 	dcmd->mbox.b[1] = 0;
4154 	dcmd->cmd = MFI_CMD_DCMD;
4155 	dcmd->cmd_status = 0xFF;
4156 	dcmd->sge_count = 1;
4157 	dcmd->flags = MFI_FRAME_DIR_READ;
4158 	dcmd->timeout = 0;
4159 	dcmd->pad_0 = 0;
4160 	dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4161 	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4162 	dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4163 	dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4164 
4165 	if (!sc->mask_interrupts)
4166 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4167 	else
4168 		retcode = mrsas_issue_polled(sc, cmd);
4169 
4170 	if (retcode == ETIMEDOUT)
4171 		goto dcmd_timeout;
4172 
4173 	/* Get the instance PD list */
4174 	pd_count = MRSAS_MAX_PD;
4175 	pd_addr = pd_list_mem->addr;
4176 	if (pd_list_mem->count < pd_count) {
4177 		memset(sc->local_pd_list, 0,
4178 		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4179 		for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4180 			sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4181 			sc->local_pd_list[pd_addr->deviceId].driveType =
4182 			    pd_addr->scsiDevType;
4183 			sc->local_pd_list[pd_addr->deviceId].driveState =
4184 			    MR_PD_STATE_SYSTEM;
4185 			pd_addr++;
4186 		}
4187 		/*
4188 		 * Use mutext/spinlock if pd_list component size increase more than
4189 		 * 32 bit.
4190 		 */
4191 		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4192 		do_ocr = 0;
4193 	}
4194 dcmd_timeout:
4195 	mrsas_free_tmp_dcmd(tcmd);
4196 	free(tcmd, M_MRSAS);
4197 
4198 	if (do_ocr)
4199 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4200 
4201 	if (!sc->mask_interrupts)
4202 		mrsas_release_mfi_cmd(cmd);
4203 
4204 	return (retcode);
4205 }
4206 
4207 /*
4208  * mrsas_get_ld_list:           Returns FW's LD list structure input:
4209  * Adapter soft state
4210  *
4211  * Issues an internal command (DCMD) to get the FW's controller PD list
4212  * structure.  This information is mainly used to find out about supported by
4213  * the FW.
4214  */
4215 static int
4216 mrsas_get_ld_list(struct mrsas_softc *sc)
4217 {
4218 	int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
4219 	u_int8_t do_ocr = 1;
4220 	struct mrsas_mfi_cmd *cmd;
4221 	struct mrsas_dcmd_frame *dcmd;
4222 	struct MR_LD_LIST *ld_list_mem;
4223 	bus_addr_t ld_list_phys_addr = 0;
4224 	struct mrsas_tmp_dcmd *tcmd;
4225 
4226 	cmd = mrsas_get_mfi_cmd(sc);
4227 	if (!cmd) {
4228 		device_printf(sc->mrsas_dev,
4229 		    "Cannot alloc for get LD list cmd\n");
4230 		return 1;
4231 	}
4232 	dcmd = &cmd->frame->dcmd;
4233 
4234 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4235 	ld_list_size = sizeof(struct MR_LD_LIST);
4236 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4237 		device_printf(sc->mrsas_dev,
4238 		    "Cannot alloc dmamap for get LD list cmd\n");
4239 		mrsas_release_mfi_cmd(cmd);
4240 		mrsas_free_tmp_dcmd(tcmd);
4241 		free(tcmd, M_MRSAS);
4242 		return (ENOMEM);
4243 	} else {
4244 		ld_list_mem = tcmd->tmp_dcmd_mem;
4245 		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4246 	}
4247 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4248 
4249 	if (sc->max256vdSupport)
4250 		dcmd->mbox.b[0] = 1;
4251 
4252 	dcmd->cmd = MFI_CMD_DCMD;
4253 	dcmd->cmd_status = 0xFF;
4254 	dcmd->sge_count = 1;
4255 	dcmd->flags = MFI_FRAME_DIR_READ;
4256 	dcmd->timeout = 0;
4257 	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4258 	dcmd->opcode = MR_DCMD_LD_GET_LIST;
4259 	dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4260 	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
4261 	dcmd->pad_0 = 0;
4262 
4263 	if (!sc->mask_interrupts)
4264 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4265 	else
4266 		retcode = mrsas_issue_polled(sc, cmd);
4267 
4268 	if (retcode == ETIMEDOUT)
4269 		goto dcmd_timeout;
4270 
4271 #if VD_EXT_DEBUG
4272 	printf("Number of LDs %d\n", ld_list_mem->ldCount);
4273 #endif
4274 
4275 	/* Get the instance LD list */
4276 	if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4277 		sc->CurLdCount = ld_list_mem->ldCount;
4278 		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4279 		for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4280 			if (ld_list_mem->ldList[ld_index].state != 0) {
4281 				ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4282 				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4283 			}
4284 		}
4285 		do_ocr = 0;
4286 	}
4287 dcmd_timeout:
4288 	mrsas_free_tmp_dcmd(tcmd);
4289 	free(tcmd, M_MRSAS);
4290 
4291 	if (do_ocr)
4292 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4293 	if (!sc->mask_interrupts)
4294 		mrsas_release_mfi_cmd(cmd);
4295 
4296 	return (retcode);
4297 }
4298 
4299 /*
4300  * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
4301  * Adapter soft state Temp command Size of alloction
4302  *
4303  * Allocates DMAable memory for a temporary internal command. The allocated
4304  * memory is initialized to all zeros upon successful loading of the dma
4305  * mapped memory.
4306  */
int
mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
    struct mrsas_tmp_dcmd *tcmd, int size)
{
	/*
	 * One contiguous, 32-bit addressable DMA segment of 'size' bytes.
	 * NOTE(review): on a partial failure below, earlier resources are
	 * NOT torn down here — callers are expected to invoke
	 * mrsas_free_tmp_dcmd(), which only works safely if 'tcmd' was
	 * zero-initialized by the caller; confirm all callers do so.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &tcmd->tmp_dcmd_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
		return (ENOMEM);
	}
	/* mrsas_addr_cb records the single segment's bus address. */
	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
		return (ENOMEM);
	}
	/* Hand the caller a zeroed buffer. */
	memset(tcmd->tmp_dcmd_mem, 0, size);
	return (0);
}
4339 
4340 /*
4341  * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
4342  * temporary dcmd pointer
4343  *
4344  * Deallocates memory of the temporary command for use in the construction of
4345  * the internal DCMD.
4346  */
4347 void
4348 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4349 {
4350 	if (tmp->tmp_dcmd_phys_addr)
4351 		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4352 	if (tmp->tmp_dcmd_mem != NULL)
4353 		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4354 	if (tmp->tmp_dcmd_tag != NULL)
4355 		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4356 }
4357 
4358 /*
4359  * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
4360  * Adapter soft state Previously issued cmd to be aborted
4361  *
4362  * This function is used to abort previously issued commands, such as AEN and
4363  * RAID map sync map commands.  The abort command is sent as a DCMD internal
4364  * command and subsequently the driver will wait for a return status.  The
4365  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4366  */
4367 static int
4368 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4369     struct mrsas_mfi_cmd *cmd_to_abort)
4370 {
4371 	struct mrsas_mfi_cmd *cmd;
4372 	struct mrsas_abort_frame *abort_fr;
4373 	u_int8_t retcode = 0;
4374 	unsigned long total_time = 0;
4375 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4376 
4377 	cmd = mrsas_get_mfi_cmd(sc);
4378 	if (!cmd) {
4379 		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4380 		return (1);
4381 	}
4382 	abort_fr = &cmd->frame->abort;
4383 
4384 	/* Prepare and issue the abort frame */
4385 	abort_fr->cmd = MFI_CMD_ABORT;
4386 	abort_fr->cmd_status = 0xFF;
4387 	abort_fr->flags = 0;
4388 	abort_fr->abort_context = cmd_to_abort->index;
4389 	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4390 	abort_fr->abort_mfi_phys_addr_hi = 0;
4391 
4392 	cmd->sync_cmd = 1;
4393 	cmd->cmd_status = 0xFF;
4394 
4395 	if (mrsas_issue_dcmd(sc, cmd)) {
4396 		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4397 		return (1);
4398 	}
4399 	/* Wait for this cmd to complete */
4400 	sc->chan = (void *)&cmd;
4401 	while (1) {
4402 		if (cmd->cmd_status == 0xFF) {
4403 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4404 		} else
4405 			break;
4406 		total_time++;
4407 		if (total_time >= max_wait) {
4408 			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4409 			retcode = 1;
4410 			break;
4411 		}
4412 	}
4413 
4414 	cmd->sync_cmd = 0;
4415 	mrsas_release_mfi_cmd(cmd);
4416 	return (retcode);
4417 }
4418 
4419 /*
4420  * mrsas_complete_abort:      Completes aborting a command input:
4421  * Adapter soft state Cmd that was issued to abort another cmd
4422  *
4423  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4424  * change after sending the command.  This function is called from
4425  * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4426  */
4427 void
4428 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4429 {
4430 	if (cmd->sync_cmd) {
4431 		cmd->sync_cmd = 0;
4432 		cmd->cmd_status = 0;
4433 		sc->chan = (void *)&cmd;
4434 		wakeup_one((void *)&sc->chan);
4435 	}
4436 	return;
4437 }
4438 
4439 /*
4440  * mrsas_aen_handler:	AEN processing callback function from thread context
4441  * input:				Adapter soft state
4442  *
4443  * Asynchronous event handler
4444  */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	/* No rescans or re-registration while detach/reset is under way. */
	if (sc->remove_in_progress || sc->reset_in_progress) {
		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
			__func__, __LINE__);
		return;
	}
	if (sc->evt_detail_mem) {
		switch (sc->evt_detail_mem->code) {
		case MR_EVT_PD_INSERTED:
			/* Refresh PD list, then rescan the physical SIM. */
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_PD_REMOVED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			/* LD went away: rescan only the logical SIM. */
			mrsas_bus_scan_sim(sc, sc->sim_0);
			break;
		case MR_EVT_LD_CREATED:
			/* Refresh LD list, then rescan the logical SIM. */
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			/* Broad config change: rescan both SIMs below. */
			doscan = 1;
			break;
		case MR_EVT_CTRL_PROP_CHANGED:
			fail_aen = mrsas_get_ctrl_info(sc);
			if (fail_aen)
				goto skip_register_aen;
			break;
		default:
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	if (doscan) {
		/* Refresh both device lists and rescan both SIMs. */
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	/* Re-arm the AEN starting at the next event sequence number. */
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	/* An AEN is already outstanding; nothing to re-register. */
	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);

skip_register_aen:
	return;

}
4544 
4545 
4546 /*
4547  * mrsas_complete_aen:	Completes AEN command
4548  * input:				Adapter soft state
4549  * 						Cmd that was issued to abort another cmd
4550  *
4551  * This function will be called from ISR and will continue event processing from
4552  * thread context by enqueuing task in ev_tq (callback function
4553  * "mrsas_aen_handler").
4554  */
4555 void
4556 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4557 {
4558 	/*
4559 	 * Don't signal app if it is just an aborted previously registered
4560 	 * aen
4561 	 */
4562 	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4563 		sc->mrsas_aen_triggered = 1;
4564 		mtx_lock(&sc->aen_lock);
4565 		if (sc->mrsas_poll_waiting) {
4566 			sc->mrsas_poll_waiting = 0;
4567 			selwakeup(&sc->mrsas_select);
4568 		}
4569 		mtx_unlock(&sc->aen_lock);
4570 	} else
4571 		cmd->abort_aen = 0;
4572 
4573 	sc->aen_cmd = NULL;
4574 	mrsas_release_mfi_cmd(cmd);
4575 
4576 	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
4577 
4578 	return;
4579 }
4580 
/* Newbus method table wiring the mrsas entry points into the PCI bus. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}			/* table terminator */
};
4591 
/* Driver description: name, method table, and per-device softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;

/* Register on the PCI bus and declare the dependency on CAM. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
4602