xref: /freebsd/sys/dev/mrsas/mrsas.c (revision 3fc36ee018bb836bd1796067cf4ef8683f166ebc)
1 /*
2  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4  * Support: freebsdraid@avagotech.com
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer. 2. Redistributions
12  * in binary form must reproduce the above copyright notice, this list of
13  * conditions and the following disclaimer in the documentation and/or other
14  * materials provided with the distribution. 3. Neither the name of the
15  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16  * promote products derived from this software without specific prior written
17  * permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing
33  * official policies,either expressed or implied, of the FreeBSD Project.
34  *
35  * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37  *
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
45 
46 #include <cam/cam.h>
47 #include <cam/cam_ccb.h>
48 
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/smp.h>
55 
56 
57 /*
58  * Function prototypes
59  */
60 static d_open_t mrsas_open;
61 static d_close_t mrsas_close;
62 static d_read_t mrsas_read;
63 static d_write_t mrsas_write;
64 static d_ioctl_t mrsas_ioctl;
65 static d_poll_t mrsas_poll;
66 
67 static void mrsas_ich_startup(void *arg);
68 static struct mrsas_mgmt_info mrsas_mgmt_info;
69 static struct mrsas_ident *mrsas_find_ident(device_t);
70 static int mrsas_setup_msix(struct mrsas_softc *sc);
71 static int mrsas_allocate_msix(struct mrsas_softc *sc);
72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73 static void mrsas_flush_cache(struct mrsas_softc *sc);
74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75 static void mrsas_ocr_thread(void *arg);
76 static int mrsas_get_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78 static int mrsas_sync_map_info(struct mrsas_softc *sc);
79 static int mrsas_get_pd_list(struct mrsas_softc *sc);
80 static int mrsas_get_ld_list(struct mrsas_softc *sc);
81 static int mrsas_setup_irq(struct mrsas_softc *sc);
82 static int mrsas_alloc_mem(struct mrsas_softc *sc);
83 static int mrsas_init_fw(struct mrsas_softc *sc);
84 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87 static int mrsas_clear_intr(struct mrsas_softc *sc);
88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
90 static int
91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
92     struct mrsas_mfi_cmd *cmd_to_abort);
93 static struct mrsas_softc *
94 mrsas_get_softc_instance(struct cdev *dev,
95     u_long cmd, caddr_t arg);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
97 u_int8_t
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99     struct mrsas_mfi_cmd *mfi_cmd);
100 void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int	mrsas_init_adapter(struct mrsas_softc *sc);
103 int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int	mrsas_ioc_init(struct mrsas_softc *sc);
107 int	mrsas_bus_scan(struct mrsas_softc *sc);
108 int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
113 int
114 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
115     struct mrsas_mfi_cmd *cmd);
116 int
117 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
118     int size);
119 void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
120 void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
121 void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void	mrsas_disable_intr(struct mrsas_softc *sc);
124 void	mrsas_enable_intr(struct mrsas_softc *sc);
125 void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
126 void	mrsas_free_mem(struct mrsas_softc *sc);
127 void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
128 void	mrsas_isr(void *arg);
129 void	mrsas_teardown_intr(struct mrsas_softc *sc);
130 void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
131 void	mrsas_kill_hba(struct mrsas_softc *sc);
132 void	mrsas_aen_handler(struct mrsas_softc *sc);
133 void
134 mrsas_write_reg(struct mrsas_softc *sc, int offset,
135     u_int32_t value);
136 void
137 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
138     u_int32_t req_desc_hi);
139 void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
140 void
141 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
142     struct mrsas_mfi_cmd *cmd, u_int8_t status);
143 void
144 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
145     u_int8_t extStatus);
146 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
147 
148 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
149         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
150 
151 extern int mrsas_cam_attach(struct mrsas_softc *sc);
152 extern void mrsas_cam_detach(struct mrsas_softc *sc);
153 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
154 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
155 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
156 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
157 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
158 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
159 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
160 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
163 extern void mrsas_xpt_release(struct mrsas_softc *sc);
164 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
165 mrsas_get_request_desc(struct mrsas_softc *sc,
166     u_int16_t index);
167 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
168 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
169 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
170 
171 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
172 
173 /*
174  * PCI device struct and table
175  *
176  */
/*
 * One entry of the PCI identification table below.  A subvendor/subdevice
 * value of 0xffff acts as a wildcard in mrsas_find_ident().
 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* PCI subsystem vendor ID, or 0xffff wildcard */
	uint16_t subdevice;	/* PCI subsystem device ID, or 0xffff wildcard */
	const char *desc;	/* human-readable controller description */
}	MRSAS_CTLR_ID;

/* Controllers claimed by this driver; scanned linearly by mrsas_find_ident(). */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0, 0, 0, 0, NULL}	/* sentinel: vendor == 0 terminates the scan */
};
195 
196 /*
197  * Character device entry points
198  *
199  */
/*
 * Character-device switch for the /dev/mrsas%u management node created in
 * mrsas_ich_startup().  open/close/read/write are no-op stubs; all real
 * management traffic goes through d_ioctl (mrsas_ioctl).
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
210 
211 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
212 
213 /*
214  * In the cdevsw routines, we find our softc by using the si_drv1 member of
215  * struct cdev.  We set this variable to point to our softc in our attach
216  * routine when we create the /dev entry.
217  */
/*
 * mrsas_open: character-device open entry point.
 *
 * Nothing is required at open time; the softc remains reachable through
 * dev->si_drv1 for the ioctl path.  The original body assigned si_drv1 to a
 * local that was never read (set-but-unused warning); the dead store is
 * removed.  Always succeeds.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}
226 
/*
 * mrsas_close: character-device close entry point.
 *
 * No per-open state is kept, so there is nothing to tear down.  The original
 * body assigned dev->si_drv1 to a local that was never read; the dead store
 * is removed.  Always succeeds.
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}
235 
/*
 * mrsas_read: character-device read entry point.
 *
 * Reading from the management node is not supported; this stub transfers no
 * data and returns success.  The original body assigned dev->si_drv1 to a
 * local that was never read; the dead store is removed.
 */
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
/*
 * mrsas_write: character-device write entry point.
 *
 * Writing to the management node is not supported; this stub transfers no
 * data and returns success.  The original body assigned dev->si_drv1 to a
 * local that was never read; the dead store is removed.
 */
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
252 
253 /*
254  * Register Read/Write Functions
255  *
256  */
257 void
258 mrsas_write_reg(struct mrsas_softc *sc, int offset,
259     u_int32_t value)
260 {
261 	bus_space_tag_t bus_tag = sc->bus_tag;
262 	bus_space_handle_t bus_handle = sc->bus_handle;
263 
264 	bus_space_write_4(bus_tag, bus_handle, offset, value);
265 }
266 
267 u_int32_t
268 mrsas_read_reg(struct mrsas_softc *sc, int offset)
269 {
270 	bus_space_tag_t bus_tag = sc->bus_tag;
271 	bus_space_handle_t bus_handle = sc->bus_handle;
272 
273 	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
274 }
275 
276 
277 /*
278  * Interrupt Disable/Enable/Clear Functions
279  *
280  */
/*
 * mrsas_disable_intr: mask all controller interrupts.
 * @sc: adapter soft state
 *
 * Sets sc->mask_interrupts so the ISR path knows interrupts are off, writes
 * an all-ones mask to outbound_intr_mask, and reads the register back to
 * flush the posted PCI write so the mask takes effect before returning.
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;	/* mask every interrupt source */
	u_int32_t status;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
292 
/*
 * mrsas_enable_intr: unmask controller interrupts.
 * @sc: adapter soft state
 *
 * Clears sc->mask_interrupts, acknowledges any stale bits by writing ~0 to
 * outbound_intr_status, then writes the inverted enable mask to
 * outbound_intr_mask.  Each write is followed by a read-back of the same
 * register to flush the posted PCI write (the read results are intentionally
 * unused).
 */
void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
	u_int32_t status;

	sc->mask_interrupts = 0;
	/* Ack/clear any pending status bits before unmasking. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* ~mask: 0 bits enable the corresponding interrupt sources. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
306 
/*
 * mrsas_clear_intr: identify and acknowledge a pending interrupt.
 * @sc: adapter soft state
 *
 * Returns 1 when the interrupt belongs to this adapter (FW state change or
 * reply interrupt), 0 when it is not ours (shared-IRQ case).  A firmware
 * FAULT detected here wakes the OCR (online controller reset) thread.
 */
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status, fw_status, fw_state;

	/* Read received interrupt */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/*
	 * If FW state change interrupt is received, write to it again to
	 * clear
	 */
	if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
		fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
			/* Hand recovery off to the OCR thread, if running. */
			if (sc->ocr_thread_active)
				wakeup(&sc->ocr_chan);
		}
		/* Write-1-to-clear, then read back to flush the posted write. */
		mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
		mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
		return (1);
	}
	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}
339 
340 /*
341  * PCI Support Functions
342  *
343  */
344 static struct mrsas_ident *
345 mrsas_find_ident(device_t dev)
346 {
347 	struct mrsas_ident *pci_device;
348 
349 	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
350 		if ((pci_device->vendor == pci_get_vendor(dev)) &&
351 		    (pci_device->device == pci_get_device(dev)) &&
352 		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
353 		    (pci_device->subvendor == 0xffff)) &&
354 		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
355 		    (pci_device->subdevice == 0xffff)))
356 			return (pci_device);
357 	}
358 	return (NULL);
359 }
360 
361 static int
362 mrsas_probe(device_t dev)
363 {
364 	static u_int8_t first_ctrl = 1;
365 	struct mrsas_ident *id;
366 
367 	if ((id = mrsas_find_ident(dev)) != NULL) {
368 		if (first_ctrl) {
369 			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
370 			    MRSAS_VERSION);
371 			first_ctrl = 0;
372 		}
373 		device_set_desc(dev, id->desc);
374 		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
375 		return (-30);
376 	}
377 	return (ENXIO);
378 }
379 
380 /*
381  * mrsas_setup_sysctl:	setup sysctl values for mrsas
382  * input:				Adapter instance soft state
383  *
384  * Setup sysctl entries for mrsas driver.
385  */
/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:				Adapter instance soft state
 *
 * Registers the per-controller sysctl tree (hw.mrsas.<unit>) and its leaf
 * nodes: OCR control/status, driver version, FW command counters, debug
 * level, and timeout knobs.  Uses the device's own sysctl context when the
 * bus provides one, otherwise creates a private context on the softc.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* Fall back to a driver-private context if the bus gave us no tree. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

}
453 
454 /*
455  * mrsas_get_tunables:	get tunable parameters.
456  * input:				Adapter instance soft state
457  *
458  * Get tunable parameters. This will help to debug driver at boot time.
459  */
460 static void
461 mrsas_get_tunables(struct mrsas_softc *sc)
462 {
463 	char tmpstr[80];
464 
465 	/* XXX default to some debugging for now */
466 	sc->mrsas_debug = MRSAS_FAULT;
467 	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
468 	sc->mrsas_fw_fault_check_delay = 1;
469 	sc->reset_count = 0;
470 	sc->reset_in_progress = 0;
471 
472 	/*
473 	 * Grab the global variables.
474 	 */
475 	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
476 
477 	/*
478 	 * Grab the global variables.
479 	 */
480 	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
481 
482 	/* Grab the unit-instance variables */
483 	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
484 	    device_get_unit(sc->mrsas_dev));
485 	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
486 }
487 
488 /*
489  * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
490  * Used to get sequence number at driver load time.
491  * input:		Adapter soft state
492  *
493  * Allocates DMAable memory for the event log info internal command.
494  */
495 int
496 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
497 {
498 	int el_info_size;
499 
500 	/* Allocate get event log info command */
501 	el_info_size = sizeof(struct mrsas_evt_log_info);
502 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
503 	    1, 0,
504 	    BUS_SPACE_MAXADDR_32BIT,
505 	    BUS_SPACE_MAXADDR,
506 	    NULL, NULL,
507 	    el_info_size,
508 	    1,
509 	    el_info_size,
510 	    BUS_DMA_ALLOCNOW,
511 	    NULL, NULL,
512 	    &sc->el_info_tag)) {
513 		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
514 		return (ENOMEM);
515 	}
516 	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
517 	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
518 		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
519 		return (ENOMEM);
520 	}
521 	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
522 	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
523 	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
524 		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
525 		return (ENOMEM);
526 	}
527 	memset(sc->el_info_mem, 0, el_info_size);
528 	return (0);
529 }
530 
531 /*
532  * mrsas_free_evt_info_cmd:	Free memory for Event log info command
533  * input:					Adapter soft state
534  *
535  * Deallocates memory for the event log info internal command.
536  */
/*
 * mrsas_free_evt_info_cmd:	Free memory for Event log info command
 * input:					Adapter soft state
 *
 * Deallocates memory for the event log info internal command.  Teardown is
 * in the reverse order of allocation — unload the map, free the DMA memory,
 * destroy the tag — and each step is guarded so partially-allocated state
 * is handled.
 */
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
	if (sc->el_info_phys_addr)
		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
	if (sc->el_info_mem != NULL)
		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
	if (sc->el_info_tag != NULL)
		bus_dma_tag_destroy(sc->el_info_tag);
}
547 
548 /*
549  *  mrsas_get_seq_num:	Get latest event sequence number
550  *  @sc:				Adapter soft state
551  *  @eli:				Firmware event log sequence number information.
552  *
553  * Firmware maintains a log of all events in a non-volatile area.
554  * Driver get the sequence number using DCMD
555  * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
556  */
557 
/*
 * mrsas_get_seq_num: fetch the newest FW event sequence number via the
 * MR_DCMD_CTRL_EVENT_GET_INFO DCMD, blocking until the command completes.
 * @sc:  adapter soft state
 * @eli: out — filled with the firmware's event log info on success
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or ETIMEDOUT; on
 * timeout the MFI command is deliberately NOT released (it is still owned
 * by FW) and a DCMD-timeout OCR is requested instead.
 */
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer that FW will fill with the event log info. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the DCMD frame: single-SGE read of el_info_mem. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/* On timeout, request OCR and keep the cmd (FW may still own it). */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
611 
612 
613 /*
614  *  mrsas_register_aen:		Register for asynchronous event notification
615  *  @sc:			Adapter soft state
616  *  @seq_num:			Starting sequence number
617  *  @class_locale:		Class of the event
618  *
619  *  This function subscribes for events beyond the @seq_num
620  *  and type @class_locale.
621  *
622  */
/*
 * mrsas_register_aen: subscribe to FW asynchronous event notifications for
 * events beyond @seq_num matching @class_locale_word.
 * @sc:                adapter soft state
 * @seq_num:           starting event sequence number
 * @class_locale_word: packed class/locale filter (union mrsas_evt_class_locale)
 *
 * Returns 0 on success (or when an existing registration already covers the
 * request), -ENOMEM when no MFI command is free, or a non-zero error from
 * the abort/issue path.
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		/* mbox.w[1] holds the class/locale of the pending AEN. */
		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset of old and new filters. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			/* Abort the pending AEN so it can be re-issued. */
			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort "
				    "previous AEN command\n");
				return ret_val;
			}
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;	/* first sequence number of interest */
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;	/* class/locale filter */
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/* Another registration raced us; drop ours and keep the winner's. */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
731 
732 /*
733  * mrsas_start_aen:	Subscribes to AEN during driver load time
734  * @instance:		Adapter soft state
735  */
736 static int
737 mrsas_start_aen(struct mrsas_softc *sc)
738 {
739 	struct mrsas_evt_log_info eli;
740 	union mrsas_evt_class_locale class_locale;
741 
742 
743 	/* Get the latest sequence number from FW */
744 
745 	memset(&eli, 0, sizeof(eli));
746 
747 	if (mrsas_get_seq_num(sc, &eli))
748 		return -1;
749 
750 	/* Register AEN with FW for latest sequence number plus 1 */
751 	class_locale.members.reserved = 0;
752 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
753 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
754 
755 	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
756 	    class_locale.word);
757 
758 }
759 
760 /*
761  * mrsas_setup_msix:	Allocate MSI-x vectors
762  * @sc:					adapter soft state
763  */
/*
 * mrsas_setup_msix:	Allocate MSI-x vectors
 * @sc:					adapter soft state
 *
 * For each vector previously granted by mrsas_allocate_msix(), allocates
 * the IRQ resource (rid = index + 1, per MSI-X convention) and installs
 * mrsas_isr with a per-vector context.  On any failure, tears down all
 * interrupts set up so far and returns FAIL.
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		sc->irq_id[i] = i + 1;	/* MSI-X rids start at 1 */
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	/* Undo every vector allocated/wired before the failure. */
	mrsas_teardown_intr(sc);
	return (FAIL);
}
796 
797 /*
798  * mrsas_allocate_msix:		Setup MSI-x vectors
799  * @sc:						adapter soft state
800  */
801 static int
802 mrsas_allocate_msix(struct mrsas_softc *sc)
803 {
804 	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
805 		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
806 		    " of vectors\n", sc->msix_vectors);
807 	} else {
808 		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
809 		goto irq_alloc_failed;
810 	}
811 	return SUCCESS;
812 
813 irq_alloc_failed:
814 	mrsas_teardown_intr(sc);
815 	return (FAIL);
816 }
817 
818 /*
819  * mrsas_attach:	PCI entry point
820  * input:			pointer to device struct
821  *
822  * Performs setup of PCI and registers, initializes mutexes and linked lists,
823  * registers interrupts and CAM, and initializes   the adapter/controller to
824  * its proper state.
825  */
/*
 * mrsas_attach:	PCI entry point
 * input:			pointer to device struct
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes   the adapter/controller to
 * its proper state.  cdev creation and AEN registration are deferred to the
 * mrsas_ich_startup() config-intrhook.  Returns SUCCESS or ENXIO, unwinding
 * in reverse order through the attach_fail_* labels on error.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, bar, error;

	/* NOTE(review): softc from device_get_softc() should already be
	 * zeroed; this memset looks defensive — confirm before removing. */
	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/* NOTE(review): this tests the I/O-port enable bit even though the
	 * driver maps a memory BAR below — verify PORTEN vs MEMEN intent. */
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* NOTE(review): 'bar' is read but never used afterwards. */
	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	/* Cache the tag/handle used by mrsas_read_reg()/mrsas_write_reg(). */
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Intialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	/* ioctl_lock is a spin mutex; the others are default sleep mutexes. */
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);

	/* Intialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* OCR (online controller reset) watchdog/recovery thread. */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Error unwind: each label undoes the steps that succeeded above it. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
944 
945 /*
946  * Interrupt config hook
947  */
948 static void
949 mrsas_ich_startup(void *arg)
950 {
951 	struct mrsas_softc *sc = (struct mrsas_softc *)arg;
952 
953 	/*
954 	 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
955 	 */
956 	sema_init(&sc->ioctl_count_sema,
957 	    MRSAS_MAX_MFI_CMDS - 5,
958 	    IOCTL_SEMA_DESCRIPTION);
959 
960 	/* Create a /dev entry for mrsas controller. */
961 	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
962 	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
963 	    device_get_unit(sc->mrsas_dev));
964 
965 	if (device_get_unit(sc->mrsas_dev) == 0) {
966 		make_dev_alias_p(MAKEDEV_CHECKNAME,
967 		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
968 		    "megaraid_sas_ioctl_node");
969 	}
970 	if (sc->mrsas_cdev)
971 		sc->mrsas_cdev->si_drv1 = sc;
972 
973 	/*
974 	 * Add this controller to mrsas_mgmt_info structure so that it can be
975 	 * exported to management applications
976 	 */
977 	if (device_get_unit(sc->mrsas_dev) == 0)
978 		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
979 
980 	mrsas_mgmt_info.count++;
981 	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
982 	mrsas_mgmt_info.max_index++;
983 
984 	/* Enable Interrupts */
985 	mrsas_enable_intr(sc);
986 
987 	/* Initiate AEN (Asynchronous Event Notification) */
988 	if (mrsas_start_aen(sc)) {
989 		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
990 		    "Further events from the controller will not be communicated.\n"
991 		    "Either there is some problem in the controller"
992 		    "or the controller does not support AEN.\n"
993 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
994 	}
995 	if (sc->mrsas_ich.ich_arg != NULL) {
996 		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
997 		config_intrhook_disestablish(&sc->mrsas_ich);
998 		sc->mrsas_ich.ich_arg = NULL;
999 	}
1000 }
1001 
/*
 * mrsas_detach:	De-allocates and teardown resources
 * input:			pointer to device struct
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.  The order matters: userland
 * access is cut off first, then any in-flight OCR is drained, then the
 * firmware is flushed and shut down before resources are destroyed.
 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Checked by mrsas_ioctl() and the OCR thread to refuse new work. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Wake the OCR thread so it can observe remove_in_progress and exit. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Poll once a second until any in-flight controller reset finishes. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Then wait for the OCR kernel thread itself to terminate. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Flush controller cache and shut the firmware down cleanly. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1088 
/*
 * mrsas_free_mem:		Frees allocated memory
 * input:				Adapter instance soft state
 *
 * This function is called from mrsas_detach() to free previously allocated
 * memory.  Each DMA resource is released in three guarded steps — unload
 * the map, free the memory, destroy the tag — so a partially completed
 * mrsas_alloc_mem() can be unwound safely.
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory (double-buffered: two copies indexed 0/1)
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	/*
	 * Free JBOD map memory (also double-buffered)
	 */
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);


	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free MFI frames
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			/*
			 * NOTE(review): assumes every slot was populated and
			 * that mrsas_free_frame() tolerates a partially set
			 * up command — confirm against mrsas_alloc_mfi_cmds().
			 */
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_cmd = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_cmd; i++) {
			/* NOTE(review): mpt_cmd is dereferenced without a NULL
			 * check — assumed non-NULL for all max_fw_cmds slots. */
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}
1241 
1242 /*
1243  * mrsas_teardown_intr:	Teardown interrupt
1244  * input:				Adapter instance soft state
1245  *
1246  * This function is called from mrsas_detach() to teardown and release bus
1247  * interrupt resourse.
1248  */
1249 void
1250 mrsas_teardown_intr(struct mrsas_softc *sc)
1251 {
1252 	int i;
1253 
1254 	if (!sc->msix_enable) {
1255 		if (sc->intr_handle[0])
1256 			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1257 		if (sc->mrsas_irq[0] != NULL)
1258 			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1259 			    sc->irq_id[0], sc->mrsas_irq[0]);
1260 		sc->intr_handle[0] = NULL;
1261 	} else {
1262 		for (i = 0; i < sc->msix_vectors; i++) {
1263 			if (sc->intr_handle[i])
1264 				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1265 				    sc->intr_handle[i]);
1266 
1267 			if (sc->mrsas_irq[i] != NULL)
1268 				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1269 				    sc->irq_id[i], sc->mrsas_irq[i]);
1270 
1271 			sc->intr_handle[i] = NULL;
1272 		}
1273 		pci_release_msi(sc->mrsas_dev);
1274 	}
1275 
1276 }
1277 
/*
 * mrsas_suspend:	Suspend entry point
 * input:			Device struct pointer
 *
 * This function is the entry point for system suspend from the OS.
 * Currently a no-op that always reports success; no controller state
 * is saved.
 */
static int
mrsas_suspend(device_t dev)
{
	/* This will be filled when the driver will have hibernation support */
	return (0);
}
1290 
/*
 * mrsas_resume:	Resume entry point
 * input:			Device struct pointer
 *
 * This function is the entry point for system resume from the OS.
 * Currently a no-op that always reports success; no controller state
 * is restored.
 */
static int
mrsas_resume(device_t dev)
{
	/* This will be filled when the driver will have hibernation support */
	return (0);
}
1303 
1304 /**
1305  * mrsas_get_softc_instance:    Find softc instance based on cmd type
1306  *
1307  * This function will return softc instance based on cmd type.
1308  * In some case, application fire ioctl on required management instance and
1309  * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1310  * case, else get the softc instance from host_no provided by application in
1311  * user data.
1312  */
1313 
1314 static struct mrsas_softc *
1315 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1316 {
1317 	struct mrsas_softc *sc = NULL;
1318 	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1319 
1320 	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1321 		sc = dev->si_drv1;
1322 	} else {
1323 		/*
1324 		 * get the Host number & the softc from data sent by the
1325 		 * Application
1326 		 */
1327 		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1328 		if (sc == NULL)
1329 			printf("There is no Controller number %d\n",
1330 			    user_ioc->host_no);
1331 		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1332 			mrsas_dprint(sc, MRSAS_FAULT,
1333 			    "Invalid Controller number %d\n", user_ioc->host_no);
1334 	}
1335 
1336 	return sc;
1337 }
1338 
/*
 * mrsas_ioctl:	IOCtl commands entry point.
 *
 * This function is the entry point for IOCtls from the OS.  It calls the
 * appropriate function for processing depending on the command received.
 * Returns 0 on success, ENOENT for an unknown command or missing/removed
 * controller, or the error from the dispatched handler.
 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	/* Refuse new IOCTLs once detach/shutdown has begun. */
	if (sc->remove_in_progress) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Driver remove or shutdown called.\n");
		return ENOENT;
	}
	/*
	 * Fast path: the spin lock only guards the reset_in_progress
	 * snapshot; if no OCR is running, dispatch immediately.
	 */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	/* Otherwise poll once a second until the controller reset finishes. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Fill the caller-supplied buffer with this device's PCI location. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1421 
1422 /*
1423  * mrsas_poll:	poll entry point for mrsas driver fd
1424  *
1425  * This function is the entry point for poll from the OS.  It waits for some AEN
1426  * events to be triggered from the controller and notifies back.
1427  */
1428 static int
1429 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1430 {
1431 	struct mrsas_softc *sc;
1432 	int revents = 0;
1433 
1434 	sc = dev->si_drv1;
1435 
1436 	if (poll_events & (POLLIN | POLLRDNORM)) {
1437 		if (sc->mrsas_aen_triggered) {
1438 			revents |= poll_events & (POLLIN | POLLRDNORM);
1439 		}
1440 	}
1441 	if (revents == 0) {
1442 		if (poll_events & (POLLIN | POLLRDNORM)) {
1443 			mtx_lock(&sc->aen_lock);
1444 			sc->mrsas_poll_waiting = 1;
1445 			selrecord(td, &sc->mrsas_select);
1446 			mtx_unlock(&sc->aen_lock);
1447 		}
1448 	}
1449 	return revents;
1450 }
1451 
1452 /*
1453  * mrsas_setup_irq:	Set up interrupt
1454  * input:			Adapter instance soft state
1455  *
1456  * This function sets up interrupts as a bus resource, with flags indicating
1457  * resource permitting contemporaneous sharing and for resource to activate
1458  * atomically.
1459  */
1460 static int
1461 mrsas_setup_irq(struct mrsas_softc *sc)
1462 {
1463 	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1464 		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1465 
1466 	else {
1467 		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1468 		sc->irq_context[0].sc = sc;
1469 		sc->irq_context[0].MSIxIndex = 0;
1470 		sc->irq_id[0] = 0;
1471 		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1472 		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1473 		if (sc->mrsas_irq[0] == NULL) {
1474 			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1475 			    "interrupt\n");
1476 			return (FAIL);
1477 		}
1478 		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1479 		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1480 		    &sc->irq_context[0], &sc->intr_handle[0])) {
1481 			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1482 			    "interrupt\n");
1483 			return (FAIL);
1484 		}
1485 	}
1486 	return (0);
1487 }
1488 
1489 /*
1490  * mrsas_isr:	ISR entry point
1491  * input:		argument pointer
1492  *
1493  * This function is the interrupt service routine entry point.  There are two
1494  * types of interrupts, state change interrupt and response interrupt.  If an
1495  * interrupt is not ours, we just return.
1496  */
1497 void
1498 mrsas_isr(void *arg)
1499 {
1500 	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1501 	struct mrsas_softc *sc = irq_context->sc;
1502 	int status = 0;
1503 
1504 	if (sc->mask_interrupts)
1505 		return;
1506 
1507 	if (!sc->msix_vectors) {
1508 		status = mrsas_clear_intr(sc);
1509 		if (!status)
1510 			return;
1511 	}
1512 	/* If we are resetting, bail */
1513 	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1514 		printf(" Entered into ISR when OCR is going active. \n");
1515 		mrsas_clear_intr(sc);
1516 		return;
1517 	}
1518 	/* Process for reply request and clear response interrupt */
1519 	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1520 		mrsas_clear_intr(sc);
1521 
1522 	return;
1523 }
1524 
/*
 * mrsas_complete_cmd:	Process reply request
 * input:				Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process reply request and clear
 * response interrupt. Processing of the reply request entails walking
 * through the reply descriptor array for the command request  pended from
 * Firmware.  We look at the Function field to determine the command type and
 * perform the appropriate action.  Before we return, we clear the response
 * interrupt.  Returns DONE when there was nothing to do, 0 otherwise.
 */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;


	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/*
	 * Locate this MSI-X vector's reply ring and resume at the index
	 * left over from the previous invocation.
	 */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Find our reply descriptor for the command and process.  An unused
	 * slot is all 0xFF — that terminates the walk.
	 */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		/* SMIDs are 1-based; command list index is SMID - 1. */
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* Release the load-balance slot taken at submit time. */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			mrsas_atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		/* Advance the per-vector consumer index, wrapping at queue depth. */
		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			/* Wrapped: restart at the base of this vector's ring. */
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			/* Newer controllers take the doorbell on per-group registers. */
			if (sc->msix_enable) {
				if ((sc->device_id == MRSAS_INVADER) ||
				    (sc->device_id == MRSAS_FURY) ||
				    (sc->device_id == MRSAS_INTRUDER) ||
				    (sc->device_id == MRSAS_INTRUDER_24) ||
				    (sc->device_id == MRSAS_CUTLASS_52) ||
				    (sc->device_id == MRSAS_CUTLASS_53))
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if ((sc->device_id == MRSAS_INVADER) ||
		    (sc->device_id == MRSAS_FURY) ||
		    (sc->device_id == MRSAS_INTRUDER) ||
		    (sc->device_id == MRSAS_INTRUDER_24) ||
		    (sc->device_id == MRSAS_CUTLASS_52) ||
		    (sc->device_id == MRSAS_CUTLASS_53)) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
1676 
/*
 * mrsas_map_mpt_cmd_status:	Map firmware command status to CAM status
 * input:						MPT command, FW status, FW extended status
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the
 * CCB.  (The previous header comment, "Allocate DMAable memory", was a
 * copy-paste error.)
 */
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		/* NOTE(review): &csio.sense_data is the address of an embedded
		 * member, so this check is always true. */
		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, cmd->sense, 18);
			cmd->ccb_ptr->csio.sense_len = 18;
			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		/* Non-zero LUNs are invalid; LUN 0 means the device itself is gone. */
		if (cmd->ccb_ptr->ccb_h.target_lun)
			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		/* RAID map was stale; ask CAM to requeue so the IO is retried. */
		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		cmd->ccb_ptr->csio.scsi_status = status;
	}
	return;
}
1723 
1724 /*
1725  * mrsas_alloc_mem:	Allocate DMAable memory
1726  * input:			Adapter instance soft state
1727  *
1728  * This function creates the parent DMA tag and allocates DMAable memory. DMA
1729  * tag describes constraints of DMA mapping. Memory allocated is mapped into
1730  * Kernel virtual address. Callback argument is physical memory address.
1731  */
1732 static int
1733 mrsas_alloc_mem(struct mrsas_softc *sc)
1734 {
1735 	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1736 	          chain_frame_size, evt_detail_size, count;
1737 
1738 	/*
1739 	 * Allocate parent DMA tag
1740 	 */
1741 	if (bus_dma_tag_create(NULL,	/* parent */
1742 	    1,				/* alignment */
1743 	    0,				/* boundary */
1744 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1745 	    BUS_SPACE_MAXADDR,		/* highaddr */
1746 	    NULL, NULL,			/* filter, filterarg */
1747 	    MAXPHYS,			/* maxsize */
1748 	    sc->max_num_sge,		/* nsegments */
1749 	    MAXPHYS,			/* maxsegsize */
1750 	    0,				/* flags */
1751 	    NULL, NULL,			/* lockfunc, lockarg */
1752 	    &sc->mrsas_parent_tag	/* tag */
1753 	    )) {
1754 		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1755 		return (ENOMEM);
1756 	}
1757 	/*
1758 	 * Allocate for version buffer
1759 	 */
1760 	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1761 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1762 	    1, 0,
1763 	    BUS_SPACE_MAXADDR_32BIT,
1764 	    BUS_SPACE_MAXADDR,
1765 	    NULL, NULL,
1766 	    verbuf_size,
1767 	    1,
1768 	    verbuf_size,
1769 	    BUS_DMA_ALLOCNOW,
1770 	    NULL, NULL,
1771 	    &sc->verbuf_tag)) {
1772 		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1773 		return (ENOMEM);
1774 	}
1775 	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1776 	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1777 		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1778 		return (ENOMEM);
1779 	}
1780 	bzero(sc->verbuf_mem, verbuf_size);
1781 	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1782 	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1783 	    BUS_DMA_NOWAIT)) {
1784 		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1785 		return (ENOMEM);
1786 	}
1787 	/*
1788 	 * Allocate IO Request Frames
1789 	 */
1790 	io_req_size = sc->io_frames_alloc_sz;
1791 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1792 	    16, 0,
1793 	    BUS_SPACE_MAXADDR_32BIT,
1794 	    BUS_SPACE_MAXADDR,
1795 	    NULL, NULL,
1796 	    io_req_size,
1797 	    1,
1798 	    io_req_size,
1799 	    BUS_DMA_ALLOCNOW,
1800 	    NULL, NULL,
1801 	    &sc->io_request_tag)) {
1802 		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1803 		return (ENOMEM);
1804 	}
1805 	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1806 	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1807 		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1808 		return (ENOMEM);
1809 	}
1810 	bzero(sc->io_request_mem, io_req_size);
1811 	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1812 	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
1813 	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1814 		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1815 		return (ENOMEM);
1816 	}
1817 	/*
1818 	 * Allocate Chain Frames
1819 	 */
1820 	chain_frame_size = sc->chain_frames_alloc_sz;
1821 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1822 	    4, 0,
1823 	    BUS_SPACE_MAXADDR_32BIT,
1824 	    BUS_SPACE_MAXADDR,
1825 	    NULL, NULL,
1826 	    chain_frame_size,
1827 	    1,
1828 	    chain_frame_size,
1829 	    BUS_DMA_ALLOCNOW,
1830 	    NULL, NULL,
1831 	    &sc->chain_frame_tag)) {
1832 		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1833 		return (ENOMEM);
1834 	}
1835 	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1836 	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1837 		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1838 		return (ENOMEM);
1839 	}
1840 	bzero(sc->chain_frame_mem, chain_frame_size);
1841 	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1842 	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1843 	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1844 		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1845 		return (ENOMEM);
1846 	}
1847 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
1848 	/*
1849 	 * Allocate Reply Descriptor Array
1850 	 */
1851 	reply_desc_size = sc->reply_alloc_sz * count;
1852 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1853 	    16, 0,
1854 	    BUS_SPACE_MAXADDR_32BIT,
1855 	    BUS_SPACE_MAXADDR,
1856 	    NULL, NULL,
1857 	    reply_desc_size,
1858 	    1,
1859 	    reply_desc_size,
1860 	    BUS_DMA_ALLOCNOW,
1861 	    NULL, NULL,
1862 	    &sc->reply_desc_tag)) {
1863 		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1864 		return (ENOMEM);
1865 	}
1866 	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1867 	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1868 		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1869 		return (ENOMEM);
1870 	}
1871 	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1872 	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1873 	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1874 		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1875 		return (ENOMEM);
1876 	}
1877 	/*
1878 	 * Allocate Sense Buffer Array.  Keep in lower 4GB
1879 	 */
1880 	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1881 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1882 	    64, 0,
1883 	    BUS_SPACE_MAXADDR_32BIT,
1884 	    BUS_SPACE_MAXADDR,
1885 	    NULL, NULL,
1886 	    sense_size,
1887 	    1,
1888 	    sense_size,
1889 	    BUS_DMA_ALLOCNOW,
1890 	    NULL, NULL,
1891 	    &sc->sense_tag)) {
1892 		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1893 		return (ENOMEM);
1894 	}
1895 	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1896 	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1897 		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1898 		return (ENOMEM);
1899 	}
1900 	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1901 	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1902 	    BUS_DMA_NOWAIT)) {
1903 		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1904 		return (ENOMEM);
1905 	}
1906 	/*
1907 	 * Allocate for Event detail structure
1908 	 */
1909 	evt_detail_size = sizeof(struct mrsas_evt_detail);
1910 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1911 	    1, 0,
1912 	    BUS_SPACE_MAXADDR_32BIT,
1913 	    BUS_SPACE_MAXADDR,
1914 	    NULL, NULL,
1915 	    evt_detail_size,
1916 	    1,
1917 	    evt_detail_size,
1918 	    BUS_DMA_ALLOCNOW,
1919 	    NULL, NULL,
1920 	    &sc->evt_detail_tag)) {
1921 		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1922 		return (ENOMEM);
1923 	}
1924 	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1925 	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1926 		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1927 		return (ENOMEM);
1928 	}
1929 	bzero(sc->evt_detail_mem, evt_detail_size);
1930 	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1931 	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1932 	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1933 		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1934 		return (ENOMEM);
1935 	}
1936 	/*
1937 	 * Create a dma tag for data buffers; size will be the maximum
1938 	 * possible I/O size (280kB).
1939 	 */
1940 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1941 	    1,
1942 	    0,
1943 	    BUS_SPACE_MAXADDR,
1944 	    BUS_SPACE_MAXADDR,
1945 	    NULL, NULL,
1946 	    MAXPHYS,
1947 	    sc->max_num_sge,		/* nsegments */
1948 	    MAXPHYS,
1949 	    BUS_DMA_ALLOCNOW,
1950 	    busdma_lock_mutex,
1951 	    &sc->io_lock,
1952 	    &sc->data_tag)) {
1953 		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1954 		return (ENOMEM);
1955 	}
1956 	return (0);
1957 }
1958 
1959 /*
1960  * mrsas_addr_cb:	Callback function of bus_dmamap_load()
1961  * input:			callback argument, machine dependent type
1962  * 					that describes DMA segments, number of segments, error code
1963  *
1964  * This function is for the driver to receive mapping information resultant of
1965  * the bus_dmamap_load(). The information is actually not being used, but the
1966  * address is saved anyway.
1967  */
1968 void
1969 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1970 {
1971 	bus_addr_t *addr;
1972 
1973 	addr = arg;
1974 	*addr = segs[0].ds_addr;
1975 }
1976 
1977 /*
1978  * mrsas_setup_raidmap:	Set up RAID map.
1979  * input:				Adapter instance soft state
1980  *
1981  * Allocate DMA memory for the RAID maps and perform setup.
1982  */
1983 static int
1984 mrsas_setup_raidmap(struct mrsas_softc *sc)
1985 {
1986 	int i;
1987 
1988 	for (i = 0; i < 2; i++) {
1989 		sc->ld_drv_map[i] =
1990 		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1991 		/* Do Error handling */
1992 		if (!sc->ld_drv_map[i]) {
1993 			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
1994 
1995 			if (i == 1)
1996 				free(sc->ld_drv_map[0], M_MRSAS);
1997 			/* ABORT driver initialization */
1998 			goto ABORT;
1999 		}
2000 	}
2001 
2002 	for (int i = 0; i < 2; i++) {
2003 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2004 		    4, 0,
2005 		    BUS_SPACE_MAXADDR_32BIT,
2006 		    BUS_SPACE_MAXADDR,
2007 		    NULL, NULL,
2008 		    sc->max_map_sz,
2009 		    1,
2010 		    sc->max_map_sz,
2011 		    BUS_DMA_ALLOCNOW,
2012 		    NULL, NULL,
2013 		    &sc->raidmap_tag[i])) {
2014 			device_printf(sc->mrsas_dev,
2015 			    "Cannot allocate raid map tag.\n");
2016 			return (ENOMEM);
2017 		}
2018 		if (bus_dmamem_alloc(sc->raidmap_tag[i],
2019 		    (void **)&sc->raidmap_mem[i],
2020 		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2021 			device_printf(sc->mrsas_dev,
2022 			    "Cannot allocate raidmap memory.\n");
2023 			return (ENOMEM);
2024 		}
2025 		bzero(sc->raidmap_mem[i], sc->max_map_sz);
2026 
2027 		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2028 		    sc->raidmap_mem[i], sc->max_map_sz,
2029 		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2030 		    BUS_DMA_NOWAIT)) {
2031 			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2032 			return (ENOMEM);
2033 		}
2034 		if (!sc->raidmap_mem[i]) {
2035 			device_printf(sc->mrsas_dev,
2036 			    "Cannot allocate memory for raid map.\n");
2037 			return (ENOMEM);
2038 		}
2039 	}
2040 
2041 	if (!mrsas_get_map_info(sc))
2042 		mrsas_sync_map_info(sc);
2043 
2044 	return (0);
2045 
2046 ABORT:
2047 	return (1);
2048 }
2049 
2050 /**
2051  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
2052  * @sc:				Adapter soft state
2053  *
2054  * Return 0 on success.
2055  */
2056 void
2057 megasas_setup_jbod_map(struct mrsas_softc *sc)
2058 {
2059 	int i;
2060 	uint32_t pd_seq_map_sz;
2061 
2062 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2063 	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2064 
2065 	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2066 		sc->use_seqnum_jbod_fp = 0;
2067 		return;
2068 	}
2069 	if (sc->jbodmap_mem[0])
2070 		goto skip_alloc;
2071 
2072 	for (i = 0; i < 2; i++) {
2073 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2074 		    4, 0,
2075 		    BUS_SPACE_MAXADDR_32BIT,
2076 		    BUS_SPACE_MAXADDR,
2077 		    NULL, NULL,
2078 		    pd_seq_map_sz,
2079 		    1,
2080 		    pd_seq_map_sz,
2081 		    BUS_DMA_ALLOCNOW,
2082 		    NULL, NULL,
2083 		    &sc->jbodmap_tag[i])) {
2084 			device_printf(sc->mrsas_dev,
2085 			    "Cannot allocate jbod map tag.\n");
2086 			return;
2087 		}
2088 		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2089 		    (void **)&sc->jbodmap_mem[i],
2090 		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2091 			device_printf(sc->mrsas_dev,
2092 			    "Cannot allocate jbod map memory.\n");
2093 			return;
2094 		}
2095 		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2096 
2097 		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2098 		    sc->jbodmap_mem[i], pd_seq_map_sz,
2099 		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2100 		    BUS_DMA_NOWAIT)) {
2101 			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2102 			return;
2103 		}
2104 		if (!sc->jbodmap_mem[i]) {
2105 			device_printf(sc->mrsas_dev,
2106 			    "Cannot allocate memory for jbod map.\n");
2107 			sc->use_seqnum_jbod_fp = 0;
2108 			return;
2109 		}
2110 	}
2111 
2112 skip_alloc:
2113 	if (!megasas_sync_pd_seq_num(sc, false) &&
2114 	    !megasas_sync_pd_seq_num(sc, true))
2115 		sc->use_seqnum_jbod_fp = 1;
2116 	else
2117 		sc->use_seqnum_jbod_fp = 0;
2118 
2119 	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2120 }
2121 
2122 /*
2123  * mrsas_init_fw:	Initialize Firmware
2124  * input:			Adapter soft state
2125  *
2126  * Calls transition_to_ready() to make sure Firmware is in operational state and
2127  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
2128  * issues internal commands to get the controller info after the IOC_INIT
2129  * command response is received by Firmware.  Note:  code relating to
2130  * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2131  * is left here as placeholder.
2132  */
2133 static int
2134 mrsas_init_fw(struct mrsas_softc *sc)
2135 {
2136 
2137 	int ret, loop, ocr = 0;
2138 	u_int32_t max_sectors_1;
2139 	u_int32_t max_sectors_2;
2140 	u_int32_t tmp_sectors;
2141 	u_int32_t scratch_pad_2;
2142 	int msix_enable = 0;
2143 	int fw_msix_count = 0;
2144 
2145 	/* Make sure Firmware is ready */
2146 	ret = mrsas_transition_to_ready(sc, ocr);
2147 	if (ret != SUCCESS) {
2148 		return (ret);
2149 	}
2150 	/* MSI-x index 0- reply post host index register */
2151 	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2152 	/* Check if MSI-X is supported while in ready state */
2153 	msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2154 
2155 	if (msix_enable) {
2156 		scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2157 		    outbound_scratch_pad_2));
2158 
2159 		/* Check max MSI-X vectors */
2160 		if (sc->device_id == MRSAS_TBOLT) {
2161 			sc->msix_vectors = (scratch_pad_2
2162 			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2163 			fw_msix_count = sc->msix_vectors;
2164 		} else {
2165 			/* Invader/Fury supports 96 MSI-X vectors */
2166 			sc->msix_vectors = ((scratch_pad_2
2167 			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2168 			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2169 			fw_msix_count = sc->msix_vectors;
2170 
2171 			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2172 			    loop++) {
2173 				sc->msix_reg_offset[loop] =
2174 				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2175 				    (loop * 0x10);
2176 			}
2177 		}
2178 
2179 		/* Don't bother allocating more MSI-X vectors than cpus */
2180 		sc->msix_vectors = min(sc->msix_vectors,
2181 		    mp_ncpus);
2182 
2183 		/* Allocate MSI-x vectors */
2184 		if (mrsas_allocate_msix(sc) == SUCCESS)
2185 			sc->msix_enable = 1;
2186 		else
2187 			sc->msix_enable = 0;
2188 
2189 		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2190 		    "Online CPU %d Current MSIX <%d>\n",
2191 		    fw_msix_count, mp_ncpus, sc->msix_vectors);
2192 	}
2193 	if (mrsas_init_adapter(sc) != SUCCESS) {
2194 		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2195 		return (1);
2196 	}
2197 	/* Allocate internal commands for pass-thru */
2198 	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2199 		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2200 		return (1);
2201 	}
2202 	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2203 	if (!sc->ctrl_info) {
2204 		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2205 		return (1);
2206 	}
2207 	/*
2208 	 * Get the controller info from FW, so that the MAX VD support
2209 	 * availability can be decided.
2210 	 */
2211 	if (mrsas_get_ctrl_info(sc)) {
2212 		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2213 		return (1);
2214 	}
2215 	sc->secure_jbod_support =
2216 	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2217 
2218 	if (sc->secure_jbod_support)
2219 		device_printf(sc->mrsas_dev, "FW supports SED \n");
2220 
2221 	if (sc->use_seqnum_jbod_fp)
2222 		device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2223 
2224 	if (mrsas_setup_raidmap(sc) != SUCCESS) {
2225 		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2226 		    "There seems to be some problem in the controller\n"
2227 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
2228 	}
2229 	megasas_setup_jbod_map(sc);
2230 
2231 	/* For pass-thru, get PD/LD list and controller info */
2232 	memset(sc->pd_list, 0,
2233 	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2234 	if (mrsas_get_pd_list(sc) != SUCCESS) {
2235 		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2236 		return (1);
2237 	}
2238 	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2239 	if (mrsas_get_ld_list(sc) != SUCCESS) {
2240 		device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2241 		return (1);
2242 	}
2243 	/*
2244 	 * Compute the max allowed sectors per IO: The controller info has
2245 	 * two limits on max sectors. Driver should use the minimum of these
2246 	 * two.
2247 	 *
2248 	 * 1 << stripe_sz_ops.min = max sectors per strip
2249 	 *
2250 	 * Note that older firmwares ( < FW ver 30) didn't report information to
2251 	 * calculate max_sectors_1. So the number ended up as zero always.
2252 	 */
2253 	tmp_sectors = 0;
2254 	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2255 	    sc->ctrl_info->max_strips_per_io;
2256 	max_sectors_2 = sc->ctrl_info->max_request_size;
2257 	tmp_sectors = min(max_sectors_1, max_sectors_2);
2258 	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2259 
2260 	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2261 		sc->max_sectors_per_req = tmp_sectors;
2262 
2263 	sc->disableOnlineCtrlReset =
2264 	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2265 	sc->UnevenSpanSupport =
2266 	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2267 	if (sc->UnevenSpanSupport) {
2268 		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2269 		    sc->UnevenSpanSupport);
2270 
2271 		if (MR_ValidateMapInfo(sc))
2272 			sc->fast_path_io = 1;
2273 		else
2274 			sc->fast_path_io = 0;
2275 	}
2276 	return (0);
2277 }
2278 
2279 /*
2280  * mrsas_init_adapter:	Initializes the adapter/controller
2281  * input:				Adapter soft state
2282  *
2283  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2284  * ROC/controller.  The FW register is read to determined the number of
2285  * commands that is supported.  All memory allocations for IO is based on
2286  * max_cmd.  Appropriate calculations are performed in this function.
2287  */
2288 int
2289 mrsas_init_adapter(struct mrsas_softc *sc)
2290 {
2291 	uint32_t status;
2292 	u_int32_t max_cmd, scratch_pad_2;
2293 	int ret;
2294 	int i = 0;
2295 
2296 	/* Read FW status register */
2297 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2298 
2299 	/* Get operational params from status register */
2300 	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2301 
2302 	/* Decrement the max supported by 1, to correlate with FW */
2303 	sc->max_fw_cmds = sc->max_fw_cmds - 1;
2304 	max_cmd = sc->max_fw_cmds;
2305 
2306 	/* Determine allocation size of command frames */
2307 	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
2308 	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2309 	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2310 	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2311 	scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2312 	    outbound_scratch_pad_2));
2313 	/*
2314 	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2315 	 * Firmware support extended IO chain frame which is 4 time more
2316 	 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2317 	 * 1K 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
2318 	 */
2319 	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2320 		sc->max_chain_frame_sz =
2321 		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2322 		    * MEGASAS_1MB_IO;
2323 	else
2324 		sc->max_chain_frame_sz =
2325 		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2326 		    * MEGASAS_256K_IO;
2327 
2328 	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
2329 	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2330 	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2331 
2332 	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
2333 	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2334 
2335 	mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
2336 	    sc->max_num_sge, sc->max_chain_frame_sz);
2337 
2338 	/* Used for pass thru MFI frame (DCMD) */
2339 	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2340 
2341 	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2342 	    sizeof(MPI2_SGE_IO_UNION)) / 16;
2343 
2344 	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2345 
2346 	for (i = 0; i < count; i++)
2347 		sc->last_reply_idx[i] = 0;
2348 
2349 	ret = mrsas_alloc_mem(sc);
2350 	if (ret != SUCCESS)
2351 		return (ret);
2352 
2353 	ret = mrsas_alloc_mpt_cmds(sc);
2354 	if (ret != SUCCESS)
2355 		return (ret);
2356 
2357 	ret = mrsas_ioc_init(sc);
2358 	if (ret != SUCCESS)
2359 		return (ret);
2360 
2361 	return (0);
2362 }
2363 
2364 /*
2365  * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2366  * input:				Adapter soft state
2367  *
2368  * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2369  */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/*
	 * Allocate IOC INIT command.  The first 1024 bytes hold the MFI
	 * init frame; the MPI2 IOC_INIT request is placed right after it
	 * (mrsas_ioc_init() computes ioc_init_mem + 1024 for it).
	 */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* keep the buffer below 4GB */
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,		/* maxsize */
	    1,				/* single contiguous segment */
	    ioc_init_size,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		/*
		 * NOTE(review): the tag created above is not destroyed on
		 * this and the following error path; presumably reclaimed
		 * via mrsas_free_ioc_cmd() by the caller — TODO confirm.
		 */
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}
2405 
2406 /*
2407  * mrsas_free_ioc_cmd:	Allocates memory for IOC Init command
2408  * input:				Adapter soft state
2409  *
2410  * Deallocates memory of the IOC Init cmd.
2411  */
2412 void
2413 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2414 {
2415 	if (sc->ioc_init_phys_mem)
2416 		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2417 	if (sc->ioc_init_mem != NULL)
2418 		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2419 	if (sc->ioc_init_tag != NULL)
2420 		bus_dma_tag_destroy(sc->ioc_init_tag);
2421 }
2422 
2423 /*
2424  * mrsas_ioc_init:	Sends IOC Init command to FW
2425  * input:			Adapter soft state
2426  *
2427  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2428  */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}
	/*
	 * The MPI2 IOC_INIT request sits 1024 bytes into the DMA buffer,
	 * right behind the MFI init frame (layout established in
	 * mrsas_alloc_ioc_cmd()).
	 */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	/* The MFI frame wrapping the MPI2 request starts at offset 0. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	/* 0xFF is the poll sentinel; FW overwrites it on completion. */
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY) ||
	    (sc->device_id == MRSAS_INTRUDER) ||
	    (sc->device_id == MRSAS_INTRUDER_24) ||
	    (sc->device_id == MRSAS_CUTLASS_52) ||
	    (sc->device_id == MRSAS_CUTLASS_53)) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Report the driver version string to FW, if a buffer exists. */
	if (sc->verbuf_mem) {
		/*
		 * NOTE(review): size strlen(MRSAS_VERSION)+2 covers the
		 * '\n' and the NUL — verify verbuf is at least that large.
		 */
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
	/* Point the MFI frame at the embedded MPI2 request (offset 1024). */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	/* Build the request descriptor that carries the MFI frame address. */
	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	/* cmd_status 0 = success; 0xFF = never completed; other = FW error. */
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* The IOC INIT buffer is single-use; release it either way. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2520 
2521 /*
2522  * mrsas_alloc_mpt_cmds:	Allocates the command packets
2523  * input:					Adapter instance soft state
2524  *
2525  * This function allocates the internal commands for IOs. Each command that is
2526  * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2527  * array is allocated with mrsas_mpt_cmd context.  The free commands are
2528  * maintained in a linked list (cmd pool). SMID value range is from 1 to
2529  * max_fw_cmds.
2530  */
2531 int
2532 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2533 {
2534 	int i, j;
2535 	u_int32_t max_cmd, count;
2536 	struct mrsas_mpt_cmd *cmd;
2537 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2538 	u_int32_t offset, chain_offset, sense_offset;
2539 	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2540 	u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2541 
2542 	max_cmd = sc->max_fw_cmds;
2543 
2544 	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2545 	if (!sc->req_desc) {
2546 		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2547 		return (ENOMEM);
2548 	}
2549 	memset(sc->req_desc, 0, sc->request_alloc_sz);
2550 
2551 	/*
2552 	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2553 	 * Allocate the dynamic array first and then allocate individual
2554 	 * commands.
2555 	 */
2556 	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
2557 	if (!sc->mpt_cmd_list) {
2558 		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2559 		return (ENOMEM);
2560 	}
2561 	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
2562 	for (i = 0; i < max_cmd; i++) {
2563 		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2564 		    M_MRSAS, M_NOWAIT);
2565 		if (!sc->mpt_cmd_list[i]) {
2566 			for (j = 0; j < i; j++)
2567 				free(sc->mpt_cmd_list[j], M_MRSAS);
2568 			free(sc->mpt_cmd_list, M_MRSAS);
2569 			sc->mpt_cmd_list = NULL;
2570 			return (ENOMEM);
2571 		}
2572 	}
2573 
2574 	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2575 	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2576 	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2577 	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2578 	sense_base = (u_int8_t *)sc->sense_mem;
2579 	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2580 	for (i = 0; i < max_cmd; i++) {
2581 		cmd = sc->mpt_cmd_list[i];
2582 		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2583 		chain_offset = sc->max_chain_frame_sz * i;
2584 		sense_offset = MRSAS_SENSE_LEN * i;
2585 		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2586 		cmd->index = i + 1;
2587 		cmd->ccb_ptr = NULL;
2588 		callout_init(&cmd->cm_callout, 0);
2589 		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2590 		cmd->sc = sc;
2591 		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2592 		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2593 		cmd->io_request_phys_addr = io_req_base_phys + offset;
2594 		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2595 		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2596 		cmd->sense = sense_base + sense_offset;
2597 		cmd->sense_phys_addr = sense_base_phys + sense_offset;
2598 		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2599 			return (FAIL);
2600 		}
2601 		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2602 	}
2603 
2604 	/* Initialize reply descriptor array to 0xFFFFFFFF */
2605 	reply_desc = sc->reply_desc_mem;
2606 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2607 	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2608 		reply_desc->Words = MRSAS_ULONG_MAX;
2609 	}
2610 	return (0);
2611 }
2612 
2613 /*
2614  * mrsas_fire_cmd:	Sends command to FW
2615  * input:			Adapter softstate
2616  * 					request descriptor address low
2617  * 					request descriptor address high
2618  *
2619  * This functions fires the command to Firmware by writing to the
2620  * inbound_low_queue_port and inbound_high_queue_port.
2621  */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * The 64-bit descriptor is posted as two 32-bit register writes.
	 * pci_lock keeps the low/high pair from interleaving with another
	 * CPU's submission; the low half is written first — presumably the
	 * hardware latches the descriptor on the high write (TODO confirm
	 * against the controller programming manual).
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}
2633 
2634 /*
2635  * mrsas_transition_to_ready:  Move FW to Ready state input:
2636  * Adapter instance soft state
2637  *
2638  * During the initialization, FW passes can potentially be in any one of several
2639  * possible states. If the FW in operational, waiting-for-handshake states,
2640  * driver must take steps to bring it to ready state. Otherwise, it has to
2641  * wait for the ready state.
2642  */
2643 int
2644 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2645 {
2646 	int i;
2647 	u_int8_t max_wait;
2648 	u_int32_t val, fw_state;
2649 	u_int32_t cur_state;
2650 	u_int32_t abs_state, curr_abs_state;
2651 
2652 	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2653 	fw_state = val & MFI_STATE_MASK;
2654 	max_wait = MRSAS_RESET_WAIT_TIME;
2655 
2656 	if (fw_state != MFI_STATE_READY)
2657 		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
2658 
2659 	while (fw_state != MFI_STATE_READY) {
2660 		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2661 		switch (fw_state) {
2662 		case MFI_STATE_FAULT:
2663 			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2664 			if (ocr) {
2665 				cur_state = MFI_STATE_FAULT;
2666 				break;
2667 			} else
2668 				return -ENODEV;
2669 		case MFI_STATE_WAIT_HANDSHAKE:
2670 			/* Set the CLR bit in inbound doorbell */
2671 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2672 			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
2673 			cur_state = MFI_STATE_WAIT_HANDSHAKE;
2674 			break;
2675 		case MFI_STATE_BOOT_MESSAGE_PENDING:
2676 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2677 			    MFI_INIT_HOTPLUG);
2678 			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2679 			break;
2680 		case MFI_STATE_OPERATIONAL:
2681 			/*
2682 			 * Bring it to READY state; assuming max wait 10
2683 			 * secs
2684 			 */
2685 			mrsas_disable_intr(sc);
2686 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
2687 			for (i = 0; i < max_wait * 1000; i++) {
2688 				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2689 					DELAY(1000);
2690 				else
2691 					break;
2692 			}
2693 			cur_state = MFI_STATE_OPERATIONAL;
2694 			break;
2695 		case MFI_STATE_UNDEFINED:
2696 			/*
2697 			 * This state should not last for more than 2
2698 			 * seconds
2699 			 */
2700 			cur_state = MFI_STATE_UNDEFINED;
2701 			break;
2702 		case MFI_STATE_BB_INIT:
2703 			cur_state = MFI_STATE_BB_INIT;
2704 			break;
2705 		case MFI_STATE_FW_INIT:
2706 			cur_state = MFI_STATE_FW_INIT;
2707 			break;
2708 		case MFI_STATE_FW_INIT_2:
2709 			cur_state = MFI_STATE_FW_INIT_2;
2710 			break;
2711 		case MFI_STATE_DEVICE_SCAN:
2712 			cur_state = MFI_STATE_DEVICE_SCAN;
2713 			break;
2714 		case MFI_STATE_FLUSH_CACHE:
2715 			cur_state = MFI_STATE_FLUSH_CACHE;
2716 			break;
2717 		default:
2718 			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2719 			return -ENODEV;
2720 		}
2721 
2722 		/*
2723 		 * The cur_state should not last for more than max_wait secs
2724 		 */
2725 		for (i = 0; i < (max_wait * 1000); i++) {
2726 			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2727 			    outbound_scratch_pad)) & MFI_STATE_MASK);
2728 			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2729 			    outbound_scratch_pad));
2730 			if (abs_state == curr_abs_state)
2731 				DELAY(1000);
2732 			else
2733 				break;
2734 		}
2735 
2736 		/*
2737 		 * Return error if fw_state hasn't changed after max_wait
2738 		 */
2739 		if (curr_abs_state == abs_state) {
2740 			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2741 			    "in %d secs\n", fw_state, max_wait);
2742 			return -ENODEV;
2743 		}
2744 	}
2745 	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2746 	return 0;
2747 }
2748 
2749 /*
2750  * mrsas_get_mfi_cmd:	Get a cmd from free command pool
2751  * input:				Adapter soft state
2752  *
2753  * This function removes an MFI command from the command list.
2754  */
2755 struct mrsas_mfi_cmd *
2756 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2757 {
2758 	struct mrsas_mfi_cmd *cmd = NULL;
2759 
2760 	mtx_lock(&sc->mfi_cmd_pool_lock);
2761 	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2762 		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2763 		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2764 	}
2765 	mtx_unlock(&sc->mfi_cmd_pool_lock);
2766 
2767 	return cmd;
2768 }
2769 
2770 /*
2771  * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
2772  * input:				Adapter Context.
2773  *
 * This function checks the FW status register and the do_timedout_reset flag.
 * It performs OCR or kills the adapter if the FW is in a fault state, or if an
 * I/O timeout has triggered a reset.
2777  */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		/*
		 * Exit the thread on driver detach or when the adapter has
		 * already been declared dead (no recovery possible).
		 */
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		/* Read the FW state from the outbound scratch pad register. */
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		/*
		 * Trigger recovery either on a FW fault or when an I/O
		 * timeout elsewhere in the driver set do_timedout_reset.
		 */
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
			device_printf(sc->mrsas_dev, "%s started due to %s!\n",
			    sc->disableOnlineCtrlReset ? "Kill Adapter" : "OCR",
			    sc->do_timedout_reset ? "IO Timeout" :
			    "FW fault detected");
			/* reset_in_progress/reset_count are guarded by the
			 * ioctl spin lock so the ioctl path sees them
			 * consistently. */
			mtx_lock_spin(&sc->ioctl_lock);
			sc->reset_in_progress = 1;
			sc->reset_count++;
			mtx_unlock_spin(&sc->ioctl_lock);
			/* Freeze CAM queues around the reset itself. */
			mrsas_xpt_freeze(sc);
			mrsas_reset_ctrl(sc, sc->do_timedout_reset);
			mrsas_xpt_release(sc);
			sc->reset_in_progress = 0;
			sc->do_timedout_reset = 0;
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
2825 
2826 /*
2827  * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
2828  * input:					Adapter Context.
2829  *
 * This function clears all reply descriptors so that, after OCR, the driver
 * and FW discard any stale completion history.
2832  */
2833 void
2834 mrsas_reset_reply_desc(struct mrsas_softc *sc)
2835 {
2836 	int i, count;
2837 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2838 
2839 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2840 	for (i = 0; i < count; i++)
2841 		sc->last_reply_idx[i] = 0;
2842 
2843 	reply_desc = sc->reply_desc_mem;
2844 	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2845 		reply_desc->Words = MRSAS_ULONG_MAX;
2846 	}
2847 }
2848 
2849 /*
2850  * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
2851  * input:				Adapter Context.
2852  *
2853  * This function will run from thread context so that it can sleep. 1. Do not
2854  * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2855  * to complete for 180 seconds. 3. If #2 does not find any outstanding
2856  * command Controller is in working state, so skip OCR. Otherwise, do
2857  * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2858  * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2859  * OCR, Re-fire Management command and move Controller to Operation state.
2860  */
int
mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
{
	int retval = SUCCESS, i, j, retry = 0;
	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
	union ccb *ccb;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;
	union mrsas_evt_class_locale class_locale;

	/* A dead adapter cannot be recovered; fail immediately. */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
		device_printf(sc->mrsas_dev,
		    "mrsas: Hardware critical error, returning FAIL.\n");
		return FAIL;
	}
	/*
	 * Flag the reset so submission paths back off, mark the recovery
	 * state machine, quiesce interrupts and give outstanding work one
	 * fault-check interval to settle before probing.
	 */
	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
	mrsas_disable_intr(sc);
	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
	    sc->mrsas_fw_fault_check_delay * hz);

	/* First try waiting for commands to complete */
	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "resetting adapter from %s.\n",
		    __func__);
		/* Now return commands back to the CAM layer */
		/* sim_lock is dropped while completing CCBs back to CAM. */
		mtx_unlock(&sc->sim_lock);
		for (i = 0; i < sc->max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			if (mpt_cmd->ccb_ptr) {
				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
				mrsas_cmd_done(sc, mpt_cmd);
				mrsas_atomic_dec(&sc->fw_outstanding);
			}
		}
		mtx_lock(&sc->sim_lock);

		/*
		 * Decide between OCR and kill: OCR is impossible when the
		 * admin disabled it, or when FW faulted without advertising
		 * the reset-adapter capability bit.
		 */
		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		abs_state = status_reg & MFI_STATE_MASK;
		reset_adapter = status_reg & MFI_RESET_ADAPTER;
		if (sc->disableOnlineCtrlReset ||
		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
			/* Reset not supported, kill adapter */
			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
			mrsas_kill_hba(sc);
			retval = FAIL;
			goto out;
		}
		/* Now try to reset the chip */
		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
			/*
			 * Write the magic key sequence that unlocks the
			 * fusion diagnostic register.
			 */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_1ST_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_2ND_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_3RD_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_4TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_5TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_6TH_KEY_VALUE);

			/* Check that the diag write enable (DRWE) bit is on */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			/* Poll up to ~10s (100 * 100ms) for DRWE to come up. */
			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 100) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Host diag unlock failed!\n");
					break;
				}
			}
			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
				continue;

			/* Send chip reset command */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
			    host_diag | HOST_DIAG_RESET_ADAPTER);
			DELAY(3000 * 1000);

			/* Make sure reset adapter bit is cleared */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 1000) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Diag reset adapter never cleared!\n");
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			/* Wait for FW to advance past its early boot states. */
			abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK;
			retry = 0;

			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
				DELAY(100 * 1000);
				abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    outbound_scratch_pad)) & MFI_STATE_MASK;
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
				    " state = 0x%x\n", abs_state);
				continue;
			}
			/* Wait for FW to become ready */
			if (mrsas_transition_to_ready(sc, 1)) {
				mrsas_dprint(sc, MRSAS_OCR,
				    "mrsas: Failed to transition controller to ready.\n");
				continue;
			}
			/* Re-initialize the IOC on the freshly reset chip. */
			mrsas_reset_reply_desc(sc);
			if (mrsas_ioc_init(sc)) {
				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
				continue;
			}
			/*
			 * Release any MPT frames that were carrying internal
			 * (MFI passthru) commands; those commands are gone
			 * after the chip reset.
			 */
			for (j = 0; j < sc->max_fw_cmds; j++) {
				mpt_cmd = sc->mpt_cmd_list[j];
				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
					mrsas_release_mfi_cmd(mfi_cmd);
					mrsas_release_mpt_cmd(mpt_cmd);
				}
			}

			sc->aen_cmd = NULL;

			/* Reset load balance info */
			memset(sc->load_balance_info, 0,
			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);

			if (mrsas_get_ctrl_info(sc)) {
				mrsas_kill_hba(sc);
				retval = FAIL;
				goto out;
			}
			if (!mrsas_get_map_info(sc))
				mrsas_sync_map_info(sc);

			megasas_setup_jbod_map(sc);

			/* Refresh PD and LD lists; failures here are non-fatal
			 * since later AEN events will resync them. */
			memset(sc->pd_list, 0,
			    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
			if (mrsas_get_pd_list(sc) != SUCCESS) {
				device_printf(sc->mrsas_dev, "Get PD list failed from OCR.\n"
				    "Will get the latest PD LIST after OCR on event.\n");
			}
			memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
			if (mrsas_get_ld_list(sc) != SUCCESS) {
				device_printf(sc->mrsas_dev, "Get LD lsit failed from OCR.\n"
				    "Will get the latest LD LIST after OCR on event.\n");
			}
			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
			mrsas_enable_intr(sc);
			sc->adprecovery = MRSAS_HBA_OPERATIONAL;

			/* Register AEN with FW for last sequence number */
			class_locale.members.reserved = 0;
			class_locale.members.locale = MR_EVT_LOCALE_ALL;
			class_locale.members.class = MR_EVT_CLASS_DEBUG;

			if (mrsas_register_aen(sc, sc->last_seq_num,
			    class_locale.word)) {
				device_printf(sc->mrsas_dev,
				    "ERROR: AEN registration FAILED from OCR !!! "
				    "Further events from the controller cannot be notified."
				    "Either there is some problem in the controller"
				    "or the controller does not support AEN.\n"
				    "Please contact to the SUPPORT TEAM if the problem persists\n");
			}
			/* Adapter reset completed successfully */
			device_printf(sc->mrsas_dev, "Reset successful\n");
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
		mrsas_kill_hba(sc);
		retval = FAIL;
	} else {
		/* No outstanding commands: controller is healthy, skip OCR. */
		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
		mrsas_enable_intr(sc);
		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	}
out:
	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	mrsas_dprint(sc, MRSAS_OCR,
	    "Reset Exit with %d.\n", retval);
	return retval;
}
3067 
3068 /*
3069  * mrsas_kill_hba:	Kill HBA when OCR is not supported
3070  * input:			Adapter Context.
3071  *
3072  * This function will kill HBA when OCR is not supported.
3073  */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter dead first so no new work is submitted. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* Wait 1 second before issuing the stop to the firmware. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	/* Tell the firmware to stop the adapter via the doorbell. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail any IOCTLs still waiting on the now-dead controller. */
	mrsas_complete_outstanding_ioctls(sc);
}
3086 
3087 /**
3088  * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
3089  * input:			Controller softc
3090  *
3091  * Returns void
3092  */
void
mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
{
	int i;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int32_t count, MSIxIndex;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->max_fw_cmds; i++) {
		cmd_mpt = sc->mpt_cmd_list[i];

		/* An MPT frame carrying an MFI passthru has a valid
		 * sync_cmd_idx; plain I/O frames have MRSAS_ULONG_MAX. */
		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/* Only complete blocked IOCTLs; skip abort frames. */
			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
				/*
				 * NOTE(review): the completion is invoked once
				 * per MSI-x vector (count times) for the same
				 * cmd_mfi — mirrors the vendor code; verify
				 * that repeated invocation is intended.
				 */
				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
					    cmd_mpt->io_request->RaidContext.status);
			}
		}
	}
}
3115 
3116 /*
3117  * mrsas_wait_for_outstanding:	Wait for outstanding commands
3118  * input:						Adapter Context.
3119  *
3120  * This function will wait for 180 seconds for outstanding commands to be
3121  * completed.
3122  */
3123 int
3124 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
3125 {
3126 	int i, outstanding, retval = 0;
3127 	u_int32_t fw_state, count, MSIxIndex;
3128 
3129 
3130 	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
3131 		if (sc->remove_in_progress) {
3132 			mrsas_dprint(sc, MRSAS_OCR,
3133 			    "Driver remove or shutdown called.\n");
3134 			retval = 1;
3135 			goto out;
3136 		}
3137 		/* Check if firmware is in fault state */
3138 		fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3139 		    outbound_scratch_pad)) & MFI_STATE_MASK;
3140 		if (fw_state == MFI_STATE_FAULT) {
3141 			mrsas_dprint(sc, MRSAS_OCR,
3142 			    "Found FW in FAULT state, will reset adapter.\n");
3143 			retval = 1;
3144 			goto out;
3145 		}
3146 		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
3147 			mrsas_dprint(sc, MRSAS_OCR,
3148 			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
3149 			retval = 1;
3150 			goto out;
3151 		}
3152 		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
3153 		if (!outstanding)
3154 			goto out;
3155 
3156 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
3157 			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
3158 			    "commands to complete\n", i, outstanding);
3159 			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3160 			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3161 				mrsas_complete_cmd(sc, MSIxIndex);
3162 		}
3163 		DELAY(1000 * 1000);
3164 	}
3165 
3166 	if (mrsas_atomic_read(&sc->fw_outstanding)) {
3167 		mrsas_dprint(sc, MRSAS_OCR,
3168 		    " pending commands remain after waiting,"
3169 		    " will reset adapter.\n");
3170 		retval = 1;
3171 	}
3172 out:
3173 	return retval;
3174 }
3175 
3176 /*
3177  * mrsas_release_mfi_cmd:	Return a cmd to free command pool
3178  * input:					Command packet for return to free cmd pool
3179  *
3180  * This function returns the MFI command to the command list.
3181  */
3182 void
3183 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
3184 {
3185 	struct mrsas_softc *sc = cmd->sc;
3186 
3187 	mtx_lock(&sc->mfi_cmd_pool_lock);
3188 	cmd->ccb_ptr = NULL;
3189 	cmd->cmd_id.frame_count = 0;
3190 	TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
3191 	mtx_unlock(&sc->mfi_cmd_pool_lock);
3192 
3193 	return;
3194 }
3195 
3196 /*
3197  * mrsas_get_controller_info:	Returns FW's controller structure
3198  * input:						Adapter soft state
3199  * 								Controller information structure
3200  *
3201  * Issues an internal command (DCMD) to get the FW's controller structure. This
3202  * information is mainly used to find out the maximum IO transfer per command
3203  * supported by the FW.
3204  */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer that will receive the controller info from FW. */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build a READ DCMD with a single SGE pointing at the DMA buffer. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	else
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));

	do_ocr = 0;
	mrsas_update_ext_vd_details(sc);

	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;

dcmd_timeout:
	mrsas_free_ctlr_info_cmd(sc);

	/*
	 * On timeout (do_ocr still set) the MFI command is deliberately NOT
	 * released — the FW may still own the frame — and a DCMD-timeout
	 * OCR is scheduled instead; the OCR path reclaims such frames.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
3261 
3262 /*
3263  * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3264  * input:
3265  *	sc - Controller's softc
3266 */
3267 static void
3268 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3269 {
3270 	sc->max256vdSupport =
3271 	sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3272 	/* Below is additional check to address future FW enhancement */
3273 	if (sc->ctrl_info->max_lds > 64)
3274 		sc->max256vdSupport = 1;
3275 
3276 	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3277 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3278 	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3279 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3280 	if (sc->max256vdSupport) {
3281 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3282 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3283 	} else {
3284 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3285 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3286 	}
3287 
3288 	sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3289 	    (sizeof(MR_LD_SPAN_MAP) *
3290 	    (sc->fw_supported_vd_count - 1));
3291 	sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3292 	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3293 	    (sizeof(MR_LD_SPAN_MAP) *
3294 	    (sc->drv_supported_vd_count - 1));
3295 
3296 	sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3297 
3298 	if (sc->max256vdSupport)
3299 		sc->current_map_sz = sc->new_map_sz;
3300 	else
3301 		sc->current_map_sz = sc->old_map_sz;
3302 }
3303 
3304 /*
3305  * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3306  * input:						Adapter soft state
3307  *
3308  * Allocates DMAable memory for the controller info internal command.
3309  */
3310 int
3311 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3312 {
3313 	int ctlr_info_size;
3314 
3315 	/* Allocate get controller info command */
3316 	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3317 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3318 	    1, 0,
3319 	    BUS_SPACE_MAXADDR_32BIT,
3320 	    BUS_SPACE_MAXADDR,
3321 	    NULL, NULL,
3322 	    ctlr_info_size,
3323 	    1,
3324 	    ctlr_info_size,
3325 	    BUS_DMA_ALLOCNOW,
3326 	    NULL, NULL,
3327 	    &sc->ctlr_info_tag)) {
3328 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3329 		return (ENOMEM);
3330 	}
3331 	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3332 	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3333 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3334 		return (ENOMEM);
3335 	}
3336 	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3337 	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3338 	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3339 		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3340 		return (ENOMEM);
3341 	}
3342 	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3343 	return (0);
3344 }
3345 
3346 /*
3347  * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3348  * input:						Adapter soft state
3349  *
3350  * Deallocates memory of the get controller info cmd.
3351  */
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
	/* Teardown in strict reverse order of setup: unload the mapping,
	 * free the DMA memory, then destroy the tag. Each step is guarded
	 * so partial allocations are handled safely. */
	if (sc->ctlr_info_phys_addr)
		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_mem != NULL)
		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_tag != NULL)
		bus_dma_tag_destroy(sc->ctlr_info_tag);
}
3362 
3363 /*
3364  * mrsas_issue_polled:	Issues a polling command
3365  * inputs:				Adapter soft state
3366  * 						Command packet to be issued
3367  *
3368  * This function is for posting of internal commands to Firmware.  MFI requires
 * the cmd_status to be set to 0xFF before posting.  The maximum wait time of
3370  * the poll response timer is 180 seconds.
3371  */
3372 int
3373 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3374 {
3375 	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3376 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3377 	int i, retcode = SUCCESS;
3378 
3379 	frame_hdr->cmd_status = 0xFF;
3380 	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3381 
3382 	/* Issue the frame using inbound queue port */
3383 	if (mrsas_issue_dcmd(sc, cmd)) {
3384 		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3385 		return (1);
3386 	}
3387 	/*
3388 	 * Poll response timer to wait for Firmware response.  While this
3389 	 * timer with the DELAY call could block CPU, the time interval for
3390 	 * this is only 1 millisecond.
3391 	 */
3392 	if (frame_hdr->cmd_status == 0xFF) {
3393 		for (i = 0; i < (max_wait * 1000); i++) {
3394 			if (frame_hdr->cmd_status == 0xFF)
3395 				DELAY(1000);
3396 			else
3397 				break;
3398 		}
3399 	}
3400 	if (frame_hdr->cmd_status == 0xFF) {
3401 		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3402 		    "seconds from %s\n", max_wait, __func__);
3403 		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3404 		    cmd->frame->dcmd.opcode);
3405 		retcode = ETIMEDOUT;
3406 	}
3407 	return (retcode);
3408 }
3409 
3410 /*
3411  * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3412  * input:				Adapter soft state mfi cmd pointer
3413  *
3414  * This function is called by mrsas_issued_blocked_cmd() and
3415  * mrsas_issued_polled(), to build the MPT command and then fire the command
3416  * to Firmware.
3417  */
3418 int
3419 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3420 {
3421 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3422 
3423 	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3424 	if (!req_desc) {
3425 		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3426 		return (1);
3427 	}
3428 	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3429 
3430 	return (0);
3431 }
3432 
3433 /*
3434  * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3435  * input:				Adapter soft state mfi cmd to build
3436  *
3437  * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3438  * command and prepares the MPT command to send to Firmware.
3439  */
3440 MRSAS_REQUEST_DESCRIPTOR_UNION *
3441 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3442 {
3443 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3444 	u_int16_t index;
3445 
3446 	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3447 		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3448 		return NULL;
3449 	}
3450 	index = cmd->cmd_id.context.smid;
3451 
3452 	req_desc = mrsas_get_request_desc(sc, index - 1);
3453 	if (!req_desc)
3454 		return NULL;
3455 
3456 	req_desc->addr.Words = 0;
3457 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3458 
3459 	req_desc->SCSIIO.SMID = index;
3460 
3461 	return (req_desc);
3462 }
3463 
3464 /*
3465  * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3466  * input:						Adapter soft state mfi cmd pointer
3467  *
3468  * The MPT command and the io_request are setup as a passthru command. The SGE
3469  * chain address is set to frame_phys_addr of the MFI command.
3470  */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link the MPT frame to its MFI command for completion. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	/*
	 * On Invader-class and newer controllers, clear the flags of the
	 * last SGE in the main message so the HW does not misinterpret it.
	 */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY) ||
	    (sc->device_id == MRSAS_INTRUDER) ||
	    (sc->device_id == MRSAS_INTRUDER_24) ||
	    (sc->device_id == MRSAS_CUTLASS_52) ||
	    (sc->device_id == MRSAS_CUTLASS_53)) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	/* The MPT request's chain element points at the MFI frame itself. */
	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = sc->max_chain_frame_sz;

	return (0);
}
3525 
3526 /*
3527  * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3528  * input:					Adapter soft state Command to be issued
3529  *
3530  * This function waits on an event for the command to be returned from the ISR.
3531  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3532  * internal and ioctl commands.
3533  */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): this stores the address of the local 'cmd'
	 * parameter, but the sleep/wakeup channel actually used below (and
	 * in mrsas_wakeup()) is &sc->chan itself, so the stored value is
	 * never dereferenced — confirm before changing.
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in 1-second slices until the ISR flips cmd_status. */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		/* Only non-IOCTL (internal) commands are bounded by
		 * max_wait; IOCTL commands (sync_cmd set) wait indefinitely. */
		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}

	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
3578 
3579 /*
3580  * mrsas_complete_mptmfi_passthru:	Completes a command
3581  * input:	@sc:					Adapter soft state
3582  * 			@cmd:					Command to be completed
3583  * 			@status:				cmd completion status
3584  *
3585  * This function is called from mrsas_complete_cmd() after an interrupt is
3586  * received from Firmware, and io_request->Function is
3587  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3588  */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH: non-IOCTL SCSI cmds are handled like DCMDs. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			/* Disable fast path until the new map is validated. */
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* MFI_STAT_NOT_FOUND: no newer map;
					 * release the cmd and stop here. */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			/* Fast path only when the new RAID map validates. */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-arm the map-update command with the FW. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {

			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
3687 
3688 /*
3689  * mrsas_wakeup:	Completes an internal command
3690  * input:			Adapter soft state
3691  * 					Command to be completed
3692  *
3693  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3694  * timer is started.  This function is called from
3695  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3696  * from the command wait.
3697  */
3698 void
3699 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3700 {
3701 	cmd->cmd_status = cmd->frame->io.cmd_status;
3702 
3703 	if (cmd->cmd_status == 0xFF)
3704 		cmd->cmd_status = 0;
3705 
3706 	sc->chan = (void *)&cmd;
3707 	wakeup_one((void *)&sc->chan);
3708 	return;
3709 }
3710 
3711 /*
3712  * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
3713  * Adapter soft state Shutdown/Hibernate
3714  *
3715  * This function issues a DCMD internal command to Firmware to initiate shutdown
3716  * of the controller.
3717  */
3718 static void
3719 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3720 {
3721 	struct mrsas_mfi_cmd *cmd;
3722 	struct mrsas_dcmd_frame *dcmd;
3723 
3724 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3725 		return;
3726 
3727 	cmd = mrsas_get_mfi_cmd(sc);
3728 	if (!cmd) {
3729 		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
3730 		return;
3731 	}
3732 	if (sc->aen_cmd)
3733 		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3734 	if (sc->map_update_cmd)
3735 		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3736 	if (sc->jbod_seq_cmd)
3737 		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
3738 
3739 	dcmd = &cmd->frame->dcmd;
3740 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3741 
3742 	dcmd->cmd = MFI_CMD_DCMD;
3743 	dcmd->cmd_status = 0x0;
3744 	dcmd->sge_count = 0;
3745 	dcmd->flags = MFI_FRAME_DIR_NONE;
3746 	dcmd->timeout = 0;
3747 	dcmd->pad_0 = 0;
3748 	dcmd->data_xfer_len = 0;
3749 	dcmd->opcode = opcode;
3750 
3751 	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3752 
3753 	mrsas_issue_blocked_cmd(sc, cmd);
3754 	mrsas_release_mfi_cmd(cmd);
3755 
3756 	return;
3757 }
3758 
3759 /*
3760  * mrsas_flush_cache:         Requests FW to flush all its caches input:
3761  * Adapter soft state
3762  *
3763  * This function is issues a DCMD internal command to Firmware to initiate
3764  * flushing of all caches.
3765  */
3766 static void
3767 mrsas_flush_cache(struct mrsas_softc *sc)
3768 {
3769 	struct mrsas_mfi_cmd *cmd;
3770 	struct mrsas_dcmd_frame *dcmd;
3771 
3772 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3773 		return;
3774 
3775 	cmd = mrsas_get_mfi_cmd(sc);
3776 	if (!cmd) {
3777 		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3778 		return;
3779 	}
3780 	dcmd = &cmd->frame->dcmd;
3781 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3782 
3783 	dcmd->cmd = MFI_CMD_DCMD;
3784 	dcmd->cmd_status = 0x0;
3785 	dcmd->sge_count = 0;
3786 	dcmd->flags = MFI_FRAME_DIR_NONE;
3787 	dcmd->timeout = 0;
3788 	dcmd->pad_0 = 0;
3789 	dcmd->data_xfer_len = 0;
3790 	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3791 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3792 
3793 	mrsas_issue_blocked_cmd(sc, cmd);
3794 	mrsas_release_mfi_cmd(cmd);
3795 
3796 	return;
3797 }
3798 
3799 int
3800 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
3801 {
3802 	int retcode = 0;
3803 	u_int8_t do_ocr = 1;
3804 	struct mrsas_mfi_cmd *cmd;
3805 	struct mrsas_dcmd_frame *dcmd;
3806 	uint32_t pd_seq_map_sz;
3807 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3808 	bus_addr_t pd_seq_h;
3809 
3810 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
3811 	    (sizeof(struct MR_PD_CFG_SEQ) *
3812 	    (MAX_PHYSICAL_DEVICES - 1));
3813 
3814 	cmd = mrsas_get_mfi_cmd(sc);
3815 	if (!cmd) {
3816 		device_printf(sc->mrsas_dev,
3817 		    "Cannot alloc for ld map info cmd.\n");
3818 		return 1;
3819 	}
3820 	dcmd = &cmd->frame->dcmd;
3821 
3822 	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
3823 	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
3824 	if (!pd_sync) {
3825 		device_printf(sc->mrsas_dev,
3826 		    "Failed to alloc mem for jbod map info.\n");
3827 		mrsas_release_mfi_cmd(cmd);
3828 		return (ENOMEM);
3829 	}
3830 	memset(pd_sync, 0, pd_seq_map_sz);
3831 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3832 	dcmd->cmd = MFI_CMD_DCMD;
3833 	dcmd->cmd_status = 0xFF;
3834 	dcmd->sge_count = 1;
3835 	dcmd->timeout = 0;
3836 	dcmd->pad_0 = 0;
3837 	dcmd->data_xfer_len = (pd_seq_map_sz);
3838 	dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
3839 	dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
3840 	dcmd->sgl.sge32[0].length = (pd_seq_map_sz);
3841 
3842 	if (pend) {
3843 		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
3844 		dcmd->flags = (MFI_FRAME_DIR_WRITE);
3845 		sc->jbod_seq_cmd = cmd;
3846 		if (mrsas_issue_dcmd(sc, cmd)) {
3847 			device_printf(sc->mrsas_dev,
3848 			    "Fail to send sync map info command.\n");
3849 			return 1;
3850 		} else
3851 			return 0;
3852 	} else
3853 		dcmd->flags = MFI_FRAME_DIR_READ;
3854 
3855 	retcode = mrsas_issue_polled(sc, cmd);
3856 	if (retcode == ETIMEDOUT)
3857 		goto dcmd_timeout;
3858 
3859 	if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
3860 		device_printf(sc->mrsas_dev,
3861 		    "driver supports max %d JBOD, but FW reports %d\n",
3862 		    MAX_PHYSICAL_DEVICES, pd_sync->count);
3863 		retcode = -EINVAL;
3864 	}
3865 	if (!retcode)
3866 		sc->pd_seq_map_id++;
3867 	do_ocr = 0;
3868 
3869 dcmd_timeout:
3870 	if (do_ocr)
3871 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3872 	else
3873 		mrsas_release_mfi_cmd(cmd);
3874 
3875 	return (retcode);
3876 }
3877 
3878 /*
3879  * mrsas_get_map_info:        Load and validate RAID map input:
3880  * Adapter instance soft state
3881  *
3882  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3883  * and validate RAID map.  It returns 0 if successful, 1 other- wise.
3884  */
3885 static int
3886 mrsas_get_map_info(struct mrsas_softc *sc)
3887 {
3888 	uint8_t retcode = 0;
3889 
3890 	sc->fast_path_io = 0;
3891 	if (!mrsas_get_ld_map_info(sc)) {
3892 		retcode = MR_ValidateMapInfo(sc);
3893 		if (retcode == 0) {
3894 			sc->fast_path_io = 1;
3895 			return 0;
3896 		}
3897 	}
3898 	return 1;
3899 }
3900 
3901 /*
3902  * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
3903  * Adapter instance soft state
3904  *
3905  * Issues an internal command (DCMD) to get the FW's controller PD list
3906  * structure.
3907  */
3908 static int
3909 mrsas_get_ld_map_info(struct mrsas_softc *sc)
3910 {
3911 	int retcode = 0;
3912 	struct mrsas_mfi_cmd *cmd;
3913 	struct mrsas_dcmd_frame *dcmd;
3914 	void *map;
3915 	bus_addr_t map_phys_addr = 0;
3916 
3917 	cmd = mrsas_get_mfi_cmd(sc);
3918 	if (!cmd) {
3919 		device_printf(sc->mrsas_dev,
3920 		    "Cannot alloc for ld map info cmd.\n");
3921 		return 1;
3922 	}
3923 	dcmd = &cmd->frame->dcmd;
3924 
3925 	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3926 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3927 	if (!map) {
3928 		device_printf(sc->mrsas_dev,
3929 		    "Failed to alloc mem for ld map info.\n");
3930 		mrsas_release_mfi_cmd(cmd);
3931 		return (ENOMEM);
3932 	}
3933 	memset(map, 0, sizeof(sc->max_map_sz));
3934 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3935 
3936 	dcmd->cmd = MFI_CMD_DCMD;
3937 	dcmd->cmd_status = 0xFF;
3938 	dcmd->sge_count = 1;
3939 	dcmd->flags = MFI_FRAME_DIR_READ;
3940 	dcmd->timeout = 0;
3941 	dcmd->pad_0 = 0;
3942 	dcmd->data_xfer_len = sc->current_map_sz;
3943 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3944 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3945 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
3946 
3947 	retcode = mrsas_issue_polled(sc, cmd);
3948 	if (retcode == ETIMEDOUT)
3949 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3950 	else
3951 		mrsas_release_mfi_cmd(cmd);
3952 
3953 	return (retcode);
3954 }
3955 
3956 /*
3957  * mrsas_sync_map_info:        Get FW's ld_map structure input:
3958  * Adapter instance soft state
3959  *
3960  * Issues an internal command (DCMD) to get the FW's controller PD list
3961  * structure.
3962  */
3963 static int
3964 mrsas_sync_map_info(struct mrsas_softc *sc)
3965 {
3966 	int retcode = 0, i;
3967 	struct mrsas_mfi_cmd *cmd;
3968 	struct mrsas_dcmd_frame *dcmd;
3969 	uint32_t size_sync_info, num_lds;
3970 	MR_LD_TARGET_SYNC *target_map = NULL;
3971 	MR_DRV_RAID_MAP_ALL *map;
3972 	MR_LD_RAID *raid;
3973 	MR_LD_TARGET_SYNC *ld_sync;
3974 	bus_addr_t map_phys_addr = 0;
3975 
3976 	cmd = mrsas_get_mfi_cmd(sc);
3977 	if (!cmd) {
3978 		device_printf(sc->mrsas_dev,
3979 		    "Cannot alloc for sync map info cmd\n");
3980 		return 1;
3981 	}
3982 	map = sc->ld_drv_map[sc->map_id & 1];
3983 	num_lds = map->raidMap.ldCount;
3984 
3985 	dcmd = &cmd->frame->dcmd;
3986 	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3987 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3988 
3989 	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
3990 	memset(target_map, 0, sc->max_map_sz);
3991 
3992 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3993 
3994 	ld_sync = (MR_LD_TARGET_SYNC *) target_map;
3995 
3996 	for (i = 0; i < num_lds; i++, ld_sync++) {
3997 		raid = MR_LdRaidGet(i, map);
3998 		ld_sync->targetId = MR_GetLDTgtId(i, map);
3999 		ld_sync->seqNum = raid->seqNum;
4000 	}
4001 
4002 	dcmd->cmd = MFI_CMD_DCMD;
4003 	dcmd->cmd_status = 0xFF;
4004 	dcmd->sge_count = 1;
4005 	dcmd->flags = MFI_FRAME_DIR_WRITE;
4006 	dcmd->timeout = 0;
4007 	dcmd->pad_0 = 0;
4008 	dcmd->data_xfer_len = sc->current_map_sz;
4009 	dcmd->mbox.b[0] = num_lds;
4010 	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
4011 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4012 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4013 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
4014 
4015 	sc->map_update_cmd = cmd;
4016 	if (mrsas_issue_dcmd(sc, cmd)) {
4017 		device_printf(sc->mrsas_dev,
4018 		    "Fail to send sync map info command.\n");
4019 		return (1);
4020 	}
4021 	return (retcode);
4022 }
4023 
4024 /*
4025  * mrsas_get_pd_list:           Returns FW's PD list structure input:
4026  * Adapter soft state
4027  *
4028  * Issues an internal command (DCMD) to get the FW's controller PD list
4029  * structure.  This information is mainly used to find out about system
4030  * supported by Firmware.
4031  */
4032 static int
4033 mrsas_get_pd_list(struct mrsas_softc *sc)
4034 {
4035 	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4036 	u_int8_t do_ocr = 1;
4037 	struct mrsas_mfi_cmd *cmd;
4038 	struct mrsas_dcmd_frame *dcmd;
4039 	struct MR_PD_LIST *pd_list_mem;
4040 	struct MR_PD_ADDRESS *pd_addr;
4041 	bus_addr_t pd_list_phys_addr = 0;
4042 	struct mrsas_tmp_dcmd *tcmd;
4043 
4044 	cmd = mrsas_get_mfi_cmd(sc);
4045 	if (!cmd) {
4046 		device_printf(sc->mrsas_dev,
4047 		    "Cannot alloc for get PD list cmd\n");
4048 		return 1;
4049 	}
4050 	dcmd = &cmd->frame->dcmd;
4051 
4052 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4053 	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4054 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4055 		device_printf(sc->mrsas_dev,
4056 		    "Cannot alloc dmamap for get PD list cmd\n");
4057 		mrsas_release_mfi_cmd(cmd);
4058 		mrsas_free_tmp_dcmd(tcmd);
4059 		free(tcmd, M_MRSAS);
4060 		return (ENOMEM);
4061 	} else {
4062 		pd_list_mem = tcmd->tmp_dcmd_mem;
4063 		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4064 	}
4065 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4066 
4067 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4068 	dcmd->mbox.b[1] = 0;
4069 	dcmd->cmd = MFI_CMD_DCMD;
4070 	dcmd->cmd_status = 0xFF;
4071 	dcmd->sge_count = 1;
4072 	dcmd->flags = MFI_FRAME_DIR_READ;
4073 	dcmd->timeout = 0;
4074 	dcmd->pad_0 = 0;
4075 	dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4076 	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4077 	dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4078 	dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4079 
4080 	retcode = mrsas_issue_polled(sc, cmd);
4081 	if (retcode == ETIMEDOUT)
4082 		goto dcmd_timeout;
4083 
4084 	/* Get the instance PD list */
4085 	pd_count = MRSAS_MAX_PD;
4086 	pd_addr = pd_list_mem->addr;
4087 	if (pd_list_mem->count < pd_count) {
4088 		memset(sc->local_pd_list, 0,
4089 		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4090 		for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4091 			sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4092 			sc->local_pd_list[pd_addr->deviceId].driveType =
4093 			    pd_addr->scsiDevType;
4094 			sc->local_pd_list[pd_addr->deviceId].driveState =
4095 			    MR_PD_STATE_SYSTEM;
4096 			pd_addr++;
4097 		}
4098 		/*
4099 		 * Use mutext/spinlock if pd_list component size increase more than
4100 		 * 32 bit.
4101 		 */
4102 		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4103 		do_ocr = 0;
4104 	}
4105 dcmd_timeout:
4106 	mrsas_free_tmp_dcmd(tcmd);
4107 	free(tcmd, M_MRSAS);
4108 
4109 	if (do_ocr)
4110 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4111 	else
4112 		mrsas_release_mfi_cmd(cmd);
4113 
4114 	return (retcode);
4115 }
4116 
4117 /*
4118  * mrsas_get_ld_list:           Returns FW's LD list structure input:
4119  * Adapter soft state
4120  *
4121  * Issues an internal command (DCMD) to get the FW's controller PD list
4122  * structure.  This information is mainly used to find out about supported by
4123  * the FW.
4124  */
4125 static int
4126 mrsas_get_ld_list(struct mrsas_softc *sc)
4127 {
4128 	int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
4129 	u_int8_t do_ocr = 1;
4130 	struct mrsas_mfi_cmd *cmd;
4131 	struct mrsas_dcmd_frame *dcmd;
4132 	struct MR_LD_LIST *ld_list_mem;
4133 	bus_addr_t ld_list_phys_addr = 0;
4134 	struct mrsas_tmp_dcmd *tcmd;
4135 
4136 	cmd = mrsas_get_mfi_cmd(sc);
4137 	if (!cmd) {
4138 		device_printf(sc->mrsas_dev,
4139 		    "Cannot alloc for get LD list cmd\n");
4140 		return 1;
4141 	}
4142 	dcmd = &cmd->frame->dcmd;
4143 
4144 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4145 	ld_list_size = sizeof(struct MR_LD_LIST);
4146 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4147 		device_printf(sc->mrsas_dev,
4148 		    "Cannot alloc dmamap for get LD list cmd\n");
4149 		mrsas_release_mfi_cmd(cmd);
4150 		mrsas_free_tmp_dcmd(tcmd);
4151 		free(tcmd, M_MRSAS);
4152 		return (ENOMEM);
4153 	} else {
4154 		ld_list_mem = tcmd->tmp_dcmd_mem;
4155 		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4156 	}
4157 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4158 
4159 	if (sc->max256vdSupport)
4160 		dcmd->mbox.b[0] = 1;
4161 
4162 	dcmd->cmd = MFI_CMD_DCMD;
4163 	dcmd->cmd_status = 0xFF;
4164 	dcmd->sge_count = 1;
4165 	dcmd->flags = MFI_FRAME_DIR_READ;
4166 	dcmd->timeout = 0;
4167 	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4168 	dcmd->opcode = MR_DCMD_LD_GET_LIST;
4169 	dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4170 	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
4171 	dcmd->pad_0 = 0;
4172 
4173 	retcode = mrsas_issue_polled(sc, cmd);
4174 	if (retcode == ETIMEDOUT)
4175 		goto dcmd_timeout;
4176 
4177 #if VD_EXT_DEBUG
4178 	printf("Number of LDs %d\n", ld_list_mem->ldCount);
4179 #endif
4180 
4181 	/* Get the instance LD list */
4182 	if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4183 		sc->CurLdCount = ld_list_mem->ldCount;
4184 		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4185 		for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4186 			if (ld_list_mem->ldList[ld_index].state != 0) {
4187 				ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4188 				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4189 			}
4190 		}
4191 		do_ocr = 0;
4192 	}
4193 dcmd_timeout:
4194 	mrsas_free_tmp_dcmd(tcmd);
4195 	free(tcmd, M_MRSAS);
4196 
4197 	if (do_ocr)
4198 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4199 	else
4200 		mrsas_release_mfi_cmd(cmd);
4201 
4202 	return (retcode);
4203 }
4204 
4205 /*
4206  * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
4207  * Adapter soft state Temp command Size of alloction
4208  *
4209  * Allocates DMAable memory for a temporary internal command. The allocated
4210  * memory is initialized to all zeros upon successful loading of the dma
4211  * mapped memory.
4212  */
4213 int
4214 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4215     struct mrsas_tmp_dcmd *tcmd, int size)
4216 {
4217 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
4218 	    1, 0,
4219 	    BUS_SPACE_MAXADDR_32BIT,
4220 	    BUS_SPACE_MAXADDR,
4221 	    NULL, NULL,
4222 	    size,
4223 	    1,
4224 	    size,
4225 	    BUS_DMA_ALLOCNOW,
4226 	    NULL, NULL,
4227 	    &tcmd->tmp_dcmd_tag)) {
4228 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4229 		return (ENOMEM);
4230 	}
4231 	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4232 	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4233 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4234 		return (ENOMEM);
4235 	}
4236 	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4237 	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4238 	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4239 		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4240 		return (ENOMEM);
4241 	}
4242 	memset(tcmd->tmp_dcmd_mem, 0, size);
4243 	return (0);
4244 }
4245 
4246 /*
4247  * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
4248  * temporary dcmd pointer
4249  *
4250  * Deallocates memory of the temporary command for use in the construction of
4251  * the internal DCMD.
4252  */
4253 void
4254 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4255 {
4256 	if (tmp->tmp_dcmd_phys_addr)
4257 		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4258 	if (tmp->tmp_dcmd_mem != NULL)
4259 		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4260 	if (tmp->tmp_dcmd_tag != NULL)
4261 		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4262 }
4263 
4264 /*
4265  * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
4266  * Adapter soft state Previously issued cmd to be aborted
4267  *
4268  * This function is used to abort previously issued commands, such as AEN and
4269  * RAID map sync map commands.  The abort command is sent as a DCMD internal
4270  * command and subsequently the driver will wait for a return status.  The
4271  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4272  */
4273 static int
4274 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4275     struct mrsas_mfi_cmd *cmd_to_abort)
4276 {
4277 	struct mrsas_mfi_cmd *cmd;
4278 	struct mrsas_abort_frame *abort_fr;
4279 	u_int8_t retcode = 0;
4280 	unsigned long total_time = 0;
4281 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4282 
4283 	cmd = mrsas_get_mfi_cmd(sc);
4284 	if (!cmd) {
4285 		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4286 		return (1);
4287 	}
4288 	abort_fr = &cmd->frame->abort;
4289 
4290 	/* Prepare and issue the abort frame */
4291 	abort_fr->cmd = MFI_CMD_ABORT;
4292 	abort_fr->cmd_status = 0xFF;
4293 	abort_fr->flags = 0;
4294 	abort_fr->abort_context = cmd_to_abort->index;
4295 	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4296 	abort_fr->abort_mfi_phys_addr_hi = 0;
4297 
4298 	cmd->sync_cmd = 1;
4299 	cmd->cmd_status = 0xFF;
4300 
4301 	if (mrsas_issue_dcmd(sc, cmd)) {
4302 		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4303 		return (1);
4304 	}
4305 	/* Wait for this cmd to complete */
4306 	sc->chan = (void *)&cmd;
4307 	while (1) {
4308 		if (cmd->cmd_status == 0xFF) {
4309 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4310 		} else
4311 			break;
4312 		total_time++;
4313 		if (total_time >= max_wait) {
4314 			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4315 			retcode = 1;
4316 			break;
4317 		}
4318 	}
4319 
4320 	cmd->sync_cmd = 0;
4321 	mrsas_release_mfi_cmd(cmd);
4322 	return (retcode);
4323 }
4324 
4325 /*
4326  * mrsas_complete_abort:      Completes aborting a command input:
4327  * Adapter soft state Cmd that was issued to abort another cmd
4328  *
4329  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4330  * change after sending the command.  This function is called from
4331  * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4332  */
4333 void
4334 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4335 {
4336 	if (cmd->sync_cmd) {
4337 		cmd->sync_cmd = 0;
4338 		cmd->cmd_status = 0;
4339 		sc->chan = (void *)&cmd;
4340 		wakeup_one((void *)&sc->chan);
4341 	}
4342 	return;
4343 }
4344 
4345 /*
4346  * mrsas_aen_handler:	AEN processing callback function from thread context
4347  * input:				Adapter soft state
4348  *
4349  * Asynchronous event handler
4350  */
4351 void
4352 mrsas_aen_handler(struct mrsas_softc *sc)
4353 {
4354 	union mrsas_evt_class_locale class_locale;
4355 	int doscan = 0;
4356 	u_int32_t seq_num;
4357  	int error, fail_aen = 0;
4358 
4359 	if (sc == NULL) {
4360 		printf("invalid instance!\n");
4361 		return;
4362 	}
4363 	if (sc->evt_detail_mem) {
4364 		switch (sc->evt_detail_mem->code) {
4365 		case MR_EVT_PD_INSERTED:
4366 			fail_aen = mrsas_get_pd_list(sc);
4367 			if (!fail_aen)
4368 				mrsas_bus_scan_sim(sc, sc->sim_1);
4369 			else
4370 				goto skip_register_aen;
4371 			doscan = 0;
4372 			break;
4373 		case MR_EVT_PD_REMOVED:
4374 			fail_aen = mrsas_get_pd_list(sc);
4375 			if (!fail_aen)
4376 				mrsas_bus_scan_sim(sc, sc->sim_1);
4377 			else
4378 				goto skip_register_aen;
4379 			doscan = 0;
4380 			break;
4381 		case MR_EVT_LD_OFFLINE:
4382 		case MR_EVT_CFG_CLEARED:
4383 		case MR_EVT_LD_DELETED:
4384 			mrsas_bus_scan_sim(sc, sc->sim_0);
4385 			doscan = 0;
4386 			break;
4387 		case MR_EVT_LD_CREATED:
4388 			fail_aen = mrsas_get_ld_list(sc);
4389 			if (!fail_aen)
4390 				mrsas_bus_scan_sim(sc, sc->sim_0);
4391 			else
4392 				goto skip_register_aen;
4393 			doscan = 0;
4394 			break;
4395 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4396 		case MR_EVT_FOREIGN_CFG_IMPORTED:
4397 		case MR_EVT_LD_STATE_CHANGE:
4398 			doscan = 1;
4399 			break;
4400 		default:
4401 			doscan = 0;
4402 			break;
4403 		}
4404 	} else {
4405 		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
4406 		return;
4407 	}
4408 	if (doscan) {
4409 		fail_aen = mrsas_get_pd_list(sc);
4410 		if (!fail_aen) {
4411 			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4412 			mrsas_bus_scan_sim(sc, sc->sim_1);
4413 		} else
4414 			goto skip_register_aen;
4415 
4416 		fail_aen = mrsas_get_ld_list(sc);
4417 		if (!fail_aen) {
4418 			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4419 			mrsas_bus_scan_sim(sc, sc->sim_0);
4420 		} else
4421 			goto skip_register_aen;
4422 	}
4423 	seq_num = sc->evt_detail_mem->seq_num + 1;
4424 
4425 	/* Register AEN with FW for latest sequence number plus 1 */
4426 	class_locale.members.reserved = 0;
4427 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
4428 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
4429 
4430 	if (sc->aen_cmd != NULL)
4431 		return;
4432 
4433 	mtx_lock(&sc->aen_lock);
4434 	error = mrsas_register_aen(sc, seq_num,
4435 	    class_locale.word);
4436 	mtx_unlock(&sc->aen_lock);
4437 
4438 	if (error)
4439 		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
4440 
4441 skip_register_aen:
4442 	return;
4443 
4444 }
4445 
4446 
4447 /*
4448  * mrsas_complete_aen:	Completes AEN command
4449  * input:				Adapter soft state
4450  * 						Cmd that was issued to abort another cmd
4451  *
4452  * This function will be called from ISR and will continue event processing from
4453  * thread context by enqueuing task in ev_tq (callback function
4454  * "mrsas_aen_handler").
4455  */
4456 void
4457 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4458 {
4459 	/*
4460 	 * Don't signal app if it is just an aborted previously registered
4461 	 * aen
4462 	 */
4463 	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4464 		sc->mrsas_aen_triggered = 1;
4465 		mtx_lock(&sc->aen_lock);
4466 		if (sc->mrsas_poll_waiting) {
4467 			sc->mrsas_poll_waiting = 0;
4468 			selwakeup(&sc->mrsas_select);
4469 		}
4470 		mtx_unlock(&sc->aen_lock);
4471 	} else
4472 		cmd->abort_aen = 0;
4473 
4474 	sc->aen_cmd = NULL;
4475 	mrsas_release_mfi_cmd(cmd);
4476 
4477 	if (!sc->remove_in_progress)
4478 		taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
4479 
4480 	return;
4481 }
4482 
/* Newbus method table: standard probe/attach/detach/suspend/resume entry
 * points for the mrsas(4) PCI driver. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}	/* table terminator */
};

/* Driver declaration: name, method table, size of the per-device softc. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;

/* Register the driver on the PCI bus and declare the CAM dependency. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
4504