xref: /freebsd/sys/dev/mrsas/mrsas.c (revision cfd6422a5217410fbd66f7a7a8a64d9d85e61229)
1 /*
2  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4  * Support: freebsdraid@avagotech.com
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer. 2. Redistributions
12  * in binary form must reproduce the above copyright notice, this list of
13  * conditions and the following disclaimer in the documentation and/or other
14  * materials provided with the distribution. 3. Neither the name of the
15  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16  * promote products derived from this software without specific prior written
17  * permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing
33  * official policies,either expressed or implied, of the FreeBSD Project.
34  *
35  * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37  *
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
45 
46 #include <cam/cam.h>
47 #include <cam/cam_ccb.h>
48 
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/smp.h>
55 
56 /*
57  * Function prototypes
58  */
59 static d_open_t mrsas_open;
60 static d_close_t mrsas_close;
61 static d_read_t mrsas_read;
62 static d_write_t mrsas_write;
63 static d_ioctl_t mrsas_ioctl;
64 static d_poll_t mrsas_poll;
65 
66 static void mrsas_ich_startup(void *arg);
67 static struct mrsas_mgmt_info mrsas_mgmt_info;
68 static struct mrsas_ident *mrsas_find_ident(device_t);
69 static int mrsas_setup_msix(struct mrsas_softc *sc);
70 static int mrsas_allocate_msix(struct mrsas_softc *sc);
71 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
72 static void mrsas_flush_cache(struct mrsas_softc *sc);
73 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
74 static void mrsas_ocr_thread(void *arg);
75 static int mrsas_get_map_info(struct mrsas_softc *sc);
76 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
77 static int mrsas_sync_map_info(struct mrsas_softc *sc);
78 static int mrsas_get_pd_list(struct mrsas_softc *sc);
79 static int mrsas_get_ld_list(struct mrsas_softc *sc);
80 static int mrsas_setup_irq(struct mrsas_softc *sc);
81 static int mrsas_alloc_mem(struct mrsas_softc *sc);
82 static int mrsas_init_fw(struct mrsas_softc *sc);
83 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
84 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
85 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
86 static int mrsas_clear_intr(struct mrsas_softc *sc);
87 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
88 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
89 static int
90 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
91     struct mrsas_mfi_cmd *cmd_to_abort);
92 static void
93 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
94 static struct mrsas_softc *
95 mrsas_get_softc_instance(struct cdev *dev,
96     u_long cmd, caddr_t arg);
97 u_int32_t
98 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
99 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
100 u_int8_t
101 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
102     struct mrsas_mfi_cmd *mfi_cmd);
103 void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
104 int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
105 int	mrsas_init_adapter(struct mrsas_softc *sc);
106 int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
107 int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
108 int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
109 int	mrsas_ioc_init(struct mrsas_softc *sc);
110 int	mrsas_bus_scan(struct mrsas_softc *sc);
111 int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
112 int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
113 int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
114 int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
115 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
116 int mrsas_reset_targets(struct mrsas_softc *sc);
117 int
118 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
119     struct mrsas_mfi_cmd *cmd);
120 int
121 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
122     int size);
123 void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
124 void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
125 void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
126 void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
127 void	mrsas_disable_intr(struct mrsas_softc *sc);
128 void	mrsas_enable_intr(struct mrsas_softc *sc);
129 void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
130 void	mrsas_free_mem(struct mrsas_softc *sc);
131 void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
132 void	mrsas_isr(void *arg);
133 void	mrsas_teardown_intr(struct mrsas_softc *sc);
134 void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
135 void	mrsas_kill_hba(struct mrsas_softc *sc);
136 void	mrsas_aen_handler(struct mrsas_softc *sc);
137 void
138 mrsas_write_reg(struct mrsas_softc *sc, int offset,
139     u_int32_t value);
140 void
141 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
142     u_int32_t req_desc_hi);
143 void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
144 void
145 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
146     struct mrsas_mfi_cmd *cmd, u_int8_t status);
147 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
148 
149 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
150         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
151 
152 extern int mrsas_cam_attach(struct mrsas_softc *sc);
153 extern void mrsas_cam_detach(struct mrsas_softc *sc);
154 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
155 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
156 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
157 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
158 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
159 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
160 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
163 extern void mrsas_xpt_release(struct mrsas_softc *sc);
164 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
165 mrsas_get_request_desc(struct mrsas_softc *sc,
166     u_int16_t index);
167 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
168 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
169 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
170 void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
171 
172 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
173 	union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
174 	u_int32_t data_length, u_int8_t *sense);
175 void
176 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
177     u_int32_t req_desc_hi);
178 
179 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
180     "MRSAS Driver Parameters");
181 
182 /*
183  * PCI device struct and table
184  *
185  */
/*
 * PCI identity for one supported controller: vendor/device pair plus
 * subvendor/subdevice (0xffff acts as a wildcard in mrsas_find_ident())
 * and a human-readable description passed to device_set_desc().
 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor id */
	uint16_t device;	/* PCI device id */
	uint16_t subvendor;	/* PCI subvendor id, 0xffff = any */
	uint16_t subdevice;	/* PCI subdevice id, 0xffff = any */
	const char *desc;	/* probe description string */
}	MRSAS_CTLR_ID;
193 
/*
 * Table of supported controllers, terminated by an all-zero sentinel
 * entry.  Matched by mrsas_find_ident(); the 0xffff subvendor/subdevice
 * columns match any value.
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	{0, 0, 0, 0, NULL}	/* sentinel: vendor == 0 stops the scan */
};
218 
219 /*
220  * Character device entry points
221  *
222  */
/*
 * cdevsw for the /dev/mrsas<unit> management node.  The real work is
 * done in d_ioctl/d_poll; open/close/read/write are stubs (see below).
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
233 
234 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
235 
236 /*
237  * In the cdevsw routines, we find our softc by using the si_drv1 member of
238  * struct cdev.  We set this variable to point to our softc in our attach
239  * routine when we create the /dev entry.
240  */
/*
 * mrsas_open:	character device open entry point.
 *
 * Nothing to do at open time; the softc is looked up via dev->si_drv1
 * in the paths that actually need it.  Always succeeds.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}
249 
/*
 * mrsas_close:	character device close entry point.
 *
 * No per-open state is kept, so there is nothing to tear down.
 * Always succeeds.
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}
258 
/*
 * mrsas_read:	character device read entry point.
 *
 * The management node carries no byte stream; reads return immediately
 * with no data (EOF) and success.
 */
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
/*
 * mrsas_write:	character device write entry point.
 *
 * Writes are accepted and discarded; all management traffic goes
 * through d_ioctl instead.
 */
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
275 
276 u_int32_t
277 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
278 {
279 	u_int32_t i = 0, ret_val;
280 
281 	if (sc->is_aero) {
282 		do {
283 			ret_val = mrsas_read_reg(sc, offset);
284 			i++;
285 		} while(ret_val == 0 && i < 3);
286 	} else
287 		ret_val = mrsas_read_reg(sc, offset);
288 
289 	return ret_val;
290 }
291 
292 /*
293  * Register Read/Write Functions
294  *
295  */
296 void
297 mrsas_write_reg(struct mrsas_softc *sc, int offset,
298     u_int32_t value)
299 {
300 	bus_space_tag_t bus_tag = sc->bus_tag;
301 	bus_space_handle_t bus_handle = sc->bus_handle;
302 
303 	bus_space_write_4(bus_tag, bus_handle, offset, value);
304 }
305 
306 u_int32_t
307 mrsas_read_reg(struct mrsas_softc *sc, int offset)
308 {
309 	bus_space_tag_t bus_tag = sc->bus_tag;
310 	bus_space_handle_t bus_handle = sc->bus_handle;
311 
312 	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
313 }
314 
315 /*
316  * Interrupt Disable/Enable/Clear Functions
317  *
318  */
319 void
320 mrsas_disable_intr(struct mrsas_softc *sc)
321 {
322 	u_int32_t mask = 0xFFFFFFFF;
323 	u_int32_t status;
324 
325 	sc->mask_interrupts = 1;
326 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
327 	/* Dummy read to force pci flush */
328 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
329 }
330 
331 void
332 mrsas_enable_intr(struct mrsas_softc *sc)
333 {
334 	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
335 	u_int32_t status;
336 
337 	sc->mask_interrupts = 0;
338 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
339 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
340 
341 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
342 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
343 }
344 
345 static int
346 mrsas_clear_intr(struct mrsas_softc *sc)
347 {
348 	u_int32_t status;
349 
350 	/* Read received interrupt */
351 	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));
352 
353 	/* Not our interrupt, so just return */
354 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
355 		return (0);
356 
357 	/* We got a reply interrupt */
358 	return (1);
359 }
360 
361 /*
362  * PCI Support Functions
363  *
364  */
365 static struct mrsas_ident *
366 mrsas_find_ident(device_t dev)
367 {
368 	struct mrsas_ident *pci_device;
369 
370 	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
371 		if ((pci_device->vendor == pci_get_vendor(dev)) &&
372 		    (pci_device->device == pci_get_device(dev)) &&
373 		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
374 		    (pci_device->subvendor == 0xffff)) &&
375 		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
376 		    (pci_device->subdevice == 0xffff)))
377 			return (pci_device);
378 	}
379 	return (NULL);
380 }
381 
/*
 * mrsas_probe:	PCI probe entry point.
 *
 * Matches the device against device_table.  Prints the driver version
 * banner once, on the first controller probed.  Returns a negative
 * probe priority (between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY)
 * on a match so a more specific driver may still claim the device, or
 * ENXIO when the device is not ours.
 */
static int
mrsas_probe(device_t dev)
{
	/* one-shot flag: banner is printed for the first controller only */
	static u_int8_t first_ctrl = 1;
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		if (first_ctrl) {
			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
			    MRSAS_VERSION);
			first_ctrl = 0;
		}
		device_set_desc(dev, id->desc);
		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
		return (-30);
	}
	return (ENXIO);
}
400 
401 /*
402  * mrsas_setup_sysctl:	setup sysctl values for mrsas
403  * input:				Adapter instance soft state
404  *
405  * Setup sysctl entries for mrsas driver.
406  */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the device's own sysctl context/tree (dev.mrsas.<unit>). */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* Fall back to a private context rooted at hw.mrsas.<unit>. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");
	/* NOTE(review): this sysctl name contains a space, which is awkward
	 * to address from sysctl(8) — confirm the name is intentional. */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream detection", CTLFLAG_RW,
		&sc->drv_stream_detection, 0,
		"Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
	/* NOTE(review): "SGE holes" also contains a space — confirm intended. */
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "SGE holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}
488 
489 /*
490  * mrsas_get_tunables:	get tunable parameters.
491  * input:				Adapter instance soft state
492  *
493  * Get tunable parameters. This will help to debug driver at boot time.
494  */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug =
		(MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;
	sc->block_sync_cache = 0;
	sc->drv_stream_detection = 1;

	/*
	 * Grab the global debug-level tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the global load-balancing pending-commands tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables; these override the global value. */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
525 
526 /*
527  * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
528  * Used to get sequence number at driver load time.
529  * input:		Adapter soft state
530  *
531  * Allocates DMAable memory for the event log info internal command.
532  */
533 int
534 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
535 {
536 	int el_info_size;
537 
538 	/* Allocate get event log info command */
539 	el_info_size = sizeof(struct mrsas_evt_log_info);
540 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
541 	    1, 0,
542 	    BUS_SPACE_MAXADDR_32BIT,
543 	    BUS_SPACE_MAXADDR,
544 	    NULL, NULL,
545 	    el_info_size,
546 	    1,
547 	    el_info_size,
548 	    BUS_DMA_ALLOCNOW,
549 	    NULL, NULL,
550 	    &sc->el_info_tag)) {
551 		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
552 		return (ENOMEM);
553 	}
554 	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
555 	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
556 		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
557 		return (ENOMEM);
558 	}
559 	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
560 	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
561 	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
562 		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
563 		return (ENOMEM);
564 	}
565 	memset(sc->el_info_mem, 0, el_info_size);
566 	return (0);
567 }
568 
569 /*
570  * mrsas_free_evt_info_cmd:	Free memory for Event log info command
571  * input:					Adapter soft state
572  *
573  * Deallocates memory for the event log info internal command.
574  */
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
	/*
	 * Tear down in reverse order of allocation: unload the DMA map,
	 * free the DMA memory, then destroy the tag.  NOTE(review): the
	 * softc fields are not reset to NULL/0 afterwards, so this must
	 * not be called twice without an intervening successful
	 * mrsas_alloc_evt_log_info_cmd() — confirm callers obey this.
	 */
	if (sc->el_info_phys_addr)
		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
	if (sc->el_info_mem != NULL)
		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
	if (sc->el_info_tag != NULL)
		bus_dma_tag_destroy(sc->el_info_tag);
}
585 
586 /*
587  *  mrsas_get_seq_num:	Get latest event sequence number
588  *  @sc:				Adapter soft state
589  *  @eli:				Firmware event log sequence number information.
590  *
591  * Firmware maintains a log of all events in a non-volatile area.
592  * Driver get the sequence number using DCMD
593  * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
594  */
595 
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMAable buffer the FW writes the event log info into. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build MR_DCMD_CTRL_EVENT_GET_INFO as a single-SGE read DCMD. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/*
	 * On timeout, the cmd and its DMA buffer are deliberately NOT
	 * released here (the FW may still complete into them); instead an
	 * online controller reset (OCR) is requested to clean up.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
649 
650 /*
651  *  mrsas_register_aen:		Register for asynchronous event notification
652  *  @sc:			Adapter soft state
653  *  @seq_num:			Starting sequence number
654  *  @class_locale:		Class of the event
655  *
656  *  This function subscribes for events beyond the @seq_num
657  *  and type @class_locale.
658  *
659  */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {
		/* mbox.w[1] holds the class/locale the pending AEN used. */
		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset: OR the locale bitmaps and
			 * keep the lower (more inclusive) class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;	/* first sequence number to report */
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;	/* read back by inclusion test above */
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/* NOTE(review): re-check of aen_cmd — presumably guards against a
	 * concurrent registration having slipped in; confirm locking model. */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
766 
767 /*
768  * mrsas_start_aen:	Subscribes to AEN during driver load time
769  * @instance:		Adapter soft state
770  */
771 static int
772 mrsas_start_aen(struct mrsas_softc *sc)
773 {
774 	struct mrsas_evt_log_info eli;
775 	union mrsas_evt_class_locale class_locale;
776 
777 	/* Get the latest sequence number from FW */
778 
779 	memset(&eli, 0, sizeof(eli));
780 
781 	if (mrsas_get_seq_num(sc, &eli))
782 		return -1;
783 
784 	/* Register AEN with FW for latest sequence number plus 1 */
785 	class_locale.members.reserved = 0;
786 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
787 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
788 
789 	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
790 	    class_locale.word);
791 
792 }
793 
794 /*
795  * mrsas_setup_msix:	Allocate MSI-x vectors
796  * @sc:					adapter soft state
797  */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	/* Allocate and wire one IRQ resource per MSI-x vector. */
	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		/* MSI-x resource ids are 1-based. */
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	/* Release whatever vectors were set up before the failure. */
	mrsas_teardown_intr(sc);
	return (FAIL);
}
830 
831 /*
832  * mrsas_allocate_msix:		Setup MSI-x vectors
833  * @sc:						adapter soft state
834  */
835 static int
836 mrsas_allocate_msix(struct mrsas_softc *sc)
837 {
838 	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
839 		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
840 		    " of vectors\n", sc->msix_vectors);
841 	} else {
842 		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
843 		goto irq_alloc_failed;
844 	}
845 	return SUCCESS;
846 
847 irq_alloc_failed:
848 	mrsas_teardown_intr(sc);
849 	return (FAIL);
850 }
851 
852 /*
853  * mrsas_attach:	PCI entry point
854  * input:			pointer to device struct
855  *
856  * Performs setup of PCI and registers, initializes mutexes and linked lists,
857  * registers interrupts and CAM, and initializes   the adapter/controller to
858  * its proper state.
859  */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, error;

	/* Softc comes from the newbus allocator; start from a clean slate. */
	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Classify the controller generation from the PCI device ID. */
	switch (sc->device_id) {
	case MRSAS_INVADER:
	case MRSAS_FURY:
	case MRSAS_INTRUDER:
	case MRSAS_INTRUDER_24:
	case MRSAS_CUTLASS_52:
	case MRSAS_CUTLASS_53:
		sc->mrsas_gen3_ctrl = 1;
		break;
	case MRSAS_VENTURA:
	case MRSAS_CRUSADER:
	case MRSAS_HARPOON:
	case MRSAS_TOMCAT:
	case MRSAS_VENTURA_4PORT:
	case MRSAS_CRUSADER_4PORT:
		sc->is_ventura = true;
		break;
	case MRSAS_AERO_10E1:
	case MRSAS_AERO_10E5:
		device_printf(dev, "Adapter is in configurable secure mode\n");
		/* FALLTHROUGH: secure-mode Aero parts are still Aero controllers */
	case MRSAS_AERO_10E2:
	case MRSAS_AERO_10E6:
		sc->is_aero = true;
		break;
	case MRSAS_AERO_10E0:
	case MRSAS_AERO_10E3:
	case MRSAS_AERO_10E4:
	case MRSAS_AERO_10E7:
		/* Non-secure Aero parts are not driven; report success without attaching. */
		device_printf(dev, "Adapter is in non-secure mode\n");
		return SUCCESS;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/*
	 * NOTE(review): this tests the I/O-space enable bit (PCIM_CMD_PORTEN)
	 * even though the registers below are memory-mapped BARs; confirm
	 * whether PCIM_CMD_MEMEN was intended.
	 */
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* For Ventura/Aero system registers are mapped to BAR0 */
	if (sc->is_ventura || sc->is_aero)
		sc->reg_res_id = PCIR_BAR(0);	/* BAR0 offset */
	else
		sc->reg_res_id = PCIR_BAR(1);	/* BAR1 offset */

	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Intialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	/* ioctl_lock is a spin mutex: taken from mrsas_ioctl() for a short check */
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
	mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);

	/* Intialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);
	mrsas_atomic_set(&sc->prp_count, 0);
	mrsas_atomic_set(&sc->sge_holes, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Spawn the online controller reset (OCR) recovery thread. */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Failure unwinding: each label releases everything set up above it. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
1018 
1019 /*
1020  * Interrupt config hook
1021  */
static void
mrsas_ich_startup(void *arg)
{
	int i = 0;
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	/* Unit 0 also gets a Linux-emulator compatible alias node. */
	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Call DCMD get_pd_info for all system PDs */
	for (i = 0; i < MRSAS_MAX_PD; i++) {
		/* 0xffff marks an unused target slot; skip those. */
		if ((sc->target_list[i].target_id != 0xffff) &&
			sc->pd_info_mem)
			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
	}

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* Startup is complete; drop the config hook so boot can proceed. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}
1082 
1083 /*
1084  * mrsas_detach:	De-allocates and teardown resources
1085  * input:			pointer to device struct
1086  *
1087  * This function is the entry point for device disconnect and detach.
1088  * It performs memory de-allocations, shutdown of the controller and various
1089  * teardown and destroy resource functions.
1090  */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Flag checked by the ioctl path and worker threads to refuse new work. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Wake the OCR thread so it notices remove_in_progress and exits. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Wait (unbounded) for any in-flight controller reset to finish. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Then wait for the OCR thread itself to terminate. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Flush cache and shut the controller down before tearing down resources. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);

	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
			free(sc->streamDetectByLD[i], M_MRSAS);
		free(sc->streamDetectByLD, M_MRSAS);
		sc->streamDetectByLD = NULL;
	}

	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1178 
/*
 * mrsas_shutdown:	System shutdown entry point
 * input:			pointer to device struct
 *
 * Flushes the controller cache and shuts the firmware down so that dirty
 * cache is not lost across a reboot.  Unlike detach, the wait for a pending
 * OCR is bounded (about 15 seconds) and is skipped entirely when the kernel
 * has panicked, since sleeping is not possible then.
 */
static int
mrsas_shutdown(device_t dev)
{
	struct mrsas_softc *sc;
	int i;

	sc = device_get_softc(dev);
	sc->remove_in_progress = 1;
	if (!KERNEL_PANICKED()) {
		if (sc->ocr_thread_active)
			wakeup(&sc->ocr_chan);
		i = 0;
		/* Give OCR up to ~15 ticks of hz-long pauses to complete. */
		while (sc->reset_in_progress && i < 15) {
			i++;
			if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
				mrsas_dprint(sc, MRSAS_INFO,
				    "[%2d]waiting for OCR to be finished "
				    "from %s\n", i, __func__);
			}
			pause("mr_shutdown", hz);
		}
		if (sc->reset_in_progress) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "gave up waiting for OCR to be finished\n");
		}
	}

	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	return (0);
}
1211 
1212 /*
1213  * mrsas_free_mem:		Frees allocated memory
1214  * input:				Adapter instance soft state
1215  *
1216  * This function is called from mrsas_detach() to free previously allocated
1217  * memory.
1218  */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_fw_cmds;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory
	 * (two copies are kept for double-buffered map updates)
	 */
	for (i = 0; i < 2; i++) {
		/* For each DMA region: unload map, free memory, destroy tag — in that order. */
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);

	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free PD info memory
	 */
	if (sc->pd_info_phys_addr)
		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
	if (sc->pd_info_mem != NULL)
		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
	if (sc->pd_info_tag != NULL)
		bus_dma_tag_destroy(sc->pd_info_tag);

	/*
	 * Free MFI frames
	 * (frames are released first; the command structures themselves
	 * are freed in the mfi_cmd_list loop further below)
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_fw_cmds = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_fw_cmds; i++) {
			/* NOTE(review): assumes every slot was allocated; confirm against alloc path. */
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}
1373 
1374 /*
1375  * mrsas_teardown_intr:	Teardown interrupt
1376  * input:				Adapter instance soft state
1377  *
1378  * This function is called from mrsas_detach() to teardown and release bus
1379  * interrupt resourse.
1380  */
void
mrsas_teardown_intr(struct mrsas_softc *sc)
{
	int i;

	if (!sc->msix_enable) {
		/* Legacy INTx: single handler/resource in slot 0. */
		if (sc->intr_handle[0])
			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
		if (sc->mrsas_irq[0] != NULL)
			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
			    sc->irq_id[0], sc->mrsas_irq[0]);
		sc->intr_handle[0] = NULL;
	} else {
		/* MSI-X: tear down and release each vector, then free the MSI allocation. */
		for (i = 0; i < sc->msix_vectors; i++) {
			if (sc->intr_handle[i])
				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
				    sc->intr_handle[i]);

			if (sc->mrsas_irq[i] != NULL)
				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
				    sc->irq_id[i], sc->mrsas_irq[i]);

			sc->intr_handle[i] = NULL;
		}
		pci_release_msi(sc->mrsas_dev);
	}

}
1409 
1410 /*
1411  * mrsas_suspend:	Suspend entry point
1412  * input:			Device struct pointer
1413  *
1414  * This function is the entry point for system suspend from the OS.
1415  */
static int
mrsas_suspend(device_t dev)
{
	/* This will be filled when the driver will have hibernation support */
	return (0);
}
1422 
1423 /*
1424  * mrsas_resume:	Resume entry point
1425  * input:			Device struct pointer
1426  *
1427  * This function is the entry point for system resume from the OS.
1428  */
static int
mrsas_resume(device_t dev)
{
	/* This will be filled when the driver will have hibernation support */
	return (0);
}
1435 
1436 /**
1437  * mrsas_get_softc_instance:    Find softc instance based on cmd type
1438  *
1439  * This function will return softc instance based on cmd type.
1440  * In some case, application fire ioctl on required management instance and
1441  * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1442  * case, else get the softc instance from host_no provided by application in
1443  * user data.
1444  */
1445 
1446 static struct mrsas_softc *
1447 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1448 {
1449 	struct mrsas_softc *sc = NULL;
1450 	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1451 
1452 	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1453 		sc = dev->si_drv1;
1454 	} else {
1455 		/*
1456 		 * get the Host number & the softc from data sent by the
1457 		 * Application
1458 		 */
1459 		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1460 		if (sc == NULL)
1461 			printf("There is no Controller number %d\n",
1462 			    user_ioc->host_no);
1463 		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1464 			mrsas_dprint(sc, MRSAS_FAULT,
1465 			    "Invalid Controller number %d\n", user_ioc->host_no);
1466 	}
1467 
1468 	return sc;
1469 }
1470 
1471 /*
1472  * mrsas_ioctl:	IOCtl commands entry point.
1473  *
1474  * This function is the entry point for IOCtls from the OS.  It calls the
1475  * appropriate function for processing depending on the command received.
1476  */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	/* Refuse new ioctls while detaching or after an unrecoverable HW fault. */
	if (sc->remove_in_progress ||
		(sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Either driver remove or shutdown called or "
			"HW is in unrecoverable critical error state.\n");
		return ENOENT;
	}
	/* Spin lock only guards the reset_in_progress snapshot below. */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	/* An OCR is running: poll until it completes before servicing the ioctl. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report PCI bus/slot/function/domain of this controller. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1555 
1556 /*
1557  * mrsas_poll:	poll entry point for mrsas driver fd
1558  *
1559  * This function is the entry point for poll from the OS.  It waits for some AEN
1560  * events to be triggered from the controller and notifies back.
1561  */
1562 static int
1563 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1564 {
1565 	struct mrsas_softc *sc;
1566 	int revents = 0;
1567 
1568 	sc = dev->si_drv1;
1569 
1570 	if (poll_events & (POLLIN | POLLRDNORM)) {
1571 		if (sc->mrsas_aen_triggered) {
1572 			revents |= poll_events & (POLLIN | POLLRDNORM);
1573 		}
1574 	}
1575 	if (revents == 0) {
1576 		if (poll_events & (POLLIN | POLLRDNORM)) {
1577 			mtx_lock(&sc->aen_lock);
1578 			sc->mrsas_poll_waiting = 1;
1579 			selrecord(td, &sc->mrsas_select);
1580 			mtx_unlock(&sc->aen_lock);
1581 		}
1582 	}
1583 	return revents;
1584 }
1585 
1586 /*
1587  * mrsas_setup_irq:	Set up interrupt
1588  * input:			Adapter instance soft state
1589  *
1590  * This function sets up interrupts as a bus resource, with flags indicating
1591  * resource permitting contemporaneous sharing and for resource to activate
1592  * atomically.
1593  */
1594 static int
1595 mrsas_setup_irq(struct mrsas_softc *sc)
1596 {
1597 	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1598 		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1599 
1600 	else {
1601 		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1602 		sc->irq_context[0].sc = sc;
1603 		sc->irq_context[0].MSIxIndex = 0;
1604 		sc->irq_id[0] = 0;
1605 		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1606 		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1607 		if (sc->mrsas_irq[0] == NULL) {
1608 			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1609 			    "interrupt\n");
1610 			return (FAIL);
1611 		}
1612 		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1613 		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1614 		    &sc->irq_context[0], &sc->intr_handle[0])) {
1615 			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1616 			    "interrupt\n");
1617 			return (FAIL);
1618 		}
1619 	}
1620 	return (0);
1621 }
1622 
1623 /*
1624  * mrsas_isr:	ISR entry point
1625  * input:		argument pointer
1626  *
1627  * This function is the interrupt service routine entry point.  There are two
1628  * types of interrupts, state change interrupt and response interrupt.  If an
1629  * interrupt is not ours, we just return.
1630  */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	/* Interrupts are masked during reset/teardown; drop the interrupt. */
	if (sc->mask_interrupts)
		return;

	/* Legacy INTx may be shared: bail if the interrupt is not ours. */
	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}
1658 
1659 /*
1660  * mrsas_complete_cmd:	Process reply request
1661  * input:				Adapter instance soft state
1662  *
1663  * This function is called from mrsas_isr() to process reply request and clear
1664  * response interrupt. Processing of the reply request entails walking
1665  * through the reply descriptor array for the command request  pended from
1666  * Firmware.  We look at the Function field to determine the command type and
1667  * perform the appropriate action.  Before we return, we clear the response
1668  * interrupt.
1669  */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type, *sense;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id, data_length;
	int threshold_reply_count = 0;
#if TM_DEBUG
	MR_TASK_MANAGE_REQUEST *mr_tm_req;
	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
#endif

	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/* Position at this MSI-X vector's reply ring, at the last consumed index. */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Find our reply descriptor for the command and process */
	/* All-0xFF words mark an unconsumed (empty) reply slot. */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		/* SMID is 1-based; slot 0 of mpt_cmd_list corresponds to SMID 1. */
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.raid_context.status;
		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
		sense = cmd_mpt->sense;
		data_length = scsi_io_req->DataLength;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_TASK_MGMT:
#if TM_DEBUG
			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
			    &mr_tm_req->TmRequest;
			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
#endif
            wakeup_one((void *)&sc->ocr_chan);
            break;
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* R1 load balancing for READ */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			/* No R1 peer command: complete this IO on its own. */
			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
				    extStatus, data_length, sense);
				mrsas_cmd_done(sc, cmd_mpt);
				mrsas_atomic_dec(&sc->fw_outstanding);
			} else {
				/*
				 * If the peer  Raid  1/10 fast path failed,
				 * mark IO as failed to the scsi layer.
				 * Overwrite the current status by the failed status
				 * and make sure that if any command fails,
				 * driver returns fail status to CAM.
				 */
				cmd_mpt->cmd_completed = 1;
				r1_cmd = cmd_mpt->peer_cmd;
				/* Only the second of the R1 pair to complete finishes the CCB. */
				if (r1_cmd->cmd_completed) {
					if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
						status = r1_cmd->io_request->RaidContext.raid_context.status;
						extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
						data_length = r1_cmd->io_request->DataLength;
						sense = r1_cmd->sense;
					}
					r1_cmd->ccb_ptr = NULL;
					if (r1_cmd->callout_owner) {
						callout_stop(&r1_cmd->cm_callout);
						r1_cmd->callout_owner  = false;
					}
					mrsas_release_mpt_cmd(r1_cmd);
					mrsas_atomic_dec(&sc->fw_outstanding);
					mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
					    extStatus, data_length, sense);
					mrsas_cmd_done(sc, cmd_mpt);
					mrsas_atomic_dec(&sc->fw_outstanding);
				}
			}
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/*
			 * Make sure NOT TO release the mfi command from the called
			 * function's context if it is fired with issue_polled call.
			 * And also make sure that the issue_polled call should only be
			 * used if INTERRUPT IS DISABLED.
			 */
			if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
				mrsas_release_mfi_cmd(cmd_mfi);
			else
				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			break;
		}

		/* Advance (and wrap) this vector's consumer index. */
		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			/* Wrapped: restart at the base of this vector's ring. */
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if (sc->msix_combined)
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if (sc->msix_combined) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
1853 
1854 /*
1855  * mrsas_map_mpt_cmd_status:	Allocate DMAable memory.
1856  * input:						Adapter instance soft state
1857  *
1858  * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1859  * It checks the command status and maps the appropriate CAM status for the
1860  * CCB.
1861  */
1862 void
1863 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
1864     u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
1865 {
1866 	struct mrsas_softc *sc = cmd->sc;
1867 	u_int8_t *sense_data;
1868 
1869 	switch (status) {
1870 	case MFI_STAT_OK:
1871 		ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1872 		break;
1873 	case MFI_STAT_SCSI_IO_FAILED:
1874 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1875 		ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1876 		sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
1877 		if (sense_data) {
1878 			/* For now just copy 18 bytes back */
1879 			memcpy(sense_data, sense, 18);
1880 			ccb_ptr->csio.sense_len = 18;
1881 			ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1882 		}
1883 		break;
1884 	case MFI_STAT_LD_OFFLINE:
1885 	case MFI_STAT_DEVICE_NOT_FOUND:
1886 		if (ccb_ptr->ccb_h.target_lun)
1887 			ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1888 		else
1889 			ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1890 		break;
1891 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1892 		ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1893 		break;
1894 	default:
1895 		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1896 		ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1897 		ccb_ptr->csio.scsi_status = status;
1898 	}
1899 	return;
1900 }
1901 
1902 /*
1903  * mrsas_alloc_mem:	Allocate DMAable memory
1904  * input:			Adapter instance soft state
1905  *
1906  * This function creates the parent DMA tag and allocates DMAable memory. DMA
1907  * tag describes constraints of DMA mapping. Memory allocated is mapped into
1908  * Kernel virtual address. Callback argument is physical memory address.
1909  */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
		evt_detail_size, count, pd_info_size;

	/*
	 * NOTE(review): on any failure below this function returns ENOMEM
	 * without tearing down the tags/maps created so far — presumably the
	 * caller unwinds via the driver's free path; confirm before reuse.
	 */

	/*
	 * Allocate parent DMA tag.  All per-region tags below derive from
	 * it; it sets the overall nsegments/maxsize limits for data I/O.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    maxphys,			/* maxsize */
	    sc->max_num_sge,		/* nsegments */
	    maxphys,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer.  Kept below 4GB (lowaddr is
	 * BUS_SPACE_MAXADDR_32BIT), as are all control structures below.
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames (16-byte aligned, size computed by
	 * mrsas_init_adapter() from max_fw_cmds).
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames (SGE chain segments for large I/O).
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	/* One reply queue per MSI-X vector (at least one). */
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array.  Not zeroed here: the descriptors
	 * are set to all-0xFF by the init/completion path (see the reply
	 * processing loop, which writes ~0 into desc->Words).
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array.  Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for Event detail structure (AEN payload buffer).
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for PD INFO structure
	 */
	pd_info_size = sizeof(struct mrsas_pd_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    pd_info_size,
	    1,
	    pd_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->pd_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
	    BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->pd_info_mem, pd_info_size);
	if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
	    sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
	    &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  Unlike the control buffers above this
	 * tag allows 64-bit addresses and uses the driver's io_lock for
	 * deferred-load serialization.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    maxphys,
	    sc->max_num_sge,		/* nsegments */
	    maxphys,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}
2169 
2170 /*
2171  * mrsas_addr_cb:	Callback function of bus_dmamap_load()
2172  * input:			callback argument, machine dependent type
2173  * 					that describes DMA segments, number of segments, error code
2174  *
2175  * This function is for the driver to receive mapping information resultant of
2176  * the bus_dmamap_load(). The information is actually not being used, but the
2177  * address is saved anyway.
2178  */
2179 void
2180 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2181 {
2182 	bus_addr_t *addr;
2183 
2184 	addr = arg;
2185 	*addr = segs[0].ds_addr;
2186 }
2187 
2188 /*
2189  * mrsas_setup_raidmap:	Set up RAID map.
2190  * input:				Adapter instance soft state
2191  *
2192  * Allocate DMA memory for the RAID maps and perform setup.
2193  */
2194 static int
2195 mrsas_setup_raidmap(struct mrsas_softc *sc)
2196 {
2197 	int i;
2198 
2199 	for (i = 0; i < 2; i++) {
2200 		sc->ld_drv_map[i] =
2201 		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2202 		/* Do Error handling */
2203 		if (!sc->ld_drv_map[i]) {
2204 			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
2205 
2206 			if (i == 1)
2207 				free(sc->ld_drv_map[0], M_MRSAS);
2208 			/* ABORT driver initialization */
2209 			goto ABORT;
2210 		}
2211 	}
2212 
2213 	for (int i = 0; i < 2; i++) {
2214 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2215 		    4, 0,
2216 		    BUS_SPACE_MAXADDR_32BIT,
2217 		    BUS_SPACE_MAXADDR,
2218 		    NULL, NULL,
2219 		    sc->max_map_sz,
2220 		    1,
2221 		    sc->max_map_sz,
2222 		    BUS_DMA_ALLOCNOW,
2223 		    NULL, NULL,
2224 		    &sc->raidmap_tag[i])) {
2225 			device_printf(sc->mrsas_dev,
2226 			    "Cannot allocate raid map tag.\n");
2227 			return (ENOMEM);
2228 		}
2229 		if (bus_dmamem_alloc(sc->raidmap_tag[i],
2230 		    (void **)&sc->raidmap_mem[i],
2231 		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2232 			device_printf(sc->mrsas_dev,
2233 			    "Cannot allocate raidmap memory.\n");
2234 			return (ENOMEM);
2235 		}
2236 		bzero(sc->raidmap_mem[i], sc->max_map_sz);
2237 
2238 		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2239 		    sc->raidmap_mem[i], sc->max_map_sz,
2240 		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2241 		    BUS_DMA_NOWAIT)) {
2242 			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2243 			return (ENOMEM);
2244 		}
2245 		if (!sc->raidmap_mem[i]) {
2246 			device_printf(sc->mrsas_dev,
2247 			    "Cannot allocate memory for raid map.\n");
2248 			return (ENOMEM);
2249 		}
2250 	}
2251 
2252 	if (!mrsas_get_map_info(sc))
2253 		mrsas_sync_map_info(sc);
2254 
2255 	return (0);
2256 
2257 ABORT:
2258 	return (1);
2259 }
2260 
2261 /**
2262  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
2263  * @sc:				Adapter soft state
2264  *
2265  * Return 0 on success.
2266  */
2267 void
2268 megasas_setup_jbod_map(struct mrsas_softc *sc)
2269 {
2270 	int i;
2271 	uint32_t pd_seq_map_sz;
2272 
2273 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2274 	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2275 
2276 	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2277 		sc->use_seqnum_jbod_fp = 0;
2278 		return;
2279 	}
2280 	if (sc->jbodmap_mem[0])
2281 		goto skip_alloc;
2282 
2283 	for (i = 0; i < 2; i++) {
2284 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2285 		    4, 0,
2286 		    BUS_SPACE_MAXADDR_32BIT,
2287 		    BUS_SPACE_MAXADDR,
2288 		    NULL, NULL,
2289 		    pd_seq_map_sz,
2290 		    1,
2291 		    pd_seq_map_sz,
2292 		    BUS_DMA_ALLOCNOW,
2293 		    NULL, NULL,
2294 		    &sc->jbodmap_tag[i])) {
2295 			device_printf(sc->mrsas_dev,
2296 			    "Cannot allocate jbod map tag.\n");
2297 			return;
2298 		}
2299 		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2300 		    (void **)&sc->jbodmap_mem[i],
2301 		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2302 			device_printf(sc->mrsas_dev,
2303 			    "Cannot allocate jbod map memory.\n");
2304 			return;
2305 		}
2306 		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2307 
2308 		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2309 		    sc->jbodmap_mem[i], pd_seq_map_sz,
2310 		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2311 		    BUS_DMA_NOWAIT)) {
2312 			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2313 			return;
2314 		}
2315 		if (!sc->jbodmap_mem[i]) {
2316 			device_printf(sc->mrsas_dev,
2317 			    "Cannot allocate memory for jbod map.\n");
2318 			sc->use_seqnum_jbod_fp = 0;
2319 			return;
2320 		}
2321 	}
2322 
2323 skip_alloc:
2324 	if (!megasas_sync_pd_seq_num(sc, false) &&
2325 	    !megasas_sync_pd_seq_num(sc, true))
2326 		sc->use_seqnum_jbod_fp = 1;
2327 	else
2328 		sc->use_seqnum_jbod_fp = 0;
2329 
2330 	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2331 }
2332 
2333 /*
2334  * mrsas_init_fw:	Initialize Firmware
2335  * input:			Adapter soft state
2336  *
2337  * Calls transition_to_ready() to make sure Firmware is in operational state and
2338  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
2339  * issues internal commands to get the controller info after the IOC_INIT
2340  * command response is received by Firmware.  Note:  code relating to
2341  * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2342  * is left here as placeholder.
2343  */
2344 static int
2345 mrsas_init_fw(struct mrsas_softc *sc)
2346 {
2347 
2348 	int ret, loop, ocr = 0;
2349 	u_int32_t max_sectors_1;
2350 	u_int32_t max_sectors_2;
2351 	u_int32_t tmp_sectors;
2352 	u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
2353 	int msix_enable = 0;
2354 	int fw_msix_count = 0;
2355 	int i, j;
2356 
2357 	/* Make sure Firmware is ready */
2358 	ret = mrsas_transition_to_ready(sc, ocr);
2359 	if (ret != SUCCESS) {
2360 		return (ret);
2361 	}
2362 	if (sc->is_ventura || sc->is_aero) {
2363 		scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
2364 #if VD_EXT_DEBUG
2365 		device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
2366 #endif
2367 		sc->maxRaidMapSize = ((scratch_pad_3 >>
2368 		    MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
2369 		    MR_MAX_RAID_MAP_SIZE_MASK);
2370 	}
2371 	/* MSI-x index 0- reply post host index register */
2372 	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2373 	/* Check if MSI-X is supported while in ready state */
2374 	msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2375 
2376 	if (msix_enable) {
2377 		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2378 		    outbound_scratch_pad_2));
2379 
2380 		/* Check max MSI-X vectors */
2381 		if (sc->device_id == MRSAS_TBOLT) {
2382 			sc->msix_vectors = (scratch_pad_2
2383 			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2384 			fw_msix_count = sc->msix_vectors;
2385 		} else {
2386 			/* Invader/Fury supports 96 MSI-X vectors */
2387 			sc->msix_vectors = ((scratch_pad_2
2388 			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2389 			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2390 			fw_msix_count = sc->msix_vectors;
2391 
2392 			if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
2393 				((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
2394 				sc->msix_combined = true;
2395 			/*
2396 			 * Save 1-15 reply post index
2397 			 * address to local memory Index 0
2398 			 * is already saved from reg offset
2399 			 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
2400 			 */
2401 			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2402 			    loop++) {
2403 				sc->msix_reg_offset[loop] =
2404 				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2405 				    (loop * 0x10);
2406 			}
2407 		}
2408 
2409 		/* Don't bother allocating more MSI-X vectors than cpus */
2410 		sc->msix_vectors = min(sc->msix_vectors,
2411 		    mp_ncpus);
2412 
2413 		/* Allocate MSI-x vectors */
2414 		if (mrsas_allocate_msix(sc) == SUCCESS)
2415 			sc->msix_enable = 1;
2416 		else
2417 			sc->msix_enable = 0;
2418 
2419 		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2420 		    "Online CPU %d Current MSIX <%d>\n",
2421 		    fw_msix_count, mp_ncpus, sc->msix_vectors);
2422 	}
2423 	/*
2424      * MSI-X host index 0 is common for all adapter.
2425      * It is used for all MPT based Adapters.
2426 	 */
2427 	if (sc->msix_combined) {
2428 		sc->msix_reg_offset[0] =
2429 		    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
2430 	}
2431 	if (mrsas_init_adapter(sc) != SUCCESS) {
2432 		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2433 		return (1);
2434 	}
2435 
2436 	if (sc->is_ventura || sc->is_aero) {
2437 		scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2438 		    outbound_scratch_pad_4));
2439 		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
2440 			sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);
2441 
2442 		device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
2443 	}
2444 
2445 	/* Allocate internal commands for pass-thru */
2446 	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2447 		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2448 		return (1);
2449 	}
2450 	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2451 	if (!sc->ctrl_info) {
2452 		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2453 		return (1);
2454 	}
2455 	/*
2456 	 * Get the controller info from FW, so that the MAX VD support
2457 	 * availability can be decided.
2458 	 */
2459 	if (mrsas_get_ctrl_info(sc)) {
2460 		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2461 		return (1);
2462 	}
2463 	sc->secure_jbod_support =
2464 	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2465 
2466 	if (sc->secure_jbod_support)
2467 		device_printf(sc->mrsas_dev, "FW supports SED \n");
2468 
2469 	if (sc->use_seqnum_jbod_fp)
2470 		device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2471 
2472 	if (sc->support_morethan256jbod)
2473 		device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n");
2474 
2475 	if (mrsas_setup_raidmap(sc) != SUCCESS) {
2476 		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2477 		    "There seems to be some problem in the controller\n"
2478 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
2479 	}
2480 	megasas_setup_jbod_map(sc);
2481 
2482 	memset(sc->target_list, 0,
2483 		MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
2484 	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
2485 		sc->target_list[i].target_id = 0xffff;
2486 
2487 	/* For pass-thru, get PD/LD list and controller info */
2488 	memset(sc->pd_list, 0,
2489 	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2490 	if (mrsas_get_pd_list(sc) != SUCCESS) {
2491 		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2492 		return (1);
2493 	}
2494 	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2495 	if (mrsas_get_ld_list(sc) != SUCCESS) {
2496 		device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2497 		return (1);
2498 	}
2499 
2500 	if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
2501 		sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
2502 						MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
2503 		if (!sc->streamDetectByLD) {
2504 			device_printf(sc->mrsas_dev,
2505 				"unable to allocate stream detection for pool of LDs\n");
2506 			return (1);
2507 		}
2508 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
2509 			sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
2510 			if (!sc->streamDetectByLD[i]) {
2511 				device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
2512 				for (j = 0; j < i; ++j)
2513 					free(sc->streamDetectByLD[j], M_MRSAS);
2514 				free(sc->streamDetectByLD, M_MRSAS);
2515 				sc->streamDetectByLD = NULL;
2516 				return (1);
2517 			}
2518 			memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
2519 			sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
2520 		}
2521 	}
2522 
2523 	/*
2524 	 * Compute the max allowed sectors per IO: The controller info has
2525 	 * two limits on max sectors. Driver should use the minimum of these
2526 	 * two.
2527 	 *
2528 	 * 1 << stripe_sz_ops.min = max sectors per strip
2529 	 *
2530 	 * Note that older firmwares ( < FW ver 30) didn't report information to
2531 	 * calculate max_sectors_1. So the number ended up as zero always.
2532 	 */
2533 	tmp_sectors = 0;
2534 	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2535 	    sc->ctrl_info->max_strips_per_io;
2536 	max_sectors_2 = sc->ctrl_info->max_request_size;
2537 	tmp_sectors = min(max_sectors_1, max_sectors_2);
2538 	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2539 
2540 	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2541 		sc->max_sectors_per_req = tmp_sectors;
2542 
2543 	sc->disableOnlineCtrlReset =
2544 	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2545 	sc->UnevenSpanSupport =
2546 	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2547 	if (sc->UnevenSpanSupport) {
2548 		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2549 		    sc->UnevenSpanSupport);
2550 
2551 		if (MR_ValidateMapInfo(sc))
2552 			sc->fast_path_io = 1;
2553 		else
2554 			sc->fast_path_io = 0;
2555 	}
2556 
2557 	device_printf(sc->mrsas_dev, "max_fw_cmds: %u  max_scsi_cmds: %u\n",
2558 		sc->max_fw_cmds, sc->max_scsi_cmds);
2559 	return (0);
2560 }
2561 
2562 /*
2563  * mrsas_init_adapter:	Initializes the adapter/controller
2564  * input:				Adapter soft state
2565  *
2566  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2567  * ROC/controller.  The FW register is read to determined the number of
2568  * commands that is supported.  All memory allocations for IO is based on
2569  * max_cmd.  Appropriate calculations are performed in this function.
2570  */
2571 int
2572 mrsas_init_adapter(struct mrsas_softc *sc)
2573 {
2574 	uint32_t status;
2575 	u_int32_t scratch_pad_2;
2576 	int ret;
2577 	int i = 0;
2578 
2579 	/* Read FW status register */
2580 	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2581 
2582 	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2583 
2584 	/* Decrement the max supported by 1, to correlate with FW */
2585 	sc->max_fw_cmds = sc->max_fw_cmds - 1;
2586 	sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;
2587 
2588 	/* Determine allocation size of command frames */
2589 	sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
2590 	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
2591 	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2592 	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
2593 	    (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
2594 	scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2595 	    outbound_scratch_pad_2));
2596 	/*
2597 	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2598 	 * Firmware support extended IO chain frame which is 4 time more
2599 	 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2600 	 * 1K 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
2601 	 */
2602 	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2603 		sc->max_chain_frame_sz =
2604 		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2605 		    * MEGASAS_1MB_IO;
2606 	else
2607 		sc->max_chain_frame_sz =
2608 		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2609 		    * MEGASAS_256K_IO;
2610 
2611 	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
2612 	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2613 	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2614 
2615 	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
2616 	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2617 
2618 	mrsas_dprint(sc, MRSAS_INFO,
2619 	    "max sge: 0x%x, max chain frame size: 0x%x, "
2620 	    "max fw cmd: 0x%x\n", sc->max_num_sge,
2621 	    sc->max_chain_frame_sz, sc->max_fw_cmds);
2622 
2623 	/* Used for pass thru MFI frame (DCMD) */
2624 	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2625 
2626 	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2627 	    sizeof(MPI2_SGE_IO_UNION)) / 16;
2628 
2629 	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2630 
2631 	for (i = 0; i < count; i++)
2632 		sc->last_reply_idx[i] = 0;
2633 
2634 	ret = mrsas_alloc_mem(sc);
2635 	if (ret != SUCCESS)
2636 		return (ret);
2637 
2638 	ret = mrsas_alloc_mpt_cmds(sc);
2639 	if (ret != SUCCESS)
2640 		return (ret);
2641 
2642 	ret = mrsas_ioc_init(sc);
2643 	if (ret != SUCCESS)
2644 		return (ret);
2645 
2646 	return (0);
2647 }
2648 
2649 /*
2650  * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2651  * input:				Adapter soft state
2652  *
2653  * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2654  */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/*
	 * Allocate IOC INIT command.  Layout (see mrsas_ioc_init()): the
	 * first 1024 bytes hold the MFI init frame, and the MPI2 IOC INIT
	 * request message follows at offset 1024.
	 */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}
2690 
2691 /*
2692  * mrsas_free_ioc_cmd:	Allocates memory for IOC Init command
2693  * input:				Adapter soft state
2694  *
2695  * Deallocates memory of the IOC Init cmd.
2696  */
void
mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	/*
	 * Tear down in reverse order of creation: unload the DMA map
	 * first, then free the backing memory, then destroy the tag.
	 * Each step is guarded so partial allocations unwind cleanly.
	 */
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}
2707 
2708 /*
2709  * mrsas_ioc_init:	Sends IOC Init command to FW
2710  * input:			Adapter soft state
2711  *
2712  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2713  */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	/*
	 * Unless sync-cache is administratively blocked, learn from scratch
	 * pad 2 whether the firmware can handle SYNC_CACHE itself.
	 */
	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	/* The MPI2 IOC INIT payload lives 1KB into the IOC-init DMA buffer. */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
	IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;

	/*
	 * The MFI init frame sits at the start of the same buffer and wraps
	 * the MPI2 message.  cmd_status is preset to 0xFF so the poll loop
	 * below can detect the firmware overwriting it.
	 */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Advertise the driver version string to the firmware, if allocated. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	/* Capability bits negotiated with the firmware. */
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
	/* Point the MFI frame at the MPI2 payload placed 1KB in (see above). */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	/* 0 == success, 0xFF == timed out, anything else is a FW error code. */
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* Aero controllers report atomic-descriptor support in scratch pad 2. */
	if (sc->is_aero) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->atomic_desc_support = (scratch_pad_2 &
			MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
		device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
			sc->atomic_desc_support ? "Yes" : "No");
	}

	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2819 
2820 /*
2821  * mrsas_alloc_mpt_cmds:	Allocates the command packets
2822  * input:					Adapter instance soft state
2823  *
2824  * This function allocates the internal commands for IOs. Each command that is
2825  * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2826  * array is allocated with mrsas_mpt_cmd context.  The free commands are
2827  * maintained in a linked list (cmd pool). SMID value range is from 1 to
2828  * max_fw_cmds.
2829  */
2830 int
2831 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2832 {
2833 	int i, j;
2834 	u_int32_t max_fw_cmds, count;
2835 	struct mrsas_mpt_cmd *cmd;
2836 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2837 	u_int32_t offset, chain_offset, sense_offset;
2838 	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2839 	u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2840 
2841 	max_fw_cmds = sc->max_fw_cmds;
2842 
2843 	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2844 	if (!sc->req_desc) {
2845 		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2846 		return (ENOMEM);
2847 	}
2848 	memset(sc->req_desc, 0, sc->request_alloc_sz);
2849 
2850 	/*
2851 	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2852 	 * Allocate the dynamic array first and then allocate individual
2853 	 * commands.
2854 	 */
2855 	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
2856 	    M_MRSAS, M_NOWAIT);
2857 	if (!sc->mpt_cmd_list) {
2858 		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2859 		return (ENOMEM);
2860 	}
2861 	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
2862 	for (i = 0; i < max_fw_cmds; i++) {
2863 		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2864 		    M_MRSAS, M_NOWAIT);
2865 		if (!sc->mpt_cmd_list[i]) {
2866 			for (j = 0; j < i; j++)
2867 				free(sc->mpt_cmd_list[j], M_MRSAS);
2868 			free(sc->mpt_cmd_list, M_MRSAS);
2869 			sc->mpt_cmd_list = NULL;
2870 			return (ENOMEM);
2871 		}
2872 	}
2873 
2874 	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2875 	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2876 	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2877 	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2878 	sense_base = (u_int8_t *)sc->sense_mem;
2879 	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2880 	for (i = 0; i < max_fw_cmds; i++) {
2881 		cmd = sc->mpt_cmd_list[i];
2882 		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2883 		chain_offset = sc->max_chain_frame_sz * i;
2884 		sense_offset = MRSAS_SENSE_LEN * i;
2885 		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2886 		cmd->index = i + 1;
2887 		cmd->ccb_ptr = NULL;
2888 		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2889 		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
2890 		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2891 		cmd->sc = sc;
2892 		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2893 		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2894 		cmd->io_request_phys_addr = io_req_base_phys + offset;
2895 		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2896 		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2897 		cmd->sense = sense_base + sense_offset;
2898 		cmd->sense_phys_addr = sense_base_phys + sense_offset;
2899 		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2900 			return (FAIL);
2901 		}
2902 		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2903 	}
2904 
2905 	/* Initialize reply descriptor array to 0xFFFFFFFF */
2906 	reply_desc = sc->reply_desc_mem;
2907 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2908 	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2909 		reply_desc->Words = MRSAS_ULONG_MAX;
2910 	}
2911 	return (0);
2912 }
2913 
2914 /*
2915  * mrsas_write_64bit_req_dsc:	Writes 64 bit request descriptor to FW
2916  * input:			Adapter softstate
2917  * 				request descriptor address low
2918  * 				request descriptor address high
2919  */
void
mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * Post the two 32-bit halves of the request descriptor as one
	 * unit: both queue-port writes are serialized under pci_lock,
	 * with the low half written before the high half.
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}
2931 
2932 /*
2933  * mrsas_fire_cmd:	Sends command to FW
2934  * input:		Adapter softstate
2935  * 			request descriptor address low
2936  * 			request descriptor address high
2937  *
2938  * This functions fires the command to Firmware by writing to the
2939  * inbound_low_queue_port and inbound_high_queue_port.
2940  */
2941 void
2942 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2943     u_int32_t req_desc_hi)
2944 {
2945 	if (sc->atomic_desc_support)
2946 		mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
2947 		    req_desc_lo);
2948 	else
2949 		mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
2950 }
2951 
2952 /*
2953  * mrsas_transition_to_ready:  Move FW to Ready state input:
2954  * Adapter instance soft state
2955  *
2956  * During the initialization, FW passes can potentially be in any one of several
2957  * possible states. If the FW in operational, waiting-for-handshake states,
2958  * driver must take steps to bring it to ready state. Otherwise, it has to
2959  * wait for the ready state.
2960  */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* Current FW state is in the low bits of the outbound scratch pad. */
	val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Snapshot the full register so a change can be detected below. */
		abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		/* Nudge the firmware as appropriate for its current state. */
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			if (ocr) {
				/* During OCR a FAULT is tolerated; keep waiting. */
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Wait for the doorbell busy bit (bit 0) to clear. */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			/* Any change in the full register counts as progress. */
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
3066 
3067 /*
3068  * mrsas_get_mfi_cmd:	Get a cmd from free command pool
3069  * input:				Adapter soft state
3070  *
3071  * This function removes an MFI command from the command list.
3072  */
3073 struct mrsas_mfi_cmd *
3074 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
3075 {
3076 	struct mrsas_mfi_cmd *cmd = NULL;
3077 
3078 	mtx_lock(&sc->mfi_cmd_pool_lock);
3079 	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
3080 		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
3081 		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
3082 	}
3083 	mtx_unlock(&sc->mfi_cmd_pool_lock);
3084 
3085 	return cmd;
3086 }
3087 
3088 /*
3089  * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
3090  * input:				Adapter Context.
3091  *
3092  * This function will check FW status register and flag do_timeout_reset flag.
3093  * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
3094  * trigger reset.
3095  */
3096 static void
3097 mrsas_ocr_thread(void *arg)
3098 {
3099 	struct mrsas_softc *sc;
3100 	u_int32_t fw_status, fw_state;
3101 	u_int8_t tm_target_reset_failed = 0;
3102 
3103 	sc = (struct mrsas_softc *)arg;
3104 
3105 	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
3106 
3107 	sc->ocr_thread_active = 1;
3108 	mtx_lock(&sc->sim_lock);
3109 	for (;;) {
3110 		/* Sleep for 1 second and check the queue status */
3111 		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
3112 		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
3113 		if (sc->remove_in_progress ||
3114 		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3115 			mrsas_dprint(sc, MRSAS_OCR,
3116 			    "Exit due to %s from %s\n",
3117 			    sc->remove_in_progress ? "Shutdown" :
3118 			    "Hardware critical error", __func__);
3119 			break;
3120 		}
3121 		fw_status = mrsas_read_reg_with_retries(sc,
3122 		    offsetof(mrsas_reg_set, outbound_scratch_pad));
3123 		fw_state = fw_status & MFI_STATE_MASK;
3124 		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
3125 			mrsas_atomic_read(&sc->target_reset_outstanding)) {
3126 			/* First, freeze further IOs to come to the SIM */
3127 			mrsas_xpt_freeze(sc);
3128 
3129 			/* If this is an IO timeout then go for target reset */
3130 			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
3131 				device_printf(sc->mrsas_dev, "Initiating Target RESET "
3132 				    "because of SCSI IO timeout!\n");
3133 
3134 				/* Let the remaining IOs to complete */
3135 				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
3136 				      "mrsas_reset_targets", 5 * hz);
3137 
3138 				/* Try to reset the target device */
3139 				if (mrsas_reset_targets(sc) == FAIL)
3140 					tm_target_reset_failed = 1;
3141 			}
3142 
3143 			/* If this is a DCMD timeout or FW fault,
3144 			 * then go for controller reset
3145 			 */
3146 			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
3147 			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
3148 				if (tm_target_reset_failed)
3149 					device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
3150 					    "TM FAILURE!\n");
3151 				else
3152 					device_printf(sc->mrsas_dev, "Initiaiting OCR "
3153 						"because of %s!\n", sc->do_timedout_reset ?
3154 						"DCMD IO Timeout" : "FW fault");
3155 
3156 				mtx_lock_spin(&sc->ioctl_lock);
3157 				sc->reset_in_progress = 1;
3158 				mtx_unlock_spin(&sc->ioctl_lock);
3159 				sc->reset_count++;
3160 
3161 				/*
3162 				 * Wait for the AEN task to be completed if it is running.
3163 				 */
3164 				mtx_unlock(&sc->sim_lock);
3165 				taskqueue_drain(sc->ev_tq, &sc->ev_task);
3166 				mtx_lock(&sc->sim_lock);
3167 
3168 				taskqueue_block(sc->ev_tq);
3169 				/* Try to reset the controller */
3170 				mrsas_reset_ctrl(sc, sc->do_timedout_reset);
3171 
3172 				sc->do_timedout_reset = 0;
3173 				sc->reset_in_progress = 0;
3174 				tm_target_reset_failed = 0;
3175 				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
3176 				memset(sc->target_reset_pool, 0,
3177 				    sizeof(sc->target_reset_pool));
3178 				taskqueue_unblock(sc->ev_tq);
3179 			}
3180 
3181 			/* Now allow IOs to come to the SIM */
3182 			 mrsas_xpt_release(sc);
3183 		}
3184 	}
3185 	mtx_unlock(&sc->sim_lock);
3186 	sc->ocr_thread_active = 0;
3187 	mrsas_kproc_exit(0);
3188 }
3189 
3190 /*
3191  * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
3192  * input:					Adapter Context.
3193  *
3194  * This function will clear reply descriptor so that post OCR driver and FW will
3195  * lost old history.
3196  */
3197 void
3198 mrsas_reset_reply_desc(struct mrsas_softc *sc)
3199 {
3200 	int i, count;
3201 	pMpi2ReplyDescriptorsUnion_t reply_desc;
3202 
3203 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3204 	for (i = 0; i < count; i++)
3205 		sc->last_reply_idx[i] = 0;
3206 
3207 	reply_desc = sc->reply_desc_mem;
3208 	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
3209 		reply_desc->Words = MRSAS_ULONG_MAX;
3210 	}
3211 }
3212 
3213 /*
3214  * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
3215  * input:				Adapter Context.
3216  *
3217  * This function will run from thread context so that it can sleep. 1. Do not
3218  * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
3219  * to complete for 180 seconds. 3. If #2 does not find any outstanding
3220  * command Controller is in working state, so skip OCR. Otherwise, do
3221  * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
3222  * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
3223  * OCR, Re-fire Management command and move Controller to Operation state.
3224  */
3225 int
3226 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
3227 {
3228 	int retval = SUCCESS, i, j, retry = 0;
3229 	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
3230 	union ccb *ccb;
3231 	struct mrsas_mfi_cmd *mfi_cmd;
3232 	struct mrsas_mpt_cmd *mpt_cmd;
3233 	union mrsas_evt_class_locale class_locale;
3234 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3235 
3236 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3237 		device_printf(sc->mrsas_dev,
3238 		    "mrsas: Hardware critical error, returning FAIL.\n");
3239 		return FAIL;
3240 	}
3241 	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3242 	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
3243 	mrsas_disable_intr(sc);
3244 	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
3245 	    sc->mrsas_fw_fault_check_delay * hz);
3246 
3247 	/* First try waiting for commands to complete */
3248 	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
3249 		mrsas_dprint(sc, MRSAS_OCR,
3250 		    "resetting adapter from %s.\n",
3251 		    __func__);
3252 		/* Now return commands back to the CAM layer */
3253 		mtx_unlock(&sc->sim_lock);
3254 		for (i = 0; i < sc->max_fw_cmds; i++) {
3255 			mpt_cmd = sc->mpt_cmd_list[i];
3256 
3257 			if (mpt_cmd->peer_cmd) {
3258 				mrsas_dprint(sc, MRSAS_OCR,
3259 				    "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
3260 				    i, mpt_cmd, mpt_cmd->peer_cmd);
3261 			}
3262 
3263 			if (mpt_cmd->ccb_ptr) {
3264 				if (mpt_cmd->callout_owner) {
3265 					ccb = (union ccb *)(mpt_cmd->ccb_ptr);
3266 					ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3267 					mrsas_cmd_done(sc, mpt_cmd);
3268 				} else {
3269 					mpt_cmd->ccb_ptr = NULL;
3270 					mrsas_release_mpt_cmd(mpt_cmd);
3271 				}
3272 			}
3273 		}
3274 
3275 		mrsas_atomic_set(&sc->fw_outstanding, 0);
3276 
3277 		mtx_lock(&sc->sim_lock);
3278 
3279 		status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3280 		    outbound_scratch_pad));
3281 		abs_state = status_reg & MFI_STATE_MASK;
3282 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
3283 		if (sc->disableOnlineCtrlReset ||
3284 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3285 			/* Reset not supported, kill adapter */
3286 			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
3287 			mrsas_kill_hba(sc);
3288 			retval = FAIL;
3289 			goto out;
3290 		}
3291 		/* Now try to reset the chip */
3292 		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
3293 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3294 			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
3295 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3296 			    MPI2_WRSEQ_1ST_KEY_VALUE);
3297 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3298 			    MPI2_WRSEQ_2ND_KEY_VALUE);
3299 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3300 			    MPI2_WRSEQ_3RD_KEY_VALUE);
3301 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3302 			    MPI2_WRSEQ_4TH_KEY_VALUE);
3303 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3304 			    MPI2_WRSEQ_5TH_KEY_VALUE);
3305 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3306 			    MPI2_WRSEQ_6TH_KEY_VALUE);
3307 
3308 			/* Check that the diag write enable (DRWE) bit is on */
3309 			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3310 			    fusion_host_diag));
3311 			retry = 0;
3312 			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3313 				DELAY(100 * 1000);
3314 				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3315 				    fusion_host_diag));
3316 				if (retry++ == 100) {
3317 					mrsas_dprint(sc, MRSAS_OCR,
3318 					    "Host diag unlock failed!\n");
3319 					break;
3320 				}
3321 			}
3322 			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3323 				continue;
3324 
3325 			/* Send chip reset command */
3326 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3327 			    host_diag | HOST_DIAG_RESET_ADAPTER);
3328 			DELAY(3000 * 1000);
3329 
3330 			/* Make sure reset adapter bit is cleared */
3331 			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3332 			    fusion_host_diag));
3333 			retry = 0;
3334 			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3335 				DELAY(100 * 1000);
3336 				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3337 				    fusion_host_diag));
3338 				if (retry++ == 1000) {
3339 					mrsas_dprint(sc, MRSAS_OCR,
3340 					    "Diag reset adapter never cleared!\n");
3341 					break;
3342 				}
3343 			}
3344 			if (host_diag & HOST_DIAG_RESET_ADAPTER)
3345 				continue;
3346 
3347 			abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3348 			    outbound_scratch_pad)) & MFI_STATE_MASK;
3349 			retry = 0;
3350 
3351 			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3352 				DELAY(100 * 1000);
3353 				abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3354 				    outbound_scratch_pad)) & MFI_STATE_MASK;
3355 			}
3356 			if (abs_state <= MFI_STATE_FW_INIT) {
3357 				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3358 				    " state = 0x%x\n", abs_state);
3359 				continue;
3360 			}
3361 			/* Wait for FW to become ready */
3362 			if (mrsas_transition_to_ready(sc, 1)) {
3363 				mrsas_dprint(sc, MRSAS_OCR,
3364 				    "mrsas: Failed to transition controller to ready.\n");
3365 				continue;
3366 			}
3367 			mrsas_reset_reply_desc(sc);
3368 			if (mrsas_ioc_init(sc)) {
3369 				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
3370 				continue;
3371 			}
3372 			for (j = 0; j < sc->max_fw_cmds; j++) {
3373 				mpt_cmd = sc->mpt_cmd_list[j];
3374 				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3375 					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3376 					/* If not an IOCTL then release the command else re-fire */
3377 					if (!mfi_cmd->sync_cmd) {
3378 						mrsas_release_mfi_cmd(mfi_cmd);
3379 					} else {
3380 						req_desc = mrsas_get_request_desc(sc,
3381 						    mfi_cmd->cmd_id.context.smid - 1);
3382 						mrsas_dprint(sc, MRSAS_OCR,
3383 						    "Re-fire command DCMD opcode 0x%x index %d\n ",
3384 						    mfi_cmd->frame->dcmd.opcode, j);
3385 						if (!req_desc)
3386 							device_printf(sc->mrsas_dev,
3387 							    "Cannot build MPT cmd.\n");
3388 						else
3389 							mrsas_fire_cmd(sc, req_desc->addr.u.low,
3390 							    req_desc->addr.u.high);
3391 					}
3392 				}
3393 			}
3394 
3395 			/* Reset load balance info */
3396 			memset(sc->load_balance_info, 0,
3397 			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3398 
3399 			if (mrsas_get_ctrl_info(sc)) {
3400 				mrsas_kill_hba(sc);
3401 				retval = FAIL;
3402 				goto out;
3403 			}
3404 			if (!mrsas_get_map_info(sc))
3405 				mrsas_sync_map_info(sc);
3406 
3407 			megasas_setup_jbod_map(sc);
3408 
3409 			if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
3410 				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
3411 					memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
3412 					sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
3413 				}
3414 			}
3415 
3416 			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3417 			mrsas_enable_intr(sc);
3418 			sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3419 
3420 			/* Register AEN with FW for last sequence number */
3421 			class_locale.members.reserved = 0;
3422 			class_locale.members.locale = MR_EVT_LOCALE_ALL;
3423 			class_locale.members.class = MR_EVT_CLASS_DEBUG;
3424 
3425 			mtx_unlock(&sc->sim_lock);
3426 			if (mrsas_register_aen(sc, sc->last_seq_num,
3427 			    class_locale.word)) {
3428 				device_printf(sc->mrsas_dev,
3429 				    "ERROR: AEN registration FAILED from OCR !!! "
3430 				    "Further events from the controller cannot be notified."
3431 				    "Either there is some problem in the controller"
3432 				    "or the controller does not support AEN.\n"
3433 				    "Please contact to the SUPPORT TEAM if the problem persists\n");
3434 			}
3435 			mtx_lock(&sc->sim_lock);
3436 
3437 			/* Adapter reset completed successfully */
3438 			device_printf(sc->mrsas_dev, "Reset successful\n");
3439 			retval = SUCCESS;
3440 			goto out;
3441 		}
3442 		/* Reset failed, kill the adapter */
3443 		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3444 		mrsas_kill_hba(sc);
3445 		retval = FAIL;
3446 	} else {
3447 		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3448 		mrsas_enable_intr(sc);
3449 		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3450 	}
3451 out:
3452 	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3453 	mrsas_dprint(sc, MRSAS_OCR,
3454 	    "Reset Exit with %d.\n", retval);
3455 	return retval;
3456 }
3457 
3458 /*
3459  * mrsas_kill_hba:	Kill HBA when OCR is not supported
3460  * input:			Adapter Context.
3461  *
3462  * This function will kill HBA when OCR is not supported.
3463  */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter dead before telling the firmware to stop. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* One-second pause before issuing the stop command. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail any IOCTLs still outstanding against the dead controller. */
	mrsas_complete_outstanding_ioctls(sc);
}
3476 
3477 /**
3478  * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
3479  * input:			Controller softc
3480  *
3481  * Returns void
3482  */
3483 void
3484 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3485 {
3486 	int i;
3487 	struct mrsas_mpt_cmd *cmd_mpt;
3488 	struct mrsas_mfi_cmd *cmd_mfi;
3489 	u_int32_t count, MSIxIndex;
3490 
3491 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3492 	for (i = 0; i < sc->max_fw_cmds; i++) {
3493 		cmd_mpt = sc->mpt_cmd_list[i];
3494 
3495 		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3496 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3497 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3498 				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3499 					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3500 					    cmd_mpt->io_request->RaidContext.raid_context.status);
3501 			}
3502 		}
3503 	}
3504 }
3505 
3506 /*
3507  * mrsas_wait_for_outstanding:	Wait for outstanding commands
3508  * input:						Adapter Context.
3509  *
3510  * This function will wait for 180 seconds for outstanding commands to be
3511  * completed.
3512  */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;

	/* Poll once per second, up to MRSAS_RESET_WAIT_TIME seconds. */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			/* Drain completion queues without holding the SIM lock. */
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
			retval = 1;
			goto out;
		}
		/* A DCMD timeout always forces a reset; no point in waiting. */
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		/* Periodically report progress and reap finished commands. */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
		}
		DELAY(1000 * 1000);
	}

	/* Still busy after the full wait: caller must reset the adapter. */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}
3571 
3572 /*
3573  * mrsas_release_mfi_cmd:	Return a cmd to free command pool
3574  * input:					Command packet for return to free cmd pool
3575  *
3576  * This function returns the MFI & MPT command to the command list.
3577  */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
{
	struct mrsas_softc *sc = cmd_mfi->sc;
	struct mrsas_mpt_cmd *cmd_mpt;

	/* Lock order here: mfi_cmd_pool_lock outside, mpt_cmd_pool_lock inside. */
	mtx_lock(&sc->mfi_cmd_pool_lock);
	/*
	 * Release the mpt command (if at all it is allocated
	 * associated with the mfi command
	 */
	if (cmd_mfi->cmd_id.context.smid) {
		mtx_lock(&sc->mpt_cmd_pool_lock);
		/* Get the mpt cmd from mfi cmd frame's smid value */
		cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
		cmd_mpt->flags = 0;
		cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
		mtx_unlock(&sc->mpt_cmd_pool_lock);
	}
	/* Release the mfi command */
	cmd_mfi->ccb_ptr = NULL;
	cmd_mfi->cmd_id.frame_count = 0;
	TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}
3606 
3607 /*
3608  * mrsas_get_controller_info:	Returns FW's controller structure
3609  * input:						Adapter soft state
3610  * 								Controller information structure
3611  *
3612  * Issues an internal command (DCMD) to get the FW's controller structure. This
3613  * information is mainly used to find out the maximum IO transfer per command
3614  * supported by the FW.
3615  */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer that will receive the controller info structure. */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the MR_DCMD_CTRL_GET_INFO read DCMD with a single SGE. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

	/* Polled issue when interrupts are masked; blocked issue otherwise. */
	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	else
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));

	do_ocr = 0;
	mrsas_update_ext_vd_details(sc);

	/* Cache firmware capability bits used elsewhere in the driver. */
	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
	sc->support_morethan256jbod =
		sc->ctrl_info->adapterOperations4.supportPdMapTargetId;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;

dcmd_timeout:
	mrsas_free_ctlr_info_cmd(sc);

	/* On timeout, request a DCMD-timeout OCR instead of failing hard. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	/*
	 * NOTE(review): when interrupts are masked (polled path) the cmd
	 * is intentionally not released here — presumably to avoid reusing
	 * a frame the firmware may still own after a timeout; confirm.
	 */
	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
3682 
3683 /*
3684  * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3685  * input:
3686  *	sc - Controller's softc
3687 */
3688 static void
3689 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3690 {
3691 	u_int32_t ventura_map_sz = 0;
3692 	sc->max256vdSupport =
3693 		sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3694 
3695 	/* Below is additional check to address future FW enhancement */
3696 	if (sc->ctrl_info->max_lds > 64)
3697 		sc->max256vdSupport = 1;
3698 
3699 	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3700 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3701 	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3702 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3703 	if (sc->max256vdSupport) {
3704 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3705 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3706 	} else {
3707 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3708 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3709 	}
3710 
3711 	if (sc->maxRaidMapSize) {
3712 		ventura_map_sz = sc->maxRaidMapSize *
3713 		    MR_MIN_MAP_SIZE;
3714 		sc->current_map_sz = ventura_map_sz;
3715 		sc->max_map_sz = ventura_map_sz;
3716 	} else {
3717 		sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3718 		    (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
3719 		sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3720 		sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3721 		if (sc->max256vdSupport)
3722 			sc->current_map_sz = sc->new_map_sz;
3723 		else
3724 			sc->current_map_sz = sc->old_map_sz;
3725 	}
3726 
3727 	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
3728 #if VD_EXT_DEBUG
3729 	device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
3730 	    sc->maxRaidMapSize);
3731 	device_printf(sc->mrsas_dev,
3732 	    "new_map_sz = 0x%x, old_map_sz = 0x%x, "
3733 	    "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
3734 	    "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
3735 	    sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
3736 	    sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
3737 #endif
3738 }
3739 
3740 /*
3741  * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3742  * input:						Adapter soft state
3743  *
3744  * Allocates DMAable memory for the controller info internal command.
3745  */
3746 int
3747 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3748 {
3749 	int ctlr_info_size;
3750 
3751 	/* Allocate get controller info command */
3752 	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3753 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3754 	    1, 0,
3755 	    BUS_SPACE_MAXADDR_32BIT,
3756 	    BUS_SPACE_MAXADDR,
3757 	    NULL, NULL,
3758 	    ctlr_info_size,
3759 	    1,
3760 	    ctlr_info_size,
3761 	    BUS_DMA_ALLOCNOW,
3762 	    NULL, NULL,
3763 	    &sc->ctlr_info_tag)) {
3764 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3765 		return (ENOMEM);
3766 	}
3767 	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3768 	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3769 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3770 		return (ENOMEM);
3771 	}
3772 	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3773 	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3774 	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3775 		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3776 		return (ENOMEM);
3777 	}
3778 	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3779 	return (0);
3780 }
3781 
3782 /*
3783  * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3784  * input:						Adapter soft state
3785  *
3786  * Deallocates memory of the get controller info cmd.
3787  */
3788 void
3789 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3790 {
3791 	if (sc->ctlr_info_phys_addr)
3792 		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3793 	if (sc->ctlr_info_mem != NULL)
3794 		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3795 	if (sc->ctlr_info_tag != NULL)
3796 		bus_dma_tag_destroy(sc->ctlr_info_tag);
3797 }
3798 
3799 /*
3800  * mrsas_issue_polled:	Issues a polling command
3801  * inputs:				Adapter soft state
3802  * 						Command packet to be issued
3803  *
3804  * This function is for posting of internal commands to Firmware.  MFI requires
3805  * the cmd_status to be set to 0xFF before posting.  The maximun wait time of
3806  * the poll response timer is 180 seconds.
3807  */
3808 int
3809 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3810 {
3811 	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3812 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3813 	int i, retcode = SUCCESS;
3814 
3815 	frame_hdr->cmd_status = 0xFF;
3816 	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3817 
3818 	/* Issue the frame using inbound queue port */
3819 	if (mrsas_issue_dcmd(sc, cmd)) {
3820 		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3821 		return (1);
3822 	}
3823 	/*
3824 	 * Poll response timer to wait for Firmware response.  While this
3825 	 * timer with the DELAY call could block CPU, the time interval for
3826 	 * this is only 1 millisecond.
3827 	 */
3828 	if (frame_hdr->cmd_status == 0xFF) {
3829 		for (i = 0; i < (max_wait * 1000); i++) {
3830 			if (frame_hdr->cmd_status == 0xFF)
3831 				DELAY(1000);
3832 			else
3833 				break;
3834 		}
3835 	}
3836 	if (frame_hdr->cmd_status == 0xFF) {
3837 		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3838 		    "seconds from %s\n", max_wait, __func__);
3839 		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3840 		    cmd->frame->dcmd.opcode);
3841 		retcode = ETIMEDOUT;
3842 	}
3843 	return (retcode);
3844 }
3845 
3846 /*
3847  * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3848  * input:				Adapter soft state mfi cmd pointer
3849  *
3850  * This function is called by mrsas_issued_blocked_cmd() and
3851  * mrsas_issued_polled(), to build the MPT command and then fire the command
3852  * to Firmware.
3853  */
3854 int
3855 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3856 {
3857 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3858 
3859 	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3860 	if (!req_desc) {
3861 		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3862 		return (1);
3863 	}
3864 	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3865 
3866 	return (0);
3867 }
3868 
3869 /*
3870  * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3871  * input:				Adapter soft state mfi cmd to build
3872  *
3873  * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3874  * command and prepares the MPT command to send to Firmware.
3875  */
3876 MRSAS_REQUEST_DESCRIPTOR_UNION *
3877 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3878 {
3879 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3880 	u_int16_t index;
3881 
3882 	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3883 		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3884 		return NULL;
3885 	}
3886 	index = cmd->cmd_id.context.smid;
3887 
3888 	req_desc = mrsas_get_request_desc(sc, index - 1);
3889 	if (!req_desc)
3890 		return NULL;
3891 
3892 	req_desc->addr.Words = 0;
3893 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3894 
3895 	req_desc->SCSIIO.SMID = index;
3896 
3897 	return (req_desc);
3898 }
3899 
3900 /*
3901  * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3902  * input:						Adapter soft state mfi cmd pointer
3903  *
3904  * The MPT command and the io_request are setup as a passthru command. The SGE
3905  * chain address is set to frame_phys_addr of the MFI command.
3906  */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link so the completion path can find the mfi cmd */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		/*
		 * Clear the flags of the last SGE in the main message so
		 * the firmware does not treat it as a chain element.
		 */
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	/* Mark the request as an MFI passthru and point it at the chain */
	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* The chain element points at the MFI frame itself */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = sc->max_chain_frame_sz;

	return (0);
}
3956 
3957 /*
3958  * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3959  * input:					Adapter soft state Command to be issued
3960  *
3961  * This function waits on an event for the command to be returned from the ISR.
3962  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3963  * internal and ioctl commands.
3964  */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;	/* 0xFF = still owned by firmware */

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): this stores the address of the local 'cmd'
	 * parameter, but the sleep channel actually used below (and the
	 * wakeup channel in mrsas_wakeup()) is &sc->chan itself, so this
	 * store looks like a dead assignment -- verify before removing.
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in 1-second slices until the ISR completes the cmd */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}

	/* Still 0xFF after the wait loop means the firmware never answered */
	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
4009 
4010 /*
4011  * mrsas_complete_mptmfi_passthru:	Completes a command
4012  * input:	@sc:					Adapter soft state
4013  * 			@cmd:					Command to be completed
4014  * 			@status:				cmd completion status
4015  *
4016  * This function is called from mrsas_complete_cmd() after an interrupt is
4017  * received from Firmware, and io_request->Function is
4018  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
4019  */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH: non-IOCTL SCSI IO shares the DCMD handling */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			/* Disable fast path while the new map is installed */
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* FW has no newer map; stop here */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			/* Re-validate the map and re-arm the sync command */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {
			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				/* JBOD fast path is unusable without a map */
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
4117 
4118 /*
4119  * mrsas_wakeup:	Completes an internal command
4120  * input:			Adapter soft state
4121  * 					Command to be completed
4122  *
4123  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
4124  * timer is started.  This function is called from
4125  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
4126  * from the command wait.
4127  */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/* Propagate the firmware's completion status to the sleeper */
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* 0xFF means "pending" to the waiter; fold it to success here */
	if (cmd->cmd_status == 0xFF)
		cmd->cmd_status = 0;

	/*
	 * NOTE(review): storing &cmd (address of a local parameter) looks
	 * like a dead assignment; the channel used by wakeup_one() here
	 * and by tsleep() in mrsas_issue_blocked_cmd() is &sc->chan
	 * itself.  Verify before cleaning up.
	 */
	sc->chan = (void *)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}
4140 
4141 /*
4142  * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
4143  * Adapter soft state Shutdown/Hibernate
4144  *
4145  * This function issues a DCMD internal command to Firmware to initiate shutdown
4146  * of the controller.
4147  */
static void
mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	/* A dead adapter cannot process a shutdown DCMD */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
		return;
	}
	/*
	 * Abort any long-lived pended commands (AEN, map update, JBOD
	 * sequence sync) before asking the firmware to shut down, so no
	 * command is left outstanding across the shutdown.
	 */
	if (sc->aen_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
	if (sc->map_update_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
	if (sc->jbod_seq_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);

	/* Build a no-data DCMD carrying the shutdown/hibernate opcode */
	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = opcode;

	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");

	/* Wait synchronously for the firmware to acknowledge */
	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}
4188 
4189 /*
4190  * mrsas_flush_cache:         Requests FW to flush all its caches input:
4191  * Adapter soft state
4192  *
4193  * This function is issues a DCMD internal command to Firmware to initiate
4194  * flushing of all caches.
4195  */
4196 static void
4197 mrsas_flush_cache(struct mrsas_softc *sc)
4198 {
4199 	struct mrsas_mfi_cmd *cmd;
4200 	struct mrsas_dcmd_frame *dcmd;
4201 
4202 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4203 		return;
4204 
4205 	cmd = mrsas_get_mfi_cmd(sc);
4206 	if (!cmd) {
4207 		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
4208 		return;
4209 	}
4210 	dcmd = &cmd->frame->dcmd;
4211 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4212 
4213 	dcmd->cmd = MFI_CMD_DCMD;
4214 	dcmd->cmd_status = 0x0;
4215 	dcmd->sge_count = 0;
4216 	dcmd->flags = MFI_FRAME_DIR_NONE;
4217 	dcmd->timeout = 0;
4218 	dcmd->pad_0 = 0;
4219 	dcmd->data_xfer_len = 0;
4220 	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
4221 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
4222 
4223 	mrsas_issue_blocked_cmd(sc, cmd);
4224 	mrsas_release_mfi_cmd(cmd);
4225 
4226 	return;
4227 }
4228 
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;	/* assume timeout; cleared on success */
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/* Header plus one MR_PD_CFG_SEQ per possible physical device */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Double-buffered JBOD map: use the slot for this map id */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* 0xFF = not yet completed by FW */
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = (pd_seq_map_sz);
	dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
	dcmd->sgl.sge32[0].length = (pd_seq_map_sz);

	if (pend) {
		/*
		 * Pended registration: fire-and-forget.  The cmd stays
		 * outstanding as sc->jbod_seq_cmd and is completed later
		 * by mrsas_complete_mptmfi_passthru().
		 */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = (MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = MFI_FRAME_DIR_READ;

	/*
	 * NOTE(review): in this polled path the mfi cmd does not appear
	 * to be released on any exit below -- possible command-pool leak;
	 * verify against the callers before changing.
	 */
	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		/* NOTE(review): negative errno mixed with positive codes */
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	/* A timed-out DCMD schedules an online controller reset */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}
4305 
4306 /*
4307  * mrsas_get_map_info:        Load and validate RAID map input:
4308  * Adapter instance soft state
4309  *
4310  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
4311  * and validate RAID map.  It returns 0 if successful, 1 other- wise.
4312  */
4313 static int
4314 mrsas_get_map_info(struct mrsas_softc *sc)
4315 {
4316 	uint8_t retcode = 0;
4317 
4318 	sc->fast_path_io = 0;
4319 	if (!mrsas_get_ld_map_info(sc)) {
4320 		retcode = MR_ValidateMapInfo(sc);
4321 		if (retcode == 0) {
4322 			sc->fast_path_io = 1;
4323 			return 0;
4324 		}
4325 	}
4326 	return 1;
4327 }
4328 
4329 /*
4330  * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
4331  * Adapter instance soft state
4332  *
4333  * Issues an internal command (DCMD) to get the FW's controller PD list
4334  * structure.
4335  */
4336 static int
4337 mrsas_get_ld_map_info(struct mrsas_softc *sc)
4338 {
4339 	int retcode = 0;
4340 	struct mrsas_mfi_cmd *cmd;
4341 	struct mrsas_dcmd_frame *dcmd;
4342 	void *map;
4343 	bus_addr_t map_phys_addr = 0;
4344 
4345 	cmd = mrsas_get_mfi_cmd(sc);
4346 	if (!cmd) {
4347 		device_printf(sc->mrsas_dev,
4348 		    "Cannot alloc for ld map info cmd.\n");
4349 		return 1;
4350 	}
4351 	dcmd = &cmd->frame->dcmd;
4352 
4353 	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4354 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4355 	if (!map) {
4356 		device_printf(sc->mrsas_dev,
4357 		    "Failed to alloc mem for ld map info.\n");
4358 		mrsas_release_mfi_cmd(cmd);
4359 		return (ENOMEM);
4360 	}
4361 	memset(map, 0, sizeof(sc->max_map_sz));
4362 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4363 
4364 	dcmd->cmd = MFI_CMD_DCMD;
4365 	dcmd->cmd_status = 0xFF;
4366 	dcmd->sge_count = 1;
4367 	dcmd->flags = MFI_FRAME_DIR_READ;
4368 	dcmd->timeout = 0;
4369 	dcmd->pad_0 = 0;
4370 	dcmd->data_xfer_len = sc->current_map_sz;
4371 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4372 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4373 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
4374 
4375 	retcode = mrsas_issue_polled(sc, cmd);
4376 	if (retcode == ETIMEDOUT)
4377 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4378 
4379 	return (retcode);
4380 }
4381 
4382 /*
4383  * mrsas_sync_map_info:        Get FW's ld_map structure input:
4384  * Adapter instance soft state
4385  *
4386  * Issues an internal command (DCMD) to get the FW's controller PD list
4387  * structure.
4388  */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
	int retcode = 0, i;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t size_sync_info, num_lds;
	MR_LD_TARGET_SYNC *target_map = NULL;
	MR_DRV_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	MR_LD_TARGET_SYNC *ld_sync;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
		return ENOMEM;
	}
	/* Source: the currently active driver map */
	map = sc->ld_drv_map[sc->map_id & 1];
	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;
	/* NOTE(review): size_sync_info is computed but never used below */
	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Destination: the inactive raidmap buffer (the other slot) */
	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
	memset(target_map, 0, sc->max_map_sz);

	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

	ld_sync = (MR_LD_TARGET_SYNC *) target_map;

	/* Write one (targetId, seqNum) pair per logical drive */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	/* Pended write DCMD: FW completes it when the LD map changes */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* 0xFF = not yet completed by FW */
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sc->current_map_sz;
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
	dcmd->sgl.sge32[0].length = sc->current_map_sz;

	/*
	 * The cmd stays outstanding as sc->map_update_cmd and is
	 * reclaimed by mrsas_complete_mptmfi_passthru() on completion.
	 * NOTE(review): on issue failure the cmd is neither released nor
	 * is map_update_cmd cleared -- verify intended behavior.
	 */
	sc->map_update_cmd = cmd;
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev,
		    "Fail to send sync map info command.\n");
		return (1);
	}
	return (retcode);
}
4448 
4449 /* Input:	dcmd.opcode		- MR_DCMD_PD_GET_INFO
4450   *		dcmd.mbox.s[0]		- deviceId for this physical drive
4451   *		dcmd.sge IN		- ptr to returned MR_PD_INFO structure
4452   * Desc:	Firmware return the physical drive info structure
4453   *
4454   */
static void
mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
{
	int retcode;
	u_int8_t do_ocr = 1;	/* assume timeout; cleared on success */
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for get PD info cmd\n");
		return;
	}
	dcmd = &cmd->frame->dcmd;

	memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Read DCMD: FW fills pd_info_mem for the requested device id */
	dcmd->mbox.s[0] = device_id;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* 0xFF = not yet completed by FW */
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_pd_info);
	dcmd->opcode = MR_DCMD_PD_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->pd_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_pd_info);

	/* With interrupts masked (e.g. during reset) we must poll */
	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* Record the drive's interface type (SAS/SATA/...) for this target */
	sc->target_list[device_id].interface_type =
		sc->pd_info_mem->state.ddf.pdType.intf;

	do_ocr = 0;

dcmd_timeout:

	/* A timed-out DCMD schedules an online controller reset */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	/*
	 * NOTE(review): in polled (masked-interrupt) mode the cmd is not
	 * released here -- presumably reclaimed by the OCR path; verify.
	 */
	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);
}
4508 
4509 /*
4510  * mrsas_add_target:				Add target ID of system PD/VD to driver's data structure.
4511  * sc:						Adapter's soft state
4512  * target_id:					Unique target id per controller(managed by driver)
4513  *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4514  *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4515  * return:					void
4516  * Descripton:					This function will be called whenever system PD or VD is created.
4517  */
4518 static void mrsas_add_target(struct mrsas_softc *sc,
4519 	u_int16_t target_id)
4520 {
4521 	sc->target_list[target_id].target_id = target_id;
4522 
4523 	device_printf(sc->mrsas_dev,
4524 		"%s created target ID: 0x%x\n",
4525 		(target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4526 		(target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4527 	/*
4528 	 * If interrupts are enabled, then only fire DCMD to get pd_info
4529 	 * for system PDs
4530 	 */
4531 	if (!sc->mask_interrupts && sc->pd_info_mem &&
4532 		(target_id < MRSAS_MAX_PD))
4533 		mrsas_get_pd_info(sc, target_id);
4534 
4535 }
4536 
4537 /*
4538  * mrsas_remove_target:			Remove target ID of system PD/VD from driver's data structure.
4539  * sc:						Adapter's soft state
4540  * target_id:					Unique target id per controller(managed by driver)
4541  *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4542  *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4543  * return:					void
4544  * Descripton:					This function will be called whenever system PD or VD is deleted
4545  */
4546 static void mrsas_remove_target(struct mrsas_softc *sc,
4547 	u_int16_t target_id)
4548 {
4549 	sc->target_list[target_id].target_id = 0xffff;
4550 	device_printf(sc->mrsas_dev,
4551 		"%s deleted target ID: 0x%x\n",
4552 		(target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4553 		(target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4554 }
4555 
4556 /*
4557  * mrsas_get_pd_list:           Returns FW's PD list structure input:
4558  * Adapter soft state
4559  *
4560  * Issues an internal command (DCMD) to get the FW's controller PD list
4561  * structure.  This information is mainly used to find out about system
4562  * supported by Firmware.
4563  */
4564 static int
4565 mrsas_get_pd_list(struct mrsas_softc *sc)
4566 {
4567 	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4568 	u_int8_t do_ocr = 1;
4569 	struct mrsas_mfi_cmd *cmd;
4570 	struct mrsas_dcmd_frame *dcmd;
4571 	struct MR_PD_LIST *pd_list_mem;
4572 	struct MR_PD_ADDRESS *pd_addr;
4573 	bus_addr_t pd_list_phys_addr = 0;
4574 	struct mrsas_tmp_dcmd *tcmd;
4575 
4576 	cmd = mrsas_get_mfi_cmd(sc);
4577 	if (!cmd) {
4578 		device_printf(sc->mrsas_dev,
4579 		    "Cannot alloc for get PD list cmd\n");
4580 		return 1;
4581 	}
4582 	dcmd = &cmd->frame->dcmd;
4583 
4584 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4585 	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4586 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4587 		device_printf(sc->mrsas_dev,
4588 		    "Cannot alloc dmamap for get PD list cmd\n");
4589 		mrsas_release_mfi_cmd(cmd);
4590 		mrsas_free_tmp_dcmd(tcmd);
4591 		free(tcmd, M_MRSAS);
4592 		return (ENOMEM);
4593 	} else {
4594 		pd_list_mem = tcmd->tmp_dcmd_mem;
4595 		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4596 	}
4597 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4598 
4599 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4600 	dcmd->mbox.b[1] = 0;
4601 	dcmd->cmd = MFI_CMD_DCMD;
4602 	dcmd->cmd_status = 0xFF;
4603 	dcmd->sge_count = 1;
4604 	dcmd->flags = MFI_FRAME_DIR_READ;
4605 	dcmd->timeout = 0;
4606 	dcmd->pad_0 = 0;
4607 	dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4608 	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4609 	dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4610 	dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4611 
4612 	if (!sc->mask_interrupts)
4613 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4614 	else
4615 		retcode = mrsas_issue_polled(sc, cmd);
4616 
4617 	if (retcode == ETIMEDOUT)
4618 		goto dcmd_timeout;
4619 
4620 	/* Get the instance PD list */
4621 	pd_count = MRSAS_MAX_PD;
4622 	pd_addr = pd_list_mem->addr;
4623 	if (pd_list_mem->count < pd_count) {
4624 		memset(sc->local_pd_list, 0,
4625 		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4626 		for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4627 			sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4628 			sc->local_pd_list[pd_addr->deviceId].driveType =
4629 			    pd_addr->scsiDevType;
4630 			sc->local_pd_list[pd_addr->deviceId].driveState =
4631 			    MR_PD_STATE_SYSTEM;
4632 			if (sc->target_list[pd_addr->deviceId].target_id == 0xffff)
4633 				mrsas_add_target(sc, pd_addr->deviceId);
4634 			pd_addr++;
4635 		}
4636 		for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
4637 			if ((sc->local_pd_list[pd_index].driveState !=
4638 				MR_PD_STATE_SYSTEM) &&
4639 				(sc->target_list[pd_index].target_id !=
4640 				0xffff)) {
4641 				mrsas_remove_target(sc, pd_index);
4642 			}
4643 		}
4644 		/*
4645 		 * Use mutext/spinlock if pd_list component size increase more than
4646 		 * 32 bit.
4647 		 */
4648 		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4649 		do_ocr = 0;
4650 	}
4651 dcmd_timeout:
4652 	mrsas_free_tmp_dcmd(tcmd);
4653 	free(tcmd, M_MRSAS);
4654 
4655 	if (do_ocr)
4656 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4657 
4658 	if (!sc->mask_interrupts)
4659 		mrsas_release_mfi_cmd(cmd);
4660 
4661 	return (retcode);
4662 }
4663 
4664 /*
4665  * mrsas_get_ld_list:           Returns FW's LD list structure input:
4666  * Adapter soft state
4667  *
4668  * Issues an internal command (DCMD) to get the FW's controller PD list
4669  * structure.  This information is mainly used to find out about supported by
4670  * the FW.
4671  */
4672 static int
4673 mrsas_get_ld_list(struct mrsas_softc *sc)
4674 {
4675 	int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
4676 	u_int8_t do_ocr = 1;
4677 	struct mrsas_mfi_cmd *cmd;
4678 	struct mrsas_dcmd_frame *dcmd;
4679 	struct MR_LD_LIST *ld_list_mem;
4680 	bus_addr_t ld_list_phys_addr = 0;
4681 	struct mrsas_tmp_dcmd *tcmd;
4682 
4683 	cmd = mrsas_get_mfi_cmd(sc);
4684 	if (!cmd) {
4685 		device_printf(sc->mrsas_dev,
4686 		    "Cannot alloc for get LD list cmd\n");
4687 		return 1;
4688 	}
4689 	dcmd = &cmd->frame->dcmd;
4690 
4691 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4692 	ld_list_size = sizeof(struct MR_LD_LIST);
4693 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4694 		device_printf(sc->mrsas_dev,
4695 		    "Cannot alloc dmamap for get LD list cmd\n");
4696 		mrsas_release_mfi_cmd(cmd);
4697 		mrsas_free_tmp_dcmd(tcmd);
4698 		free(tcmd, M_MRSAS);
4699 		return (ENOMEM);
4700 	} else {
4701 		ld_list_mem = tcmd->tmp_dcmd_mem;
4702 		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4703 	}
4704 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4705 
4706 	if (sc->max256vdSupport)
4707 		dcmd->mbox.b[0] = 1;
4708 
4709 	dcmd->cmd = MFI_CMD_DCMD;
4710 	dcmd->cmd_status = 0xFF;
4711 	dcmd->sge_count = 1;
4712 	dcmd->flags = MFI_FRAME_DIR_READ;
4713 	dcmd->timeout = 0;
4714 	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4715 	dcmd->opcode = MR_DCMD_LD_GET_LIST;
4716 	dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4717 	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
4718 	dcmd->pad_0 = 0;
4719 
4720 	if (!sc->mask_interrupts)
4721 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4722 	else
4723 		retcode = mrsas_issue_polled(sc, cmd);
4724 
4725 	if (retcode == ETIMEDOUT)
4726 		goto dcmd_timeout;
4727 
4728 #if VD_EXT_DEBUG
4729 	printf("Number of LDs %d\n", ld_list_mem->ldCount);
4730 #endif
4731 
4732 	/* Get the instance LD list */
4733 	if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4734 		sc->CurLdCount = ld_list_mem->ldCount;
4735 		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4736 		for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4737 			ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4738 			drv_tgt_id = ids + MRSAS_MAX_PD;
4739 			if (ld_list_mem->ldList[ld_index].state != 0) {
4740 				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4741 				if (sc->target_list[drv_tgt_id].target_id ==
4742 					0xffff)
4743 					mrsas_add_target(sc, drv_tgt_id);
4744 			} else {
4745 				if (sc->target_list[drv_tgt_id].target_id !=
4746 					0xffff)
4747 					mrsas_remove_target(sc,
4748 						drv_tgt_id);
4749 			}
4750 		}
4751 
4752 		do_ocr = 0;
4753 	}
4754 dcmd_timeout:
4755 	mrsas_free_tmp_dcmd(tcmd);
4756 	free(tcmd, M_MRSAS);
4757 
4758 	if (do_ocr)
4759 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4760 	if (!sc->mask_interrupts)
4761 		mrsas_release_mfi_cmd(cmd);
4762 
4763 	return (retcode);
4764 }
4765 
4766 /*
4767  * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
4768  * Adapter soft state Temp command Size of alloction
4769  *
4770  * Allocates DMAable memory for a temporary internal command. The allocated
4771  * memory is initialized to all zeros upon successful loading of the dma
4772  * mapped memory.
4773  */
4774 int
4775 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4776     struct mrsas_tmp_dcmd *tcmd, int size)
4777 {
4778 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
4779 	    1, 0,
4780 	    BUS_SPACE_MAXADDR_32BIT,
4781 	    BUS_SPACE_MAXADDR,
4782 	    NULL, NULL,
4783 	    size,
4784 	    1,
4785 	    size,
4786 	    BUS_DMA_ALLOCNOW,
4787 	    NULL, NULL,
4788 	    &tcmd->tmp_dcmd_tag)) {
4789 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4790 		return (ENOMEM);
4791 	}
4792 	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4793 	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4794 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4795 		return (ENOMEM);
4796 	}
4797 	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4798 	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4799 	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4800 		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4801 		return (ENOMEM);
4802 	}
4803 	memset(tcmd->tmp_dcmd_mem, 0, size);
4804 	return (0);
4805 }
4806 
4807 /*
4808  * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
4809  * temporary dcmd pointer
4810  *
4811  * Deallocates memory of the temporary command for use in the construction of
4812  * the internal DCMD.
4813  */
4814 void
4815 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4816 {
4817 	if (tmp->tmp_dcmd_phys_addr)
4818 		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4819 	if (tmp->tmp_dcmd_mem != NULL)
4820 		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4821 	if (tmp->tmp_dcmd_tag != NULL)
4822 		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4823 }
4824 
4825 /*
4826  * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
4827  * Adapter soft state Previously issued cmd to be aborted
4828  *
4829  * This function is used to abort previously issued commands, such as AEN and
4830  * RAID map sync map commands.  The abort command is sent as a DCMD internal
4831  * command and subsequently the driver will wait for a return status.  The
4832  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4833  */
4834 static int
4835 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4836     struct mrsas_mfi_cmd *cmd_to_abort)
4837 {
4838 	struct mrsas_mfi_cmd *cmd;
4839 	struct mrsas_abort_frame *abort_fr;
4840 	u_int8_t retcode = 0;
4841 	unsigned long total_time = 0;
4842 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4843 
4844 	cmd = mrsas_get_mfi_cmd(sc);
4845 	if (!cmd) {
4846 		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4847 		return (1);
4848 	}
4849 	abort_fr = &cmd->frame->abort;
4850 
4851 	/* Prepare and issue the abort frame */
4852 	abort_fr->cmd = MFI_CMD_ABORT;
4853 	abort_fr->cmd_status = 0xFF;
4854 	abort_fr->flags = 0;
4855 	abort_fr->abort_context = cmd_to_abort->index;
4856 	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4857 	abort_fr->abort_mfi_phys_addr_hi = 0;
4858 
4859 	cmd->sync_cmd = 1;
4860 	cmd->cmd_status = 0xFF;
4861 
4862 	if (mrsas_issue_dcmd(sc, cmd)) {
4863 		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4864 		return (1);
4865 	}
4866 	/* Wait for this cmd to complete */
4867 	sc->chan = (void *)&cmd;
4868 	while (1) {
4869 		if (cmd->cmd_status == 0xFF) {
4870 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4871 		} else
4872 			break;
4873 		total_time++;
4874 		if (total_time >= max_wait) {
4875 			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4876 			retcode = 1;
4877 			break;
4878 		}
4879 	}
4880 
4881 	cmd->sync_cmd = 0;
4882 	mrsas_release_mfi_cmd(cmd);
4883 	return (retcode);
4884 }
4885 
4886 /*
4887  * mrsas_complete_abort:      Completes aborting a command input:
4888  * Adapter soft state Cmd that was issued to abort another cmd
4889  *
4890  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4891  * change after sending the command.  This function is called from
4892  * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4893  */
4894 void
4895 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4896 {
4897 	if (cmd->sync_cmd) {
4898 		cmd->sync_cmd = 0;
4899 		cmd->cmd_status = 0;
4900 		sc->chan = (void *)&cmd;
4901 		wakeup_one((void *)&sc->chan);
4902 	}
4903 	return;
4904 }
4905 
4906 /*
4907  * mrsas_aen_handler:	AEN processing callback function from thread context
4908  * input:				Adapter soft state
4909  *
4910  * Asynchronous event handler
4911  */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
 	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	/* Do not touch hardware while a detach or reset is in flight. */
	if (sc->remove_in_progress || sc->reset_in_progress) {
		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
			__func__, __LINE__);
		return;
	}
	if (sc->evt_detail_mem) {
		switch (sc->evt_detail_mem->code) {
		case MR_EVT_PD_INSERTED:
			/* Refresh PD list, then rescan sim_1 on success. */
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_PD_REMOVED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			/* LD went away: rescan sim_0 only. */
			mrsas_bus_scan_sim(sc, sc->sim_0);
			break;
		case MR_EVT_LD_CREATED:
			/* Refresh LD list, then rescan sim_0 on success. */
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			/* Broad config change: rescan both SIMs below. */
			doscan = 1;
			break;
		case MR_EVT_CTRL_PROP_CHANGED:
			fail_aen = mrsas_get_ctrl_info(sc);
			if (fail_aen)
				goto skip_register_aen;
			break;
		default:
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	/* Full rescan path: refresh both PD and LD views, scanning each SIM. */
	if (doscan) {
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	/* Re-arm the AEN for the event following the one just processed. */
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	/* An AEN is already outstanding; no re-registration needed. */
	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);

skip_register_aen:
	return;

}
5011 
5012 /*
5013  * mrsas_complete_aen:	Completes AEN command
5014  * input:				Adapter soft state
5015  * 						Cmd that was issued to abort another cmd
5016  *
5017  * This function will be called from ISR and will continue event processing from
5018  * thread context by enqueuing task in ev_tq (callback function
5019  * "mrsas_aen_handler").
5020  */
5021 void
5022 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
5023 {
5024 	/*
5025 	 * Don't signal app if it is just an aborted previously registered
5026 	 * aen
5027 	 */
5028 	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
5029 		sc->mrsas_aen_triggered = 1;
5030 		mtx_lock(&sc->aen_lock);
5031 		if (sc->mrsas_poll_waiting) {
5032 			sc->mrsas_poll_waiting = 0;
5033 			selwakeup(&sc->mrsas_select);
5034 		}
5035 		mtx_unlock(&sc->aen_lock);
5036 	} else
5037 		cmd->abort_aen = 0;
5038 
5039 	sc->aen_cmd = NULL;
5040 	mrsas_release_mfi_cmd(cmd);
5041 
5042 	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
5043 
5044 	return;
5045 }
5046 
/* Newbus method table for the mrsas(4) driver. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_shutdown, mrsas_shutdown),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}			/* table terminator */
};

/* Driver description: name, method table, and per-device softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;

/* Attach on the PCI bus; the driver depends on the CAM transport layer. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
5069