xref: /freebsd/sys/dev/mrsas/mrsas.c (revision 19fae0f66023a97a9b464b3beeeabb2081f575b3)
1 /*
2  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4  * Support: freebsdraid@avagotech.com
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer. 2. Redistributions
12  * in binary form must reproduce the above copyright notice, this list of
13  * conditions and the following disclaimer in the documentation and/or other
14  * materials provided with the distribution. 3. Neither the name of the
15  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16  * promote products derived from this software without specific prior written
17  * permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing
33  * official policies,either expressed or implied, of the FreeBSD Project.
34  *
35  * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37  *
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
45 
46 #include <cam/cam.h>
47 #include <cam/cam_ccb.h>
48 
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/smp.h>
55 #include <sys/endian.h>
56 
57 /*
58  * Function prototypes
59  */
60 static d_open_t mrsas_open;
61 static d_close_t mrsas_close;
62 static d_ioctl_t mrsas_ioctl;
63 static d_poll_t mrsas_poll;
64 
65 static void mrsas_ich_startup(void *arg);
66 static struct mrsas_mgmt_info mrsas_mgmt_info;
67 static struct mrsas_ident *mrsas_find_ident(device_t);
68 static int mrsas_setup_msix(struct mrsas_softc *sc);
69 static int mrsas_allocate_msix(struct mrsas_softc *sc);
70 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
71 static void mrsas_flush_cache(struct mrsas_softc *sc);
72 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
73 static void mrsas_ocr_thread(void *arg);
74 static int mrsas_get_map_info(struct mrsas_softc *sc);
75 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
76 static int mrsas_sync_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_pd_list(struct mrsas_softc *sc);
78 static int mrsas_get_ld_list(struct mrsas_softc *sc);
79 static int mrsas_setup_irq(struct mrsas_softc *sc);
80 static int mrsas_alloc_mem(struct mrsas_softc *sc);
81 static int mrsas_init_fw(struct mrsas_softc *sc);
82 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
83 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
84 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
85 static int mrsas_clear_intr(struct mrsas_softc *sc);
86 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
87 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
88 static int
89 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
90     struct mrsas_mfi_cmd *cmd_to_abort);
91 static void
92 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
93 static struct mrsas_softc *
94 mrsas_get_softc_instance(struct cdev *dev,
95     u_long cmd, caddr_t arg);
96 u_int32_t
97 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
98 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
99 u_int8_t
100 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
101     struct mrsas_mfi_cmd *mfi_cmd);
102 void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
103 int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
104 int	mrsas_init_adapter(struct mrsas_softc *sc);
105 int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
106 int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
107 int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
108 int	mrsas_ioc_init(struct mrsas_softc *sc);
109 int	mrsas_bus_scan(struct mrsas_softc *sc);
110 int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
111 int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
112 int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
113 int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
114 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
115 int mrsas_reset_targets(struct mrsas_softc *sc);
116 int
117 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
118     struct mrsas_mfi_cmd *cmd);
119 int
120 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
121     int size);
122 void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
123 void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
124 void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
125 void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
126 void	mrsas_disable_intr(struct mrsas_softc *sc);
127 void	mrsas_enable_intr(struct mrsas_softc *sc);
128 void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
129 void	mrsas_free_mem(struct mrsas_softc *sc);
130 void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
131 void	mrsas_isr(void *arg);
132 void	mrsas_teardown_intr(struct mrsas_softc *sc);
133 void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
134 void	mrsas_kill_hba(struct mrsas_softc *sc);
135 void	mrsas_aen_handler(struct mrsas_softc *sc);
136 void
137 mrsas_write_reg(struct mrsas_softc *sc, int offset,
138     u_int32_t value);
139 void
140 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
141     u_int32_t req_desc_hi);
142 void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
143 void
144 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
145     struct mrsas_mfi_cmd *cmd, u_int8_t status);
146 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
147 
148 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
149         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
150 
151 extern int mrsas_cam_attach(struct mrsas_softc *sc);
152 extern void mrsas_cam_detach(struct mrsas_softc *sc);
153 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
154 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
155 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
156 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
157 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
158 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
159 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
160 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
162 extern void mrsas_xpt_release(struct mrsas_softc *sc);
163 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
164 mrsas_get_request_desc(struct mrsas_softc *sc,
165     u_int16_t index);
166 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
167 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
168 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
169 void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
170 
171 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
172 	union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
173 	u_int32_t data_length, u_int8_t *sense);
174 void
175 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
176     u_int32_t req_desc_hi);
177 
178 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
179     "MRSAS Driver Parameters");
180 
181 /*
182  * PCI device struct and table
183  *
184  */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID (0x1000 for every table entry) */
	uint16_t device;	/* PCI device ID (MRSAS_* constant) */
	uint16_t subvendor;	/* PCI subvendor ID; 0xffff matches any (see mrsas_find_ident) */
	uint16_t subdevice;	/* PCI subdevice ID; 0xffff matches any (see mrsas_find_ident) */
	const char *desc;	/* human-readable controller name for device_set_desc() */
}	MRSAS_CTLR_ID;
192 
/*
 * Controllers claimed by this driver; matched in mrsas_find_ident().
 * subvendor/subdevice of 0xffff act as wildcards there.  The all-zero
 * entry terminates the table.
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	/* terminator */
	{0, 0, 0, 0, NULL}
};
217 
218 /*
219  * Character device entry points
220  *
221  */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,	/* no per-open state; always succeeds */
	.d_close = mrsas_close,	/* no per-open state; always succeeds */
	.d_ioctl = mrsas_ioctl,	/* management/passthrough interface */
	.d_poll = mrsas_poll,
	.d_name = "mrsas",	/* device node base name */
};
230 
231 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
232 
/*
 * mrsas_open: character-device open entry point.
 *
 * The driver keeps no per-open state, so opening the control device
 * always succeeds.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	return (0);
}
239 
/*
 * mrsas_close: character-device close entry point.
 *
 * Nothing was allocated at open time, so there is nothing to tear down.
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	return (0);
}
246 
247 u_int32_t
248 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
249 {
250 	u_int32_t i = 0, ret_val;
251 
252 	if (sc->is_aero) {
253 		do {
254 			ret_val = mrsas_read_reg(sc, offset);
255 			i++;
256 		} while(ret_val == 0 && i < 3);
257 	} else
258 		ret_val = mrsas_read_reg(sc, offset);
259 
260 	return ret_val;
261 }
262 
263 /*
264  * Register Read/Write Functions
265  *
266  */
267 void
268 mrsas_write_reg(struct mrsas_softc *sc, int offset,
269     u_int32_t value)
270 {
271 	bus_space_tag_t bus_tag = sc->bus_tag;
272 	bus_space_handle_t bus_handle = sc->bus_handle;
273 
274 	bus_space_write_4(bus_tag, bus_handle, offset, value);
275 }
276 
277 u_int32_t
278 mrsas_read_reg(struct mrsas_softc *sc, int offset)
279 {
280 	bus_space_tag_t bus_tag = sc->bus_tag;
281 	bus_space_handle_t bus_handle = sc->bus_handle;
282 
283 	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
284 }
285 
286 /*
287  * Interrupt Disable/Enable/Clear Functions
288  *
289  */
/*
 * mrsas_disable_intr: mask all controller interrupts.
 * input:	Adapter instance soft state
 *
 * Sets mask_interrupts (checked elsewhere in the driver before handling
 * interrupts), then writes an all-ones mask to the outbound interrupt
 * mask register.  The trailing read forces the posted PCI write to reach
 * the device before this function returns.
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
300 
/*
 * mrsas_enable_intr: unmask the reply interrupt.
 * input:	Adapter instance soft state
 *
 * Clears mask_interrupts, acknowledges any stale status by writing ~0 to
 * the outbound interrupt status register, then unmasks only the bits in
 * MFI_FUSION_ENABLE_INTERRUPT_MASK.  Each write is followed by a dummy
 * read to flush the posted PCI write; the write order (status before
 * mask) is deliberate.
 */
void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;

	sc->mask_interrupts = 0;
	/* Ack/clear any pending status before unmasking. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Unmask the reply interrupt (mask register is active-high masking). */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
313 
314 static int
315 mrsas_clear_intr(struct mrsas_softc *sc)
316 {
317 	u_int32_t status;
318 
319 	/* Read received interrupt */
320 	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));
321 
322 	/* Not our interrupt, so just return */
323 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
324 		return (0);
325 
326 	/* We got a reply interrupt */
327 	return (1);
328 }
329 
330 /*
331  * PCI Support Functions
332  *
333  */
334 static struct mrsas_ident *
335 mrsas_find_ident(device_t dev)
336 {
337 	struct mrsas_ident *pci_device;
338 
339 	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
340 		if ((pci_device->vendor == pci_get_vendor(dev)) &&
341 		    (pci_device->device == pci_get_device(dev)) &&
342 		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
343 		    (pci_device->subvendor == 0xffff)) &&
344 		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
345 		    (pci_device->subdevice == 0xffff)))
346 			return (pci_device);
347 	}
348 	return (NULL);
349 }
350 
351 static int
352 mrsas_probe(device_t dev)
353 {
354 	static u_int8_t first_ctrl = 1;
355 	struct mrsas_ident *id;
356 
357 	if ((id = mrsas_find_ident(dev)) != NULL) {
358 		if (first_ctrl) {
359 			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
360 			    MRSAS_VERSION);
361 			first_ctrl = 0;
362 		}
363 		device_set_desc(dev, id->desc);
364 		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
365 		return (-30);
366 	}
367 	return (ENXIO);
368 }
369 
370 /*
371  * mrsas_setup_sysctl:	setup sysctl values for mrsas
372  * input:				Adapter instance soft state
373  *
374  * Setup sysctl entries for mrsas driver.
375  */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the tree newbus created for this device... */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/*
	 * ...otherwise hang a private per-unit node (named after the unit
	 * number) off the static hw.mrsas node declared at file scope.
	 */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}

	/* Writable knobs. */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	/* Read-only state and statistics. */
	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");
	/*
	 * NOTE(review): the next two OID names contain spaces ("stream
	 * detection", "SGE holes"), which is awkward for sysctl(8) users;
	 * kept as-is since renaming would change the user-visible interface.
	 */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream detection", CTLFLAG_RW,
		&sc->drv_stream_detection, 0,
		"Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "SGE holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}
457 
458 /*
459  * mrsas_get_tunables:	get tunable parameters.
460  * input:				Adapter instance soft state
461  *
462  * Get tunable parameters. This will help to debug driver at boot time.
463  */
464 static void
465 mrsas_get_tunables(struct mrsas_softc *sc)
466 {
467 	char tmpstr[80];
468 
469 	/* XXX default to some debugging for now */
470 	sc->mrsas_debug =
471 		(MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
472 	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
473 	sc->mrsas_fw_fault_check_delay = 1;
474 	sc->reset_count = 0;
475 	sc->reset_in_progress = 0;
476 	sc->block_sync_cache = 0;
477 	sc->drv_stream_detection = 1;
478 
479 	/*
480 	 * Grab the global variables.
481 	 */
482 	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
483 
484 	/*
485 	 * Grab the global variables.
486 	 */
487 	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
488 
489 	/* Grab the unit-instance variables */
490 	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
491 	    device_get_unit(sc->mrsas_dev));
492 	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
493 }
494 
495 /*
496  * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
497  * Used to get sequence number at driver load time.
498  * input:		Adapter soft state
499  *
500  * Allocates DMAable memory for the event log info internal command.
501  */
502 int
503 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
504 {
505 	int el_info_size;
506 
507 	/* Allocate get event log info command */
508 	el_info_size = sizeof(struct mrsas_evt_log_info);
509 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
510 	    1, 0,
511 	    BUS_SPACE_MAXADDR_32BIT,
512 	    BUS_SPACE_MAXADDR,
513 	    NULL, NULL,
514 	    el_info_size,
515 	    1,
516 	    el_info_size,
517 	    BUS_DMA_ALLOCNOW,
518 	    NULL, NULL,
519 	    &sc->el_info_tag)) {
520 		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
521 		return (ENOMEM);
522 	}
523 	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
524 	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
525 		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
526 		return (ENOMEM);
527 	}
528 	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
529 	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
530 	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
531 		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
532 		return (ENOMEM);
533 	}
534 	memset(sc->el_info_mem, 0, el_info_size);
535 	return (0);
536 }
537 
538 /*
539  * mrsas_free_evt_info_cmd:	Free memory for Event log info command
540  * input:					Adapter soft state
541  *
542  * Deallocates memory for the event log info internal command.
543  */
544 void
545 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
546 {
547 	if (sc->el_info_phys_addr)
548 		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
549 	if (sc->el_info_mem != NULL)
550 		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
551 	if (sc->el_info_tag != NULL)
552 		bus_dma_tag_destroy(sc->el_info_tag);
553 }
554 
555 /*
556  *  mrsas_get_seq_num:	Get latest event sequence number
557  *  @sc:				Adapter soft state
558  *  @eli:				Firmware event log sequence number information.
559  *
560  * Firmware maintains a log of all events in a non-volatile area.
 * The driver gets the sequence number using the DCMD
562  * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
563  */
564 
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer the firmware writes the event log info into. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/*
	 * Build the MR_DCMD_CTRL_EVENT_GET_INFO frame: a single 32-bit SGE,
	 * firmware-to-host transfer.  Multi-byte fields are stored
	 * little-endian for the firmware.
	 */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info));

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/*
	 * On timeout (do_ocr still set) the frame may still be owned by the
	 * firmware, so schedule an OCR instead of releasing the command.
	 * NOTE(review): the el_info DMA buffer is not freed on the timeout
	 * path — confirm that OCR/teardown reclaims it.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
618 
619 /*
620  *  mrsas_register_aen:		Register for asynchronous event notification
621  *  @sc:			Adapter soft state
622  *  @seq_num:			Starting sequence number
623  *  @class_locale:		Class of the event
624  *
625  *  This function subscribes for events beyond the @seq_num
626  *  and type @class_locale.
627  *
628  */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {
		/* mbox.w[1] of the pending frame holds its class/locale word. */
		prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]);

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Form the superset: union of locales, lower class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	/* Clear the buffer the firmware will fill when an event fires. */
	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* MR_DCMD_CTRL_EVENT_WAIT: mbox.w[0] = starting sequence number,
	 * mbox.w[1] = class/locale filter; fields are little-endian. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mbox.w[0] = htole32(seq_num);
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = htole32(curr_aen.word);
	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail));

	/*
	 * NOTE(review): aen_cmd was NULL (or cleared) above; if it became
	 * non-NULL again here, another registration raced in, so drop this
	 * one — confirm the locking that makes this check sufficient.
	 */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
735 
736 /*
737  * mrsas_start_aen:	Subscribes to AEN during driver load time
738  * @instance:		Adapter soft state
739  */
740 static int
741 mrsas_start_aen(struct mrsas_softc *sc)
742 {
743 	struct mrsas_evt_log_info eli;
744 	union mrsas_evt_class_locale class_locale;
745 
746 	/* Get the latest sequence number from FW */
747 
748 	memset(&eli, 0, sizeof(eli));
749 
750 	if (mrsas_get_seq_num(sc, &eli))
751 		return -1;
752 
753 	/* Register AEN with FW for latest sequence number plus 1 */
754 	class_locale.members.reserved = 0;
755 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
756 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
757 
758 	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
759 	    class_locale.word);
760 
761 }
762 
763 /*
764  * mrsas_setup_msix:	Allocate MSI-x vectors
765  * @sc:					adapter soft state
766  */
767 static int
768 mrsas_setup_msix(struct mrsas_softc *sc)
769 {
770 	int i;
771 
772 	for (i = 0; i < sc->msix_vectors; i++) {
773 		sc->irq_context[i].sc = sc;
774 		sc->irq_context[i].MSIxIndex = i;
775 		sc->irq_id[i] = i + 1;
776 		sc->mrsas_irq[i] = bus_alloc_resource_any
777 		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
778 		    ,RF_ACTIVE);
779 		if (sc->mrsas_irq[i] == NULL) {
780 			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
781 			goto irq_alloc_failed;
782 		}
783 		if (bus_setup_intr(sc->mrsas_dev,
784 		    sc->mrsas_irq[i],
785 		    INTR_MPSAFE | INTR_TYPE_CAM,
786 		    NULL, mrsas_isr, &sc->irq_context[i],
787 		    &sc->intr_handle[i])) {
788 			device_printf(sc->mrsas_dev,
789 			    "Cannot set up MSI-x interrupt handler\n");
790 			goto irq_alloc_failed;
791 		}
792 	}
793 	return SUCCESS;
794 
795 irq_alloc_failed:
796 	mrsas_teardown_intr(sc);
797 	return (FAIL);
798 }
799 
800 /*
801  * mrsas_allocate_msix:		Setup MSI-x vectors
802  * @sc:						adapter soft state
803  */
804 static int
805 mrsas_allocate_msix(struct mrsas_softc *sc)
806 {
807 	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
808 		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
809 		    " of vectors\n", sc->msix_vectors);
810 	} else {
811 		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
812 		goto irq_alloc_failed;
813 	}
814 	return SUCCESS;
815 
816 irq_alloc_failed:
817 	mrsas_teardown_intr(sc);
818 	return (FAIL);
819 }
820 
821 /*
822  * mrsas_attach:	PCI entry point
823  * input:			pointer to device struct
824  *
825  * Performs setup of PCI and registers, initializes mutexes and linked lists,
826  * registers interrupts and CAM, and initializes   the adapter/controller to
827  * its proper state.
828  */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, error;

	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Classify the controller family/generation from its PCI device ID. */
	switch (sc->device_id) {
	case MRSAS_INVADER:
	case MRSAS_FURY:
	case MRSAS_INTRUDER:
	case MRSAS_INTRUDER_24:
	case MRSAS_CUTLASS_52:
	case MRSAS_CUTLASS_53:
		sc->mrsas_gen3_ctrl = 1;
		break;
	case MRSAS_VENTURA:
	case MRSAS_CRUSADER:
	case MRSAS_HARPOON:
	case MRSAS_TOMCAT:
	case MRSAS_VENTURA_4PORT:
	case MRSAS_CRUSADER_4PORT:
		sc->is_ventura = true;
		break;
	case MRSAS_AERO_10E1:
	case MRSAS_AERO_10E5:
		device_printf(dev, "Adapter is in configurable secure mode\n");
		/* FALLTHROUGH -- configurable-secure Aero is still driven */
	case MRSAS_AERO_10E2:
	case MRSAS_AERO_10E6:
		sc->is_aero = true;
		break;
	case MRSAS_AERO_10E0:
	case MRSAS_AERO_10E3:
	case MRSAS_AERO_10E4:
	case MRSAS_AERO_10E7:
		/* Non-secure Aero parts are not driven; attach is a no-op. */
		device_printf(dev, "Adapter is in non-secure mode\n");
		return SUCCESS;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* For Ventura/Aero system registers are mapped to BAR0 */
	if (sc->is_ventura || sc->is_aero)
		sc->reg_res_id = PCIR_BAR(0);	/* BAR0 offset */
	else
		sc->reg_res_id = PCIR_BAR(1);	/* BAR1 offset */

	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Initialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	/* ioctl_lock is a spin mutex; it is taken with mtx_lock_spin(). */
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
	mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);

	/* Initialize linked lists (MPT/MFI command free lists) */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);
	mrsas_atomic_set(&sc->prp_count, 0);
	mrsas_atomic_set(&sc->sge_holes, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Start the OCR (online controller reset) recovery kernel thread. */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Error unwind: each label undoes the steps above it, in reverse. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
984 
985 /*
986  * Interrupt config hook
987  */
988 static void
989 mrsas_ich_startup(void *arg)
990 {
991 	int i = 0;
992 	struct mrsas_softc *sc = (struct mrsas_softc *)arg;
993 
994 	/*
995 	 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
996 	 */
997 	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
998 	    IOCTL_SEMA_DESCRIPTION);
999 
1000 	/* Create a /dev entry for mrsas controller. */
1001 	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
1002 	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
1003 	    device_get_unit(sc->mrsas_dev));
1004 
1005 	if (device_get_unit(sc->mrsas_dev) == 0) {
1006 		make_dev_alias_p(MAKEDEV_CHECKNAME,
1007 		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
1008 		    "megaraid_sas_ioctl_node");
1009 	}
1010 	if (sc->mrsas_cdev)
1011 		sc->mrsas_cdev->si_drv1 = sc;
1012 
1013 	/*
1014 	 * Add this controller to mrsas_mgmt_info structure so that it can be
1015 	 * exported to management applications
1016 	 */
1017 	if (device_get_unit(sc->mrsas_dev) == 0)
1018 		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
1019 
1020 	mrsas_mgmt_info.count++;
1021 	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
1022 	mrsas_mgmt_info.max_index++;
1023 
1024 	/* Enable Interrupts */
1025 	mrsas_enable_intr(sc);
1026 
1027 	/* Call DCMD get_pd_info for all system PDs */
1028 	for (i = 0; i < MRSAS_MAX_PD; i++) {
1029 		if ((sc->target_list[i].target_id != 0xffff) &&
1030 			sc->pd_info_mem)
1031 			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
1032 	}
1033 
1034 	/* Initiate AEN (Asynchronous Event Notification) */
1035 	if (mrsas_start_aen(sc)) {
1036 		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
1037 		    "Further events from the controller will not be communicated.\n"
1038 		    "Either there is some problem in the controller"
1039 		    "or the controller does not support AEN.\n"
1040 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
1041 	}
1042 	if (sc->mrsas_ich.ich_arg != NULL) {
1043 		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
1044 		config_intrhook_disestablish(&sc->mrsas_ich);
1045 		sc->mrsas_ich.ich_arg = NULL;
1046 	}
1047 }
1048 
1049 /*
1050  * mrsas_detach:	De-allocates and teardown resources
1051  * input:			pointer to device struct
1052  *
1053  * This function is the entry point for device disconnect and detach.
1054  * It performs memory de-allocations, shutdown of the controller and various
1055  * teardown and destroy resource functions.
1056  */
1057 static int
1058 mrsas_detach(device_t dev)
1059 {
1060 	struct mrsas_softc *sc;
1061 	int i = 0;
1062 
1063 	sc = device_get_softc(dev);
1064 	sc->remove_in_progress = 1;
1065 
1066 	/* Destroy the character device so no other IOCTL will be handled */
1067 	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
1068 		destroy_dev(sc->mrsas_linux_emulator_cdev);
1069 	destroy_dev(sc->mrsas_cdev);
1070 
1071 	/*
1072 	 * Take the instance off the instance array. Note that we will not
1073 	 * decrement the max_index. We let this array be sparse array
1074 	 */
1075 	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
1076 		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
1077 			mrsas_mgmt_info.count--;
1078 			mrsas_mgmt_info.sc_ptr[i] = NULL;
1079 			break;
1080 		}
1081 	}
1082 
1083 	if (sc->ocr_thread_active)
1084 		wakeup(&sc->ocr_chan);
1085 	while (sc->reset_in_progress) {
1086 		i++;
1087 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1088 			mrsas_dprint(sc, MRSAS_INFO,
1089 			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1090 		}
1091 		pause("mr_shutdown", hz);
1092 	}
1093 	i = 0;
1094 	while (sc->ocr_thread_active) {
1095 		i++;
1096 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1097 			mrsas_dprint(sc, MRSAS_INFO,
1098 			    "[%2d]waiting for "
1099 			    "mrsas_ocr thread to quit ocr %d\n", i,
1100 			    sc->ocr_thread_active);
1101 		}
1102 		pause("mr_shutdown", hz);
1103 	}
1104 	mrsas_flush_cache(sc);
1105 	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1106 	mrsas_disable_intr(sc);
1107 
1108 	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
1109 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
1110 			free(sc->streamDetectByLD[i], M_MRSAS);
1111 		free(sc->streamDetectByLD, M_MRSAS);
1112 		sc->streamDetectByLD = NULL;
1113 	}
1114 
1115 	mrsas_cam_detach(sc);
1116 	mrsas_teardown_intr(sc);
1117 	mrsas_free_mem(sc);
1118 	mtx_destroy(&sc->sim_lock);
1119 	mtx_destroy(&sc->aen_lock);
1120 	mtx_destroy(&sc->pci_lock);
1121 	mtx_destroy(&sc->io_lock);
1122 	mtx_destroy(&sc->ioctl_lock);
1123 	mtx_destroy(&sc->mpt_cmd_pool_lock);
1124 	mtx_destroy(&sc->mfi_cmd_pool_lock);
1125 	mtx_destroy(&sc->raidmap_lock);
1126 	mtx_destroy(&sc->stream_lock);
1127 
1128 	/* Wait for all the semaphores to be released */
1129 	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
1130 		pause("mr_shutdown", hz);
1131 
1132 	/* Destroy the counting semaphore created for Ioctl */
1133 	sema_destroy(&sc->ioctl_count_sema);
1134 
1135 	if (sc->reg_res) {
1136 		bus_release_resource(sc->mrsas_dev,
1137 		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1138 	}
1139 	if (sc->sysctl_tree != NULL)
1140 		sysctl_ctx_free(&sc->sysctl_ctx);
1141 
1142 	return (0);
1143 }
1144 
1145 static int
1146 mrsas_shutdown(device_t dev)
1147 {
1148 	struct mrsas_softc *sc;
1149 	int i;
1150 
1151 	sc = device_get_softc(dev);
1152 	sc->remove_in_progress = 1;
1153 	if (!KERNEL_PANICKED()) {
1154 		if (sc->ocr_thread_active)
1155 			wakeup(&sc->ocr_chan);
1156 		i = 0;
1157 		while (sc->reset_in_progress && i < 15) {
1158 			i++;
1159 			if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
1160 				mrsas_dprint(sc, MRSAS_INFO,
1161 				    "[%2d]waiting for OCR to be finished "
1162 				    "from %s\n", i, __func__);
1163 			}
1164 			pause("mr_shutdown", hz);
1165 		}
1166 		if (sc->reset_in_progress) {
1167 			mrsas_dprint(sc, MRSAS_INFO,
1168 			    "gave up waiting for OCR to be finished\n");
1169 			return (0);
1170 		}
1171 	}
1172 
1173 	mrsas_flush_cache(sc);
1174 	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1175 	mrsas_disable_intr(sc);
1176 	return (0);
1177 }
1178 
1179 /*
1180  * mrsas_free_mem:		Frees allocated memory
1181  * input:				Adapter instance soft state
1182  *
1183  * This function is called from mrsas_detach() to free previously allocated
1184  * memory.
1185  */
1186 void
1187 mrsas_free_mem(struct mrsas_softc *sc)
1188 {
1189 	int i;
1190 	u_int32_t max_fw_cmds;
1191 	struct mrsas_mfi_cmd *mfi_cmd;
1192 	struct mrsas_mpt_cmd *mpt_cmd;
1193 
1194 	/*
1195 	 * Free RAID map memory
1196 	 */
1197 	for (i = 0; i < 2; i++) {
1198 		if (sc->raidmap_phys_addr[i])
1199 			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1200 		if (sc->raidmap_mem[i] != NULL)
1201 			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1202 		if (sc->raidmap_tag[i] != NULL)
1203 			bus_dma_tag_destroy(sc->raidmap_tag[i]);
1204 
1205 		if (sc->ld_drv_map[i] != NULL)
1206 			free(sc->ld_drv_map[i], M_MRSAS);
1207 	}
1208 	for (i = 0; i < 2; i++) {
1209 		if (sc->jbodmap_phys_addr[i])
1210 			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1211 		if (sc->jbodmap_mem[i] != NULL)
1212 			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1213 		if (sc->jbodmap_tag[i] != NULL)
1214 			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1215 	}
1216 	/*
1217 	 * Free version buffer memory
1218 	 */
1219 	if (sc->verbuf_phys_addr)
1220 		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1221 	if (sc->verbuf_mem != NULL)
1222 		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1223 	if (sc->verbuf_tag != NULL)
1224 		bus_dma_tag_destroy(sc->verbuf_tag);
1225 
1226 	/*
1227 	 * Free sense buffer memory
1228 	 */
1229 	if (sc->sense_phys_addr)
1230 		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1231 	if (sc->sense_mem != NULL)
1232 		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1233 	if (sc->sense_tag != NULL)
1234 		bus_dma_tag_destroy(sc->sense_tag);
1235 
1236 	/*
1237 	 * Free chain frame memory
1238 	 */
1239 	if (sc->chain_frame_phys_addr)
1240 		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1241 	if (sc->chain_frame_mem != NULL)
1242 		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1243 	if (sc->chain_frame_tag != NULL)
1244 		bus_dma_tag_destroy(sc->chain_frame_tag);
1245 
1246 	/*
1247 	 * Free IO Request memory
1248 	 */
1249 	if (sc->io_request_phys_addr)
1250 		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1251 	if (sc->io_request_mem != NULL)
1252 		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1253 	if (sc->io_request_tag != NULL)
1254 		bus_dma_tag_destroy(sc->io_request_tag);
1255 
1256 	/*
1257 	 * Free Reply Descriptor memory
1258 	 */
1259 	if (sc->reply_desc_phys_addr)
1260 		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1261 	if (sc->reply_desc_mem != NULL)
1262 		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1263 	if (sc->reply_desc_tag != NULL)
1264 		bus_dma_tag_destroy(sc->reply_desc_tag);
1265 
1266 	/*
1267 	 * Free event detail memory
1268 	 */
1269 	if (sc->evt_detail_phys_addr)
1270 		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1271 	if (sc->evt_detail_mem != NULL)
1272 		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1273 	if (sc->evt_detail_tag != NULL)
1274 		bus_dma_tag_destroy(sc->evt_detail_tag);
1275 
1276 	/*
1277 	 * Free PD info memory
1278 	 */
1279 	if (sc->pd_info_phys_addr)
1280 		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
1281 	if (sc->pd_info_mem != NULL)
1282 		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
1283 	if (sc->pd_info_tag != NULL)
1284 		bus_dma_tag_destroy(sc->pd_info_tag);
1285 
1286 	/*
1287 	 * Free MFI frames
1288 	 */
1289 	if (sc->mfi_cmd_list) {
1290 		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1291 			mfi_cmd = sc->mfi_cmd_list[i];
1292 			mrsas_free_frame(sc, mfi_cmd);
1293 		}
1294 	}
1295 	if (sc->mficmd_frame_tag != NULL)
1296 		bus_dma_tag_destroy(sc->mficmd_frame_tag);
1297 
1298 	/*
1299 	 * Free MPT internal command list
1300 	 */
1301 	max_fw_cmds = sc->max_fw_cmds;
1302 	if (sc->mpt_cmd_list) {
1303 		for (i = 0; i < max_fw_cmds; i++) {
1304 			mpt_cmd = sc->mpt_cmd_list[i];
1305 			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1306 			free(sc->mpt_cmd_list[i], M_MRSAS);
1307 		}
1308 		free(sc->mpt_cmd_list, M_MRSAS);
1309 		sc->mpt_cmd_list = NULL;
1310 	}
1311 	/*
1312 	 * Free MFI internal command list
1313 	 */
1314 
1315 	if (sc->mfi_cmd_list) {
1316 		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1317 			free(sc->mfi_cmd_list[i], M_MRSAS);
1318 		}
1319 		free(sc->mfi_cmd_list, M_MRSAS);
1320 		sc->mfi_cmd_list = NULL;
1321 	}
1322 	/*
1323 	 * Free request descriptor memory
1324 	 */
1325 	free(sc->req_desc, M_MRSAS);
1326 	sc->req_desc = NULL;
1327 
1328 	/*
1329 	 * Destroy parent tag
1330 	 */
1331 	if (sc->mrsas_parent_tag != NULL)
1332 		bus_dma_tag_destroy(sc->mrsas_parent_tag);
1333 
1334 	/*
1335 	 * Free ctrl_info memory
1336 	 */
1337 	if (sc->ctrl_info != NULL)
1338 		free(sc->ctrl_info, M_MRSAS);
1339 }
1340 
1341 /*
1342  * mrsas_teardown_intr:	Teardown interrupt
1343  * input:				Adapter instance soft state
1344  *
1345  * This function is called from mrsas_detach() to teardown and release bus
1346  * interrupt resourse.
1347  */
1348 void
1349 mrsas_teardown_intr(struct mrsas_softc *sc)
1350 {
1351 	int i;
1352 
1353 	if (!sc->msix_enable) {
1354 		if (sc->intr_handle[0])
1355 			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1356 		if (sc->mrsas_irq[0] != NULL)
1357 			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1358 			    sc->irq_id[0], sc->mrsas_irq[0]);
1359 		sc->intr_handle[0] = NULL;
1360 	} else {
1361 		for (i = 0; i < sc->msix_vectors; i++) {
1362 			if (sc->intr_handle[i])
1363 				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1364 				    sc->intr_handle[i]);
1365 
1366 			if (sc->mrsas_irq[i] != NULL)
1367 				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1368 				    sc->irq_id[i], sc->mrsas_irq[i]);
1369 
1370 			sc->intr_handle[i] = NULL;
1371 		}
1372 		pci_release_msi(sc->mrsas_dev);
1373 	}
1374 
1375 }
1376 
1377 /*
1378  * mrsas_suspend:	Suspend entry point
1379  * input:			Device struct pointer
1380  *
1381  * This function is the entry point for system suspend from the OS.
1382  */
1383 static int
1384 mrsas_suspend(device_t dev)
1385 {
1386 	/* This will be filled when the driver will have hibernation support */
1387 	return (0);
1388 }
1389 
1390 /*
1391  * mrsas_resume:	Resume entry point
1392  * input:			Device struct pointer
1393  *
1394  * This function is the entry point for system resume from the OS.
1395  */
1396 static int
1397 mrsas_resume(device_t dev)
1398 {
1399 	/* This will be filled when the driver will have hibernation support */
1400 	return (0);
1401 }
1402 
1403 /**
1404  * mrsas_get_softc_instance:    Find softc instance based on cmd type
1405  *
1406  * This function will return softc instance based on cmd type.
1407  * In some case, application fire ioctl on required management instance and
1408  * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1409  * case, else get the softc instance from host_no provided by application in
1410  * user data.
1411  */
1412 
1413 static struct mrsas_softc *
1414 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1415 {
1416 	struct mrsas_softc *sc = NULL;
1417 	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1418 
1419 	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1420 		sc = dev->si_drv1;
1421 	} else {
1422 		/*
1423 		 * get the Host number & the softc from data sent by the
1424 		 * Application
1425 		 */
1426 		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1427 		if (sc == NULL)
1428 			printf("There is no Controller number %d\n",
1429 			    user_ioc->host_no);
1430 		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1431 			mrsas_dprint(sc, MRSAS_FAULT,
1432 			    "Invalid Controller number %d\n", user_ioc->host_no);
1433 	}
1434 
1435 	return sc;
1436 }
1437 
1438 /*
1439  * mrsas_ioctl:	IOCtl commands entry point.
1440  *
1441  * This function is the entry point for IOCtls from the OS.  It calls the
1442  * appropriate function for processing depending on the command received.
1443  */
1444 static int
1445 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
1446     struct thread *td)
1447 {
1448 	struct mrsas_softc *sc;
1449 	int ret = 0, i = 0;
1450 	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
1451 
1452 	switch (cmd) {
1453 	case MFIIO_PASSTHRU:
1454                 sc = (struct mrsas_softc *)(dev->si_drv1);
1455 		break;
1456 	default:
1457 		sc = mrsas_get_softc_instance(dev, cmd, arg);
1458 		break;
1459         }
1460 	if (!sc)
1461 		return ENOENT;
1462 
1463 	if (sc->remove_in_progress ||
1464 		(sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
1465 		mrsas_dprint(sc, MRSAS_INFO,
1466 		    "Either driver remove or shutdown called or "
1467 			"HW is in unrecoverable critical error state.\n");
1468 		return ENOENT;
1469 	}
1470 	mtx_lock_spin(&sc->ioctl_lock);
1471 	if (!sc->reset_in_progress) {
1472 		mtx_unlock_spin(&sc->ioctl_lock);
1473 		goto do_ioctl;
1474 	}
1475 	mtx_unlock_spin(&sc->ioctl_lock);
1476 	while (sc->reset_in_progress) {
1477 		i++;
1478 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1479 			mrsas_dprint(sc, MRSAS_INFO,
1480 			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1481 		}
1482 		pause("mr_ioctl", hz);
1483 	}
1484 
1485 do_ioctl:
1486 	switch (cmd) {
1487 	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1488 #ifdef COMPAT_FREEBSD32
1489 	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1490 #endif
1491 		/*
1492 		 * Decrement the Ioctl counting Semaphore before getting an
1493 		 * mfi command
1494 		 */
1495 		sema_wait(&sc->ioctl_count_sema);
1496 
1497 		ret = mrsas_passthru(sc, (void *)arg, cmd);
1498 
1499 		/* Increment the Ioctl counting semaphore value */
1500 		sema_post(&sc->ioctl_count_sema);
1501 
1502 		break;
1503 	case MRSAS_IOC_SCAN_BUS:
1504 		ret = mrsas_bus_scan(sc);
1505 		break;
1506 
1507 	case MRSAS_IOC_GET_PCI_INFO:
1508 		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
1509 		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
1510 		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
1511 		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
1512 		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
1513 		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
1514 		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
1515 		    "pci device no: %d, pci function no: %d,"
1516 		    "pci domain ID: %d\n",
1517 		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
1518 		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
1519 		ret = 0;
1520 		break;
1521 
1522 	case MFIIO_PASSTHRU:
1523 		ret = mrsas_user_command(sc, (struct mfi_ioc_passthru *)arg);
1524 		break;
1525 
1526 	default:
1527 		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1528 		ret = ENOENT;
1529 	}
1530 
1531 	return (ret);
1532 }
1533 
1534 /*
1535  * mrsas_poll:	poll entry point for mrsas driver fd
1536  *
1537  * This function is the entry point for poll from the OS.  It waits for some AEN
1538  * events to be triggered from the controller and notifies back.
1539  */
1540 static int
1541 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1542 {
1543 	struct mrsas_softc *sc;
1544 	int revents = 0;
1545 
1546 	sc = dev->si_drv1;
1547 
1548 	if (poll_events & (POLLIN | POLLRDNORM)) {
1549 		if (sc->mrsas_aen_triggered) {
1550 			revents |= poll_events & (POLLIN | POLLRDNORM);
1551 		}
1552 	}
1553 	if (revents == 0) {
1554 		if (poll_events & (POLLIN | POLLRDNORM)) {
1555 			mtx_lock(&sc->aen_lock);
1556 			sc->mrsas_poll_waiting = 1;
1557 			selrecord(td, &sc->mrsas_select);
1558 			mtx_unlock(&sc->aen_lock);
1559 		}
1560 	}
1561 	return revents;
1562 }
1563 
1564 /*
1565  * mrsas_setup_irq:	Set up interrupt
1566  * input:			Adapter instance soft state
1567  *
1568  * This function sets up interrupts as a bus resource, with flags indicating
1569  * resource permitting contemporaneous sharing and for resource to activate
1570  * atomically.
1571  */
1572 static int
1573 mrsas_setup_irq(struct mrsas_softc *sc)
1574 {
1575 	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1576 		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1577 
1578 	else {
1579 		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1580 		sc->irq_context[0].sc = sc;
1581 		sc->irq_context[0].MSIxIndex = 0;
1582 		sc->irq_id[0] = 0;
1583 		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1584 		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1585 		if (sc->mrsas_irq[0] == NULL) {
1586 			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1587 			    "interrupt\n");
1588 			return (FAIL);
1589 		}
1590 		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1591 		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1592 		    &sc->irq_context[0], &sc->intr_handle[0])) {
1593 			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1594 			    "interrupt\n");
1595 			return (FAIL);
1596 		}
1597 	}
1598 	return (0);
1599 }
1600 
1601 /*
1602  * mrsas_isr:	ISR entry point
1603  * input:		argument pointer
1604  *
1605  * This function is the interrupt service routine entry point.  There are two
1606  * types of interrupts, state change interrupt and response interrupt.  If an
1607  * interrupt is not ours, we just return.
1608  */
1609 void
1610 mrsas_isr(void *arg)
1611 {
1612 	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1613 	struct mrsas_softc *sc = irq_context->sc;
1614 	int status = 0;
1615 
1616 	if (sc->mask_interrupts)
1617 		return;
1618 
1619 	if (!sc->msix_vectors) {
1620 		status = mrsas_clear_intr(sc);
1621 		if (!status)
1622 			return;
1623 	}
1624 	/* If we are resetting, bail */
1625 	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1626 		printf(" Entered into ISR when OCR is going active. \n");
1627 		mrsas_clear_intr(sc);
1628 		return;
1629 	}
1630 	/* Process for reply request and clear response interrupt */
1631 	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1632 		mrsas_clear_intr(sc);
1633 
1634 	return;
1635 }
1636 
1637 /*
1638  * mrsas_complete_cmd:	Process reply request
1639  * input:				Adapter instance soft state
1640  *
1641  * This function is called from mrsas_isr() to process reply request and clear
1642  * response interrupt. Processing of the reply request entails walking
1643  * through the reply descriptor array for the command request  pended from
1644  * Firmware.  We look at the Function field to determine the command type and
1645  * perform the appropriate action.  Before we return, we clear the response
1646  * interrupt.
1647  */
1648 int
1649 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1650 {
1651 	Mpi2ReplyDescriptorsUnion_t *desc;
1652 	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1653 	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1654 	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
1655 	struct mrsas_mfi_cmd *cmd_mfi;
1656 	u_int8_t reply_descript_type, *sense;
1657 	u_int16_t smid, num_completed;
1658 	u_int8_t status, extStatus;
1659 	union desc_value desc_val;
1660 	PLD_LOAD_BALANCE_INFO lbinfo;
1661 	u_int32_t device_id, data_length;
1662 	int threshold_reply_count = 0;
1663 #if TM_DEBUG
1664 	MR_TASK_MANAGE_REQUEST *mr_tm_req;
1665 	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
1666 #endif
1667 
1668 	/* If we have a hardware error, not need to continue */
1669 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
1670 		return (DONE);
1671 
1672 	desc = sc->reply_desc_mem;
1673 	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1674 	    + sc->last_reply_idx[MSIxIndex];
1675 
1676 	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1677 
1678 	desc_val.word = desc->Words;
1679 	num_completed = 0;
1680 
1681 	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1682 
1683 	/* Find our reply descriptor for the command and process */
1684 	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
1685 		smid = le16toh(reply_desc->SMID);
1686 		cmd_mpt = sc->mpt_cmd_list[smid - 1];
1687 		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1688 
1689 		status = scsi_io_req->RaidContext.raid_context.status;
1690 		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
1691 		sense = cmd_mpt->sense;
1692 		data_length = scsi_io_req->DataLength;
1693 
1694 		switch (scsi_io_req->Function) {
1695 		case MPI2_FUNCTION_SCSI_TASK_MGMT:
1696 #if TM_DEBUG
1697 			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
1698 			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
1699 			    &mr_tm_req->TmRequest;
1700 			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
1701 			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
1702 #endif
1703             wakeup_one((void *)&sc->ocr_chan);
1704             break;
1705 		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
1706 			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1707 			lbinfo = &sc->load_balance_info[device_id];
1708 			/* R1 load balancing for READ */
1709 			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1710 				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1711 				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1712 			}
1713 			/* Fall thru and complete IO */
1714 		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1715 			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
1716 				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
1717 				    extStatus, le32toh(data_length), sense);
1718 				mrsas_cmd_done(sc, cmd_mpt);
1719 				mrsas_atomic_dec(&sc->fw_outstanding);
1720 			} else {
1721 				/*
1722 				 * If the peer  Raid  1/10 fast path failed,
1723 				 * mark IO as failed to the scsi layer.
1724 				 * Overwrite the current status by the failed status
1725 				 * and make sure that if any command fails,
1726 				 * driver returns fail status to CAM.
1727 				 */
1728 				cmd_mpt->cmd_completed = 1;
1729 				r1_cmd = cmd_mpt->peer_cmd;
1730 				if (r1_cmd->cmd_completed) {
1731 					if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
1732 						status = r1_cmd->io_request->RaidContext.raid_context.status;
1733 						extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
1734 						data_length = r1_cmd->io_request->DataLength;
1735 						sense = r1_cmd->sense;
1736 					}
1737 					r1_cmd->ccb_ptr = NULL;
1738 					if (r1_cmd->callout_owner) {
1739 						callout_stop(&r1_cmd->cm_callout);
1740 						r1_cmd->callout_owner  = false;
1741 					}
1742 					mrsas_release_mpt_cmd(r1_cmd);
1743 					mrsas_atomic_dec(&sc->fw_outstanding);
1744 					mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
1745 					    extStatus, le32toh(data_length), sense);
1746 					mrsas_cmd_done(sc, cmd_mpt);
1747 					mrsas_atomic_dec(&sc->fw_outstanding);
1748 				}
1749 			}
1750 			break;
1751 		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
1752 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1753 			/*
1754 			 * Make sure NOT TO release the mfi command from the called
1755 			 * function's context if it is fired with issue_polled call.
1756 			 * And also make sure that the issue_polled call should only be
1757 			 * used if INTERRUPT IS DISABLED.
1758 			 */
1759 			if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
1760 				mrsas_release_mfi_cmd(cmd_mfi);
1761 			else
1762 				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1763 			break;
1764 		}
1765 
1766 		sc->last_reply_idx[MSIxIndex]++;
1767 		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1768 			sc->last_reply_idx[MSIxIndex] = 0;
1769 
1770 		desc->Words = ~((uint64_t)0x00);	/* set it back to all
1771 							 * 0xFFFFFFFFs */
1772 		num_completed++;
1773 		threshold_reply_count++;
1774 
1775 		/* Get the next reply descriptor */
1776 		if (!sc->last_reply_idx[MSIxIndex]) {
1777 			desc = sc->reply_desc_mem;
1778 			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1779 		} else
1780 			desc++;
1781 
1782 		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1783 		desc_val.word = desc->Words;
1784 
1785 		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1786 
1787 		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1788 			break;
1789 
1790 		/*
1791 		 * Write to reply post index after completing threshold reply
1792 		 * count and still there are more replies in reply queue
1793 		 * pending to be completed.
1794 		 */
1795 		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1796 			if (sc->msix_enable) {
1797 				if (sc->msix_combined)
1798 					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1799 					    ((MSIxIndex & 0x7) << 24) |
1800 					    sc->last_reply_idx[MSIxIndex]);
1801 				else
1802 					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1803 					    sc->last_reply_idx[MSIxIndex]);
1804 			} else
1805 				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1806 				    reply_post_host_index), sc->last_reply_idx[0]);
1807 
1808 			threshold_reply_count = 0;
1809 		}
1810 	}
1811 
1812 	/* No match, just return */
1813 	if (num_completed == 0)
1814 		return (DONE);
1815 
1816 	/* Clear response interrupt */
1817 	if (sc->msix_enable) {
1818 		if (sc->msix_combined) {
1819 			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1820 			    ((MSIxIndex & 0x7) << 24) |
1821 			    sc->last_reply_idx[MSIxIndex]);
1822 		} else
1823 			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1824 			    sc->last_reply_idx[MSIxIndex]);
1825 	} else
1826 		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1827 		    reply_post_host_index), sc->last_reply_idx[0]);
1828 
1829 	return (0);
1830 }
1831 
1832 /*
1833  * mrsas_map_mpt_cmd_status:	Allocate DMAable memory.
1834  * input:						Adapter instance soft state
1835  *
1836  * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1837  * It checks the command status and maps the appropriate CAM status for the
1838  * CCB.
1839  */
1840 void
1841 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
1842     u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
1843 {
1844 	struct mrsas_softc *sc = cmd->sc;
1845 	u_int8_t *sense_data;
1846 
1847 	switch (status) {
1848 	case MFI_STAT_OK:
1849 		ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1850 		break;
1851 	case MFI_STAT_SCSI_IO_FAILED:
1852 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1853 		ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1854 		sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
1855 		if (sense_data) {
1856 			/* For now just copy 18 bytes back */
1857 			memcpy(sense_data, sense, 18);
1858 			ccb_ptr->csio.sense_len = 18;
1859 			ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1860 		}
1861 		break;
1862 	case MFI_STAT_LD_OFFLINE:
1863 	case MFI_STAT_DEVICE_NOT_FOUND:
1864 		if (ccb_ptr->ccb_h.target_lun)
1865 			ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1866 		else
1867 			ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1868 		break;
1869 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1870 		ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1871 		break;
1872 	default:
1873 		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1874 		ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1875 		ccb_ptr->csio.scsi_status = status;
1876 	}
1877 	return;
1878 }
1879 
1880 /*
1881  * mrsas_alloc_mem:	Allocate DMAable memory
1882  * input:			Adapter instance soft state
1883  *
1884  * This function creates the parent DMA tag and allocates DMAable memory. DMA
1885  * tag describes constraints of DMA mapping. Memory allocated is mapped into
1886  * Kernel virtual address. Callback argument is physical memory address.
1887  */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
		evt_detail_size, count, pd_info_size;

	/*
	 * Allocate parent DMA tag.  All per-buffer tags below are derived
	 * from it; most of them restrict buffers to the low 32-bit address
	 * space because the corresponding FW fields are 32 bits wide.
	 *
	 * NOTE(review): on any failure below, previously created tags and
	 * buffers are not released here — presumably the attach failure
	 * path calls the driver's free routine; confirm against the caller.
	 */
	if (bus_dma_tag_create(
	    bus_get_dma_tag(sc->mrsas_dev),	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    BUS_SPACE_UNRESTRICTED,	/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames (16-byte aligned, size computed by
	 * mrsas_init_adapter() from max_fw_cmds).
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	/* One reply queue per MSI-X vector; one when MSI-X is disabled. */
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array.
	 * NOTE(review): unlike the other buffers this one is not bzero'd
	 * here — presumably it is initialized elsewhere before use; confirm.
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array.  Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for PD INFO structure
	 */
	pd_info_size = sizeof(struct mrsas_pd_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    pd_info_size,
	    1,
	    pd_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->pd_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
	    BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->pd_info_mem, pd_info_size);
	if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
	    sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
	    &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  Unlike the tags above this one maps
	 * caller buffers at I/O time, so it takes the full 64-bit address
	 * range and uses busdma_lock_mutex/io_lock for deferred-load
	 * serialization.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    maxphys,
	    sc->max_num_sge,		/* nsegments */
	    maxphys,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}
2148 
2149 /*
2150  * mrsas_addr_cb:	Callback function of bus_dmamap_load()
2151  * input:			callback argument, machine dependent type
2152  * 					that describes DMA segments, number of segments, error code
2153  *
2154  * This function is for the driver to receive mapping information resultant of
2155  * the bus_dmamap_load(). The information is actually not being used, but the
2156  * address is saved anyway.
2157  */
2158 void
2159 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2160 {
2161 	bus_addr_t *addr;
2162 
2163 	addr = arg;
2164 	*addr = segs[0].ds_addr;
2165 }
2166 
2167 /*
2168  * mrsas_setup_raidmap:	Set up RAID map.
2169  * input:				Adapter instance soft state
2170  *
2171  * Allocate DMA memory for the RAID maps and perform setup.
2172  */
2173 static int
2174 mrsas_setup_raidmap(struct mrsas_softc *sc)
2175 {
2176 	int i;
2177 
2178 	for (i = 0; i < 2; i++) {
2179 		sc->ld_drv_map[i] =
2180 		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2181 		/* Do Error handling */
2182 		if (!sc->ld_drv_map[i]) {
2183 			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
2184 
2185 			if (i == 1)
2186 				free(sc->ld_drv_map[0], M_MRSAS);
2187 			/* ABORT driver initialization */
2188 			goto ABORT;
2189 		}
2190 	}
2191 
2192 	for (int i = 0; i < 2; i++) {
2193 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2194 		    4, 0,
2195 		    BUS_SPACE_MAXADDR_32BIT,
2196 		    BUS_SPACE_MAXADDR,
2197 		    NULL, NULL,
2198 		    sc->max_map_sz,
2199 		    1,
2200 		    sc->max_map_sz,
2201 		    BUS_DMA_ALLOCNOW,
2202 		    NULL, NULL,
2203 		    &sc->raidmap_tag[i])) {
2204 			device_printf(sc->mrsas_dev,
2205 			    "Cannot allocate raid map tag.\n");
2206 			return (ENOMEM);
2207 		}
2208 		if (bus_dmamem_alloc(sc->raidmap_tag[i],
2209 		    (void **)&sc->raidmap_mem[i],
2210 		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2211 			device_printf(sc->mrsas_dev,
2212 			    "Cannot allocate raidmap memory.\n");
2213 			return (ENOMEM);
2214 		}
2215 		bzero(sc->raidmap_mem[i], sc->max_map_sz);
2216 
2217 		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2218 		    sc->raidmap_mem[i], sc->max_map_sz,
2219 		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2220 		    BUS_DMA_NOWAIT)) {
2221 			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2222 			return (ENOMEM);
2223 		}
2224 		if (!sc->raidmap_mem[i]) {
2225 			device_printf(sc->mrsas_dev,
2226 			    "Cannot allocate memory for raid map.\n");
2227 			return (ENOMEM);
2228 		}
2229 	}
2230 
2231 	if (!mrsas_get_map_info(sc))
2232 		mrsas_sync_map_info(sc);
2233 
2234 	return (0);
2235 
2236 ABORT:
2237 	return (1);
2238 }
2239 
2240 /**
2241  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
2242  * @sc:				Adapter soft state
2243  *
2244  * Return 0 on success.
2245  */
2246 void
2247 megasas_setup_jbod_map(struct mrsas_softc *sc)
2248 {
2249 	int i;
2250 	uint32_t pd_seq_map_sz;
2251 
2252 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2253 	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2254 
2255 	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2256 		sc->use_seqnum_jbod_fp = 0;
2257 		return;
2258 	}
2259 	if (sc->jbodmap_mem[0])
2260 		goto skip_alloc;
2261 
2262 	for (i = 0; i < 2; i++) {
2263 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2264 		    4, 0,
2265 		    BUS_SPACE_MAXADDR_32BIT,
2266 		    BUS_SPACE_MAXADDR,
2267 		    NULL, NULL,
2268 		    pd_seq_map_sz,
2269 		    1,
2270 		    pd_seq_map_sz,
2271 		    BUS_DMA_ALLOCNOW,
2272 		    NULL, NULL,
2273 		    &sc->jbodmap_tag[i])) {
2274 			device_printf(sc->mrsas_dev,
2275 			    "Cannot allocate jbod map tag.\n");
2276 			return;
2277 		}
2278 		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2279 		    (void **)&sc->jbodmap_mem[i],
2280 		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2281 			device_printf(sc->mrsas_dev,
2282 			    "Cannot allocate jbod map memory.\n");
2283 			return;
2284 		}
2285 		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2286 
2287 		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2288 		    sc->jbodmap_mem[i], pd_seq_map_sz,
2289 		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2290 		    BUS_DMA_NOWAIT)) {
2291 			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2292 			return;
2293 		}
2294 		if (!sc->jbodmap_mem[i]) {
2295 			device_printf(sc->mrsas_dev,
2296 			    "Cannot allocate memory for jbod map.\n");
2297 			sc->use_seqnum_jbod_fp = 0;
2298 			return;
2299 		}
2300 	}
2301 
2302 skip_alloc:
2303 	if (!megasas_sync_pd_seq_num(sc, false) &&
2304 	    !megasas_sync_pd_seq_num(sc, true))
2305 		sc->use_seqnum_jbod_fp = 1;
2306 	else
2307 		sc->use_seqnum_jbod_fp = 0;
2308 
2309 	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2310 }
2311 
2312 /*
2313  * mrsas_init_fw:	Initialize Firmware
2314  * input:			Adapter soft state
2315  *
2316  * Calls transition_to_ready() to make sure Firmware is in operational state and
2317  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
2318  * issues internal commands to get the controller info after the IOC_INIT
2319  * command response is received by Firmware.  Note:  code relating to
2320  * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2321  * is left here as placeholder.
2322  */
2323 static int
2324 mrsas_init_fw(struct mrsas_softc *sc)
2325 {
2326 
2327 	int ret, loop, ocr = 0;
2328 	u_int32_t max_sectors_1;
2329 	u_int32_t max_sectors_2;
2330 	u_int32_t tmp_sectors;
2331 	u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
2332 	int msix_enable = 0;
2333 	int fw_msix_count = 0;
2334 	int i, j;
2335 
2336 	/* Make sure Firmware is ready */
2337 	ret = mrsas_transition_to_ready(sc, ocr);
2338 	if (ret != SUCCESS) {
2339 		return (ret);
2340 	}
2341 	if (sc->is_ventura || sc->is_aero) {
2342 		scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
2343 #if VD_EXT_DEBUG
2344 		device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
2345 #endif
2346 		sc->maxRaidMapSize = ((scratch_pad_3 >>
2347 		    MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
2348 		    MR_MAX_RAID_MAP_SIZE_MASK);
2349 	}
2350 	/* MSI-x index 0- reply post host index register */
2351 	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2352 	/* Check if MSI-X is supported while in ready state */
2353 	msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2354 
2355 	if (msix_enable) {
2356 		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2357 		    outbound_scratch_pad_2));
2358 
2359 		/* Check max MSI-X vectors */
2360 		if (sc->device_id == MRSAS_TBOLT) {
2361 			sc->msix_vectors = (scratch_pad_2
2362 			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2363 			fw_msix_count = sc->msix_vectors;
2364 		} else {
2365 			/* Invader/Fury supports 96 MSI-X vectors */
2366 			sc->msix_vectors = ((scratch_pad_2
2367 			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2368 			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2369 			fw_msix_count = sc->msix_vectors;
2370 
2371 			if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
2372 				((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
2373 				sc->msix_combined = true;
2374 			/*
2375 			 * Save 1-15 reply post index
2376 			 * address to local memory Index 0
2377 			 * is already saved from reg offset
2378 			 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
2379 			 */
2380 			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2381 			    loop++) {
2382 				sc->msix_reg_offset[loop] =
2383 				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2384 				    (loop * 0x10);
2385 			}
2386 		}
2387 
2388 		/* Don't bother allocating more MSI-X vectors than cpus */
2389 		sc->msix_vectors = min(sc->msix_vectors,
2390 		    mp_ncpus);
2391 
2392 		/* Allocate MSI-x vectors */
2393 		if (mrsas_allocate_msix(sc) == SUCCESS)
2394 			sc->msix_enable = 1;
2395 		else
2396 			sc->msix_enable = 0;
2397 
2398 		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2399 		    "Online CPU %d Current MSIX <%d>\n",
2400 		    fw_msix_count, mp_ncpus, sc->msix_vectors);
2401 	}
2402 	/*
2403      * MSI-X host index 0 is common for all adapter.
2404      * It is used for all MPT based Adapters.
2405 	 */
2406 	if (sc->msix_combined) {
2407 		sc->msix_reg_offset[0] =
2408 		    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
2409 	}
2410 	if (mrsas_init_adapter(sc) != SUCCESS) {
2411 		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2412 		return (1);
2413 	}
2414 
2415 	if (sc->is_ventura || sc->is_aero) {
2416 		scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2417 		    outbound_scratch_pad_4));
2418 		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
2419 			sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);
2420 
2421 		device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
2422 	}
2423 
2424 	/* Allocate internal commands for pass-thru */
2425 	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2426 		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2427 		return (1);
2428 	}
2429 	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2430 	if (!sc->ctrl_info) {
2431 		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2432 		return (1);
2433 	}
2434 	/*
2435 	 * Get the controller info from FW, so that the MAX VD support
2436 	 * availability can be decided.
2437 	 */
2438 	if (mrsas_get_ctrl_info(sc)) {
2439 		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2440 		return (1);
2441 	}
2442 	sc->secure_jbod_support =
2443 	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2444 
2445 	if (sc->secure_jbod_support)
2446 		device_printf(sc->mrsas_dev, "FW supports SED \n");
2447 
2448 	if (sc->use_seqnum_jbod_fp)
2449 		device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2450 
2451 	if (sc->support_morethan256jbod)
2452 		device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n");
2453 
2454 	if (mrsas_setup_raidmap(sc) != SUCCESS) {
2455 		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2456 		    "There seems to be some problem in the controller\n"
2457 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
2458 	}
2459 	megasas_setup_jbod_map(sc);
2460 
2461 	memset(sc->target_list, 0,
2462 		MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
2463 	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
2464 		sc->target_list[i].target_id = 0xffff;
2465 
2466 	/* For pass-thru, get PD/LD list and controller info */
2467 	memset(sc->pd_list, 0,
2468 	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2469 	if (mrsas_get_pd_list(sc) != SUCCESS) {
2470 		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2471 		return (1);
2472 	}
2473 	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2474 	if (mrsas_get_ld_list(sc) != SUCCESS) {
2475 		device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2476 		return (1);
2477 	}
2478 
2479 	if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
2480 		sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
2481 						MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
2482 		if (!sc->streamDetectByLD) {
2483 			device_printf(sc->mrsas_dev,
2484 				"unable to allocate stream detection for pool of LDs\n");
2485 			return (1);
2486 		}
2487 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
2488 			sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
2489 			if (!sc->streamDetectByLD[i]) {
2490 				device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
2491 				for (j = 0; j < i; ++j)
2492 					free(sc->streamDetectByLD[j], M_MRSAS);
2493 				free(sc->streamDetectByLD, M_MRSAS);
2494 				sc->streamDetectByLD = NULL;
2495 				return (1);
2496 			}
2497 			memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
2498 			sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
2499 		}
2500 	}
2501 
2502 	/*
2503 	 * Compute the max allowed sectors per IO: The controller info has
2504 	 * two limits on max sectors. Driver should use the minimum of these
2505 	 * two.
2506 	 *
2507 	 * 1 << stripe_sz_ops.min = max sectors per strip
2508 	 *
2509 	 * Note that older firmwares ( < FW ver 30) didn't report information to
2510 	 * calculate max_sectors_1. So the number ended up as zero always.
2511 	 */
2512 	tmp_sectors = 0;
2513 	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2514 	    sc->ctrl_info->max_strips_per_io;
2515 	max_sectors_2 = sc->ctrl_info->max_request_size;
2516 	tmp_sectors = min(max_sectors_1, max_sectors_2);
2517 	sc->max_sectors_per_req = (sc->max_num_sge - 1) * MRSAS_PAGE_SIZE / 512;
2518 
2519 	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2520 		sc->max_sectors_per_req = tmp_sectors;
2521 
2522 	sc->disableOnlineCtrlReset =
2523 	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2524 	sc->UnevenSpanSupport =
2525 	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2526 	if (sc->UnevenSpanSupport) {
2527 		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2528 		    sc->UnevenSpanSupport);
2529 
2530 		if (MR_ValidateMapInfo(sc))
2531 			sc->fast_path_io = 1;
2532 		else
2533 			sc->fast_path_io = 0;
2534 	}
2535 
2536 	device_printf(sc->mrsas_dev, "max_fw_cmds: %u  max_scsi_cmds: %u\n",
2537 		sc->max_fw_cmds, sc->max_scsi_cmds);
2538 	return (0);
2539 }
2540 
2541 /*
2542  * mrsas_init_adapter:	Initializes the adapter/controller
2543  * input:				Adapter soft state
2544  *
2545  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2546  * ROC/controller.  The FW register is read to determined the number of
2547  * commands that is supported.  All memory allocations for IO is based on
2548  * max_cmd.  Appropriate calculations are performed in this function.
2549  */
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t scratch_pad_2;
	int ret;
	int i = 0;

	/* Read FW status register */
	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	/* FW advertises its supported command count in the low status bits. */
	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds - 1;
	/* Reserve MRSAS_MAX_MFI_CMDS slots for internal (MFI) commands. */
	sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;

	/* Determine allocation size of command frames */
	/* Depth = (max_fw_cmds + 1) rounded up to a multiple of 16, doubled. */
	sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
	    (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
	scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
	    outbound_scratch_pad_2));

	mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x,"
	    "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x,"
	    "sc->io_frames_alloc_sz 0x%x\n", __func__,
	    sc->reply_q_depth, sc->request_alloc_sz,
	    sc->reply_alloc_sz, sc->io_frames_alloc_sz);

	/*
	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
	 * Firmware support extended IO chain frame which is 4 time more
	 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
	 * 1K 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
	 */
	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_1MB_IO;
	else
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_256K_IO;

	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
	/* SGEs that fit in the main message frame after the fixed header. */
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
	/* -2 accounts for the chain element and the reserved last entry. */
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	mrsas_dprint(sc, MRSAS_INFO,
	    "max sge: 0x%x, max chain frame size: 0x%x, "
	    "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n",
	    sc->max_num_sge,
	    sc->max_chain_frame_sz, sc->max_fw_cmds,
	    sc->chain_frames_alloc_sz);

	/* Used for pass thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION)) / 16;

	/* One reply queue per MSI-X vector; a single queue otherwise. */
	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;

	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	/* Allocate DMA memory, MPT command pool, then issue IOC INIT to FW. */
	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return (ret);

	return (0);
}
2636 
2637 /*
2638  * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2639  * input:				Adapter soft state
2640  *
2641  * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2642  */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/* Allocate IOC INIT command */
	/* 1024 bytes of MFI frame headroom followed by the MPI2 IOC INIT request. */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	/* Record the bus address in ioc_init_phys_mem via the load callback. */
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}
2678 
2679 /*
 * mrsas_free_ioc_cmd:	Frees memory of the IOC Init command
2681  * input:				Adapter soft state
2682  *
2683  * Deallocates memory of the IOC Init cmd.
2684  */
2685 void
2686 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2687 {
2688 	if (sc->ioc_init_phys_mem)
2689 		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2690 	if (sc->ioc_init_mem != NULL)
2691 		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2692 	if (sc->ioc_init_tag != NULL)
2693 		bus_dma_tag_destroy(sc->ioc_init_tag);
2694 }
2695 
2696 /*
2697  * mrsas_ioc_init:	Sends IOC Init command to FW
2698  * input:			Adapter soft state
2699  *
2700  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2701  */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	/*
	 * Probe scratch pad 2 for SYNC_CACHE support unless the user has
	 * blocked it via the block_sync_cache tunable.
	 */
	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	/*
	 * The IOC INIT message lives 1KB into the DMA buffer; the MFI init
	 * frame that points at it sits at offset 0 (see phys_addr below).
	 * All multi-byte fields are converted to little-endian for the FW.
	 */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = htole16(MPI2_VERSION);
	IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION);
	/* Frame size is expressed in 32-bit words, hence the divide by 4. */
	IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
	IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth);
	IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr);
	IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr);
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
	IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;

	/* Build the MFI init frame that carries the IOC INIT message. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	/* 0xFF means "no status yet"; polled below until FW overwrites it. */
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* driver support Extended MSIX */
	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		/*
		 * NOTE(review): driver_ver_lo is stored without htole32()
		 * while the other FW-visible fields are converted — confirm
		 * whether this is intentional on big-endian hosts.
		 */
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	/* Advertise driver capabilities to FW. */
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;

	init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg);

	/* Point the init frame at the IOC INIT message placed 1KB in. */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr);
	init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t));

	/* Fire the init frame as an MFA-type request descriptor. */
	req_desc.addr.Words = htole64((bus_addr_t)sc->ioc_init_phys_mem);
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	/* 0 = success; 0xFF = still pending (timeout); anything else = FW error. */
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* Aero controllers additionally report atomic descriptor support. */
	if (sc->is_aero) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->atomic_desc_support = (scratch_pad_2 &
			MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
		device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
			sc->atomic_desc_support ? "Yes" : "No");
	}

	/* The init buffer is single-use; release it regardless of outcome. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2810 
2811 /*
2812  * mrsas_alloc_mpt_cmds:	Allocates the command packets
2813  * input:					Adapter instance soft state
2814  *
2815  * This function allocates the internal commands for IOs. Each command that is
2816  * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2817  * array is allocated with mrsas_mpt_cmd context.  The free commands are
2818  * maintained in a linked list (cmd pool). SMID value range is from 1 to
2819  * max_fw_cmds.
2820  */
2821 int
2822 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2823 {
2824 	int i, j;
2825 	u_int32_t max_fw_cmds, count;
2826 	struct mrsas_mpt_cmd *cmd;
2827 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2828 	u_int32_t offset, chain_offset, sense_offset;
2829 	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2830 	u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2831 
2832 	max_fw_cmds = sc->max_fw_cmds;
2833 
2834 	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2835 	if (!sc->req_desc) {
2836 		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2837 		return (ENOMEM);
2838 	}
2839 	memset(sc->req_desc, 0, sc->request_alloc_sz);
2840 
2841 	/*
2842 	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2843 	 * Allocate the dynamic array first and then allocate individual
2844 	 * commands.
2845 	 */
2846 	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
2847 	    M_MRSAS, M_NOWAIT);
2848 	if (!sc->mpt_cmd_list) {
2849 		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2850 		return (ENOMEM);
2851 	}
2852 	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
2853 	for (i = 0; i < max_fw_cmds; i++) {
2854 		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2855 		    M_MRSAS, M_NOWAIT);
2856 		if (!sc->mpt_cmd_list[i]) {
2857 			for (j = 0; j < i; j++)
2858 				free(sc->mpt_cmd_list[j], M_MRSAS);
2859 			free(sc->mpt_cmd_list, M_MRSAS);
2860 			sc->mpt_cmd_list = NULL;
2861 			return (ENOMEM);
2862 		}
2863 	}
2864 
2865 	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2866 	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2867 	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2868 	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2869 	sense_base = (u_int8_t *)sc->sense_mem;
2870 	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2871 	for (i = 0; i < max_fw_cmds; i++) {
2872 		cmd = sc->mpt_cmd_list[i];
2873 		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2874 		chain_offset = sc->max_chain_frame_sz * i;
2875 		sense_offset = MRSAS_SENSE_LEN * i;
2876 		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2877 		cmd->index = i + 1;
2878 		cmd->ccb_ptr = NULL;
2879 		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2880 		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
2881 		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2882 		cmd->sc = sc;
2883 		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2884 		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2885 		cmd->io_request_phys_addr = io_req_base_phys + offset;
2886 		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2887 		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2888 		cmd->sense = sense_base + sense_offset;
2889 		cmd->sense_phys_addr = sense_base_phys + sense_offset;
2890 		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2891 			return (FAIL);
2892 		}
2893 		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2894 	}
2895 
2896 	/* Initialize reply descriptor array to 0xFFFFFFFF */
2897 	reply_desc = sc->reply_desc_mem;
2898 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2899 	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2900 		reply_desc->Words = MRSAS_ULONG_MAX;
2901 	}
2902 	return (0);
2903 }
2904 
2905 /*
2906  * mrsas_write_64bit_req_dsc:	Writes 64 bit request descriptor to FW
2907  * input:			Adapter softstate
2908  * 				request descriptor address low
2909  * 				request descriptor address high
2910  */
2911 void
2912 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2913     u_int32_t req_desc_hi)
2914 {
2915 	mtx_lock(&sc->pci_lock);
2916 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2917 	    le32toh(req_desc_lo));
2918 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2919 	    le32toh(req_desc_hi));
2920 	mtx_unlock(&sc->pci_lock);
2921 }
2922 
2923 /*
2924  * mrsas_fire_cmd:	Sends command to FW
2925  * input:		Adapter softstate
2926  * 			request descriptor address low
2927  * 			request descriptor address high
2928  *
2929  * This functions fires the command to Firmware by writing to the
2930  * inbound_low_queue_port and inbound_high_queue_port.
2931  */
2932 void
2933 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2934     u_int32_t req_desc_hi)
2935 {
2936 	if (sc->atomic_desc_support)
2937 		mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
2938 		    le32toh(req_desc_lo));
2939 	else
2940 		mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
2941 }
2942 
2943 /*
2944  * mrsas_transition_to_ready:  Move FW to Ready state input:
2945  * Adapter instance soft state
2946  *
2947  * During the initialization, FW passes can potentially be in any one of several
2948  * possible states. If the FW in operational, waiting-for-handshake states,
2949  * driver must take steps to bring it to ready state. Otherwise, it has to
2950  * wait for the ready state.
2951  */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state __unused;
	u_int32_t abs_state, curr_abs_state;

	/* FW state lives in the low bits of the outbound scratch pad. */
	val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	/*
	 * Keep nudging the FW toward READY.  Each pass: capture the full
	 * (absolute) state, take the state-specific action, then wait up to
	 * max_wait seconds for the absolute state to change.
	 */
	while (fw_state != MFI_STATE_READY) {
		abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* During OCR a FAULTed FW is waited on; at attach it is fatal. */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* Acknowledge the boot message via the doorbell. */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Poll (1ms granularity) until the doorbell's busy bit clears. */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		/* The remaining transitional states just need to be waited out. */
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
3057 
3058 /*
3059  * mrsas_get_mfi_cmd:	Get a cmd from free command pool
3060  * input:				Adapter soft state
3061  *
3062  * This function removes an MFI command from the command list.
3063  */
3064 struct mrsas_mfi_cmd *
3065 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
3066 {
3067 	struct mrsas_mfi_cmd *cmd = NULL;
3068 
3069 	mtx_lock(&sc->mfi_cmd_pool_lock);
3070 	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
3071 		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
3072 		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
3073 	}
3074 	mtx_unlock(&sc->mfi_cmd_pool_lock);
3075 
3076 	return cmd;
3077 }
3078 
3079 /*
3080  * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
3081  * input:				Adapter Context.
3082  *
3083  * This function will check FW status register and flag do_timeout_reset flag.
3084  * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
3085  * trigger reset.
3086  */
3087 static void
3088 mrsas_ocr_thread(void *arg)
3089 {
3090 	struct mrsas_softc *sc;
3091 	u_int32_t fw_status, fw_state;
3092 	u_int8_t tm_target_reset_failed = 0;
3093 
3094 	sc = (struct mrsas_softc *)arg;
3095 
3096 	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
3097 	sc->ocr_thread_active = 1;
3098 	mtx_lock(&sc->sim_lock);
3099 	for (;;) {
3100 		/* Sleep for 1 second and check the queue status */
3101 		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
3102 		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
3103 		if (sc->remove_in_progress ||
3104 		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3105 			mrsas_dprint(sc, MRSAS_OCR,
3106 			    "Exit due to %s from %s\n",
3107 			    sc->remove_in_progress ? "Shutdown" :
3108 			    "Hardware critical error", __func__);
3109 			break;
3110 		}
3111 		fw_status = mrsas_read_reg_with_retries(sc,
3112 		    offsetof(mrsas_reg_set, outbound_scratch_pad));
3113 		fw_state = fw_status & MFI_STATE_MASK;
3114 		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
3115 			mrsas_atomic_read(&sc->target_reset_outstanding)) {
3116 			/* First, freeze further IOs to come to the SIM */
3117 			mrsas_xpt_freeze(sc);
3118 
3119 			/* If this is an IO timeout then go for target reset */
3120 			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
3121 				device_printf(sc->mrsas_dev, "Initiating Target RESET "
3122 				    "because of SCSI IO timeout!\n");
3123 
3124 				/* Let the remaining IOs to complete */
3125 				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
3126 				      "mrsas_reset_targets", 5 * hz);
3127 
3128 				/* Try to reset the target device */
3129 				if (mrsas_reset_targets(sc) == FAIL)
3130 					tm_target_reset_failed = 1;
3131 			}
3132 
3133 			/* If this is a DCMD timeout or FW fault,
3134 			 * then go for controller reset
3135 			 */
3136 			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
3137 			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
3138 				if (tm_target_reset_failed)
3139 					device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
3140 					    "TM FAILURE!\n");
3141 				else
3142 					device_printf(sc->mrsas_dev, "Initiaiting OCR "
3143 						"because of %s!\n", sc->do_timedout_reset ?
3144 						"DCMD IO Timeout" : "FW fault");
3145 
3146 				mtx_lock_spin(&sc->ioctl_lock);
3147 				sc->reset_in_progress = 1;
3148 				mtx_unlock_spin(&sc->ioctl_lock);
3149 				sc->reset_count++;
3150 
3151 				/*
3152 				 * Wait for the AEN task to be completed if it is running.
3153 				 */
3154 				mtx_unlock(&sc->sim_lock);
3155 				taskqueue_drain(sc->ev_tq, &sc->ev_task);
3156 				mtx_lock(&sc->sim_lock);
3157 
3158 				taskqueue_block(sc->ev_tq);
3159 				/* Try to reset the controller */
3160 				mrsas_reset_ctrl(sc, sc->do_timedout_reset);
3161 
3162 				sc->do_timedout_reset = 0;
3163 				sc->reset_in_progress = 0;
3164 				tm_target_reset_failed = 0;
3165 				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
3166 				memset(sc->target_reset_pool, 0,
3167 				    sizeof(sc->target_reset_pool));
3168 				taskqueue_unblock(sc->ev_tq);
3169 			}
3170 
3171 			/* Now allow IOs to come to the SIM */
3172 			 mrsas_xpt_release(sc);
3173 		}
3174 	}
3175 	mtx_unlock(&sc->sim_lock);
3176 	sc->ocr_thread_active = 0;
3177 	mrsas_kproc_exit(0);
3178 }
3179 
3180 /*
3181  * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
3182  * input:					Adapter Context.
3183  *
3184  * This function will clear reply descriptor so that post OCR driver and FW will
3185  * lost old history.
3186  */
3187 void
3188 mrsas_reset_reply_desc(struct mrsas_softc *sc)
3189 {
3190 	int i, count;
3191 	pMpi2ReplyDescriptorsUnion_t reply_desc;
3192 
3193 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3194 	for (i = 0; i < count; i++)
3195 		sc->last_reply_idx[i] = 0;
3196 
3197 	reply_desc = sc->reply_desc_mem;
3198 	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
3199 		reply_desc->Words = MRSAS_ULONG_MAX;
3200 	}
3201 }
3202 
3203 /*
3204  * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
3205  * input:				Adapter Context.
3206  *
3207  * This function will run from thread context so that it can sleep. 1. Do not
3208  * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
3209  * to complete for 180 seconds. 3. If #2 does not find any outstanding
3210  * command Controller is in working state, so skip OCR. Otherwise, do
3211  * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
3212  * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
3213  * OCR, Re-fire Management command and move Controller to Operation state.
3214  */
3215 int
3216 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
3217 {
3218 	int retval = SUCCESS, i, j, retry = 0;
3219 	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
3220 	union ccb *ccb;
3221 	struct mrsas_mfi_cmd *mfi_cmd;
3222 	struct mrsas_mpt_cmd *mpt_cmd;
3223 	union mrsas_evt_class_locale class_locale;
3224 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3225 
3226 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3227 		device_printf(sc->mrsas_dev,
3228 		    "mrsas: Hardware critical error, returning FAIL.\n");
3229 		return FAIL;
3230 	}
3231 	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3232 	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
3233 	mrsas_disable_intr(sc);
3234 	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
3235 	    sc->mrsas_fw_fault_check_delay * hz);
3236 
3237 	/* First try waiting for commands to complete */
3238 	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
3239 		mrsas_dprint(sc, MRSAS_OCR,
3240 		    "resetting adapter from %s.\n",
3241 		    __func__);
3242 		/* Now return commands back to the CAM layer */
3243 		mtx_unlock(&sc->sim_lock);
3244 		for (i = 0; i < sc->max_fw_cmds; i++) {
3245 			mpt_cmd = sc->mpt_cmd_list[i];
3246 
3247 			if (mpt_cmd->peer_cmd) {
3248 				mrsas_dprint(sc, MRSAS_OCR,
3249 				    "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
3250 				    i, mpt_cmd, mpt_cmd->peer_cmd);
3251 			}
3252 
3253 			if (mpt_cmd->ccb_ptr) {
3254 				if (mpt_cmd->callout_owner) {
3255 					ccb = (union ccb *)(mpt_cmd->ccb_ptr);
3256 					ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3257 					mrsas_cmd_done(sc, mpt_cmd);
3258 				} else {
3259 					mpt_cmd->ccb_ptr = NULL;
3260 					mrsas_release_mpt_cmd(mpt_cmd);
3261 				}
3262 			}
3263 		}
3264 
3265 		mrsas_atomic_set(&sc->fw_outstanding, 0);
3266 
3267 		mtx_lock(&sc->sim_lock);
3268 
3269 		status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3270 		    outbound_scratch_pad));
3271 		abs_state = status_reg & MFI_STATE_MASK;
3272 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
3273 		if (sc->disableOnlineCtrlReset ||
3274 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3275 			/* Reset not supported, kill adapter */
3276 			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
3277 			mrsas_kill_hba(sc);
3278 			retval = FAIL;
3279 			goto out;
3280 		}
3281 		/* Now try to reset the chip */
3282 		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
3283 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3284 			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
3285 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3286 			    MPI2_WRSEQ_1ST_KEY_VALUE);
3287 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3288 			    MPI2_WRSEQ_2ND_KEY_VALUE);
3289 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3290 			    MPI2_WRSEQ_3RD_KEY_VALUE);
3291 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3292 			    MPI2_WRSEQ_4TH_KEY_VALUE);
3293 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3294 			    MPI2_WRSEQ_5TH_KEY_VALUE);
3295 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3296 			    MPI2_WRSEQ_6TH_KEY_VALUE);
3297 
3298 			/* Check that the diag write enable (DRWE) bit is on */
3299 			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3300 			    fusion_host_diag));
3301 			retry = 0;
3302 			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3303 				DELAY(100 * 1000);
3304 				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3305 				    fusion_host_diag));
3306 				if (retry++ == 100) {
3307 					mrsas_dprint(sc, MRSAS_OCR,
3308 					    "Host diag unlock failed!\n");
3309 					break;
3310 				}
3311 			}
3312 			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3313 				continue;
3314 
3315 			/* Send chip reset command */
3316 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3317 			    host_diag | HOST_DIAG_RESET_ADAPTER);
3318 			DELAY(3000 * 1000);
3319 
3320 			/* Make sure reset adapter bit is cleared */
3321 			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3322 			    fusion_host_diag));
3323 			retry = 0;
3324 			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3325 				DELAY(100 * 1000);
3326 				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3327 				    fusion_host_diag));
3328 				if (retry++ == 1000) {
3329 					mrsas_dprint(sc, MRSAS_OCR,
3330 					    "Diag reset adapter never cleared!\n");
3331 					break;
3332 				}
3333 			}
3334 			if (host_diag & HOST_DIAG_RESET_ADAPTER)
3335 				continue;
3336 
3337 			abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3338 			    outbound_scratch_pad)) & MFI_STATE_MASK;
3339 			retry = 0;
3340 
3341 			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3342 				DELAY(100 * 1000);
3343 				abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3344 				    outbound_scratch_pad)) & MFI_STATE_MASK;
3345 			}
3346 			if (abs_state <= MFI_STATE_FW_INIT) {
3347 				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3348 				    " state = 0x%x\n", abs_state);
3349 				continue;
3350 			}
3351 			/* Wait for FW to become ready */
3352 			if (mrsas_transition_to_ready(sc, 1)) {
3353 				mrsas_dprint(sc, MRSAS_OCR,
3354 				    "mrsas: Failed to transition controller to ready.\n");
3355 				continue;
3356 			}
3357 			mrsas_reset_reply_desc(sc);
3358 			if (mrsas_ioc_init(sc)) {
3359 				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
3360 				continue;
3361 			}
3362 			for (j = 0; j < sc->max_fw_cmds; j++) {
3363 				mpt_cmd = sc->mpt_cmd_list[j];
3364 				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3365 					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3366 					/* If not an IOCTL then release the command else re-fire */
3367 					if (!mfi_cmd->sync_cmd) {
3368 						mrsas_release_mfi_cmd(mfi_cmd);
3369 					} else {
3370 						req_desc = mrsas_get_request_desc(sc,
3371 						    mfi_cmd->cmd_id.context.smid - 1);
3372 						mrsas_dprint(sc, MRSAS_OCR,
3373 						    "Re-fire command DCMD opcode 0x%x index %d\n ",
3374 						    mfi_cmd->frame->dcmd.opcode, j);
3375 						if (!req_desc)
3376 							device_printf(sc->mrsas_dev,
3377 							    "Cannot build MPT cmd.\n");
3378 						else
3379 							mrsas_fire_cmd(sc, req_desc->addr.u.low,
3380 							    req_desc->addr.u.high);
3381 					}
3382 				}
3383 			}
3384 
3385 			/* Reset load balance info */
3386 			memset(sc->load_balance_info, 0,
3387 			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3388 
3389 			if (mrsas_get_ctrl_info(sc)) {
3390 				mrsas_kill_hba(sc);
3391 				retval = FAIL;
3392 				goto out;
3393 			}
3394 			if (!mrsas_get_map_info(sc))
3395 				mrsas_sync_map_info(sc);
3396 
3397 			megasas_setup_jbod_map(sc);
3398 
3399 			if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
3400 				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
3401 					memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
3402 					sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
3403 				}
3404 			}
3405 
3406 			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3407 			mrsas_enable_intr(sc);
3408 			sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3409 
3410 			/* Register AEN with FW for last sequence number */
3411 			class_locale.members.reserved = 0;
3412 			class_locale.members.locale = MR_EVT_LOCALE_ALL;
3413 			class_locale.members.class = MR_EVT_CLASS_DEBUG;
3414 
3415 			mtx_unlock(&sc->sim_lock);
3416 			if (mrsas_register_aen(sc, sc->last_seq_num,
3417 			    class_locale.word)) {
3418 				device_printf(sc->mrsas_dev,
3419 				    "ERROR: AEN registration FAILED from OCR !!! "
3420 				    "Further events from the controller cannot be notified."
3421 				    "Either there is some problem in the controller"
3422 				    "or the controller does not support AEN.\n"
3423 				    "Please contact to the SUPPORT TEAM if the problem persists\n");
3424 			}
3425 			mtx_lock(&sc->sim_lock);
3426 
3427 			/* Adapter reset completed successfully */
3428 			device_printf(sc->mrsas_dev, "Reset successful\n");
3429 			retval = SUCCESS;
3430 			goto out;
3431 		}
3432 		/* Reset failed, kill the adapter */
3433 		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3434 		mrsas_kill_hba(sc);
3435 		retval = FAIL;
3436 	} else {
3437 		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3438 		mrsas_enable_intr(sc);
3439 		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3440 	}
3441 out:
3442 	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3443 	mrsas_dprint(sc, MRSAS_OCR,
3444 	    "Reset Exit with %d.\n", retval);
3445 	return retval;
3446 }
3447 
3448 /*
3449  * mrsas_kill_hba:	Kill HBA when OCR is not supported
3450  * input:			Adapter Context.
3451  *
3452  * This function will kill HBA when OCR is not supported.
3453  */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter dead first so other paths stop touching it. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* Give in-flight register accesses a second to settle. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail any IOCTLs still waiting on the (now dead) FW. */
	mrsas_complete_outstanding_ioctls(sc);
}
3466 
3467 /**
3468  * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
3469  * input:			Controller softc
3470  *
3471  * Returns void
3472  */
3473 void
3474 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3475 {
3476 	int i;
3477 	struct mrsas_mpt_cmd *cmd_mpt;
3478 	struct mrsas_mfi_cmd *cmd_mfi;
3479 	u_int32_t count, MSIxIndex;
3480 
3481 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3482 	for (i = 0; i < sc->max_fw_cmds; i++) {
3483 		cmd_mpt = sc->mpt_cmd_list[i];
3484 
3485 		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3486 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3487 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3488 				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3489 					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3490 					    cmd_mpt->io_request->RaidContext.raid_context.status);
3491 			}
3492 		}
3493 	}
3494 }
3495 
3496 /*
3497  * mrsas_wait_for_outstanding:	Wait for outstanding commands
3498  * input:						Adapter Context.
3499  *
3500  * This function will wait for 180 seconds for outstanding commands to be
3501  * completed.
3502  */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;

	/* Poll once per second, up to MRSAS_RESET_WAIT_TIME seconds. */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			/* Drop sim_lock while draining completion queues. */
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
			retval = 1;
			goto out;
		}
		/* A DCMD timeout mandates a reset regardless of FW state. */
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		/* Periodically log progress and reap completions. */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
		}
		DELAY(1000 * 1000);
	}

	/* Commands still pending after the full wait: caller must reset. */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}
3561 
3562 /*
3563  * mrsas_release_mfi_cmd:	Return a cmd to free command pool
3564  * input:					Command packet for return to free cmd pool
3565  *
3566  * This function returns the MFI & MPT command to the command list.
3567  */
3568 void
3569 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
3570 {
3571 	struct mrsas_softc *sc = cmd_mfi->sc;
3572 	struct mrsas_mpt_cmd *cmd_mpt;
3573 
3574 	mtx_lock(&sc->mfi_cmd_pool_lock);
3575 	/*
3576 	 * Release the mpt command (if at all it is allocated
3577 	 * associated with the mfi command
3578 	 */
3579 	if (cmd_mfi->cmd_id.context.smid) {
3580 		mtx_lock(&sc->mpt_cmd_pool_lock);
3581 		/* Get the mpt cmd from mfi cmd frame's smid value */
3582 		cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
3583 		cmd_mpt->flags = 0;
3584 		cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
3585 		TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
3586 		mtx_unlock(&sc->mpt_cmd_pool_lock);
3587 	}
3588 	/* Release the mfi command */
3589 	cmd_mfi->ccb_ptr = NULL;
3590 	cmd_mfi->cmd_id.frame_count = 0;
3591 	TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
3592 	mtx_unlock(&sc->mfi_cmd_pool_lock);
3593 
3594 	return;
3595 }
3596 
3597 /*
3598  * mrsas_get_controller_info:	Returns FW's controller structure
3599  * input:						Adapter soft state
3600  * 								Controller information structure
3601  *
3602  * Issues an internal command (DCMD) to get the FW's controller structure. This
3603  * information is mainly used to find out the maximum IO transfer per command
3604  * supported by the FW.
3605  */
3606 static int
3607 mrsas_get_ctrl_info(struct mrsas_softc *sc)
3608 {
3609 	int retcode = 0;
3610 	u_int8_t do_ocr = 1;
3611 	struct mrsas_mfi_cmd *cmd;
3612 	struct mrsas_dcmd_frame *dcmd;
3613 
3614 	cmd = mrsas_get_mfi_cmd(sc);
3615 
3616 	if (!cmd) {
3617 		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3618 		return -ENOMEM;
3619 	}
3620 	dcmd = &cmd->frame->dcmd;
3621 
3622 	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3623 		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3624 		mrsas_release_mfi_cmd(cmd);
3625 		return -ENOMEM;
3626 	}
3627 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3628 
3629 	dcmd->cmd = MFI_CMD_DCMD;
3630 	dcmd->cmd_status = 0xFF;
3631 	dcmd->sge_count = 1;
3632 	dcmd->flags = MFI_FRAME_DIR_READ;
3633 	dcmd->timeout = 0;
3634 	dcmd->pad_0 = 0;
3635 	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info));
3636 	dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO);
3637 	dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF);
3638 	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info));
3639 
3640 	if (!sc->mask_interrupts)
3641 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
3642 	else
3643 		retcode = mrsas_issue_polled(sc, cmd);
3644 
3645 	if (retcode == ETIMEDOUT)
3646 		goto dcmd_timeout;
3647 	else {
3648 		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3649 		le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties);
3650 		le32_to_cpus(&sc->ctrl_info->adapterOperations2);
3651 		le32_to_cpus(&sc->ctrl_info->adapterOperations3);
3652 		le16_to_cpus(&sc->ctrl_info->adapterOperations4);
3653 	}
3654 
3655 	do_ocr = 0;
3656 	mrsas_update_ext_vd_details(sc);
3657 
3658 	sc->use_seqnum_jbod_fp =
3659 	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
3660 	sc->support_morethan256jbod =
3661 		sc->ctrl_info->adapterOperations4.supportPdMapTargetId;
3662 
3663 	sc->disableOnlineCtrlReset =
3664 	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
3665 
3666 dcmd_timeout:
3667 	mrsas_free_ctlr_info_cmd(sc);
3668 
3669 	if (do_ocr)
3670 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3671 
3672 	if (!sc->mask_interrupts)
3673 		mrsas_release_mfi_cmd(cmd);
3674 
3675 	return (retcode);
3676 }
3677 
3678 /*
3679  * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3680  * input:
3681  *	sc - Controller's softc
3682 */
3683 static void
3684 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3685 {
3686 	u_int32_t ventura_map_sz = 0;
3687 	sc->max256vdSupport =
3688 		sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3689 
3690 	/* Below is additional check to address future FW enhancement */
3691 	if (sc->ctrl_info->max_lds > 64)
3692 		sc->max256vdSupport = 1;
3693 
3694 	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3695 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3696 	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3697 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3698 	if (sc->max256vdSupport) {
3699 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3700 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3701 	} else {
3702 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3703 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3704 	}
3705 
3706 	if (sc->maxRaidMapSize) {
3707 		ventura_map_sz = sc->maxRaidMapSize *
3708 		    MR_MIN_MAP_SIZE;
3709 		sc->current_map_sz = ventura_map_sz;
3710 		sc->max_map_sz = ventura_map_sz;
3711 	} else {
3712 		sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3713 		    (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
3714 		sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3715 		sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3716 		if (sc->max256vdSupport)
3717 			sc->current_map_sz = sc->new_map_sz;
3718 		else
3719 			sc->current_map_sz = sc->old_map_sz;
3720 	}
3721 
3722 	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
3723 #if VD_EXT_DEBUG
3724 	device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
3725 	    sc->maxRaidMapSize);
3726 	device_printf(sc->mrsas_dev,
3727 	    "new_map_sz = 0x%x, old_map_sz = 0x%x, "
3728 	    "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
3729 	    "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
3730 	    sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
3731 	    sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
3732 #endif
3733 }
3734 
3735 /*
3736  * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3737  * input:						Adapter soft state
3738  *
3739  * Allocates DMAable memory for the controller info internal command.
3740  */
3741 int
3742 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3743 {
3744 	int ctlr_info_size;
3745 
3746 	/* Allocate get controller info command */
3747 	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3748 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3749 	    1, 0,
3750 	    BUS_SPACE_MAXADDR_32BIT,
3751 	    BUS_SPACE_MAXADDR,
3752 	    NULL, NULL,
3753 	    ctlr_info_size,
3754 	    1,
3755 	    ctlr_info_size,
3756 	    BUS_DMA_ALLOCNOW,
3757 	    NULL, NULL,
3758 	    &sc->ctlr_info_tag)) {
3759 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3760 		return (ENOMEM);
3761 	}
3762 	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3763 	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3764 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3765 		return (ENOMEM);
3766 	}
3767 	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3768 	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3769 	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3770 		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3771 		return (ENOMEM);
3772 	}
3773 	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3774 	return (0);
3775 }
3776 
3777 /*
3778  * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3779  * input:						Adapter soft state
3780  *
3781  * Deallocates memory of the get controller info cmd.
3782  */
3783 void
3784 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3785 {
3786 	if (sc->ctlr_info_phys_addr)
3787 		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3788 	if (sc->ctlr_info_mem != NULL)
3789 		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3790 	if (sc->ctlr_info_tag != NULL)
3791 		bus_dma_tag_destroy(sc->ctlr_info_tag);
3792 }
3793 
3794 /*
3795  * mrsas_issue_polled:	Issues a polling command
3796  * inputs:				Adapter soft state
3797  * 						Command packet to be issued
3798  *
3799  * This function is for posting of internal commands to Firmware.  MFI requires
3800  * the cmd_status to be set to 0xFF before posting.  The maximun wait time of
3801  * the poll response timer is 180 seconds.
3802  */
int
mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	int i, retcode = SUCCESS;

	/*
	 * FW writes its completion status back into the frame; 0xFF marks
	 * "still outstanding".  The DONT_POST flag keeps this command out of
	 * the normal reply-queue completion path.
	 */
	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* Issue the frame using inbound queue port */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (frame_hdr->cmd_status == 0xFF) {
		/* Busy-wait up to max_wait seconds in 1 ms steps. */
		for (i = 0; i < (max_wait * 1000); i++) {
			if (frame_hdr->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	/* Status still untouched after the full wait: report a timeout. */
	if (frame_hdr->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
3840 
3841 /*
3842  * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3843  * input:				Adapter soft state mfi cmd pointer
3844  *
3845  * This function is called by mrsas_issued_blocked_cmd() and
3846  * mrsas_issued_polled(), to build the MPT command and then fire the command
3847  * to Firmware.
3848  */
3849 int
3850 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3851 {
3852 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3853 
3854 	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3855 	if (!req_desc) {
3856 		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3857 		return (1);
3858 	}
3859 	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3860 
3861 	return (0);
3862 }
3863 
3864 /*
3865  * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3866  * input:				Adapter soft state mfi cmd to build
3867  *
3868  * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3869  * command and prepares the MPT command to send to Firmware.
3870  */
3871 MRSAS_REQUEST_DESCRIPTOR_UNION *
3872 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3873 {
3874 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3875 	u_int16_t index;
3876 
3877 	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3878 		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3879 		return NULL;
3880 	}
3881 	index = cmd->cmd_id.context.smid;
3882 
3883 	req_desc = mrsas_get_request_desc(sc, index - 1);
3884 	if (!req_desc)
3885 		return NULL;
3886 
3887 	req_desc->addr.Words = 0;
3888 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3889 
3890 	req_desc->SCSIIO.SMID = htole16(index);
3891 
3892 	return (req_desc);
3893 }
3894 
3895 /*
3896  * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3897  * input:						Adapter soft state mfi cmd pointer
3898  *
3899  * The MPT command and the io_request are setup as a passthru command. The SGE
3900  * chain address is set to frame_phys_addr of the MFI command.
3901  */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	/* An MPT command carries the MFI frame to FW; returns 1 if none free. */
	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Back-link so the completion path can find the MFI command again. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	/* Gen3/Ventura/Aero: clear flags on the last main-message SGE slot. */
	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	/* Present the request to FW as an MFI passthru IO request. */
	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* Chain element points at the DMA-able MFI frame itself. */
	mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr);

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz);

	return (0);
}
3951 
3952 /*
3953  * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3954  * input:					Adapter soft state Command to be issued
3955  *
3956  * This function waits on an event for the command to be returned from the ISR.
3957  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3958  * internal and ioctl commands.
3959  */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): &cmd is the address of the local pointer variable,
	 * not of the command itself; the actual sleep/wakeup channel is
	 * &sc->chan (see mrsas_wakeup()), so this assignment effectively
	 * just marks the channel as in use.
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in 1 second slices until the ISR wakes us or we give up. */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}
	sc->chan = NULL;

	/* Status never updated by FW: treat as a DCMD timeout. */
	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
4005 
4006 /*
4007  * mrsas_complete_mptmfi_passthru:	Completes a command
4008  * input:	@sc:					Adapter soft state
4009  * 			@cmd:					Command to be completed
4010  * 			@status:				cmd completion status
4011  *
4012  * This function is called from mrsas_complete_cmd() after an interrupt is
4013  * received from Firmware, and io_request->Function is
4014  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
4015  */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	/* FW-reported completion status taken from the MFI frame itself.
	 * NOTE(review): the 'status' parameter is unused here -- confirm
	 * that is intentional. */
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH -- non-IOCTL SCSI IO completes like a DCMD */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		/* NOTE(review): opcode is compared un-swapped here but with
		 * le32toh() for EVENT_WAIT below -- confirm behavior on
		 * big-endian hosts. */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			/* Pended map-update DCMD completed: reload the map. */
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* NOT_FOUND: nothing to sync; stop here. */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-arm the pended map-update DCMD with FW. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {
			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				/* JBOD fast path is disabled on sync failure. */
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
4113 
4114 /*
4115  * mrsas_wakeup:	Completes an internal command
4116  * input:			Adapter soft state
4117  * 					Command to be completed
4118  *
4119  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
4120  * timer is started.  This function is called from
4121  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
4122  * from the command wait.
4123  */
4124 void
4125 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4126 {
4127 	cmd->cmd_status = cmd->frame->io.cmd_status;
4128 
4129 	if (cmd->cmd_status == 0xFF)
4130 		cmd->cmd_status = 0;
4131 
4132 	sc->chan = (void *)&cmd;
4133 	wakeup_one((void *)&sc->chan);
4134 	return;
4135 }
4136 
4137 /*
4138  * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
4139  * Adapter soft state Shutdown/Hibernate
4140  *
4141  * This function issues a DCMD internal command to Firmware to initiate shutdown
4142  * of the controller.
4143  */
4144 static void
4145 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
4146 {
4147 	struct mrsas_mfi_cmd *cmd;
4148 	struct mrsas_dcmd_frame *dcmd;
4149 
4150 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4151 		return;
4152 
4153 	cmd = mrsas_get_mfi_cmd(sc);
4154 	if (!cmd) {
4155 		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
4156 		return;
4157 	}
4158 	if (sc->aen_cmd)
4159 		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
4160 	if (sc->map_update_cmd)
4161 		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
4162 	if (sc->jbod_seq_cmd)
4163 		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
4164 
4165 	dcmd = &cmd->frame->dcmd;
4166 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4167 
4168 	dcmd->cmd = MFI_CMD_DCMD;
4169 	dcmd->cmd_status = 0x0;
4170 	dcmd->sge_count = 0;
4171 	dcmd->flags = MFI_FRAME_DIR_NONE;
4172 	dcmd->timeout = 0;
4173 	dcmd->pad_0 = 0;
4174 	dcmd->data_xfer_len = 0;
4175 	dcmd->opcode = opcode;
4176 
4177 	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
4178 
4179 	mrsas_issue_blocked_cmd(sc, cmd);
4180 	mrsas_release_mfi_cmd(cmd);
4181 
4182 	return;
4183 }
4184 
4185 /*
4186  * mrsas_flush_cache:         Requests FW to flush all its caches input:
4187  * Adapter soft state
4188  *
4189  * This function is issues a DCMD internal command to Firmware to initiate
4190  * flushing of all caches.
4191  */
4192 static void
4193 mrsas_flush_cache(struct mrsas_softc *sc)
4194 {
4195 	struct mrsas_mfi_cmd *cmd;
4196 	struct mrsas_dcmd_frame *dcmd;
4197 
4198 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4199 		return;
4200 
4201 	cmd = mrsas_get_mfi_cmd(sc);
4202 	if (!cmd) {
4203 		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
4204 		return;
4205 	}
4206 	dcmd = &cmd->frame->dcmd;
4207 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4208 
4209 	dcmd->cmd = MFI_CMD_DCMD;
4210 	dcmd->cmd_status = 0x0;
4211 	dcmd->sge_count = 0;
4212 	dcmd->flags = MFI_FRAME_DIR_NONE;
4213 	dcmd->timeout = 0;
4214 	dcmd->pad_0 = 0;
4215 	dcmd->data_xfer_len = 0;
4216 	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
4217 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
4218 
4219 	mrsas_issue_blocked_cmd(sc, cmd);
4220 	mrsas_release_mfi_cmd(cmd);
4221 
4222 	return;
4223 }
4224 
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/* Map size: fixed header plus one MR_PD_CFG_SEQ per possible PD. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Double-buffered map: use the half selected by the current map id. */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(pd_seq_map_sz);
	dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz);

	if (pend) {
		/*
		 * Pended registration: FW completes this DCMD only when the
		 * JBOD map changes; completion is handled in
		 * mrsas_complete_mptmfi_passthru().
		 * NOTE(review): on mrsas_issue_dcmd() failure the cmd is not
		 * released here -- confirm whether that is intentional.
		 */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = htole16(MFI_FRAME_DIR_READ);

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* Sanity-check the PD count FW wrote back into the map buffer.
	 * NOTE(review): count is printed un-swapped below while the compare
	 * uses le32toh() -- the printed value is wrong on big-endian. */
	if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	/* A timed-out DCMD schedules an online controller reset (OCR). */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}
4301 
4302 /*
4303  * mrsas_get_map_info:        Load and validate RAID map input:
4304  * Adapter instance soft state
4305  *
4306  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
4307  * and validate RAID map.  It returns 0 if successful, 1 other- wise.
4308  */
4309 static int
4310 mrsas_get_map_info(struct mrsas_softc *sc)
4311 {
4312 	uint8_t retcode = 0;
4313 
4314 	sc->fast_path_io = 0;
4315 	if (!mrsas_get_ld_map_info(sc)) {
4316 		retcode = MR_ValidateMapInfo(sc);
4317 		if (retcode == 0) {
4318 			sc->fast_path_io = 1;
4319 			return 0;
4320 		}
4321 	}
4322 	return 1;
4323 }
4324 
4325 /*
4326  * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
4327  * Adapter instance soft state
4328  *
4329  * Issues an internal command (DCMD) to get the FW's controller PD list
4330  * structure.
4331  */
4332 static int
4333 mrsas_get_ld_map_info(struct mrsas_softc *sc)
4334 {
4335 	int retcode = 0;
4336 	struct mrsas_mfi_cmd *cmd;
4337 	struct mrsas_dcmd_frame *dcmd;
4338 	void *map;
4339 	bus_addr_t map_phys_addr = 0;
4340 
4341 	cmd = mrsas_get_mfi_cmd(sc);
4342 	if (!cmd) {
4343 		device_printf(sc->mrsas_dev,
4344 		    "Cannot alloc for ld map info cmd.\n");
4345 		return 1;
4346 	}
4347 	dcmd = &cmd->frame->dcmd;
4348 
4349 	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4350 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4351 	if (!map) {
4352 		device_printf(sc->mrsas_dev,
4353 		    "Failed to alloc mem for ld map info.\n");
4354 		mrsas_release_mfi_cmd(cmd);
4355 		return (ENOMEM);
4356 	}
4357 	memset(map, 0, sizeof(sc->max_map_sz));
4358 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4359 
4360 	dcmd->cmd = MFI_CMD_DCMD;
4361 	dcmd->cmd_status = 0xFF;
4362 	dcmd->sge_count = 1;
4363 	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4364 	dcmd->timeout = 0;
4365 	dcmd->pad_0 = 0;
4366 	dcmd->data_xfer_len = htole32(sc->current_map_sz);
4367 	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
4368 	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
4369 	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);
4370 
4371 	retcode = mrsas_issue_polled(sc, cmd);
4372 	if (retcode == ETIMEDOUT)
4373 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4374 
4375 	return (retcode);
4376 }
4377 
4378 /*
4379  * mrsas_sync_map_info:        Get FW's ld_map structure input:
4380  * Adapter instance soft state
4381  *
4382  * Issues an internal command (DCMD) to get the FW's controller PD list
4383  * structure.
4384  */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
	int retcode = 0, i;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t num_lds;
	MR_LD_TARGET_SYNC *target_map = NULL;
	MR_DRV_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	MR_LD_TARGET_SYNC *ld_sync;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
		return ENOMEM;
	}
	/* Current driver map supplies the LD list to sync back to FW. */
	map = sc->ld_drv_map[sc->map_id & 1];
	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the sync payload in the *other* raid-map buffer half. */
	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
	memset(target_map, 0, sc->max_map_sz);

	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

	ld_sync = (MR_LD_TARGET_SYNC *) target_map;

	/* One target-id / sequence-number pair per logical drive.
	 * NOTE(review): targetId/seqNum are stored un-swapped and ldCount is
	 * read without le32toh() -- confirm the driver map is native-endian
	 * on big-endian hosts. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sc->current_map_sz);
	dcmd->mbox.b[0] = num_lds;
	/* PEND flag: FW completes this DCMD only when the map changes. */
	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);

	/* Completion path clears map_update_cmd (mrsas_complete_mptmfi_passthru). */
	sc->map_update_cmd = cmd;
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev,
		    "Fail to send sync map info command.\n");
		return (1);
	}
	return (retcode);
}
4443 
4444 /* Input:	dcmd.opcode		- MR_DCMD_PD_GET_INFO
4445   *		dcmd.mbox.s[0]		- deviceId for this physical drive
4446   *		dcmd.sge IN		- ptr to returned MR_PD_INFO structure
4447   * Desc:	Firmware return the physical drive info structure
4448   *
4449   */
4450 static void
4451 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
4452 {
4453 	int retcode;
4454 	u_int8_t do_ocr = 1;
4455 	struct mrsas_mfi_cmd *cmd;
4456 	struct mrsas_dcmd_frame *dcmd;
4457 
4458 	cmd = mrsas_get_mfi_cmd(sc);
4459 
4460 	if (!cmd) {
4461 		device_printf(sc->mrsas_dev,
4462 		    "Cannot alloc for get PD info cmd\n");
4463 		return;
4464 	}
4465 	dcmd = &cmd->frame->dcmd;
4466 
4467 	memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
4468 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4469 
4470 	dcmd->mbox.s[0] = htole16(device_id);
4471 	dcmd->cmd = MFI_CMD_DCMD;
4472 	dcmd->cmd_status = 0xFF;
4473 	dcmd->sge_count = 1;
4474 	dcmd->flags = MFI_FRAME_DIR_READ;
4475 	dcmd->timeout = 0;
4476 	dcmd->pad_0 = 0;
4477 	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info));
4478 	dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO);
4479 	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF);
4480 	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info));
4481 
4482 	if (!sc->mask_interrupts)
4483 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4484 	else
4485 		retcode = mrsas_issue_polled(sc, cmd);
4486 
4487 	if (retcode == ETIMEDOUT)
4488 		goto dcmd_timeout;
4489 
4490 	sc->target_list[device_id].interface_type =
4491 		le16toh(sc->pd_info_mem->state.ddf.pdType.intf);
4492 
4493 	do_ocr = 0;
4494 
4495 dcmd_timeout:
4496 
4497 	if (do_ocr)
4498 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4499 
4500 	if (!sc->mask_interrupts)
4501 		mrsas_release_mfi_cmd(cmd);
4502 }
4503 
4504 /*
4505  * mrsas_add_target:				Add target ID of system PD/VD to driver's data structure.
4506  * sc:						Adapter's soft state
4507  * target_id:					Unique target id per controller(managed by driver)
4508  *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4509  *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4510  * return:					void
4511  * Descripton:					This function will be called whenever system PD or VD is created.
4512  */
4513 static void mrsas_add_target(struct mrsas_softc *sc,
4514 	u_int16_t target_id)
4515 {
4516 	sc->target_list[target_id].target_id = target_id;
4517 
4518 	device_printf(sc->mrsas_dev,
4519 		"%s created target ID: 0x%x\n",
4520 		(target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4521 		(target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4522 	/*
4523 	 * If interrupts are enabled, then only fire DCMD to get pd_info
4524 	 * for system PDs
4525 	 */
4526 	if (!sc->mask_interrupts && sc->pd_info_mem &&
4527 		(target_id < MRSAS_MAX_PD))
4528 		mrsas_get_pd_info(sc, target_id);
4529 
4530 }
4531 
4532 /*
4533  * mrsas_remove_target:			Remove target ID of system PD/VD from driver's data structure.
4534  * sc:						Adapter's soft state
4535  * target_id:					Unique target id per controller(managed by driver)
4536  *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4537  *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4538  * return:					void
4539  * Descripton:					This function will be called whenever system PD or VD is deleted
4540  */
4541 static void mrsas_remove_target(struct mrsas_softc *sc,
4542 	u_int16_t target_id)
4543 {
4544 	sc->target_list[target_id].target_id = 0xffff;
4545 	device_printf(sc->mrsas_dev,
4546 		"%s deleted target ID: 0x%x\n",
4547 		(target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4548 		(target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4549 }
4550 
4551 /*
4552  * mrsas_get_pd_list:           Returns FW's PD list structure input:
4553  * Adapter soft state
4554  *
4555  * Issues an internal command (DCMD) to get the FW's controller PD list
4556  * structure.  This information is mainly used to find out about system
4557  * supported by Firmware.
4558  */
4559 static int
4560 mrsas_get_pd_list(struct mrsas_softc *sc)
4561 {
4562 	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4563 	u_int8_t do_ocr = 1;
4564 	struct mrsas_mfi_cmd *cmd;
4565 	struct mrsas_dcmd_frame *dcmd;
4566 	struct MR_PD_LIST *pd_list_mem;
4567 	struct MR_PD_ADDRESS *pd_addr;
4568 	bus_addr_t pd_list_phys_addr = 0;
4569 	struct mrsas_tmp_dcmd *tcmd;
4570 	u_int16_t dev_id;
4571 
4572 	cmd = mrsas_get_mfi_cmd(sc);
4573 	if (!cmd) {
4574 		device_printf(sc->mrsas_dev,
4575 		    "Cannot alloc for get PD list cmd\n");
4576 		return 1;
4577 	}
4578 	dcmd = &cmd->frame->dcmd;
4579 
4580 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4581 	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4582 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4583 		device_printf(sc->mrsas_dev,
4584 		    "Cannot alloc dmamap for get PD list cmd\n");
4585 		mrsas_release_mfi_cmd(cmd);
4586 		mrsas_free_tmp_dcmd(tcmd);
4587 		free(tcmd, M_MRSAS);
4588 		return (ENOMEM);
4589 	} else {
4590 		pd_list_mem = tcmd->tmp_dcmd_mem;
4591 		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4592 	}
4593 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4594 
4595 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4596 	dcmd->mbox.b[1] = 0;
4597 	dcmd->cmd = MFI_CMD_DCMD;
4598 	dcmd->cmd_status = 0xFF;
4599 	dcmd->sge_count = 1;
4600 	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4601 	dcmd->timeout = 0;
4602 	dcmd->pad_0 = 0;
4603 	dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4604 	dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY);
4605 	dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF);
4606 	dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4607 
4608 	if (!sc->mask_interrupts)
4609 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4610 	else
4611 		retcode = mrsas_issue_polled(sc, cmd);
4612 
4613 	if (retcode == ETIMEDOUT)
4614 		goto dcmd_timeout;
4615 
4616 	/* Get the instance PD list */
4617 	pd_count = MRSAS_MAX_PD;
4618 	pd_addr = pd_list_mem->addr;
4619 	if (le32toh(pd_list_mem->count) < pd_count) {
4620 		memset(sc->local_pd_list, 0,
4621 		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4622 		for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) {
4623 			dev_id = le16toh(pd_addr->deviceId);
4624 			sc->local_pd_list[dev_id].tid = dev_id;
4625 			sc->local_pd_list[dev_id].driveType =
4626 			    le16toh(pd_addr->scsiDevType);
4627 			sc->local_pd_list[dev_id].driveState =
4628 			    MR_PD_STATE_SYSTEM;
4629 			if (sc->target_list[dev_id].target_id == 0xffff)
4630 				mrsas_add_target(sc, dev_id);
4631 			pd_addr++;
4632 		}
4633 		for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
4634 			if ((sc->local_pd_list[pd_index].driveState !=
4635 				MR_PD_STATE_SYSTEM) &&
4636 				(sc->target_list[pd_index].target_id !=
4637 				0xffff)) {
4638 				mrsas_remove_target(sc, pd_index);
4639 			}
4640 		}
4641 		/*
4642 		 * Use mutext/spinlock if pd_list component size increase more than
4643 		 * 32 bit.
4644 		 */
4645 		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4646 		do_ocr = 0;
4647 	}
4648 dcmd_timeout:
4649 	mrsas_free_tmp_dcmd(tcmd);
4650 	free(tcmd, M_MRSAS);
4651 
4652 	if (do_ocr)
4653 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4654 
4655 	if (!sc->mask_interrupts)
4656 		mrsas_release_mfi_cmd(cmd);
4657 
4658 	return (retcode);
4659 }
4660 
4661 /*
4662  * mrsas_get_ld_list:           Returns FW's LD list structure input:
4663  * Adapter soft state
4664  *
4665  * Issues an internal command (DCMD) to get the FW's controller PD list
4666  * structure.  This information is mainly used to find out about supported by
4667  * the FW.
4668  */
4669 static int
4670 mrsas_get_ld_list(struct mrsas_softc *sc)
4671 {
4672 	int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
4673 	u_int8_t do_ocr = 1;
4674 	struct mrsas_mfi_cmd *cmd;
4675 	struct mrsas_dcmd_frame *dcmd;
4676 	struct MR_LD_LIST *ld_list_mem;
4677 	bus_addr_t ld_list_phys_addr = 0;
4678 	struct mrsas_tmp_dcmd *tcmd;
4679 
4680 	cmd = mrsas_get_mfi_cmd(sc);
4681 	if (!cmd) {
4682 		device_printf(sc->mrsas_dev,
4683 		    "Cannot alloc for get LD list cmd\n");
4684 		return 1;
4685 	}
4686 	dcmd = &cmd->frame->dcmd;
4687 
4688 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4689 	ld_list_size = sizeof(struct MR_LD_LIST);
4690 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4691 		device_printf(sc->mrsas_dev,
4692 		    "Cannot alloc dmamap for get LD list cmd\n");
4693 		mrsas_release_mfi_cmd(cmd);
4694 		mrsas_free_tmp_dcmd(tcmd);
4695 		free(tcmd, M_MRSAS);
4696 		return (ENOMEM);
4697 	} else {
4698 		ld_list_mem = tcmd->tmp_dcmd_mem;
4699 		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4700 	}
4701 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4702 
4703 	if (sc->max256vdSupport)
4704 		dcmd->mbox.b[0] = 1;
4705 
4706 	dcmd->cmd = MFI_CMD_DCMD;
4707 	dcmd->cmd_status = 0xFF;
4708 	dcmd->sge_count = 1;
4709 	dcmd->flags = MFI_FRAME_DIR_READ;
4710 	dcmd->timeout = 0;
4711 	dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST));
4712 	dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST);
4713 	dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr);
4714 	dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST));
4715 	dcmd->pad_0 = 0;
4716 
4717 	if (!sc->mask_interrupts)
4718 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4719 	else
4720 		retcode = mrsas_issue_polled(sc, cmd);
4721 
4722 	if (retcode == ETIMEDOUT)
4723 		goto dcmd_timeout;
4724 
4725 #if VD_EXT_DEBUG
4726 	printf("Number of LDs %d\n", ld_list_mem->ldCount);
4727 #endif
4728 
4729 	/* Get the instance LD list */
4730 	if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) {
4731 		sc->CurLdCount = le32toh(ld_list_mem->ldCount);
4732 		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4733 		for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) {
4734 			ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4735 			drv_tgt_id = ids + MRSAS_MAX_PD;
4736 			if (ld_list_mem->ldList[ld_index].state != 0) {
4737 				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4738 				if (sc->target_list[drv_tgt_id].target_id ==
4739 					0xffff)
4740 					mrsas_add_target(sc, drv_tgt_id);
4741 			} else {
4742 				if (sc->target_list[drv_tgt_id].target_id !=
4743 					0xffff)
4744 					mrsas_remove_target(sc,
4745 						drv_tgt_id);
4746 			}
4747 		}
4748 
4749 		do_ocr = 0;
4750 	}
4751 dcmd_timeout:
4752 	mrsas_free_tmp_dcmd(tcmd);
4753 	free(tcmd, M_MRSAS);
4754 
4755 	if (do_ocr)
4756 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4757 	if (!sc->mask_interrupts)
4758 		mrsas_release_mfi_cmd(cmd);
4759 
4760 	return (retcode);
4761 }
4762 
4763 /*
4764  * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
4765  * Adapter soft state Temp command Size of allocation
4766  *
4767  * Allocates DMAable memory for a temporary internal command. The allocated
4768  * memory is initialized to all zeros upon successful loading of the dma
4769  * mapped memory.
4770  */
4771 int
4772 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4773     struct mrsas_tmp_dcmd *tcmd, int size)
4774 {
4775 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
4776 	    1, 0,
4777 	    BUS_SPACE_MAXADDR_32BIT,
4778 	    BUS_SPACE_MAXADDR,
4779 	    NULL, NULL,
4780 	    size,
4781 	    1,
4782 	    size,
4783 	    BUS_DMA_ALLOCNOW,
4784 	    NULL, NULL,
4785 	    &tcmd->tmp_dcmd_tag)) {
4786 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4787 		return (ENOMEM);
4788 	}
4789 	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4790 	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4791 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4792 		return (ENOMEM);
4793 	}
4794 	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4795 	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4796 	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4797 		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4798 		return (ENOMEM);
4799 	}
4800 	memset(tcmd->tmp_dcmd_mem, 0, size);
4801 	return (0);
4802 }
4803 
4804 /*
4805  * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
4806  * temporary dcmd pointer
4807  *
4808  * Deallocates memory of the temporary command for use in the construction of
4809  * the internal DCMD.
4810  */
4811 void
4812 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4813 {
4814 	if (tmp->tmp_dcmd_phys_addr)
4815 		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4816 	if (tmp->tmp_dcmd_mem != NULL)
4817 		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4818 	if (tmp->tmp_dcmd_tag != NULL)
4819 		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4820 }
4821 
4822 /*
4823  * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
4824  * Adapter soft state Previously issued cmd to be aborted
4825  *
4826  * This function is used to abort previously issued commands, such as AEN and
4827  * RAID map sync map commands.  The abort command is sent as a DCMD internal
4828  * command and subsequently the driver will wait for a return status.  The
4829  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4830  */
4831 static int
4832 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4833     struct mrsas_mfi_cmd *cmd_to_abort)
4834 {
4835 	struct mrsas_mfi_cmd *cmd;
4836 	struct mrsas_abort_frame *abort_fr;
4837 	u_int8_t retcode = 0;
4838 	unsigned long total_time = 0;
4839 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4840 
4841 	cmd = mrsas_get_mfi_cmd(sc);
4842 	if (!cmd) {
4843 		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4844 		return (1);
4845 	}
4846 	abort_fr = &cmd->frame->abort;
4847 
4848 	/* Prepare and issue the abort frame */
4849 	abort_fr->cmd = MFI_CMD_ABORT;
4850 	abort_fr->cmd_status = 0xFF;
4851 	abort_fr->flags = 0;
4852 	abort_fr->abort_context = cmd_to_abort->index;
4853 	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4854 	abort_fr->abort_mfi_phys_addr_hi = 0;
4855 
4856 	cmd->sync_cmd = 1;
4857 	cmd->cmd_status = 0xFF;
4858 
4859 	if (mrsas_issue_dcmd(sc, cmd)) {
4860 		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4861 		return (1);
4862 	}
4863 	/* Wait for this cmd to complete */
4864 	sc->chan = (void *)&cmd;
4865 	while (1) {
4866 		if (cmd->cmd_status == 0xFF) {
4867 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4868 		} else
4869 			break;
4870 		total_time++;
4871 		if (total_time >= max_wait) {
4872 			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4873 			retcode = 1;
4874 			break;
4875 		}
4876 	}
4877 
4878 	cmd->sync_cmd = 0;
4879 	mrsas_release_mfi_cmd(cmd);
4880 	return (retcode);
4881 }
4882 
4883 /*
4884  * mrsas_complete_abort:      Completes aborting a command input:
4885  * Adapter soft state Cmd that was issued to abort another cmd
4886  *
4887  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4888  * change after sending the command.  This function is called from
4889  * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4890  */
4891 void
4892 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4893 {
4894 	if (cmd->sync_cmd) {
4895 		cmd->sync_cmd = 0;
4896 		cmd->cmd_status = 0;
4897 		sc->chan = (void *)&cmd;
4898 		wakeup_one((void *)&sc->chan);
4899 	}
4900 	return;
4901 }
4902 
4903 /*
4904  * mrsas_aen_handler:	AEN processing callback function from thread context
4905  * input:				Adapter soft state
4906  *
4907  * Asynchronous event handler
4908  */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
 	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	/* Skip event handling entirely while a detach or OCR is underway. */
	if (sc->remove_in_progress || sc->reset_in_progress) {
		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
			__func__, __LINE__);
		return;
	}
	/*
	 * Dispatch on the event code last written by firmware into the
	 * AEN detail buffer.  On any failed refresh (fail_aen != 0) we
	 * deliberately skip re-registering the AEN below.
	 */
	if (sc->evt_detail_mem) {
		switch (sc->evt_detail_mem->code) {
		case MR_EVT_PD_INSERTED:
			/* Refresh PD list, then rescan the PD SIM (sim_1). */
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_PD_REMOVED:
			/* Same refresh/rescan as insertion. */
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			/* LD gone: rescan the LD SIM (sim_0) only. */
			mrsas_bus_scan_sim(sc, sc->sim_0);
			break;
		case MR_EVT_LD_CREATED:
			/* Refresh LD list, then rescan the LD SIM. */
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			/* Broad config change: full PD+LD rescan below. */
			doscan = 1;
			break;
		case MR_EVT_CTRL_PROP_CHANGED:
			fail_aen = mrsas_get_ctrl_info(sc);
			if (fail_aen)
				goto skip_register_aen;
			break;
		default:
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	if (doscan) {
		/* PD side first; on failure skip AEN re-registration. */
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	/*
	 * NOTE(review): seq_num is read without le32toh() while other
	 * evt_detail fields elsewhere use le32toh — confirm the byte order
	 * of seq_num against the struct definition.
	 */
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	/* An AEN command already outstanding: nothing more to do. */
	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);

skip_register_aen:
	return;

}
5008 
5009 /*
5010  * mrsas_complete_aen:	Completes AEN command
5011  * input:				Adapter soft state
5012  * 						Cmd that was issued to abort another cmd
5013  *
5014  * This function will be called from ISR and will continue event processing from
5015  * thread context by enqueuing task in ev_tq (callback function
5016  * "mrsas_aen_handler").
5017  */
5018 void
5019 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
5020 {
5021 	/*
5022 	 * Don't signal app if it is just an aborted previously registered
5023 	 * aen
5024 	 */
5025 	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
5026 		sc->mrsas_aen_triggered = 1;
5027 		mtx_lock(&sc->aen_lock);
5028 		if (sc->mrsas_poll_waiting) {
5029 			sc->mrsas_poll_waiting = 0;
5030 			selwakeup(&sc->mrsas_select);
5031 		}
5032 		mtx_unlock(&sc->aen_lock);
5033 	} else
5034 		cmd->abort_aen = 0;
5035 
5036 	sc->aen_cmd = NULL;
5037 	mrsas_release_mfi_cmd(cmd);
5038 
5039 	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
5040 
5041 	return;
5042 }
5043 
/* Newbus method table: standard device and bus entry points for mrsas. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_shutdown, mrsas_shutdown),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}	/* terminator */
};
5055 
/* Driver declaration: name, method table, and softc size for newbus. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};
5061 
/* Register the driver on the PCI bus and declare its CAM dependency. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
5064