xref: /freebsd/sys/dev/mrsas/mrsas.c (revision 783d3ff6d7fae619db8a7990b8a6387de0c677b5)
1 /*
2  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4  * Support: freebsdraid@avagotech.com
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer. 2. Redistributions
12  * in binary form must reproduce the above copyright notice, this list of
13  * conditions and the following disclaimer in the documentation and/or other
14  * materials provided with the distribution. 3. Neither the name of the
15  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16  * promote products derived from this software without specific prior written
17  * permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing
33  * official policies,either expressed or implied, of the FreeBSD Project.
34  *
35  * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37  *
38  */
39 
40 #include <sys/cdefs.h>
41 #include <dev/mrsas/mrsas.h>
42 #include <dev/mrsas/mrsas_ioctl.h>
43 
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46 
47 #include <sys/sysctl.h>
48 #include <sys/types.h>
49 #include <sys/sysent.h>
50 #include <sys/kthread.h>
51 #include <sys/taskqueue.h>
52 #include <sys/smp.h>
53 #include <sys/endian.h>
54 
55 /*
56  * Function prototypes
57  */
58 static d_open_t mrsas_open;
59 static d_close_t mrsas_close;
60 static d_ioctl_t mrsas_ioctl;
61 static d_poll_t mrsas_poll;
62 
63 static void mrsas_ich_startup(void *arg);
64 static struct mrsas_mgmt_info mrsas_mgmt_info;
65 static struct mrsas_ident *mrsas_find_ident(device_t);
66 static int mrsas_setup_msix(struct mrsas_softc *sc);
67 static int mrsas_allocate_msix(struct mrsas_softc *sc);
68 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
69 static void mrsas_flush_cache(struct mrsas_softc *sc);
70 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
71 static void mrsas_ocr_thread(void *arg);
72 static int mrsas_get_map_info(struct mrsas_softc *sc);
73 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
74 static int mrsas_sync_map_info(struct mrsas_softc *sc);
75 static int mrsas_get_pd_list(struct mrsas_softc *sc);
76 static int mrsas_get_ld_list(struct mrsas_softc *sc);
77 static int mrsas_setup_irq(struct mrsas_softc *sc);
78 static int mrsas_alloc_mem(struct mrsas_softc *sc);
79 static int mrsas_init_fw(struct mrsas_softc *sc);
80 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
81 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
82 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
83 static int mrsas_clear_intr(struct mrsas_softc *sc);
84 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
85 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
86 static int
87 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
88     struct mrsas_mfi_cmd *cmd_to_abort);
89 static void
90 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
91 static struct mrsas_softc *
92 mrsas_get_softc_instance(struct cdev *dev,
93     u_long cmd, caddr_t arg);
94 u_int32_t
95 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
97 u_int8_t
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99     struct mrsas_mfi_cmd *mfi_cmd);
100 void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int	mrsas_init_adapter(struct mrsas_softc *sc);
103 int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int	mrsas_ioc_init(struct mrsas_softc *sc);
107 int	mrsas_bus_scan(struct mrsas_softc *sc);
108 int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
113 int mrsas_reset_targets(struct mrsas_softc *sc);
114 int
115 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
116     struct mrsas_mfi_cmd *cmd);
117 int
118 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
119     int size);
120 void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
121 void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
124 void	mrsas_disable_intr(struct mrsas_softc *sc);
125 void	mrsas_enable_intr(struct mrsas_softc *sc);
126 void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
127 void	mrsas_free_mem(struct mrsas_softc *sc);
128 void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
129 void	mrsas_isr(void *arg);
130 void	mrsas_teardown_intr(struct mrsas_softc *sc);
131 void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
132 void	mrsas_kill_hba(struct mrsas_softc *sc);
133 void	mrsas_aen_handler(struct mrsas_softc *sc);
134 void
135 mrsas_write_reg(struct mrsas_softc *sc, int offset,
136     u_int32_t value);
137 void
138 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
139     u_int32_t req_desc_hi);
140 void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
141 void
142 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
143     struct mrsas_mfi_cmd *cmd, u_int8_t status);
144 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
145 
146 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
147         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
148 
149 extern int mrsas_cam_attach(struct mrsas_softc *sc);
150 extern void mrsas_cam_detach(struct mrsas_softc *sc);
151 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
152 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
153 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
154 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
155 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
156 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
157 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
158 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
159 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
160 extern void mrsas_xpt_release(struct mrsas_softc *sc);
161 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
162 mrsas_get_request_desc(struct mrsas_softc *sc,
163     u_int16_t index);
164 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
165 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
166 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
167 void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
168 
169 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
170 	union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
171 	u_int32_t data_length, u_int8_t *sense);
172 void
173 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
174     u_int32_t req_desc_hi);
175 
176 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
177     "MRSAS Driver Parameters");
178 
179 /*
180  * PCI device struct and table
181  *
182  */
/*
 * PCI identity tuple used to match supported controllers.  A value of
 * 0xffff in subvendor/subdevice acts as a wildcard during matching.
 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* PCI subsystem vendor ID, 0xffff = any */
	uint16_t subdevice;	/* PCI subsystem device ID, 0xffff = any */
	const char *desc;	/* human-readable controller description */
}	MRSAS_CTLR_ID;
190 
/* Supported controllers; the list is terminated by an all-zero sentinel. */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	{0, 0, 0, 0, NULL}
};
215 
216 /*
217  * Character device entry points
218  *
219  */
/* cdevsw wiring the /dev node to the driver's open/close/ioctl/poll. */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
228 
229 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
230 
/*
 * Character-device open entry point.  No per-open state is kept, so
 * this always succeeds.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	return (0);
}
237 
/*
 * Character-device close entry point.  Nothing to tear down; always
 * succeeds.
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	return (0);
}
244 
245 u_int32_t
246 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
247 {
248 	u_int32_t i = 0, ret_val;
249 
250 	if (sc->is_aero) {
251 		do {
252 			ret_val = mrsas_read_reg(sc, offset);
253 			i++;
254 		} while(ret_val == 0 && i < 3);
255 	} else
256 		ret_val = mrsas_read_reg(sc, offset);
257 
258 	return ret_val;
259 }
260 
261 /*
262  * Register Read/Write Functions
263  *
264  */
265 void
266 mrsas_write_reg(struct mrsas_softc *sc, int offset,
267     u_int32_t value)
268 {
269 	bus_space_tag_t bus_tag = sc->bus_tag;
270 	bus_space_handle_t bus_handle = sc->bus_handle;
271 
272 	bus_space_write_4(bus_tag, bus_handle, offset, value);
273 }
274 
275 u_int32_t
276 mrsas_read_reg(struct mrsas_softc *sc, int offset)
277 {
278 	bus_space_tag_t bus_tag = sc->bus_tag;
279 	bus_space_handle_t bus_handle = sc->bus_handle;
280 
281 	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
282 }
283 
284 /*
285  * Interrupt Disable/Enable/Clear Functions
286  *
287  */
288 void
289 mrsas_disable_intr(struct mrsas_softc *sc)
290 {
291 	u_int32_t mask = 0xFFFFFFFF;
292 
293 	sc->mask_interrupts = 1;
294 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
295 	/* Dummy read to force pci flush */
296 	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
297 }
298 
299 void
300 mrsas_enable_intr(struct mrsas_softc *sc)
301 {
302 	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
303 
304 	sc->mask_interrupts = 0;
305 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
306 	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
307 
308 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
309 	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
310 }
311 
312 static int
313 mrsas_clear_intr(struct mrsas_softc *sc)
314 {
315 	u_int32_t status;
316 
317 	/* Read received interrupt */
318 	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));
319 
320 	/* Not our interrupt, so just return */
321 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
322 		return (0);
323 
324 	/* We got a reply interrupt */
325 	return (1);
326 }
327 
328 /*
329  * PCI Support Functions
330  *
331  */
332 static struct mrsas_ident *
333 mrsas_find_ident(device_t dev)
334 {
335 	struct mrsas_ident *pci_device;
336 
337 	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
338 		if ((pci_device->vendor == pci_get_vendor(dev)) &&
339 		    (pci_device->device == pci_get_device(dev)) &&
340 		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
341 		    (pci_device->subvendor == 0xffff)) &&
342 		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
343 		    (pci_device->subdevice == 0xffff)))
344 			return (pci_device);
345 	}
346 	return (NULL);
347 }
348 
349 static int
350 mrsas_probe(device_t dev)
351 {
352 	static u_int8_t first_ctrl = 1;
353 	struct mrsas_ident *id;
354 
355 	if ((id = mrsas_find_ident(dev)) != NULL) {
356 		if (first_ctrl) {
357 			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
358 			    MRSAS_VERSION);
359 			first_ctrl = 0;
360 		}
361 		device_set_desc(dev, id->desc);
362 		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
363 		return (-30);
364 	}
365 	return (ENXIO);
366 }
367 
368 /*
369  * mrsas_setup_sysctl:	setup sysctl values for mrsas
370  * input:				Adapter instance soft state
371  *
372  * Setup sysctl entries for mrsas driver.
373  */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the newbus-provided sysctl context/tree for this device. */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* Fall back to a private context rooted under hw.mrsas.<unit>. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");
	/*
	 * NOTE(review): the next two OID names contain spaces, which makes
	 * them awkward to address from sysctl(8); kept as-is since the
	 * names are user-visible interface.
	 */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream detection", CTLFLAG_RW,
		&sc->drv_stream_detection, 0,
		"Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "SGE holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}
455 
456 /*
457  * mrsas_get_tunables:	get tunable parameters.
458  * input:				Adapter instance soft state
459  *
460  * Get tunable parameters. This will help to debug driver at boot time.
461  */
462 static void
463 mrsas_get_tunables(struct mrsas_softc *sc)
464 {
465 	char tmpstr[80];
466 
467 	/* XXX default to some debugging for now */
468 	sc->mrsas_debug =
469 		(MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
470 	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
471 	sc->mrsas_fw_fault_check_delay = 1;
472 	sc->reset_count = 0;
473 	sc->reset_in_progress = 0;
474 	sc->block_sync_cache = 0;
475 	sc->drv_stream_detection = 1;
476 
477 	/*
478 	 * Grab the global variables.
479 	 */
480 	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
481 
482 	/*
483 	 * Grab the global variables.
484 	 */
485 	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
486 
487 	/* Grab the unit-instance variables */
488 	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
489 	    device_get_unit(sc->mrsas_dev));
490 	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
491 }
492 
493 /*
494  * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
495  * Used to get sequence number at driver load time.
496  * input:		Adapter soft state
497  *
498  * Allocates DMAable memory for the event log info internal command.
499  */
500 int
501 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
502 {
503 	int el_info_size;
504 
505 	/* Allocate get event log info command */
506 	el_info_size = sizeof(struct mrsas_evt_log_info);
507 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
508 	    1, 0,
509 	    BUS_SPACE_MAXADDR_32BIT,
510 	    BUS_SPACE_MAXADDR,
511 	    NULL, NULL,
512 	    el_info_size,
513 	    1,
514 	    el_info_size,
515 	    BUS_DMA_ALLOCNOW,
516 	    NULL, NULL,
517 	    &sc->el_info_tag)) {
518 		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
519 		return (ENOMEM);
520 	}
521 	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
522 	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
523 		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
524 		return (ENOMEM);
525 	}
526 	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
527 	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
528 	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
529 		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
530 		return (ENOMEM);
531 	}
532 	memset(sc->el_info_mem, 0, el_info_size);
533 	return (0);
534 }
535 
536 /*
537  * mrsas_free_evt_info_cmd:	Free memory for Event log info command
538  * input:					Adapter soft state
539  *
540  * Deallocates memory for the event log info internal command.
541  */
542 void
543 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
544 {
545 	if (sc->el_info_phys_addr)
546 		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
547 	if (sc->el_info_mem != NULL)
548 		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
549 	if (sc->el_info_tag != NULL)
550 		bus_dma_tag_destroy(sc->el_info_tag);
551 }
552 
553 /*
554  *  mrsas_get_seq_num:	Get latest event sequence number
555  *  @sc:				Adapter soft state
556  *  @eli:				Firmware event log sequence number information.
557  *
558  * Firmware maintains a log of all events in a non-volatile area.
559  * Driver get the sequence number using DCMD
560  * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
561  */
562 
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	/* do_ocr stays set until the DCMD completes without timing out. */
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMAable buffer the FW writes the event log info into. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build MR_DCMD_CTRL_EVENT_GET_INFO as a single-SGE FW read. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info));

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/*
	 * On timeout the MFI frame is deliberately NOT released (the FW may
	 * still write to it); instead a timed-out-DCMD reset is requested.
	 * Only the success path releases the frame here.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
616 
617 /*
618  *  mrsas_register_aen:		Register for asynchronous event notification
619  *  @sc:			Adapter soft state
620  *  @seq_num:			Starting sequence number
621  *  @class_locale:		Class of the event
622  *
623  *  This function subscribes for events beyond the @seq_num
624  *  and type @class_locale.
625  *
626  */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {
		/* mbox.w[1] of the pending AEN holds its class/locale word. */
		prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]);

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset: union of locales, lower class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	/* Clear the buffer the FW will fill with the next event detail. */
	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	/* mbox.w[0] = starting sequence, mbox.w[1] = class/locale word. */
	dcmd->mbox.w[0] = htole32(seq_num);
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = htole32(curr_aen.word);
	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail));

	/* If another AEN got registered meanwhile, drop this frame. */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
733 
734 /*
735  * mrsas_start_aen:	Subscribes to AEN during driver load time
736  * @instance:		Adapter soft state
737  */
738 static int
739 mrsas_start_aen(struct mrsas_softc *sc)
740 {
741 	struct mrsas_evt_log_info eli;
742 	union mrsas_evt_class_locale class_locale;
743 
744 	/* Get the latest sequence number from FW */
745 
746 	memset(&eli, 0, sizeof(eli));
747 
748 	if (mrsas_get_seq_num(sc, &eli))
749 		return -1;
750 
751 	/* Register AEN with FW for latest sequence number plus 1 */
752 	class_locale.members.reserved = 0;
753 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
754 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
755 
756 	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
757 	    class_locale.word);
758 
759 }
760 
761 /*
762  * mrsas_setup_msix:	Allocate MSI-x vectors
763  * @sc:					adapter soft state
764  */
765 static int
766 mrsas_setup_msix(struct mrsas_softc *sc)
767 {
768 	int i;
769 
770 	for (i = 0; i < sc->msix_vectors; i++) {
771 		sc->irq_context[i].sc = sc;
772 		sc->irq_context[i].MSIxIndex = i;
773 		sc->irq_id[i] = i + 1;
774 		sc->mrsas_irq[i] = bus_alloc_resource_any
775 		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
776 		    ,RF_ACTIVE);
777 		if (sc->mrsas_irq[i] == NULL) {
778 			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
779 			goto irq_alloc_failed;
780 		}
781 		if (bus_setup_intr(sc->mrsas_dev,
782 		    sc->mrsas_irq[i],
783 		    INTR_MPSAFE | INTR_TYPE_CAM,
784 		    NULL, mrsas_isr, &sc->irq_context[i],
785 		    &sc->intr_handle[i])) {
786 			device_printf(sc->mrsas_dev,
787 			    "Cannot set up MSI-x interrupt handler\n");
788 			goto irq_alloc_failed;
789 		}
790 	}
791 	return SUCCESS;
792 
793 irq_alloc_failed:
794 	mrsas_teardown_intr(sc);
795 	return (FAIL);
796 }
797 
798 /*
799  * mrsas_allocate_msix:		Setup MSI-x vectors
800  * @sc:						adapter soft state
801  */
802 static int
803 mrsas_allocate_msix(struct mrsas_softc *sc)
804 {
805 	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
806 		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
807 		    " of vectors\n", sc->msix_vectors);
808 	} else {
809 		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
810 		goto irq_alloc_failed;
811 	}
812 	return SUCCESS;
813 
814 irq_alloc_failed:
815 	mrsas_teardown_intr(sc);
816 	return (FAIL);
817 }
818 
819 /*
820  * mrsas_attach:	PCI entry point
821  * input:			pointer to device struct
822  *
823  * Performs setup of PCI and registers, initializes mutexes and linked lists,
824  * registers interrupts and CAM, and initializes   the adapter/controller to
825  * its proper state.
826  */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, error;

	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Classify the controller generation from its PCI device ID. */
	switch (sc->device_id) {
	case MRSAS_INVADER:
	case MRSAS_FURY:
	case MRSAS_INTRUDER:
	case MRSAS_INTRUDER_24:
	case MRSAS_CUTLASS_52:
	case MRSAS_CUTLASS_53:
		sc->mrsas_gen3_ctrl = 1;
		break;
	case MRSAS_VENTURA:
	case MRSAS_CRUSADER:
	case MRSAS_HARPOON:
	case MRSAS_TOMCAT:
	case MRSAS_VENTURA_4PORT:
	case MRSAS_CRUSADER_4PORT:
		sc->is_ventura = true;
		break;
	case MRSAS_AERO_10E1:
	case MRSAS_AERO_10E5:
		device_printf(dev, "Adapter is in configurable secure mode\n");
		/* FALLTHROUGH: configurable-secure Aero parts are driven as Aero */
	case MRSAS_AERO_10E2:
	case MRSAS_AERO_10E6:
		sc->is_aero = true;
		break;
	case MRSAS_AERO_10E0:
	case MRSAS_AERO_10E3:
	case MRSAS_AERO_10E4:
	case MRSAS_AERO_10E7:
		/* Non-secure Aero parts are not driven; bail out early. */
		device_printf(dev, "Adapter is in non-secure mode\n");
		return SUCCESS;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* For Ventura/Aero system registers are mapped to BAR0 */
	if (sc->is_ventura || sc->is_aero)
		sc->reg_res_id = PCIR_BAR(0);	/* BAR0 offset */
	else
		sc->reg_res_id = PCIR_BAR(1);	/* BAR1 offset */

	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Intialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	/* ioctl_lock is the only spin mutex; taken briefly in the ioctl path. */
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
	mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);

	/* Intialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);
	mrsas_atomic_set(&sc->prp_count, 0);
	mrsas_atomic_set(&sc->sge_holes, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Start the OCR (online controller reset) worker thread. */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/*
	 * Error unwind: each label undoes one more stage of the setup above,
	 * falling through into the next.
	 */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
982 
983 /*
984  * Interrupt config hook
985  */
static void
mrsas_ich_startup(void *arg)
{
	int i = 0;
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	/*
	 * Unit 0 additionally gets a Linux-megaraid compatible alias node so
	 * management tools expecting that name keep working.
	 */
	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	/* make_dev() can fail; only wire up si_drv1 when the cdev exists. */
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	/*
	 * NOTE(review): max_index is used unchecked as an index into
	 * sc_ptr[]; presumably the array is sized for the maximum supported
	 * controller count -- confirm against the mrsas_mgmt_info definition.
	 */
	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Call DCMD get_pd_info for all system PDs */
	for (i = 0; i < MRSAS_MAX_PD; i++) {
		/* 0xffff marks an unused target slot; skip those. */
		if ((sc->target_list[i].target_id != 0xffff) &&
			sc->pd_info_mem)
			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
	}

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* Startup is done: release the config hook so boot can proceed. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}
1046 
1047 /*
1048  * mrsas_detach:	De-allocates and teardown resources
1049  * input:			pointer to device struct
1050  *
1051  * This function is the entry point for device disconnect and detach.
1052  * It performs memory de-allocations, shutdown of the controller and various
1053  * teardown and destroy resource functions.
1054  */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Flag teardown first so ioctl and OCR paths stop accepting work. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Wake the OCR thread so it can observe remove_in_progress and exit. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Wait (unbounded, in 1-second steps) for any in-flight OCR to end. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Then wait for the OCR kernel thread itself to terminate. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Quiesce the controller before releasing resources. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);

	/* Release per-LD stream-detection state (Ventura/Aero only). */
	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
			free(sc->streamDetectByLD[i], M_MRSAS);
		free(sc->streamDetectByLD, M_MRSAS);
		sc->streamDetectByLD = NULL;
	}

	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1142 
1143 static int
1144 mrsas_shutdown(device_t dev)
1145 {
1146 	struct mrsas_softc *sc;
1147 	int i;
1148 
1149 	sc = device_get_softc(dev);
1150 	sc->remove_in_progress = 1;
1151 	if (!KERNEL_PANICKED()) {
1152 		if (sc->ocr_thread_active)
1153 			wakeup(&sc->ocr_chan);
1154 		i = 0;
1155 		while (sc->reset_in_progress && i < 15) {
1156 			i++;
1157 			if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
1158 				mrsas_dprint(sc, MRSAS_INFO,
1159 				    "[%2d]waiting for OCR to be finished "
1160 				    "from %s\n", i, __func__);
1161 			}
1162 			pause("mr_shutdown", hz);
1163 		}
1164 		if (sc->reset_in_progress) {
1165 			mrsas_dprint(sc, MRSAS_INFO,
1166 			    "gave up waiting for OCR to be finished\n");
1167 			return (0);
1168 		}
1169 	}
1170 
1171 	mrsas_flush_cache(sc);
1172 	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1173 	mrsas_disable_intr(sc);
1174 	return (0);
1175 }
1176 
1177 /*
1178  * mrsas_free_mem:		Frees allocated memory
1179  * input:				Adapter instance soft state
1180  *
1181  * This function is called from mrsas_detach() to free previously allocated
1182  * memory.
1183  */
1184 void
1185 mrsas_free_mem(struct mrsas_softc *sc)
1186 {
1187 	int i;
1188 	u_int32_t max_fw_cmds;
1189 	struct mrsas_mfi_cmd *mfi_cmd;
1190 	struct mrsas_mpt_cmd *mpt_cmd;
1191 
1192 	/*
1193 	 * Free RAID map memory
1194 	 */
1195 	for (i = 0; i < 2; i++) {
1196 		if (sc->raidmap_phys_addr[i])
1197 			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1198 		if (sc->raidmap_mem[i] != NULL)
1199 			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1200 		if (sc->raidmap_tag[i] != NULL)
1201 			bus_dma_tag_destroy(sc->raidmap_tag[i]);
1202 
1203 		if (sc->ld_drv_map[i] != NULL)
1204 			free(sc->ld_drv_map[i], M_MRSAS);
1205 	}
1206 	for (i = 0; i < 2; i++) {
1207 		if (sc->jbodmap_phys_addr[i])
1208 			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1209 		if (sc->jbodmap_mem[i] != NULL)
1210 			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1211 		if (sc->jbodmap_tag[i] != NULL)
1212 			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1213 	}
1214 	/*
1215 	 * Free version buffer memory
1216 	 */
1217 	if (sc->verbuf_phys_addr)
1218 		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1219 	if (sc->verbuf_mem != NULL)
1220 		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1221 	if (sc->verbuf_tag != NULL)
1222 		bus_dma_tag_destroy(sc->verbuf_tag);
1223 
1224 	/*
1225 	 * Free sense buffer memory
1226 	 */
1227 	if (sc->sense_phys_addr)
1228 		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1229 	if (sc->sense_mem != NULL)
1230 		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1231 	if (sc->sense_tag != NULL)
1232 		bus_dma_tag_destroy(sc->sense_tag);
1233 
1234 	/*
1235 	 * Free chain frame memory
1236 	 */
1237 	if (sc->chain_frame_phys_addr)
1238 		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1239 	if (sc->chain_frame_mem != NULL)
1240 		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1241 	if (sc->chain_frame_tag != NULL)
1242 		bus_dma_tag_destroy(sc->chain_frame_tag);
1243 
1244 	/*
1245 	 * Free IO Request memory
1246 	 */
1247 	if (sc->io_request_phys_addr)
1248 		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1249 	if (sc->io_request_mem != NULL)
1250 		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1251 	if (sc->io_request_tag != NULL)
1252 		bus_dma_tag_destroy(sc->io_request_tag);
1253 
1254 	/*
1255 	 * Free Reply Descriptor memory
1256 	 */
1257 	if (sc->reply_desc_phys_addr)
1258 		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1259 	if (sc->reply_desc_mem != NULL)
1260 		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1261 	if (sc->reply_desc_tag != NULL)
1262 		bus_dma_tag_destroy(sc->reply_desc_tag);
1263 
1264 	/*
1265 	 * Free event detail memory
1266 	 */
1267 	if (sc->evt_detail_phys_addr)
1268 		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1269 	if (sc->evt_detail_mem != NULL)
1270 		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1271 	if (sc->evt_detail_tag != NULL)
1272 		bus_dma_tag_destroy(sc->evt_detail_tag);
1273 
1274 	/*
1275 	 * Free PD info memory
1276 	 */
1277 	if (sc->pd_info_phys_addr)
1278 		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
1279 	if (sc->pd_info_mem != NULL)
1280 		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
1281 	if (sc->pd_info_tag != NULL)
1282 		bus_dma_tag_destroy(sc->pd_info_tag);
1283 
1284 	/*
1285 	 * Free MFI frames
1286 	 */
1287 	if (sc->mfi_cmd_list) {
1288 		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1289 			mfi_cmd = sc->mfi_cmd_list[i];
1290 			mrsas_free_frame(sc, mfi_cmd);
1291 		}
1292 	}
1293 	if (sc->mficmd_frame_tag != NULL)
1294 		bus_dma_tag_destroy(sc->mficmd_frame_tag);
1295 
1296 	/*
1297 	 * Free MPT internal command list
1298 	 */
1299 	max_fw_cmds = sc->max_fw_cmds;
1300 	if (sc->mpt_cmd_list) {
1301 		for (i = 0; i < max_fw_cmds; i++) {
1302 			mpt_cmd = sc->mpt_cmd_list[i];
1303 			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1304 			free(sc->mpt_cmd_list[i], M_MRSAS);
1305 		}
1306 		free(sc->mpt_cmd_list, M_MRSAS);
1307 		sc->mpt_cmd_list = NULL;
1308 	}
1309 	/*
1310 	 * Free MFI internal command list
1311 	 */
1312 
1313 	if (sc->mfi_cmd_list) {
1314 		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1315 			free(sc->mfi_cmd_list[i], M_MRSAS);
1316 		}
1317 		free(sc->mfi_cmd_list, M_MRSAS);
1318 		sc->mfi_cmd_list = NULL;
1319 	}
1320 	/*
1321 	 * Free request descriptor memory
1322 	 */
1323 	free(sc->req_desc, M_MRSAS);
1324 	sc->req_desc = NULL;
1325 
1326 	/*
1327 	 * Destroy parent tag
1328 	 */
1329 	if (sc->mrsas_parent_tag != NULL)
1330 		bus_dma_tag_destroy(sc->mrsas_parent_tag);
1331 
1332 	/*
1333 	 * Free ctrl_info memory
1334 	 */
1335 	if (sc->ctrl_info != NULL)
1336 		free(sc->ctrl_info, M_MRSAS);
1337 }
1338 
1339 /*
1340  * mrsas_teardown_intr:	Teardown interrupt
1341  * input:				Adapter instance soft state
1342  *
1343  * This function is called from mrsas_detach() to teardown and release bus
1344  * interrupt resourse.
1345  */
1346 void
1347 mrsas_teardown_intr(struct mrsas_softc *sc)
1348 {
1349 	int i;
1350 
1351 	if (!sc->msix_enable) {
1352 		if (sc->intr_handle[0])
1353 			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1354 		if (sc->mrsas_irq[0] != NULL)
1355 			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1356 			    sc->irq_id[0], sc->mrsas_irq[0]);
1357 		sc->intr_handle[0] = NULL;
1358 	} else {
1359 		for (i = 0; i < sc->msix_vectors; i++) {
1360 			if (sc->intr_handle[i])
1361 				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1362 				    sc->intr_handle[i]);
1363 
1364 			if (sc->mrsas_irq[i] != NULL)
1365 				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1366 				    sc->irq_id[i], sc->mrsas_irq[i]);
1367 
1368 			sc->intr_handle[i] = NULL;
1369 		}
1370 		pci_release_msi(sc->mrsas_dev);
1371 	}
1372 
1373 }
1374 
1375 /*
1376  * mrsas_suspend:	Suspend entry point
1377  * input:			Device struct pointer
1378  *
1379  * This function is the entry point for system suspend from the OS.
1380  */
1381 static int
1382 mrsas_suspend(device_t dev)
1383 {
1384 	/* This will be filled when the driver will have hibernation support */
1385 	return (0);
1386 }
1387 
1388 /*
1389  * mrsas_resume:	Resume entry point
1390  * input:			Device struct pointer
1391  *
1392  * This function is the entry point for system resume from the OS.
1393  */
1394 static int
1395 mrsas_resume(device_t dev)
1396 {
1397 	/* This will be filled when the driver will have hibernation support */
1398 	return (0);
1399 }
1400 
1401 /**
1402  * mrsas_get_softc_instance:    Find softc instance based on cmd type
1403  *
1404  * This function will return softc instance based on cmd type.
1405  * In some case, application fire ioctl on required management instance and
1406  * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1407  * case, else get the softc instance from host_no provided by application in
1408  * user data.
1409  */
1410 
1411 static struct mrsas_softc *
1412 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1413 {
1414 	struct mrsas_softc *sc = NULL;
1415 	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1416 
1417 	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1418 		sc = dev->si_drv1;
1419 	} else {
1420 		/*
1421 		 * get the Host number & the softc from data sent by the
1422 		 * Application
1423 		 */
1424 		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1425 		if (sc == NULL)
1426 			printf("There is no Controller number %d\n",
1427 			    user_ioc->host_no);
1428 		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1429 			mrsas_dprint(sc, MRSAS_FAULT,
1430 			    "Invalid Controller number %d\n", user_ioc->host_no);
1431 	}
1432 
1433 	return sc;
1434 }
1435 
1436 /*
1437  * mrsas_ioctl:	IOCtl commands entry point.
1438  *
1439  * This function is the entry point for IOCtls from the OS.  It calls the
1440  * appropriate function for processing depending on the command received.
1441  */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	/*
	 * MFIIO_PASSTHRU is always addressed at the cdev it was issued on;
	 * other commands may carry a host number in their payload and are
	 * resolved through mrsas_get_softc_instance().
	 */
	switch (cmd) {
	case MFIIO_PASSTHRU:
                sc = (struct mrsas_softc *)(dev->si_drv1);
		break;
	default:
		sc = mrsas_get_softc_instance(dev, cmd, arg);
		break;
        }
	if (!sc)
		return ENOENT;

	/* Refuse new ioctls once teardown has started or HW is dead. */
	if (sc->remove_in_progress ||
		(sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Either driver remove or shutdown called or "
			"HW is in unrecoverable critical error state.\n");
		return ENOENT;
	}
	/*
	 * Snapshot reset_in_progress under the spin lock; if no OCR is
	 * running we can service the ioctl immediately.
	 */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	/* An OCR is active: poll (one-second steps) until it completes. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report the controller's PCI location back to the caller. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	case MFIIO_PASSTHRU:
		ret = mrsas_user_command(sc, (struct mfi_ioc_passthru *)arg);
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1531 
1532 /*
1533  * mrsas_poll:	poll entry point for mrsas driver fd
1534  *
1535  * This function is the entry point for poll from the OS.  It waits for some AEN
1536  * events to be triggered from the controller and notifies back.
1537  */
1538 static int
1539 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1540 {
1541 	struct mrsas_softc *sc;
1542 	int revents = 0;
1543 
1544 	sc = dev->si_drv1;
1545 
1546 	if (poll_events & (POLLIN | POLLRDNORM)) {
1547 		if (sc->mrsas_aen_triggered) {
1548 			revents |= poll_events & (POLLIN | POLLRDNORM);
1549 		}
1550 	}
1551 	if (revents == 0) {
1552 		if (poll_events & (POLLIN | POLLRDNORM)) {
1553 			mtx_lock(&sc->aen_lock);
1554 			sc->mrsas_poll_waiting = 1;
1555 			selrecord(td, &sc->mrsas_select);
1556 			mtx_unlock(&sc->aen_lock);
1557 		}
1558 	}
1559 	return revents;
1560 }
1561 
1562 /*
1563  * mrsas_setup_irq:	Set up interrupt
1564  * input:			Adapter instance soft state
1565  *
1566  * This function sets up interrupts as a bus resource, with flags indicating
1567  * resource permitting contemporaneous sharing and for resource to activate
1568  * atomically.
1569  */
1570 static int
1571 mrsas_setup_irq(struct mrsas_softc *sc)
1572 {
1573 	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1574 		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1575 
1576 	else {
1577 		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1578 		sc->irq_context[0].sc = sc;
1579 		sc->irq_context[0].MSIxIndex = 0;
1580 		sc->irq_id[0] = 0;
1581 		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1582 		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1583 		if (sc->mrsas_irq[0] == NULL) {
1584 			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1585 			    "interrupt\n");
1586 			return (FAIL);
1587 		}
1588 		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1589 		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1590 		    &sc->irq_context[0], &sc->intr_handle[0])) {
1591 			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1592 			    "interrupt\n");
1593 			return (FAIL);
1594 		}
1595 	}
1596 	return (0);
1597 }
1598 
1599 /*
1600  * mrsas_isr:	ISR entry point
1601  * input:		argument pointer
1602  *
1603  * This function is the interrupt service routine entry point.  There are two
1604  * types of interrupts, state change interrupt and response interrupt.  If an
1605  * interrupt is not ours, we just return.
1606  */
1607 void
1608 mrsas_isr(void *arg)
1609 {
1610 	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1611 	struct mrsas_softc *sc = irq_context->sc;
1612 	int status = 0;
1613 
1614 	if (sc->mask_interrupts)
1615 		return;
1616 
1617 	if (!sc->msix_vectors) {
1618 		status = mrsas_clear_intr(sc);
1619 		if (!status)
1620 			return;
1621 	}
1622 	/* If we are resetting, bail */
1623 	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1624 		printf(" Entered into ISR when OCR is going active. \n");
1625 		mrsas_clear_intr(sc);
1626 		return;
1627 	}
1628 	/* Process for reply request and clear response interrupt */
1629 	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1630 		mrsas_clear_intr(sc);
1631 
1632 	return;
1633 }
1634 
1635 /*
1636  * mrsas_complete_cmd:	Process reply request
1637  * input:				Adapter instance soft state
1638  *
1639  * This function is called from mrsas_isr() to process reply request and clear
1640  * response interrupt. Processing of the reply request entails walking
1641  * through the reply descriptor array for the command request  pended from
1642  * Firmware.  We look at the Function field to determine the command type and
1643  * perform the appropriate action.  Before we return, we clear the response
1644  * interrupt.
1645  */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type, *sense;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id, data_length;
	int threshold_reply_count = 0;
#if TM_DEBUG
	MR_TASK_MANAGE_REQUEST *mr_tm_req;
	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
#endif

	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/* Resume walking this queue's reply ring from the saved position. */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Find our reply descriptor for the command and process */
	/* All-ones words mark an empty (already consumed) descriptor. */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		/* SMIDs are 1-based; slot 0 of mpt_cmd_list is SMID 1. */
		smid = le16toh(reply_desc->SMID);
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.raid_context.status;
		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
		sense = cmd_mpt->sense;
		data_length = scsi_io_req->DataLength;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_TASK_MGMT:
#if TM_DEBUG
			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
			    &mr_tm_req->TmRequest;
			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
#endif
            wakeup_one((void *)&sc->ocr_chan);
            break;
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* R1 load balancing for READ */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
				/* No RAID 1/10 peer write: complete the CCB now. */
				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
				    extStatus, le32toh(data_length), sense);
				mrsas_cmd_done(sc, cmd_mpt);
				mrsas_atomic_dec(&sc->fw_outstanding);
			} else {
				/*
				 * If the peer  Raid  1/10 fast path failed,
				 * mark IO as failed to the scsi layer.
				 * Overwrite the current status by the failed status
				 * and make sure that if any command fails,
				 * driver returns fail status to CAM.
				 */
				cmd_mpt->cmd_completed = 1;
				r1_cmd = cmd_mpt->peer_cmd;
				/* Only the second of the pair to finish completes the CCB. */
				if (r1_cmd->cmd_completed) {
					if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
						status = r1_cmd->io_request->RaidContext.raid_context.status;
						extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
						data_length = r1_cmd->io_request->DataLength;
						sense = r1_cmd->sense;
					}
					/* Detach and release the peer command first. */
					mtx_lock(&sc->sim_lock);
					r1_cmd->ccb_ptr = NULL;
					if (r1_cmd->callout_owner) {
						callout_stop(&r1_cmd->cm_callout);
						r1_cmd->callout_owner  = false;
					}
					mtx_unlock(&sc->sim_lock);
					mrsas_release_mpt_cmd(r1_cmd);
					mrsas_atomic_dec(&sc->fw_outstanding);
					mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
					    extStatus, le32toh(data_length), sense);
					mrsas_cmd_done(sc, cmd_mpt);
					/* Second decrement accounts for the peer command. */
					mrsas_atomic_dec(&sc->fw_outstanding);
				}
			}
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/*
			 * Make sure NOT TO release the mfi command from the called
			 * function's context if it is fired with issue_polled call.
			 * And also make sure that the issue_polled call should only be
			 * used if INTERRUPT IS DISABLED.
			 */
			if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
				mrsas_release_mfi_cmd(cmd_mfi);
			else
				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			break;
		}

		/* Advance (and wrap) this queue's consumer index. */
		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if (sc->msix_combined)
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if (sc->msix_combined) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
1831 
1832 /*
1833  * mrsas_map_mpt_cmd_status:	Allocate DMAable memory.
1834  * input:						Adapter instance soft state
1835  *
1836  * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1837  * It checks the command status and maps the appropriate CAM status for the
1838  * CCB.
1839  */
1840 void
1841 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
1842     u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
1843 {
1844 	struct mrsas_softc *sc = cmd->sc;
1845 	u_int8_t *sense_data;
1846 
1847 	switch (status) {
1848 	case MFI_STAT_OK:
1849 		ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1850 		break;
1851 	case MFI_STAT_SCSI_IO_FAILED:
1852 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1853 		ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1854 		sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
1855 		if (sense_data) {
1856 			/* For now just copy 18 bytes back */
1857 			memcpy(sense_data, sense, 18);
1858 			ccb_ptr->csio.sense_len = 18;
1859 			ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1860 		}
1861 		break;
1862 	case MFI_STAT_LD_OFFLINE:
1863 	case MFI_STAT_DEVICE_NOT_FOUND:
1864 		if (ccb_ptr->ccb_h.target_lun)
1865 			ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1866 		else
1867 			ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1868 		break;
1869 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1870 		ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1871 		break;
1872 	default:
1873 		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1874 		ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1875 		ccb_ptr->csio.scsi_status = status;
1876 	}
1877 	return;
1878 }
1879 
1880 /*
1881  * mrsas_alloc_mem:	Allocate DMAable memory
1882  * input:			Adapter instance soft state
1883  *
1884  * This function creates the parent DMA tag and allocates DMAable memory. DMA
1885  * tag describes constraints of DMA mapping. Memory allocated is mapped into
1886  * Kernel virtual address. Callback argument is physical memory address.
1887  */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
		evt_detail_size, count, pd_info_size;

	/*
	 * NOTE(review): on failure this function returns ENOMEM without
	 * unwinding the allocations that already succeeded; presumably the
	 * caller performs full teardown on a non-SUCCESS return — confirm
	 * against the attach/error path.
	 */

	/*
	 * Allocate parent DMA tag.  All other tags below are derived from
	 * it, so its constraints are deliberately unrestricted.
	 */
	if (bus_dma_tag_create(
	    bus_get_dma_tag(sc->mrsas_dev),	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    BUS_SPACE_UNRESTRICTED,	/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer (driver version string handed to FW
	 * during IOC INIT; must live below 4GB).
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames (one contiguous region sized by
	 * io_frames_alloc_sz, computed in mrsas_init_adapter()).
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames (SGE chain storage, sized by
	 * chain_frames_alloc_sz from mrsas_init_adapter()).
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	/* One reply queue per MSI-X vector, or a single queue otherwise. */
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array (one reply_alloc_sz region per
	 * reply queue, contiguous).
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * NOTE(review): unlike the other buffers, the reply descriptor
	 * region is not bzero'ed here — presumably it is initialized
	 * elsewhere before use; confirm.
	 */
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array.  Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for Event detail structure (AEN payload buffer).
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for PD INFO structure (physical drive info DCMD buffer).
	 */
	pd_info_size = sizeof(struct mrsas_pd_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    pd_info_size,
	    1,
	    pd_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->pd_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
	    BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->pd_info_mem, pd_info_size);
	if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
	    sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
	    &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  Unlike the tags above, maps from this
	 * tag are created per-I/O, so it carries a lock (sc->io_lock) for
	 * deferred-load callbacks.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    maxphys,
	    sc->max_num_sge,		/* nsegments */
	    maxphys,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}
2148 
2149 /*
2150  * mrsas_addr_cb:	Callback function of bus_dmamap_load()
2151  * input:			callback argument, machine dependent type
2152  * 					that describes DMA segments, number of segments, error code
2153  *
2154  * This function is for the driver to receive mapping information resultant of
2155  * the bus_dmamap_load(). The information is actually not being used, but the
2156  * address is saved anyway.
2157  */
2158 void
2159 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2160 {
2161 	bus_addr_t *addr;
2162 
2163 	addr = arg;
2164 	*addr = segs[0].ds_addr;
2165 }
2166 
2167 /*
2168  * mrsas_setup_raidmap:	Set up RAID map.
2169  * input:				Adapter instance soft state
2170  *
2171  * Allocate DMA memory for the RAID maps and perform setup.
2172  */
2173 static int
2174 mrsas_setup_raidmap(struct mrsas_softc *sc)
2175 {
2176 	int i;
2177 
2178 	for (i = 0; i < 2; i++) {
2179 		sc->ld_drv_map[i] =
2180 		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2181 		/* Do Error handling */
2182 		if (!sc->ld_drv_map[i]) {
2183 			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
2184 
2185 			if (i == 1)
2186 				free(sc->ld_drv_map[0], M_MRSAS);
2187 			/* ABORT driver initialization */
2188 			goto ABORT;
2189 		}
2190 	}
2191 
2192 	for (int i = 0; i < 2; i++) {
2193 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2194 		    4, 0,
2195 		    BUS_SPACE_MAXADDR_32BIT,
2196 		    BUS_SPACE_MAXADDR,
2197 		    NULL, NULL,
2198 		    sc->max_map_sz,
2199 		    1,
2200 		    sc->max_map_sz,
2201 		    BUS_DMA_ALLOCNOW,
2202 		    NULL, NULL,
2203 		    &sc->raidmap_tag[i])) {
2204 			device_printf(sc->mrsas_dev,
2205 			    "Cannot allocate raid map tag.\n");
2206 			return (ENOMEM);
2207 		}
2208 		if (bus_dmamem_alloc(sc->raidmap_tag[i],
2209 		    (void **)&sc->raidmap_mem[i],
2210 		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2211 			device_printf(sc->mrsas_dev,
2212 			    "Cannot allocate raidmap memory.\n");
2213 			return (ENOMEM);
2214 		}
2215 		bzero(sc->raidmap_mem[i], sc->max_map_sz);
2216 
2217 		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2218 		    sc->raidmap_mem[i], sc->max_map_sz,
2219 		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2220 		    BUS_DMA_NOWAIT)) {
2221 			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2222 			return (ENOMEM);
2223 		}
2224 		if (!sc->raidmap_mem[i]) {
2225 			device_printf(sc->mrsas_dev,
2226 			    "Cannot allocate memory for raid map.\n");
2227 			return (ENOMEM);
2228 		}
2229 	}
2230 
2231 	if (!mrsas_get_map_info(sc))
2232 		mrsas_sync_map_info(sc);
2233 
2234 	return (0);
2235 
2236 ABORT:
2237 	return (1);
2238 }
2239 
2240 /**
2241  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
2242  * @sc:				Adapter soft state
2243  *
2244  * Return 0 on success.
2245  */
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
	int i;
	uint32_t pd_seq_map_sz;

	/* Sync buffer: header plus one MR_PD_CFG_SEQ per physical device. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));

	/* FW must advertise seq-number JBOD fast path support. */
	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
		sc->use_seqnum_jbod_fp = 0;
		return;
	}
	/* Buffers persist across OCR; only allocate them once. */
	if (sc->jbodmap_mem[0])
		goto skip_alloc;

	/* Double-buffered jbod map, mirroring the RAID map layout. */
	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    pd_seq_map_sz,
		    1,
		    pd_seq_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->jbodmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map tag.\n");
			return;
		}
		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
		    (void **)&sc->jbodmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map memory.\n");
			return;
		}
		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);

		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
		    sc->jbodmap_mem[i], pd_seq_map_sz,
		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
			return;
		}
		/*
		 * NOTE(review): this check cannot fire — bus_dmamem_alloc()
		 * succeeded above, so jbodmap_mem[i] is non-NULL here.
		 */
		if (!sc->jbodmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for jbod map.\n");
			sc->use_seqnum_jbod_fp = 0;
			return;
		}
	}

skip_alloc:
	/* Fast path is enabled only if both sync directions succeed. */
	if (!megasas_sync_pd_seq_num(sc, false) &&
	    !megasas_sync_pd_seq_num(sc, true))
		sc->use_seqnum_jbod_fp = 1;
	else
		sc->use_seqnum_jbod_fp = 0;

	/*
	 * NOTE(review): this message prints even when the sync above failed
	 * and use_seqnum_jbod_fp was cleared — possibly misleading; confirm
	 * intended behavior.
	 */
	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}
2311 
2312 /*
2313  * mrsas_init_fw:	Initialize Firmware
2314  * input:			Adapter soft state
2315  *
2316  * Calls transition_to_ready() to make sure Firmware is in operational state and
2317  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
2318  * issues internal commands to get the controller info after the IOC_INIT
2319  * command response is received by Firmware.  Note:  code relating to
2320  * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2321  * is left here as placeholder.
2322  */
2323 static int
2324 mrsas_init_fw(struct mrsas_softc *sc)
2325 {
2326 
2327 	int ret, loop, ocr = 0;
2328 	u_int32_t max_sectors_1;
2329 	u_int32_t max_sectors_2;
2330 	u_int32_t tmp_sectors;
2331 	u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
2332 	int msix_enable = 0;
2333 	int fw_msix_count = 0;
2334 	int i, j;
2335 
2336 	/* Make sure Firmware is ready */
2337 	ret = mrsas_transition_to_ready(sc, ocr);
2338 	if (ret != SUCCESS) {
2339 		return (ret);
2340 	}
2341 	if (sc->is_ventura || sc->is_aero) {
2342 		scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
2343 #if VD_EXT_DEBUG
2344 		device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
2345 #endif
2346 		sc->maxRaidMapSize = ((scratch_pad_3 >>
2347 		    MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
2348 		    MR_MAX_RAID_MAP_SIZE_MASK);
2349 	}
2350 	/* MSI-x index 0- reply post host index register */
2351 	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2352 	/* Check if MSI-X is supported while in ready state */
2353 	msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2354 
2355 	if (msix_enable) {
2356 		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2357 		    outbound_scratch_pad_2));
2358 
2359 		/* Check max MSI-X vectors */
2360 		if (sc->device_id == MRSAS_TBOLT) {
2361 			sc->msix_vectors = (scratch_pad_2
2362 			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2363 			fw_msix_count = sc->msix_vectors;
2364 		} else {
2365 			/* Invader/Fury supports 96 MSI-X vectors */
2366 			sc->msix_vectors = ((scratch_pad_2
2367 			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2368 			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2369 			fw_msix_count = sc->msix_vectors;
2370 
2371 			if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
2372 				((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
2373 				sc->msix_combined = true;
2374 			/*
2375 			 * Save 1-15 reply post index
2376 			 * address to local memory Index 0
2377 			 * is already saved from reg offset
2378 			 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
2379 			 */
2380 			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2381 			    loop++) {
2382 				sc->msix_reg_offset[loop] =
2383 				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2384 				    (loop * 0x10);
2385 			}
2386 		}
2387 
2388 		/* Don't bother allocating more MSI-X vectors than cpus */
2389 		sc->msix_vectors = min(sc->msix_vectors,
2390 		    mp_ncpus);
2391 
2392 		/* Allocate MSI-x vectors */
2393 		if (mrsas_allocate_msix(sc) == SUCCESS)
2394 			sc->msix_enable = 1;
2395 		else
2396 			sc->msix_enable = 0;
2397 
2398 		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2399 		    "Online CPU %d Current MSIX <%d>\n",
2400 		    fw_msix_count, mp_ncpus, sc->msix_vectors);
2401 	}
2402 	/*
2403      * MSI-X host index 0 is common for all adapter.
2404      * It is used for all MPT based Adapters.
2405 	 */
2406 	if (sc->msix_combined) {
2407 		sc->msix_reg_offset[0] =
2408 		    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
2409 	}
2410 	if (mrsas_init_adapter(sc) != SUCCESS) {
2411 		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2412 		return (1);
2413 	}
2414 
2415 	if (sc->is_ventura || sc->is_aero) {
2416 		scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2417 		    outbound_scratch_pad_4));
2418 		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
2419 			sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);
2420 
2421 		device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
2422 	}
2423 
2424 	/* Allocate internal commands for pass-thru */
2425 	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2426 		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2427 		return (1);
2428 	}
2429 	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2430 	if (!sc->ctrl_info) {
2431 		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2432 		return (1);
2433 	}
2434 	/*
2435 	 * Get the controller info from FW, so that the MAX VD support
2436 	 * availability can be decided.
2437 	 */
2438 	if (mrsas_get_ctrl_info(sc)) {
2439 		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2440 		return (1);
2441 	}
2442 	sc->secure_jbod_support =
2443 	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2444 
2445 	if (sc->secure_jbod_support)
2446 		device_printf(sc->mrsas_dev, "FW supports SED \n");
2447 
2448 	if (sc->use_seqnum_jbod_fp)
2449 		device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2450 
2451 	if (sc->support_morethan256jbod)
2452 		device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n");
2453 
2454 	if (mrsas_setup_raidmap(sc) != SUCCESS) {
2455 		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2456 		    "There seems to be some problem in the controller\n"
2457 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
2458 	}
2459 	megasas_setup_jbod_map(sc);
2460 
2461 	memset(sc->target_list, 0,
2462 		MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
2463 	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
2464 		sc->target_list[i].target_id = 0xffff;
2465 
2466 	/* For pass-thru, get PD/LD list and controller info */
2467 	memset(sc->pd_list, 0,
2468 	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2469 	if (mrsas_get_pd_list(sc) != SUCCESS) {
2470 		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2471 		return (1);
2472 	}
2473 	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2474 	if (mrsas_get_ld_list(sc) != SUCCESS) {
2475 		device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2476 		return (1);
2477 	}
2478 
2479 	if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
2480 		sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
2481 						MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
2482 		if (!sc->streamDetectByLD) {
2483 			device_printf(sc->mrsas_dev,
2484 				"unable to allocate stream detection for pool of LDs\n");
2485 			return (1);
2486 		}
2487 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
2488 			sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
2489 			if (!sc->streamDetectByLD[i]) {
2490 				device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
2491 				for (j = 0; j < i; ++j)
2492 					free(sc->streamDetectByLD[j], M_MRSAS);
2493 				free(sc->streamDetectByLD, M_MRSAS);
2494 				sc->streamDetectByLD = NULL;
2495 				return (1);
2496 			}
2497 			memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
2498 			sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
2499 		}
2500 	}
2501 
2502 	/*
2503 	 * Compute the max allowed sectors per IO: The controller info has
2504 	 * two limits on max sectors. Driver should use the minimum of these
2505 	 * two.
2506 	 *
2507 	 * 1 << stripe_sz_ops.min = max sectors per strip
2508 	 *
2509 	 * Note that older firmwares ( < FW ver 30) didn't report information to
2510 	 * calculate max_sectors_1. So the number ended up as zero always.
2511 	 */
2512 	tmp_sectors = 0;
2513 	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2514 	    sc->ctrl_info->max_strips_per_io;
2515 	max_sectors_2 = sc->ctrl_info->max_request_size;
2516 	tmp_sectors = min(max_sectors_1, max_sectors_2);
2517 	sc->max_sectors_per_req = (sc->max_num_sge - 1) * MRSAS_PAGE_SIZE / 512;
2518 
2519 	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2520 		sc->max_sectors_per_req = tmp_sectors;
2521 
2522 	sc->disableOnlineCtrlReset =
2523 	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2524 	sc->UnevenSpanSupport =
2525 	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2526 	if (sc->UnevenSpanSupport) {
2527 		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2528 		    sc->UnevenSpanSupport);
2529 
2530 		if (MR_ValidateMapInfo(sc))
2531 			sc->fast_path_io = 1;
2532 		else
2533 			sc->fast_path_io = 0;
2534 	}
2535 
2536 	device_printf(sc->mrsas_dev, "max_fw_cmds: %u  max_scsi_cmds: %u\n",
2537 		sc->max_fw_cmds, sc->max_scsi_cmds);
2538 	return (0);
2539 }
2540 
2541 /*
2542  * mrsas_init_adapter:	Initializes the adapter/controller
2543  * input:				Adapter soft state
2544  *
2545  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2546  * ROC/controller.  The FW register is read to determined the number of
2547  * commands that is supported.  All memory allocations for IO is based on
2548  * max_cmd.  Appropriate calculations are performed in this function.
2549  */
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t scratch_pad_2;
	int ret;
	int i = 0;

	/* Read FW status register */
	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	/* Low bits of the scratch pad carry the FW command-slot count. */
	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds - 1;
	/* Reserve MRSAS_MAX_MFI_CMDS slots for internal MFI pass-thru. */
	sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;

	/*
	 * Determine allocation size of command frames.  Reply queue depth
	 * is (max_fw_cmds + 1) rounded up to a multiple of 16, doubled.
	 */
	sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	/* One extra frame beyond max_fw_cmds, plus one leading frame. */
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
	    (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
	scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
	    outbound_scratch_pad_2));

	mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x,"
	    "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x,"
	    "sc->io_frames_alloc_sz 0x%x\n", __func__,
	    sc->reply_q_depth, sc->request_alloc_sz,
	    sc->reply_alloc_sz, sc->io_frames_alloc_sz);

	/*
	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
	 * Firmware support extended IO chain frame which is 4 time more
	 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
	 * 1K 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
	 */
	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_1MB_IO;
	else
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_256K_IO;

	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
	/* SGEs that fit in the main message frame (16 bytes per SGE). */
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
	/* -2: one SGE is consumed by the chain element itself. */
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	mrsas_dprint(sc, MRSAS_INFO,
	    "max sge: 0x%x, max chain frame size: 0x%x, "
	    "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n",
	    sc->max_num_sge,
	    sc->max_chain_frame_sz, sc->max_fw_cmds,
	    sc->chain_frames_alloc_sz);

	/* Used for pass thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

	/* Chain element sits in the last SGE slot of the main frame. */
	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION)) / 16;

	/* One reply-index slot per MSI-X vector (or one if disabled). */
	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;

	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	/* Allocate DMA memory, MPT command pool, then issue IOC INIT. */
	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return (ret);

	return (0);
}
2636 
2637 /*
2638  * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2639  * input:				Adapter soft state
2640  *
2641  * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2642  */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/*
	 * Allocate IOC INIT command.  The first 1024 bytes hold the MFI
	 * init frame; the MPI2 IOC INIT request is placed at offset 1024
	 * (see mrsas_ioc_init()).
	 */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	/* Load and record the physical address in ioc_init_phys_mem. */
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}
2678 
2679 /*
2680  * mrsas_free_ioc_cmd:	Allocates memory for IOC Init command
2681  * input:				Adapter soft state
2682  *
2683  * Deallocates memory of the IOC Init cmd.
2684  */
2685 void
2686 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2687 {
2688 	if (sc->ioc_init_phys_mem)
2689 		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2690 	if (sc->ioc_init_mem != NULL)
2691 		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2692 	if (sc->ioc_init_tag != NULL)
2693 		bus_dma_tag_destroy(sc->ioc_init_tag);
2694 }
2695 
2696 /*
2697  * mrsas_ioc_init:	Sends IOC Init command to FW
2698  * input:			Adapter soft state
2699  *
2700  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2701  */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	/*
	 * Probe scratch pad 2 for the SYNC_CACHE capability bit, unless
	 * sync-cache support has been blocked via sc->block_sync_cache.
	 */
	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	/*
	 * The MPI2 IOC INIT request is built 1KB into the DMA buffer; the
	 * MFI init frame at offset 0 points at it via
	 * queue_info_new_phys_addr_lo below.
	 */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = htole16(MPI2_VERSION);
	IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION);
	IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
	IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth);
	IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr);
	IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr);
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
	IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;

	/* Wrap the MPI2 request in an MFI init frame at the buffer start. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;	/* polled below until FW overwrites it */
	init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* driver support Extended MSIX */
	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Advertise the driver version string to FW when the buffer exists. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		/*
		 * NOTE(review): driver_ver_lo is 32-bit, so assigning a
		 * bus_addr_t truncates on 64-bit; presumably the version
		 * buffer is DMA-allocated below 4GB — confirm against the
		 * verbuf allocation constraints.
		 */
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;

	/* Byte-swap all capability bits in one shot via the union register. */
	init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg);

	/* Point the MFI frame at the MPI2 IOC INIT request built above. */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr);
	init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t));

	/* Submit the MFI frame itself as an MFA-type request descriptor. */
	req_desc.addr.Words = htole64((bus_addr_t)sc->ioc_init_phys_mem);
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	/* cmd_status: 0 = success, 0xFF = still pending (timeout), else FW error. */
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* Aero controllers advertise single-write atomic descriptor support. */
	if (sc->is_aero) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->atomic_desc_support = (scratch_pad_2 &
			MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
		device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
			sc->atomic_desc_support ? "Yes" : "No");
	}

	/* The IOC INIT DMA buffer is only needed for this one command. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2810 
2811 /*
2812  * mrsas_alloc_mpt_cmds:	Allocates the command packets
2813  * input:					Adapter instance soft state
2814  *
2815  * This function allocates the internal commands for IOs. Each command that is
2816  * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2817  * array is allocated with mrsas_mpt_cmd context.  The free commands are
2818  * maintained in a linked list (cmd pool). SMID value range is from 1 to
2819  * max_fw_cmds.
2820  */
2821 int
2822 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2823 {
2824 	int i, j;
2825 	u_int32_t max_fw_cmds, count;
2826 	struct mrsas_mpt_cmd *cmd;
2827 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2828 	u_int32_t offset, chain_offset, sense_offset;
2829 	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2830 	u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2831 
2832 	max_fw_cmds = sc->max_fw_cmds;
2833 
2834 	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2835 	if (!sc->req_desc) {
2836 		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2837 		return (ENOMEM);
2838 	}
2839 	memset(sc->req_desc, 0, sc->request_alloc_sz);
2840 
2841 	/*
2842 	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2843 	 * Allocate the dynamic array first and then allocate individual
2844 	 * commands.
2845 	 */
2846 	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
2847 	    M_MRSAS, M_NOWAIT);
2848 	if (!sc->mpt_cmd_list) {
2849 		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2850 		return (ENOMEM);
2851 	}
2852 	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
2853 	for (i = 0; i < max_fw_cmds; i++) {
2854 		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2855 		    M_MRSAS, M_NOWAIT);
2856 		if (!sc->mpt_cmd_list[i]) {
2857 			for (j = 0; j < i; j++)
2858 				free(sc->mpt_cmd_list[j], M_MRSAS);
2859 			free(sc->mpt_cmd_list, M_MRSAS);
2860 			sc->mpt_cmd_list = NULL;
2861 			return (ENOMEM);
2862 		}
2863 	}
2864 
2865 	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2866 	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2867 	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2868 	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2869 	sense_base = (u_int8_t *)sc->sense_mem;
2870 	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2871 	for (i = 0; i < max_fw_cmds; i++) {
2872 		cmd = sc->mpt_cmd_list[i];
2873 		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2874 		chain_offset = sc->max_chain_frame_sz * i;
2875 		sense_offset = MRSAS_SENSE_LEN * i;
2876 		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2877 		cmd->index = i + 1;
2878 		cmd->ccb_ptr = NULL;
2879 		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2880 		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
2881 		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2882 		cmd->sc = sc;
2883 		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2884 		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2885 		cmd->io_request_phys_addr = io_req_base_phys + offset;
2886 		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2887 		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2888 		cmd->sense = sense_base + sense_offset;
2889 		cmd->sense_phys_addr = sense_base_phys + sense_offset;
2890 		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2891 			return (FAIL);
2892 		}
2893 		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2894 	}
2895 
2896 	/* Initialize reply descriptor array to 0xFFFFFFFF */
2897 	reply_desc = sc->reply_desc_mem;
2898 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2899 	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2900 		reply_desc->Words = MRSAS_ULONG_MAX;
2901 	}
2902 	return (0);
2903 }
2904 
2905 /*
2906  * mrsas_write_64bit_req_dsc:	Writes 64 bit request descriptor to FW
2907  * input:			Adapter softstate
2908  * 				request descriptor address low
2909  * 				request descriptor address high
2910  */
void
mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * pci_lock keeps the low/high register writes of one descriptor
	 * paired; presumably the controller latches the full 64-bit value
	 * when the high half lands, so interleaved submissions would
	 * corrupt it — confirm against the controller programming guide.
	 */
	mtx_lock(&sc->pci_lock);
	/*
	 * Descriptors are built little-endian, so convert back to host
	 * order here; mrsas_write_reg performs the bus-level byte handling.
	 */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    le32toh(req_desc_lo));
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    le32toh(req_desc_hi));
	mtx_unlock(&sc->pci_lock);
}
2922 
2923 /*
2924  * mrsas_fire_cmd:	Sends command to FW
2925  * input:		Adapter softstate
2926  * 			request descriptor address low
2927  * 			request descriptor address high
2928  *
2929  * This functions fires the command to Firmware by writing to the
2930  * inbound_low_queue_port and inbound_high_queue_port.
2931  */
2932 void
2933 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2934     u_int32_t req_desc_hi)
2935 {
2936 	if (sc->atomic_desc_support)
2937 		mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
2938 		    le32toh(req_desc_lo));
2939 	else
2940 		mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
2941 }
2942 
2943 /*
2944  * mrsas_transition_to_ready:  Move FW to Ready state input:
2945  * Adapter instance soft state
2946  *
2947  * During the initialization, FW passes can potentially be in any one of several
2948  * possible states. If the FW in operational, waiting-for-handshake states,
2949  * driver must take steps to bring it to ready state. Otherwise, it has to
2950  * wait for the ready state.
2951  */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	/* cur_state is assigned per-case purely for readability/debugging. */
	u_int32_t cur_state __unused;
	u_int32_t abs_state, curr_abs_state;

	/* FW state lives in the low bits of the outbound scratch pad. */
	val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/*
		 * Snapshot the full (unmasked) scratch pad; progress below
		 * is detected by this raw value changing, not just the
		 * masked state bits.
		 */
		abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* During OCR a FAULTed FW is waited on; otherwise fatal. */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Wait for the doorbell busy bit (bit 0) to clear. */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			/* Any change in the raw scratch pad counts as progress. */
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
3057 
3058 /*
3059  * mrsas_get_mfi_cmd:	Get a cmd from free command pool
3060  * input:				Adapter soft state
3061  *
3062  * This function removes an MFI command from the command list.
3063  */
3064 struct mrsas_mfi_cmd *
3065 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
3066 {
3067 	struct mrsas_mfi_cmd *cmd = NULL;
3068 
3069 	mtx_lock(&sc->mfi_cmd_pool_lock);
3070 	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
3071 		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
3072 		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
3073 	}
3074 	mtx_unlock(&sc->mfi_cmd_pool_lock);
3075 
3076 	return cmd;
3077 }
3078 
3079 /*
3080  * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
3081  * input:				Adapter Context.
3082  *
3083  * This function will check FW status register and flag do_timeout_reset flag.
3084  * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
3085  * trigger reset.
3086  */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;
	u_int8_t tm_target_reset_failed = 0;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
	sc->ocr_thread_active = 1;
	/* sim_lock is held for the whole loop except the drain window below. */
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		/* Exit conditions: driver detach or unrecoverable HW error. */
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		fw_status = mrsas_read_reg_with_retries(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		/*
		 * Act on: FW fault, a requested timeout reset, or pending
		 * target resets queued by the IO timeout handler.
		 */
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
			mrsas_atomic_read(&sc->target_reset_outstanding)) {
			/* First, freeze further IOs to come to the SIM */
			mrsas_xpt_freeze(sc);

			/* If this is an IO timeout then go for target reset */
			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
				device_printf(sc->mrsas_dev, "Initiating Target RESET "
				    "because of SCSI IO timeout!\n");

				/* Let the remaining IOs to complete */
				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
				      "mrsas_reset_targets", 5 * hz);

				/* Try to reset the target device */
				if (mrsas_reset_targets(sc) == FAIL)
					tm_target_reset_failed = 1;
			}

			/* If this is a DCMD timeout or FW fault,
			 * then go for controller reset
			 */
			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
				if (tm_target_reset_failed)
					device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
					    "TM FAILURE!\n");
				else
					device_printf(sc->mrsas_dev, "Initiaiting OCR "
						"because of %s!\n", sc->do_timedout_reset ?
						"DCMD IO Timeout" : "FW fault");

				/* reset_in_progress is read under the ioctl spin lock. */
				mtx_lock_spin(&sc->ioctl_lock);
				sc->reset_in_progress = 1;
				mtx_unlock_spin(&sc->ioctl_lock);
				sc->reset_count++;

				/*
				 * Wait for the AEN task to be completed if it is running.
				 */
				/* Drop sim_lock across the drain — presumably the AEN
				 * task itself acquires sim_lock; confirm. */
				mtx_unlock(&sc->sim_lock);
				taskqueue_drain(sc->ev_tq, &sc->ev_task);
				mtx_lock(&sc->sim_lock);

				/* No new AEN work may run while the reset is in flight. */
				taskqueue_block(sc->ev_tq);
				/* Try to reset the controller */
				mrsas_reset_ctrl(sc, sc->do_timedout_reset);

				sc->do_timedout_reset = 0;
				sc->reset_in_progress = 0;
				tm_target_reset_failed = 0;
				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
				memset(sc->target_reset_pool, 0,
				    sizeof(sc->target_reset_pool));
				taskqueue_unblock(sc->ev_tq);
			}

			/* Now allow IOs to come to the SIM */
			 mrsas_xpt_release(sc);
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
3179 
3180 /*
3181  * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
3182  * input:					Adapter Context.
3183  *
3184  * This function will clear reply descriptor so that post OCR driver and FW will
3185  * lost old history.
3186  */
3187 void
3188 mrsas_reset_reply_desc(struct mrsas_softc *sc)
3189 {
3190 	int i, count;
3191 	pMpi2ReplyDescriptorsUnion_t reply_desc;
3192 
3193 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3194 	for (i = 0; i < count; i++)
3195 		sc->last_reply_idx[i] = 0;
3196 
3197 	reply_desc = sc->reply_desc_mem;
3198 	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
3199 		reply_desc->Words = MRSAS_ULONG_MAX;
3200 	}
3201 }
3202 
3203 /*
3204  * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
3205  * input:				Adapter Context.
3206  *
3207  * This function will run from thread context so that it can sleep. 1. Do not
3208  * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
3209  * to complete for 180 seconds. 3. If #2 does not find any outstanding
3210  * command Controller is in working state, so skip OCR. Otherwise, do
3211  * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
3212  * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
3213  * OCR, Re-fire Management command and move Controller to Operation state.
3214  */
3215 int
3216 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
3217 {
3218 	int retval = SUCCESS, i, j, retry = 0;
3219 	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
3220 	union ccb *ccb;
3221 	struct mrsas_mfi_cmd *mfi_cmd;
3222 	struct mrsas_mpt_cmd *mpt_cmd;
3223 	union mrsas_evt_class_locale class_locale;
3224 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3225 
3226 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3227 		device_printf(sc->mrsas_dev,
3228 		    "mrsas: Hardware critical error, returning FAIL.\n");
3229 		return FAIL;
3230 	}
3231 	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3232 	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
3233 	mrsas_disable_intr(sc);
3234 	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
3235 	    sc->mrsas_fw_fault_check_delay * hz);
3236 
3237 	/* First try waiting for commands to complete */
3238 	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
3239 		mrsas_dprint(sc, MRSAS_OCR,
3240 		    "resetting adapter from %s.\n",
3241 		    __func__);
3242 		/* Now return commands back to the CAM layer */
3243 		mtx_unlock(&sc->sim_lock);
3244 		for (i = 0; i < sc->max_fw_cmds; i++) {
3245 			mpt_cmd = sc->mpt_cmd_list[i];
3246 
3247 			if (mpt_cmd->peer_cmd) {
3248 				mrsas_dprint(sc, MRSAS_OCR,
3249 				    "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
3250 				    i, mpt_cmd, mpt_cmd->peer_cmd);
3251 			}
3252 
3253 			if (mpt_cmd->ccb_ptr) {
3254 				if (mpt_cmd->callout_owner) {
3255 					ccb = (union ccb *)(mpt_cmd->ccb_ptr);
3256 					ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3257 					mrsas_cmd_done(sc, mpt_cmd);
3258 				} else {
3259 					mpt_cmd->ccb_ptr = NULL;
3260 					mrsas_release_mpt_cmd(mpt_cmd);
3261 				}
3262 			}
3263 		}
3264 
3265 		mrsas_atomic_set(&sc->fw_outstanding, 0);
3266 
3267 		mtx_lock(&sc->sim_lock);
3268 
3269 		status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3270 		    outbound_scratch_pad));
3271 		abs_state = status_reg & MFI_STATE_MASK;
3272 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
3273 		if (sc->disableOnlineCtrlReset ||
3274 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3275 			/* Reset not supported, kill adapter */
3276 			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
3277 			mrsas_kill_hba(sc);
3278 			retval = FAIL;
3279 			goto out;
3280 		}
3281 		/* Now try to reset the chip */
3282 		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
3283 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3284 			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
3285 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3286 			    MPI2_WRSEQ_1ST_KEY_VALUE);
3287 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3288 			    MPI2_WRSEQ_2ND_KEY_VALUE);
3289 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3290 			    MPI2_WRSEQ_3RD_KEY_VALUE);
3291 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3292 			    MPI2_WRSEQ_4TH_KEY_VALUE);
3293 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3294 			    MPI2_WRSEQ_5TH_KEY_VALUE);
3295 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3296 			    MPI2_WRSEQ_6TH_KEY_VALUE);
3297 
3298 			/* Check that the diag write enable (DRWE) bit is on */
3299 			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3300 			    fusion_host_diag));
3301 			retry = 0;
3302 			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3303 				DELAY(100 * 1000);
3304 				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3305 				    fusion_host_diag));
3306 				if (retry++ == 100) {
3307 					mrsas_dprint(sc, MRSAS_OCR,
3308 					    "Host diag unlock failed!\n");
3309 					break;
3310 				}
3311 			}
3312 			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3313 				continue;
3314 
3315 			/* Send chip reset command */
3316 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3317 			    host_diag | HOST_DIAG_RESET_ADAPTER);
3318 			DELAY(3000 * 1000);
3319 
3320 			/* Make sure reset adapter bit is cleared */
3321 			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3322 			    fusion_host_diag));
3323 			retry = 0;
3324 			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3325 				DELAY(100 * 1000);
3326 				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3327 				    fusion_host_diag));
3328 				if (retry++ == 1000) {
3329 					mrsas_dprint(sc, MRSAS_OCR,
3330 					    "Diag reset adapter never cleared!\n");
3331 					break;
3332 				}
3333 			}
3334 			if (host_diag & HOST_DIAG_RESET_ADAPTER)
3335 				continue;
3336 
3337 			abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3338 			    outbound_scratch_pad)) & MFI_STATE_MASK;
3339 			retry = 0;
3340 
3341 			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3342 				DELAY(100 * 1000);
3343 				abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3344 				    outbound_scratch_pad)) & MFI_STATE_MASK;
3345 			}
3346 			if (abs_state <= MFI_STATE_FW_INIT) {
3347 				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3348 				    " state = 0x%x\n", abs_state);
3349 				continue;
3350 			}
3351 			/* Wait for FW to become ready */
3352 			if (mrsas_transition_to_ready(sc, 1)) {
3353 				mrsas_dprint(sc, MRSAS_OCR,
3354 				    "mrsas: Failed to transition controller to ready.\n");
3355 				continue;
3356 			}
3357 			mrsas_reset_reply_desc(sc);
3358 			if (mrsas_ioc_init(sc)) {
3359 				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
3360 				continue;
3361 			}
3362 			for (j = 0; j < sc->max_fw_cmds; j++) {
3363 				mpt_cmd = sc->mpt_cmd_list[j];
3364 				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3365 					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3366 					/* If not an IOCTL then release the command else re-fire */
3367 					if (!mfi_cmd->sync_cmd) {
3368 						mrsas_release_mfi_cmd(mfi_cmd);
3369 					} else {
3370 						req_desc = mrsas_get_request_desc(sc,
3371 						    mfi_cmd->cmd_id.context.smid - 1);
3372 						mrsas_dprint(sc, MRSAS_OCR,
3373 						    "Re-fire command DCMD opcode 0x%x index %d\n ",
3374 						    mfi_cmd->frame->dcmd.opcode, j);
3375 						if (!req_desc)
3376 							device_printf(sc->mrsas_dev,
3377 							    "Cannot build MPT cmd.\n");
3378 						else
3379 							mrsas_fire_cmd(sc, req_desc->addr.u.low,
3380 							    req_desc->addr.u.high);
3381 					}
3382 				}
3383 			}
3384 
3385 			/* Reset load balance info */
3386 			memset(sc->load_balance_info, 0,
3387 			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3388 
3389 			if (mrsas_get_ctrl_info(sc)) {
3390 				mrsas_kill_hba(sc);
3391 				retval = FAIL;
3392 				goto out;
3393 			}
3394 			if (!mrsas_get_map_info(sc))
3395 				mrsas_sync_map_info(sc);
3396 
3397 			megasas_setup_jbod_map(sc);
3398 
3399 			if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
3400 				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
3401 					memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
3402 					sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
3403 				}
3404 			}
3405 
3406 			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3407 			mrsas_enable_intr(sc);
3408 			sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3409 
3410 			/* Register AEN with FW for last sequence number */
3411 			class_locale.members.reserved = 0;
3412 			class_locale.members.locale = MR_EVT_LOCALE_ALL;
3413 			class_locale.members.class = MR_EVT_CLASS_DEBUG;
3414 
3415 			mtx_unlock(&sc->sim_lock);
3416 			if (mrsas_register_aen(sc, sc->last_seq_num,
3417 			    class_locale.word)) {
3418 				device_printf(sc->mrsas_dev,
3419 				    "ERROR: AEN registration FAILED from OCR !!! "
3420 				    "Further events from the controller cannot be notified."
3421 				    "Either there is some problem in the controller"
3422 				    "or the controller does not support AEN.\n"
3423 				    "Please contact to the SUPPORT TEAM if the problem persists\n");
3424 			}
3425 			mtx_lock(&sc->sim_lock);
3426 
3427 			/* Adapter reset completed successfully */
3428 			device_printf(sc->mrsas_dev, "Reset successful\n");
3429 			retval = SUCCESS;
3430 			goto out;
3431 		}
3432 		/* Reset failed, kill the adapter */
3433 		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3434 		mrsas_kill_hba(sc);
3435 		retval = FAIL;
3436 	} else {
3437 		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3438 		mrsas_enable_intr(sc);
3439 		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3440 	}
3441 out:
3442 	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3443 	mrsas_dprint(sc, MRSAS_OCR,
3444 	    "Reset Exit with %d.\n", retval);
3445 	return retval;
3446 }
3447 
3448 /*
3449  * mrsas_kill_hba:	Kill HBA when OCR is not supported
3450  * input:			Adapter Context.
3451  *
3452  * This function will kill HBA when OCR is not supported.
3453  */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter dead first so no new work is accepted. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* One-second grace period before stopping the adapter. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail back any IOCTLs that will never get a FW completion now. */
	mrsas_complete_outstanding_ioctls(sc);
}
3466 
3467 /**
3468  * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
3469  * input:			Controller softc
3470  *
3471  * Returns void
3472  */
3473 void
3474 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3475 {
3476 	int i;
3477 	struct mrsas_mpt_cmd *cmd_mpt;
3478 	struct mrsas_mfi_cmd *cmd_mfi;
3479 	u_int32_t count, MSIxIndex;
3480 
3481 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3482 	for (i = 0; i < sc->max_fw_cmds; i++) {
3483 		cmd_mpt = sc->mpt_cmd_list[i];
3484 
3485 		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3486 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3487 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3488 				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3489 					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3490 					    cmd_mpt->io_request->RaidContext.raid_context.status);
3491 			}
3492 		}
3493 	}
3494 }
3495 
3496 /*
3497  * mrsas_wait_for_outstanding:	Wait for outstanding commands
3498  * input:						Adapter Context.
3499  *
3500  * This function will wait for 180 seconds for outstanding commands to be
3501  * completed.
3502  */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;

	/* One-second ticks: DELAY(1000 * 1000) at the bottom of the loop. */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			/* Drain each reply queue outside the sim lock. */
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
			retval = 1;
			goto out;
		}
		/* DCMD timeouts always escalate to a reset — no point waiting. */
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;	/* all IO drained: retval stays 0 */

		/* Periodically log progress and help drain the reply queues. */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
		}
		DELAY(1000 * 1000);
	}

	/* Timed out with IO still outstanding: caller must reset. */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}
3561 
3562 /*
3563  * mrsas_release_mfi_cmd:	Return a cmd to free command pool
3564  * input:					Command packet for return to free cmd pool
3565  *
3566  * This function returns the MFI & MPT command to the command list.
3567  */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
{
	struct mrsas_softc *sc = cmd_mfi->sc;
	struct mrsas_mpt_cmd *cmd_mpt;

	/*
	 * Lock order: mfi_cmd_pool_lock is taken first, then
	 * mpt_cmd_pool_lock nested inside it — keep this order consistent
	 * with other pool users to avoid deadlock.
	 */
	mtx_lock(&sc->mfi_cmd_pool_lock);
	/*
	 * Release the mpt command (if at all it is allocated
	 * associated with the mfi command
	 */
	/* smid == 0 means no MPT command was ever paired with this MFI cmd. */
	if (cmd_mfi->cmd_id.context.smid) {
		mtx_lock(&sc->mpt_cmd_pool_lock);
		/* Get the mpt cmd from mfi cmd frame's smid value */
		cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
		cmd_mpt->flags = 0;
		cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
		mtx_unlock(&sc->mpt_cmd_pool_lock);
	}
	/* Release the mfi command */
	cmd_mfi->ccb_ptr = NULL;
	cmd_mfi->cmd_id.frame_count = 0;
	TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}
3596 
3597 /*
3598  * mrsas_get_controller_info:	Returns FW's controller structure
3599  * input:						Adapter soft state
3600  * 								Controller information structure
3601  *
3602  * Issues an internal command (DCMD) to get the FW's controller structure. This
3603  * information is mainly used to find out the maximum IO transfer per command
3604  * supported by the FW.
3605  */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;	/* cleared once the DCMD completes in time */
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMAable buffer the FW will fill with its mrsas_ctrl_info. */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* One-SGE READ DCMD: FW writes the controller info into our buffer. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* FW overwrites this on completion */
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info));

	/* Poll when interrupts are masked; otherwise sleep for completion. */
	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	else {
		/* Copy out and byte-swap only the fields the driver reads. */
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
		le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties);
		le32_to_cpus(&sc->ctrl_info->adapterOperations2);
		le32_to_cpus(&sc->ctrl_info->adapterOperations3);
		le16_to_cpus(&sc->ctrl_info->adapterOperations4);
	}

	do_ocr = 0;
	mrsas_update_ext_vd_details(sc);

	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
	sc->support_morethan256jbod =
		sc->ctrl_info->adapterOperations4.supportPdMapTargetId;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;

dcmd_timeout:
	mrsas_free_ctlr_info_cmd(sc);

	/* A timed-out DCMD schedules an online controller reset (OCR). */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	/*
	 * NOTE(review): when interrupts are masked the cmd is intentionally
	 * not released here — presumably the reset path reclaims outstanding
	 * MFI commands; confirm against the OCR code.
	 */
	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
3677 
3678 /*
3679  * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3680  * input:
3681  *	sc - Controller's softc
3682 */
3683 static void
3684 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3685 {
3686 	u_int32_t ventura_map_sz = 0;
3687 	sc->max256vdSupport =
3688 		sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3689 
3690 	/* Below is additional check to address future FW enhancement */
3691 	if (sc->ctrl_info->max_lds > 64)
3692 		sc->max256vdSupport = 1;
3693 
3694 	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3695 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3696 	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3697 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3698 	if (sc->max256vdSupport) {
3699 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3700 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3701 	} else {
3702 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3703 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3704 	}
3705 
3706 	if (sc->maxRaidMapSize) {
3707 		ventura_map_sz = sc->maxRaidMapSize *
3708 		    MR_MIN_MAP_SIZE;
3709 		sc->current_map_sz = ventura_map_sz;
3710 		sc->max_map_sz = ventura_map_sz;
3711 	} else {
3712 		sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3713 		    (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
3714 		sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3715 		sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3716 		if (sc->max256vdSupport)
3717 			sc->current_map_sz = sc->new_map_sz;
3718 		else
3719 			sc->current_map_sz = sc->old_map_sz;
3720 	}
3721 
3722 	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
3723 #if VD_EXT_DEBUG
3724 	device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
3725 	    sc->maxRaidMapSize);
3726 	device_printf(sc->mrsas_dev,
3727 	    "new_map_sz = 0x%x, old_map_sz = 0x%x, "
3728 	    "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
3729 	    "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
3730 	    sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
3731 	    sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
3732 #endif
3733 }
3734 
3735 /*
3736  * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3737  * input:						Adapter soft state
3738  *
3739  * Allocates DMAable memory for the controller info internal command.
3740  */
3741 int
3742 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3743 {
3744 	int ctlr_info_size;
3745 
3746 	/* Allocate get controller info command */
3747 	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3748 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3749 	    1, 0,
3750 	    BUS_SPACE_MAXADDR_32BIT,
3751 	    BUS_SPACE_MAXADDR,
3752 	    NULL, NULL,
3753 	    ctlr_info_size,
3754 	    1,
3755 	    ctlr_info_size,
3756 	    BUS_DMA_ALLOCNOW,
3757 	    NULL, NULL,
3758 	    &sc->ctlr_info_tag)) {
3759 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3760 		return (ENOMEM);
3761 	}
3762 	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3763 	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3764 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3765 		return (ENOMEM);
3766 	}
3767 	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3768 	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3769 	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3770 		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3771 		return (ENOMEM);
3772 	}
3773 	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3774 	return (0);
3775 }
3776 
3777 /*
3778  * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3779  * input:						Adapter soft state
3780  *
3781  * Deallocates memory of the get controller info cmd.
3782  */
3783 void
3784 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3785 {
3786 	if (sc->ctlr_info_phys_addr)
3787 		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3788 	if (sc->ctlr_info_mem != NULL)
3789 		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3790 	if (sc->ctlr_info_tag != NULL)
3791 		bus_dma_tag_destroy(sc->ctlr_info_tag);
3792 }
3793 
3794 /*
3795  * mrsas_issue_polled:	Issues a polling command
3796  * inputs:				Adapter soft state
3797  * 						Command packet to be issued
3798  *
3799  * This function is for posting of internal commands to Firmware.  MFI requires
3800  * the cmd_status to be set to 0xFF before posting.  The maximun wait time of
3801  * the poll response timer is 180 seconds.
3802  */
3803 int
3804 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3805 {
3806 	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3807 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3808 	int i, retcode = SUCCESS;
3809 
3810 	frame_hdr->cmd_status = 0xFF;
3811 	frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
3812 
3813 	/* Issue the frame using inbound queue port */
3814 	if (mrsas_issue_dcmd(sc, cmd)) {
3815 		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3816 		return (1);
3817 	}
3818 	/*
3819 	 * Poll response timer to wait for Firmware response.  While this
3820 	 * timer with the DELAY call could block CPU, the time interval for
3821 	 * this is only 1 millisecond.
3822 	 */
3823 	if (frame_hdr->cmd_status == 0xFF) {
3824 		for (i = 0; i < (max_wait * 1000); i++) {
3825 			if (frame_hdr->cmd_status == 0xFF)
3826 				DELAY(1000);
3827 			else
3828 				break;
3829 		}
3830 	}
3831 	if (frame_hdr->cmd_status == 0xFF) {
3832 		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3833 		    "seconds from %s\n", max_wait, __func__);
3834 		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3835 		    cmd->frame->dcmd.opcode);
3836 		retcode = ETIMEDOUT;
3837 	}
3838 	return (retcode);
3839 }
3840 
3841 /*
3842  * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3843  * input:				Adapter soft state mfi cmd pointer
3844  *
3845  * This function is called by mrsas_issued_blocked_cmd() and
3846  * mrsas_issued_polled(), to build the MPT command and then fire the command
3847  * to Firmware.
3848  */
3849 int
3850 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3851 {
3852 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3853 
3854 	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3855 	if (!req_desc) {
3856 		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3857 		return (1);
3858 	}
3859 	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3860 
3861 	return (0);
3862 }
3863 
3864 /*
3865  * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3866  * input:				Adapter soft state mfi cmd to build
3867  *
3868  * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3869  * command and prepares the MPT command to send to Firmware.
3870  */
3871 MRSAS_REQUEST_DESCRIPTOR_UNION *
3872 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3873 {
3874 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3875 	u_int16_t index;
3876 
3877 	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3878 		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3879 		return NULL;
3880 	}
3881 	index = cmd->cmd_id.context.smid;
3882 
3883 	req_desc = mrsas_get_request_desc(sc, index - 1);
3884 	if (!req_desc)
3885 		return NULL;
3886 
3887 	req_desc->addr.Words = 0;
3888 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3889 
3890 	req_desc->SCSIIO.SMID = htole16(index);
3891 
3892 	return (req_desc);
3893 }
3894 
3895 /*
3896  * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3897  * input:						Adapter soft state mfi cmd pointer
3898  *
3899  * The MPT command and the io_request are setup as a passthru command. The SGE
3900  * chain address is set to frame_phys_addr of the MFI command.
3901  */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);	/* no free MPT command available */

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link so the completion path can find the MFI cmd again. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		/* Clear the Flags of the last SGE slot in the main message. */
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	/* Frame the MPT request as an MFI pass-through. */
	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* Chain element points at the MFI frame's physical address. */
	mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr);

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz);

	return (0);
}
3951 
3952 /*
3953  * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3954  * input:					Adapter soft state Command to be issued
3955  *
3956  * This function waits on an event for the command to be returned from the ISR.
3957  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3958  * internal and ioctl commands.
3959  */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;	/* ISR overwrites this on completion */

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): the value stored in sc->chan (address of the local
	 * 'cmd' variable) is never dereferenced — it only acts as a
	 * "command outstanding" marker.  The actual sleep channel is
	 * &sc->chan, which mrsas_wakeup() signals via wakeup_one().
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in 1-second slices until the ISR clears cmd_status. */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			/* Only internal (non-IOCTL) cmds honor max_wait. */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}
	sc->chan = NULL;

	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;	/* supersedes the generic '1' above */
	}
	return (retcode);
}
4005 
4006 /*
4007  * mrsas_complete_mptmfi_passthru:	Completes a command
4008  * input:	@sc:					Adapter soft state
4009  * 			@cmd:					Command to be completed
4010  * 			@status:				cmd completion status
4011  *
4012  * This function is called from mrsas_complete_cmd() after an interrupt is
4013  * received from Firmware, and io_request->Function is
4014  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
4015  */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH: non-IOCTL SCSI IO shares the DCMD handling. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		/*
		 * NOTE(review): the opcode comparisons here use the raw
		 * field while the MR_DCMD_CTRL_EVENT_WAIT check below wraps
		 * it in le32toh() — confirm which form is intended on
		 * big-endian hosts.
		 */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* Map not present: return cmd, skip re-sync. */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			/* Fast-path IO only if the new map validates. */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-register a pended map-sync DCMD with FW. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {
			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
4113 
4114 /*
4115  * mrsas_wakeup:	Completes an internal command
4116  * input:			Adapter soft state
4117  * 					Command to be completed
4118  *
4119  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
4120  * timer is started.  This function is called from
4121  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
4122  * from the command wait.
4123  */
4124 void
4125 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4126 {
4127 	cmd->cmd_status = cmd->frame->io.cmd_status;
4128 
4129 	if (cmd->cmd_status == 0xFF)
4130 		cmd->cmd_status = 0;
4131 
4132 	sc->chan = (void *)&cmd;
4133 	wakeup_one((void *)&sc->chan);
4134 	return;
4135 }
4136 
4137 /*
4138  * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
4139  * Adapter soft state Shutdown/Hibernate
4140  *
4141  * This function issues a DCMD internal command to Firmware to initiate shutdown
4142  * of the controller.
4143  */
4144 static void
4145 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
4146 {
4147 	struct mrsas_mfi_cmd *cmd;
4148 	struct mrsas_dcmd_frame *dcmd;
4149 
4150 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4151 		return;
4152 
4153 	cmd = mrsas_get_mfi_cmd(sc);
4154 	if (!cmd) {
4155 		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
4156 		return;
4157 	}
4158 	if (sc->aen_cmd)
4159 		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
4160 	if (sc->map_update_cmd)
4161 		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
4162 	if (sc->jbod_seq_cmd)
4163 		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
4164 
4165 	dcmd = &cmd->frame->dcmd;
4166 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4167 
4168 	dcmd->cmd = MFI_CMD_DCMD;
4169 	dcmd->cmd_status = 0x0;
4170 	dcmd->sge_count = 0;
4171 	dcmd->flags = MFI_FRAME_DIR_NONE;
4172 	dcmd->timeout = 0;
4173 	dcmd->pad_0 = 0;
4174 	dcmd->data_xfer_len = 0;
4175 	dcmd->opcode = opcode;
4176 
4177 	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
4178 
4179 	mrsas_issue_blocked_cmd(sc, cmd);
4180 	mrsas_release_mfi_cmd(cmd);
4181 
4182 	return;
4183 }
4184 
4185 /*
4186  * mrsas_flush_cache:         Requests FW to flush all its caches input:
4187  * Adapter soft state
4188  *
4189  * This function is issues a DCMD internal command to Firmware to initiate
4190  * flushing of all caches.
4191  */
4192 static void
4193 mrsas_flush_cache(struct mrsas_softc *sc)
4194 {
4195 	struct mrsas_mfi_cmd *cmd;
4196 	struct mrsas_dcmd_frame *dcmd;
4197 
4198 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4199 		return;
4200 
4201 	cmd = mrsas_get_mfi_cmd(sc);
4202 	if (!cmd) {
4203 		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
4204 		return;
4205 	}
4206 	dcmd = &cmd->frame->dcmd;
4207 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4208 
4209 	dcmd->cmd = MFI_CMD_DCMD;
4210 	dcmd->cmd_status = 0x0;
4211 	dcmd->sge_count = 0;
4212 	dcmd->flags = MFI_FRAME_DIR_NONE;
4213 	dcmd->timeout = 0;
4214 	dcmd->pad_0 = 0;
4215 	dcmd->data_xfer_len = 0;
4216 	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
4217 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
4218 
4219 	mrsas_issue_blocked_cmd(sc, cmd);
4220 	mrsas_release_mfi_cmd(cmd);
4221 
4222 	return;
4223 }
4224 
/*
 * megasas_sync_pd_seq_num: Fetch (or pend) the FW's JBOD map of PD
 * sequence numbers.
 * input: Adapter soft state; pend — when true, register the DCMD with FW
 *        so it completes only on a map change (async JBOD-map update);
 *        when false, poll for the current map synchronously.
 * return: 0 on success, non-zero on failure/timeout.
 */
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;	/* cleared once the polled DCMD succeeds */
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/* Flexible-array style size: header plus MAX_PHYSICAL_DEVICES entries. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Double-buffered JBOD map; parity of pd_seq_map_id selects the slot. */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* FW overwrites on completion */
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(pd_seq_map_sz);
	dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz);

	if (pend) {
		/* Pended: FW holds the DCMD until the JBOD map changes. */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = htole16(MFI_FRAME_DIR_READ);

	/*
	 * NOTE(review): 'cmd' is not released on the polled paths below —
	 * presumably reclaimed by the DCMD-timeout OCR or a caller; verify,
	 * as this looks like a potential MFI command leak.
	 */
	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}
4301 
4302 /*
4303  * mrsas_get_map_info:        Load and validate RAID map input:
4304  * Adapter instance soft state
4305  *
4306  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
4307  * and validate RAID map.  It returns 0 if successful, 1 other- wise.
4308  */
4309 static int
4310 mrsas_get_map_info(struct mrsas_softc *sc)
4311 {
4312 	uint8_t retcode = 0;
4313 
4314 	sc->fast_path_io = 0;
4315 	if (!mrsas_get_ld_map_info(sc)) {
4316 		retcode = MR_ValidateMapInfo(sc);
4317 		if (retcode == 0) {
4318 			sc->fast_path_io = 1;
4319 			return 0;
4320 		}
4321 	}
4322 	return 1;
4323 }
4324 
4325 /*
4326  * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
4327  * Adapter instance soft state
4328  *
4329  * Issues an internal command (DCMD) to get the FW's controller PD list
4330  * structure.
4331  */
4332 static int
4333 mrsas_get_ld_map_info(struct mrsas_softc *sc)
4334 {
4335 	int retcode = 0;
4336 	struct mrsas_mfi_cmd *cmd;
4337 	struct mrsas_dcmd_frame *dcmd;
4338 	void *map;
4339 	bus_addr_t map_phys_addr = 0;
4340 
4341 	cmd = mrsas_get_mfi_cmd(sc);
4342 	if (!cmd) {
4343 		device_printf(sc->mrsas_dev,
4344 		    "Cannot alloc for ld map info cmd.\n");
4345 		return 1;
4346 	}
4347 	dcmd = &cmd->frame->dcmd;
4348 
4349 	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4350 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4351 	if (!map) {
4352 		device_printf(sc->mrsas_dev,
4353 		    "Failed to alloc mem for ld map info.\n");
4354 		mrsas_release_mfi_cmd(cmd);
4355 		return (ENOMEM);
4356 	}
4357 	memset(map, 0, sizeof(sc->max_map_sz));
4358 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4359 
4360 	dcmd->cmd = MFI_CMD_DCMD;
4361 	dcmd->cmd_status = 0xFF;
4362 	dcmd->sge_count = 1;
4363 	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4364 	dcmd->timeout = 0;
4365 	dcmd->pad_0 = 0;
4366 	dcmd->data_xfer_len = htole32(sc->current_map_sz);
4367 	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
4368 	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
4369 	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);
4370 
4371 	retcode = mrsas_issue_polled(sc, cmd);
4372 	if (retcode == ETIMEDOUT)
4373 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4374 
4375 	return (retcode);
4376 }
4377 
4378 /*
4379  * mrsas_sync_map_info:        Get FW's ld_map structure input:
4380  * Adapter instance soft state
4381  *
4382  * Issues an internal command (DCMD) to get the FW's controller PD list
4383  * structure.
4384  */
/*
 * mrsas_sync_map_info: Push per-LD target/seqNum info to FW and register
 * a pended LD-map-update DCMD.
 * input: Adapter instance soft state
 * return: 0 on success, non-zero on failure.
 *
 * The DCMD is pended (mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG): FW completes
 * it only when the LD map changes, and the completion path in
 * mrsas_complete_mptmfi_passthru() releases the cmd and re-registers.
 */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
	int retcode = 0, i;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t num_lds;
	MR_LD_TARGET_SYNC *target_map = NULL;
	MR_DRV_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	MR_LD_TARGET_SYNC *ld_sync;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
		return ENOMEM;
	}
	map = sc->ld_drv_map[sc->map_id & 1];
	/*
	 * NOTE(review): ldCount is read without le32toh() here, unlike
	 * other raidMap field accesses — confirm on big-endian hosts.
	 */
	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the sync payload in the *inactive* raidmap buffer. */
	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
	memset(target_map, 0, sc->max_map_sz);

	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

	ld_sync = (MR_LD_TARGET_SYNC *) target_map;

	/* One targetId/seqNum record per LD in the active map. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	/* WRITE DCMD with the pend flag so FW holds it until a map change. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* FW overwrites on completion */
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sc->current_map_sz);
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);

	sc->map_update_cmd = cmd;
	if (mrsas_issue_dcmd(sc, cmd)) {
		/*
		 * NOTE(review): on this failure path 'cmd' stays registered
		 * as map_update_cmd and is not released — verify intended.
		 */
		device_printf(sc->mrsas_dev,
		    "Fail to send sync map info command.\n");
		return (1);
	}
	return (retcode);
}
4443 
4444 /* Input:	dcmd.opcode		- MR_DCMD_PD_GET_INFO
4445   *		dcmd.mbox.s[0]		- deviceId for this physical drive
4446   *		dcmd.sge IN		- ptr to returned MR_PD_INFO structure
4447   * Desc:	Firmware return the physical drive info structure
4448   *
4449   */
4450 static void
4451 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
4452 {
4453 	int retcode;
4454 	u_int8_t do_ocr = 1;
4455 	struct mrsas_mfi_cmd *cmd;
4456 	struct mrsas_dcmd_frame *dcmd;
4457 
4458 	cmd = mrsas_get_mfi_cmd(sc);
4459 
4460 	if (!cmd) {
4461 		device_printf(sc->mrsas_dev,
4462 		    "Cannot alloc for get PD info cmd\n");
4463 		return;
4464 	}
4465 	dcmd = &cmd->frame->dcmd;
4466 
4467 	memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
4468 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4469 
4470 	dcmd->mbox.s[0] = htole16(device_id);
4471 	dcmd->cmd = MFI_CMD_DCMD;
4472 	dcmd->cmd_status = 0xFF;
4473 	dcmd->sge_count = 1;
4474 	dcmd->flags = MFI_FRAME_DIR_READ;
4475 	dcmd->timeout = 0;
4476 	dcmd->pad_0 = 0;
4477 	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info));
4478 	dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO);
4479 	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF);
4480 	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info));
4481 
4482 	if (!sc->mask_interrupts)
4483 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4484 	else
4485 		retcode = mrsas_issue_polled(sc, cmd);
4486 
4487 	if (retcode == ETIMEDOUT)
4488 		goto dcmd_timeout;
4489 
4490 	sc->target_list[device_id].interface_type =
4491 		le16toh(sc->pd_info_mem->state.ddf.pdType.intf);
4492 
4493 	do_ocr = 0;
4494 
4495 dcmd_timeout:
4496 
4497 	if (do_ocr)
4498 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4499 
4500 	if (!sc->mask_interrupts)
4501 		mrsas_release_mfi_cmd(cmd);
4502 }
4503 
4504 /*
4505  * mrsas_add_target:				Add target ID of system PD/VD to driver's data structure.
4506  * sc:						Adapter's soft state
4507  * target_id:					Unique target id per controller(managed by driver)
4508  *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4509  *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4510  * return:					void
4511  * Descripton:					This function will be called whenever system PD or VD is created.
4512  */
4513 static void mrsas_add_target(struct mrsas_softc *sc,
4514 	u_int16_t target_id)
4515 {
4516 	sc->target_list[target_id].target_id = target_id;
4517 
4518 	device_printf(sc->mrsas_dev,
4519 		"%s created target ID: 0x%x\n",
4520 		(target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4521 		(target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4522 	/*
4523 	 * If interrupts are enabled, then only fire DCMD to get pd_info
4524 	 * for system PDs
4525 	 */
4526 	if (!sc->mask_interrupts && sc->pd_info_mem &&
4527 		(target_id < MRSAS_MAX_PD))
4528 		mrsas_get_pd_info(sc, target_id);
4529 
4530 }
4531 
4532 /*
4533  * mrsas_remove_target:			Remove target ID of system PD/VD from driver's data structure.
4534  * sc:						Adapter's soft state
4535  * target_id:					Unique target id per controller(managed by driver)
4536  *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4537  *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4538  * return:					void
4539  * Descripton:					This function will be called whenever system PD or VD is deleted
4540  */
4541 static void mrsas_remove_target(struct mrsas_softc *sc,
4542 	u_int16_t target_id)
4543 {
4544 	sc->target_list[target_id].target_id = 0xffff;
4545 	device_printf(sc->mrsas_dev,
4546 		"%s deleted target ID: 0x%x\n",
4547 		(target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4548 		(target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4549 }
4550 
4551 /*
4552  * mrsas_get_pd_list:           Returns FW's PD list structure input:
4553  * Adapter soft state
4554  *
4555  * Issues an internal command (DCMD) to get the FW's controller PD list
4556  * structure.  This information is mainly used to find out about system
4557  * supported by Firmware.
4558  */
4559 static int
4560 mrsas_get_pd_list(struct mrsas_softc *sc)
4561 {
4562 	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4563 	u_int8_t do_ocr = 1;
4564 	struct mrsas_mfi_cmd *cmd;
4565 	struct mrsas_dcmd_frame *dcmd;
4566 	struct MR_PD_LIST *pd_list_mem;
4567 	struct MR_PD_ADDRESS *pd_addr;
4568 	bus_addr_t pd_list_phys_addr = 0;
4569 	struct mrsas_tmp_dcmd *tcmd;
4570 	u_int16_t dev_id;
4571 
4572 	cmd = mrsas_get_mfi_cmd(sc);
4573 	if (!cmd) {
4574 		device_printf(sc->mrsas_dev,
4575 		    "Cannot alloc for get PD list cmd\n");
4576 		return 1;
4577 	}
4578 	dcmd = &cmd->frame->dcmd;
4579 
4580 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4581 	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4582 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4583 		device_printf(sc->mrsas_dev,
4584 		    "Cannot alloc dmamap for get PD list cmd\n");
4585 		mrsas_release_mfi_cmd(cmd);
4586 		mrsas_free_tmp_dcmd(tcmd);
4587 		free(tcmd, M_MRSAS);
4588 		return (ENOMEM);
4589 	} else {
4590 		pd_list_mem = tcmd->tmp_dcmd_mem;
4591 		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4592 	}
4593 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4594 
4595 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4596 	dcmd->mbox.b[1] = 0;
4597 	dcmd->cmd = MFI_CMD_DCMD;
4598 	dcmd->cmd_status = 0xFF;
4599 	dcmd->sge_count = 1;
4600 	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4601 	dcmd->timeout = 0;
4602 	dcmd->pad_0 = 0;
4603 	dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4604 	dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY);
4605 	dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF);
4606 	dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4607 
4608 	if (!sc->mask_interrupts)
4609 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4610 	else
4611 		retcode = mrsas_issue_polled(sc, cmd);
4612 
4613 	if (retcode == ETIMEDOUT)
4614 		goto dcmd_timeout;
4615 
4616 	/* Get the instance PD list */
4617 	pd_count = MRSAS_MAX_PD;
4618 	pd_addr = pd_list_mem->addr;
4619 	if (le32toh(pd_list_mem->count) < pd_count) {
4620 		memset(sc->local_pd_list, 0,
4621 		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4622 		for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) {
4623 			dev_id = le16toh(pd_addr->deviceId);
4624 			sc->local_pd_list[dev_id].tid = dev_id;
4625 			sc->local_pd_list[dev_id].driveType =
4626 			    le16toh(pd_addr->scsiDevType);
4627 			sc->local_pd_list[dev_id].driveState =
4628 			    MR_PD_STATE_SYSTEM;
4629 			if (sc->target_list[dev_id].target_id == 0xffff)
4630 				mrsas_add_target(sc, dev_id);
4631 			pd_addr++;
4632 		}
4633 		for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
4634 			if ((sc->local_pd_list[pd_index].driveState !=
4635 				MR_PD_STATE_SYSTEM) &&
4636 				(sc->target_list[pd_index].target_id !=
4637 				0xffff)) {
4638 				mrsas_remove_target(sc, pd_index);
4639 			}
4640 		}
4641 		/*
4642 		 * Use mutext/spinlock if pd_list component size increase more than
4643 		 * 32 bit.
4644 		 */
4645 		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4646 		do_ocr = 0;
4647 	}
4648 dcmd_timeout:
4649 	mrsas_free_tmp_dcmd(tcmd);
4650 	free(tcmd, M_MRSAS);
4651 
4652 	if (do_ocr)
4653 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4654 
4655 	if (!sc->mask_interrupts)
4656 		mrsas_release_mfi_cmd(cmd);
4657 
4658 	return (retcode);
4659 }
4660 
4661 /*
4662  * mrsas_get_ld_list:           Returns FW's LD list structure input:
4663  * Adapter soft state
4664  *
 * Issues an internal command (DCMD) to get the FW's controller LD list
 * structure.  This information is mainly used to find out the LDs
 * supported by the FW.
4668  */
4669 static int
4670 mrsas_get_ld_list(struct mrsas_softc *sc)
4671 {
4672 	int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
4673 	u_int8_t do_ocr = 1;
4674 	struct mrsas_mfi_cmd *cmd;
4675 	struct mrsas_dcmd_frame *dcmd;
4676 	struct MR_LD_LIST *ld_list_mem;
4677 	bus_addr_t ld_list_phys_addr = 0;
4678 	struct mrsas_tmp_dcmd *tcmd;
4679 
4680 	cmd = mrsas_get_mfi_cmd(sc);
4681 	if (!cmd) {
4682 		device_printf(sc->mrsas_dev,
4683 		    "Cannot alloc for get LD list cmd\n");
4684 		return 1;
4685 	}
4686 	dcmd = &cmd->frame->dcmd;
4687 
4688 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4689 	ld_list_size = sizeof(struct MR_LD_LIST);
4690 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4691 		device_printf(sc->mrsas_dev,
4692 		    "Cannot alloc dmamap for get LD list cmd\n");
4693 		mrsas_release_mfi_cmd(cmd);
4694 		mrsas_free_tmp_dcmd(tcmd);
4695 		free(tcmd, M_MRSAS);
4696 		return (ENOMEM);
4697 	} else {
4698 		ld_list_mem = tcmd->tmp_dcmd_mem;
4699 		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4700 	}
4701 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4702 
4703 	if (sc->max256vdSupport)
4704 		dcmd->mbox.b[0] = 1;
4705 
4706 	dcmd->cmd = MFI_CMD_DCMD;
4707 	dcmd->cmd_status = 0xFF;
4708 	dcmd->sge_count = 1;
4709 	dcmd->flags = MFI_FRAME_DIR_READ;
4710 	dcmd->timeout = 0;
4711 	dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST));
4712 	dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST);
4713 	dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr);
4714 	dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST));
4715 	dcmd->pad_0 = 0;
4716 
4717 	if (!sc->mask_interrupts)
4718 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4719 	else
4720 		retcode = mrsas_issue_polled(sc, cmd);
4721 
4722 	if (retcode == ETIMEDOUT)
4723 		goto dcmd_timeout;
4724 
4725 #if VD_EXT_DEBUG
4726 	printf("Number of LDs %d\n", ld_list_mem->ldCount);
4727 #endif
4728 
4729 	/* Get the instance LD list */
4730 	if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) {
4731 		sc->CurLdCount = le32toh(ld_list_mem->ldCount);
4732 		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4733 		for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) {
4734 			ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4735 			drv_tgt_id = ids + MRSAS_MAX_PD;
4736 			if (ld_list_mem->ldList[ld_index].state != 0) {
4737 				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4738 				if (sc->target_list[drv_tgt_id].target_id ==
4739 					0xffff)
4740 					mrsas_add_target(sc, drv_tgt_id);
4741 			} else {
4742 				if (sc->target_list[drv_tgt_id].target_id !=
4743 					0xffff)
4744 					mrsas_remove_target(sc,
4745 						drv_tgt_id);
4746 			}
4747 		}
4748 
4749 		do_ocr = 0;
4750 	}
4751 dcmd_timeout:
4752 	mrsas_free_tmp_dcmd(tcmd);
4753 	free(tcmd, M_MRSAS);
4754 
4755 	if (do_ocr)
4756 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4757 	if (!sc->mask_interrupts)
4758 		mrsas_release_mfi_cmd(cmd);
4759 
4760 	return (retcode);
4761 }
4762 
4763 /*
4764  * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
4765  * Adapter soft state Temp command Size of allocation
4766  *
4767  * Allocates DMAable memory for a temporary internal command. The allocated
4768  * memory is initialized to all zeros upon successful loading of the dma
4769  * mapped memory.
4770  */
4771 int
4772 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4773     struct mrsas_tmp_dcmd *tcmd, int size)
4774 {
4775 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
4776 	    1, 0,
4777 	    BUS_SPACE_MAXADDR_32BIT,
4778 	    BUS_SPACE_MAXADDR,
4779 	    NULL, NULL,
4780 	    size,
4781 	    1,
4782 	    size,
4783 	    BUS_DMA_ALLOCNOW,
4784 	    NULL, NULL,
4785 	    &tcmd->tmp_dcmd_tag)) {
4786 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4787 		return (ENOMEM);
4788 	}
4789 	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4790 	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4791 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4792 		return (ENOMEM);
4793 	}
4794 	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4795 	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4796 	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4797 		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4798 		return (ENOMEM);
4799 	}
4800 	memset(tcmd->tmp_dcmd_mem, 0, size);
4801 	return (0);
4802 }
4803 
4804 /*
4805  * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
4806  * temporary dcmd pointer
4807  *
4808  * Deallocates memory of the temporary command for use in the construction of
4809  * the internal DCMD.
4810  */
4811 void
4812 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4813 {
4814 	if (tmp->tmp_dcmd_phys_addr)
4815 		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4816 	if (tmp->tmp_dcmd_mem != NULL)
4817 		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4818 	if (tmp->tmp_dcmd_tag != NULL)
4819 		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4820 }
4821 
4822 /*
4823  * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
4824  * Adapter soft state Previously issued cmd to be aborted
4825  *
4826  * This function is used to abort previously issued commands, such as AEN and
4827  * RAID map sync map commands.  The abort command is sent as a DCMD internal
4828  * command and subsequently the driver will wait for a return status.  The
4829  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4830  */
4831 static int
4832 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4833     struct mrsas_mfi_cmd *cmd_to_abort)
4834 {
4835 	struct mrsas_mfi_cmd *cmd;
4836 	struct mrsas_abort_frame *abort_fr;
4837 	u_int8_t retcode = 0;
4838 	unsigned long total_time = 0;
4839 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4840 
4841 	cmd = mrsas_get_mfi_cmd(sc);
4842 	if (!cmd) {
4843 		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4844 		return (1);
4845 	}
4846 	abort_fr = &cmd->frame->abort;
4847 
4848 	/* Prepare and issue the abort frame */
4849 	abort_fr->cmd = MFI_CMD_ABORT;
4850 	abort_fr->cmd_status = 0xFF;
4851 	abort_fr->flags = 0;
4852 	abort_fr->abort_context = cmd_to_abort->index;
4853 	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4854 	abort_fr->abort_mfi_phys_addr_hi = 0;
4855 
4856 	cmd->sync_cmd = 1;
4857 	cmd->cmd_status = 0xFF;
4858 
4859 	if (mrsas_issue_dcmd(sc, cmd)) {
4860 		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4861 		return (1);
4862 	}
4863 	/* Wait for this cmd to complete */
4864 	sc->chan = (void *)&cmd;
4865 	while (1) {
4866 		if (cmd->cmd_status == 0xFF) {
4867 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4868 		} else
4869 			break;
4870 		total_time++;
4871 		if (total_time >= max_wait) {
4872 			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4873 			retcode = 1;
4874 			break;
4875 		}
4876 	}
4877 
4878 	cmd->sync_cmd = 0;
4879 	mrsas_release_mfi_cmd(cmd);
4880 	return (retcode);
4881 }
4882 
4883 /*
4884  * mrsas_complete_abort:      Completes aborting a command input:
4885  * Adapter soft state Cmd that was issued to abort another cmd
4886  *
4887  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4888  * change after sending the command.  This function is called from
4889  * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4890  */
4891 void
4892 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4893 {
4894 	if (cmd->sync_cmd) {
4895 		cmd->sync_cmd = 0;
4896 		cmd->cmd_status = 0;
4897 		sc->chan = (void *)&cmd;
4898 		wakeup_one((void *)&sc->chan);
4899 	}
4900 	return;
4901 }
4902 
4903 /*
4904  * mrsas_aen_handler:	AEN processing callback function from thread context
4905  * input:				Adapter soft state
4906  *
4907  * Asynchronous event handler
4908  */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
 	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	/* Don't process events while a reset or driver detach is under way. */
	if (sc->remove_in_progress || sc->reset_in_progress) {
		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
			__func__, __LINE__);
		return;
	}
	if (sc->evt_detail_mem) {
		/*
		 * Dispatch on the FW event code.  On any helper failure we
		 * skip re-registering the AEN (goto skip_register_aen).
		 */
		switch (sc->evt_detail_mem->code) {
		case MR_EVT_PD_INSERTED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_PD_REMOVED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			/* LD-side changes only need a rescan of sim_0. */
			mrsas_bus_scan_sim(sc, sc->sim_0);
			break;
		case MR_EVT_LD_CREATED:
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			/* These events refresh both PD and LD views below. */
			doscan = 1;
			break;
		case MR_EVT_CTRL_PROP_CHANGED:
			fail_aen = mrsas_get_ctrl_info(sc);
			if (fail_aen)
				goto skip_register_aen;
			break;
		default:
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	if (doscan) {
		/* Refresh the PD list, then rescan the PD SIM (sim_1). */
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		/* Refresh the LD list, then rescan the LD SIM (sim_0). */
		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	/* NOTE(review): seq_num read without le32toh — confirm FW byte order. */
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	/* An AEN command is already outstanding; don't register another. */
	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);

skip_register_aen:
	return;

}
5008 
5009 /*
5010  * mrsas_complete_aen:	Completes AEN command
5011  * input:				Adapter soft state
5012  * 						Cmd that was issued to abort another cmd
5013  *
5014  * This function will be called from ISR and will continue event processing from
5015  * thread context by enqueuing task in ev_tq (callback function
5016  * "mrsas_aen_handler").
5017  */
5018 void
5019 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
5020 {
5021 	/*
5022 	 * Don't signal app if it is just an aborted previously registered
5023 	 * aen
5024 	 */
5025 	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
5026 		sc->mrsas_aen_triggered = 1;
5027 		mtx_lock(&sc->aen_lock);
5028 		if (sc->mrsas_poll_waiting) {
5029 			sc->mrsas_poll_waiting = 0;
5030 			selwakeup(&sc->mrsas_select);
5031 		}
5032 		mtx_unlock(&sc->aen_lock);
5033 	} else
5034 		cmd->abort_aen = 0;
5035 
5036 	sc->aen_cmd = NULL;
5037 	mrsas_release_mfi_cmd(cmd);
5038 
5039 	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
5040 
5041 	return;
5042 }
5043 
5044 static device_method_t mrsas_methods[] = {
5045 	DEVMETHOD(device_probe, mrsas_probe),
5046 	DEVMETHOD(device_attach, mrsas_attach),
5047 	DEVMETHOD(device_detach, mrsas_detach),
5048 	DEVMETHOD(device_shutdown, mrsas_shutdown),
5049 	DEVMETHOD(device_suspend, mrsas_suspend),
5050 	DEVMETHOD(device_resume, mrsas_resume),
5051 	DEVMETHOD(bus_print_child, bus_generic_print_child),
5052 	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
5053 	{0, 0}
5054 };
5055 
/* Newbus driver description: device name, method table, and softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};
5061 
/* Attach the driver to the PCI bus and declare the CAM module dependency. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
5064