xref: /freebsd/sys/dev/mrsas/mrsas.c (revision 6780e684d49034610f82bea5d3bfb04d42e91628)
1 /*
2  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4  * Support: freebsdraid@avagotech.com
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer. 2. Redistributions
12  * in binary form must reproduce the above copyright notice, this list of
13  * conditions and the following disclaimer in the documentation and/or other
14  * materials provided with the distribution. 3. Neither the name of the
15  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16  * promote products derived from this software without specific prior written
17  * permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing
33  * official policies,either expressed or implied, of the FreeBSD Project.
34  *
35  * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37  *
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <dev/mrsas/mrsas.h>
44 #include <dev/mrsas/mrsas_ioctl.h>
45 
46 #include <cam/cam.h>
47 #include <cam/cam_ccb.h>
48 
49 #include <sys/sysctl.h>
50 #include <sys/types.h>
51 #include <sys/sysent.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/smp.h>
55 
56 
57 /*
58  * Function prototypes
59  */
60 static d_open_t mrsas_open;
61 static d_close_t mrsas_close;
62 static d_read_t mrsas_read;
63 static d_write_t mrsas_write;
64 static d_ioctl_t mrsas_ioctl;
65 static d_poll_t mrsas_poll;
66 
67 static void mrsas_ich_startup(void *arg);
68 static struct mrsas_mgmt_info mrsas_mgmt_info;
69 static struct mrsas_ident *mrsas_find_ident(device_t);
70 static int mrsas_setup_msix(struct mrsas_softc *sc);
71 static int mrsas_allocate_msix(struct mrsas_softc *sc);
72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73 static void mrsas_flush_cache(struct mrsas_softc *sc);
74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75 static void mrsas_ocr_thread(void *arg);
76 static int mrsas_get_map_info(struct mrsas_softc *sc);
77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78 static int mrsas_sync_map_info(struct mrsas_softc *sc);
79 static int mrsas_get_pd_list(struct mrsas_softc *sc);
80 static int mrsas_get_ld_list(struct mrsas_softc *sc);
81 static int mrsas_setup_irq(struct mrsas_softc *sc);
82 static int mrsas_alloc_mem(struct mrsas_softc *sc);
83 static int mrsas_init_fw(struct mrsas_softc *sc);
84 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87 static int mrsas_clear_intr(struct mrsas_softc *sc);
88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
90 static int
91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
92     struct mrsas_mfi_cmd *cmd_to_abort);
93 static struct mrsas_softc *
94 mrsas_get_softc_instance(struct cdev *dev,
95     u_long cmd, caddr_t arg);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
97 u_int8_t
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99     struct mrsas_mfi_cmd *mfi_cmd);
100 void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int	mrsas_init_adapter(struct mrsas_softc *sc);
103 int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int	mrsas_ioc_init(struct mrsas_softc *sc);
107 int	mrsas_bus_scan(struct mrsas_softc *sc);
108 int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
113 int mrsas_reset_targets(struct mrsas_softc *sc);
114 int
115 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
116     struct mrsas_mfi_cmd *cmd);
117 int
118 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
119     int size);
120 void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
121 void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
124 void	mrsas_disable_intr(struct mrsas_softc *sc);
125 void	mrsas_enable_intr(struct mrsas_softc *sc);
126 void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
127 void	mrsas_free_mem(struct mrsas_softc *sc);
128 void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
129 void	mrsas_isr(void *arg);
130 void	mrsas_teardown_intr(struct mrsas_softc *sc);
131 void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
132 void	mrsas_kill_hba(struct mrsas_softc *sc);
133 void	mrsas_aen_handler(struct mrsas_softc *sc);
134 void
135 mrsas_write_reg(struct mrsas_softc *sc, int offset,
136     u_int32_t value);
137 void
138 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
139     u_int32_t req_desc_hi);
140 void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
141 void
142 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
143     struct mrsas_mfi_cmd *cmd, u_int8_t status);
144 void
145 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
146     u_int8_t extStatus);
147 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
148 
149 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
150         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
151 
152 extern int mrsas_cam_attach(struct mrsas_softc *sc);
153 extern void mrsas_cam_detach(struct mrsas_softc *sc);
154 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
155 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
156 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
157 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
158 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
159 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
160 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
163 extern void mrsas_xpt_release(struct mrsas_softc *sc);
164 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
165 mrsas_get_request_desc(struct mrsas_softc *sc,
166     u_int16_t index);
167 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
168 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
169 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
170 
171 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
172 
173 /*
174  * PCI device struct and table
175  *
176  */
/* One PCI identity the driver claims; 0xffff sub-IDs act as wildcards. */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID (0x1000 = LSI/Avago) */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* subsystem vendor ID, 0xffff = match any */
	uint16_t subdevice;	/* subsystem device ID, 0xffff = match any */
	const char *desc;	/* human-readable controller description */
}	MRSAS_CTLR_ID;
184 
/*
 * Supported controller table, scanned by mrsas_find_ident() at probe time.
 * Terminated by the all-zero sentinel entry.
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0, 0, 0, 0, NULL}	/* sentinel: ends the probe scan */
};
195 
196 /*
197  * Character device entry points
198  *
199  */
/* cdev switch for /dev/mrsas<unit>; ioctl is the only entry doing real work. */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
210 
/* Kernel malloc(9) type tag used for all of this driver's allocations. */
MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
212 
213 /*
214  * In the cdevsw routines, we find our softc by using the si_drv1 member of
215  * struct cdev.  We set this variable to point to our softc in our attach
216  * routine when we create the /dev entry.
217  */
/*
 * mrsas_open:	cdev open entry point.
 *
 * Nothing to do on open; access control is handled by the /dev node
 * permissions and the per-call lookup in mrsas_ioctl.  The previous
 * version fetched si_drv1 into a set-but-unused local, which only
 * produced a compiler warning; the dead code is removed.
 *
 * Returns 0 (always succeeds).
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}
226 
/*
 * mrsas_close:	cdev close entry point.
 *
 * No per-open state is kept, so close is a no-op.  The set-but-unused
 * softc local from the previous version is removed.
 *
 * Returns 0 (always succeeds).
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}
235 
/*
 * mrsas_read:	cdev read entry point.
 *
 * The management interface is ioctl-only; read transfers no data and
 * returns success without touching the uio.  Dead softc local removed.
 *
 * Returns 0 (always succeeds, EOF semantics).
 */
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
/*
 * mrsas_write:	cdev write entry point.
 *
 * The management interface is ioctl-only; write consumes nothing and
 * returns success.  Dead softc local removed.
 *
 * Returns 0 (always succeeds).
 */
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
252 
253 /*
254  * Register Read/Write Functions
255  *
256  */
257 void
258 mrsas_write_reg(struct mrsas_softc *sc, int offset,
259     u_int32_t value)
260 {
261 	bus_space_tag_t bus_tag = sc->bus_tag;
262 	bus_space_handle_t bus_handle = sc->bus_handle;
263 
264 	bus_space_write_4(bus_tag, bus_handle, offset, value);
265 }
266 
267 u_int32_t
268 mrsas_read_reg(struct mrsas_softc *sc, int offset)
269 {
270 	bus_space_tag_t bus_tag = sc->bus_tag;
271 	bus_space_handle_t bus_handle = sc->bus_handle;
272 
273 	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
274 }
275 
276 
277 /*
278  * Interrupt Disable/Enable/Clear Functions
279  *
280  */
/*
 * mrsas_disable_intr:	mask all controller interrupts.
 * @sc:		adapter soft state
 *
 * Sets sc->mask_interrupts first so the ISR path can observe that
 * interrupts are intentionally masked, then writes an all-ones mask to
 * the outbound interrupt mask register.
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;	/* mask every outbound interrupt source */
	u_int32_t status;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
292 
/*
 * mrsas_enable_intr:	unmask the controller reply interrupt.
 * @sc:		adapter soft state
 *
 * Clears any stale bits in the outbound interrupt status register, then
 * writes the complement of the Fusion enable mask to the mask register
 * so only the reply interrupt is left unmasked.  Each write is followed
 * by a read-back to flush the posted PCI write.
 */
void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
	u_int32_t status;

	sc->mask_interrupts = 0;
	/* Acknowledge/clear everything currently pending. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Unmask only the reply interrupt (bits outside `mask` stay masked). */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
306 
307 static int
308 mrsas_clear_intr(struct mrsas_softc *sc)
309 {
310 	u_int32_t status;
311 
312 	/* Read received interrupt */
313 	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
314 
315 	/* Not our interrupt, so just return */
316 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
317 		return (0);
318 
319 	/* We got a reply interrupt */
320 	return (1);
321 }
322 
323 /*
324  * PCI Support Functions
325  *
326  */
327 static struct mrsas_ident *
328 mrsas_find_ident(device_t dev)
329 {
330 	struct mrsas_ident *pci_device;
331 
332 	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
333 		if ((pci_device->vendor == pci_get_vendor(dev)) &&
334 		    (pci_device->device == pci_get_device(dev)) &&
335 		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
336 		    (pci_device->subvendor == 0xffff)) &&
337 		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
338 		    (pci_device->subdevice == 0xffff)))
339 			return (pci_device);
340 	}
341 	return (NULL);
342 }
343 
/*
 * mrsas_probe:	newbus probe entry point.
 * @dev:	candidate PCI device
 *
 * Claims the device when it appears in device_table, printing the driver
 * banner once for the first controller found.
 *
 * Returns a probe priority between BUS_PROBE_DEFAULT and
 * BUS_PROBE_LOW_PRIORITY on a match, ENXIO otherwise.
 */
static int
mrsas_probe(device_t dev)
{
	static u_int8_t first_ctrl = 1;	/* print the version banner only once */
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		if (first_ctrl) {
			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
			    MRSAS_VERSION);
			first_ctrl = 0;
		}
		device_set_desc(dev, id->desc);
		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
		return (-30);
	}
	return (ENXIO);
}
362 
363 /*
364  * mrsas_setup_sysctl:	setup sysctl values for mrsas
365  * input:				Adapter instance soft state
366  *
367  * Setup sysctl entries for mrsas driver.
368  */
/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:				Adapter instance soft state
 *
 * Registers the per-controller sysctl tree (dev.mrsas.<unit> or, when the
 * device has no sysctl tree of its own, hw.mrsas.<unit>) and populates it
 * with tunables and read-only status values.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the device's own sysctl context/tree when available. */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* Fall back to a private node under hw.mrsas.<unit>. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");

}
441 
442 /*
443  * mrsas_get_tunables:	get tunable parameters.
444  * input:				Adapter instance soft state
445  *
446  * Get tunable parameters. This will help to debug driver at boot time.
447  */
/*
 * mrsas_get_tunables:	get tunable parameters.
 * input:				Adapter instance soft state
 *
 * Seeds the softc with compile-time defaults, then overrides them from
 * kenv tunables.  This runs before sysctl setup so the values are in
 * effect from the earliest point of attach.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug = MRSAS_FAULT;
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;
	sc->block_sync_cache = 0;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the load-balancing pending-commands threshold.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
476 
477 /*
478  * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
479  * Used to get sequence number at driver load time.
480  * input:		Adapter soft state
481  *
482  * Allocates DMAable memory for the event log info internal command.
483  */
484 int
485 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
486 {
487 	int el_info_size;
488 
489 	/* Allocate get event log info command */
490 	el_info_size = sizeof(struct mrsas_evt_log_info);
491 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
492 	    1, 0,
493 	    BUS_SPACE_MAXADDR_32BIT,
494 	    BUS_SPACE_MAXADDR,
495 	    NULL, NULL,
496 	    el_info_size,
497 	    1,
498 	    el_info_size,
499 	    BUS_DMA_ALLOCNOW,
500 	    NULL, NULL,
501 	    &sc->el_info_tag)) {
502 		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
503 		return (ENOMEM);
504 	}
505 	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
506 	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
507 		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
508 		return (ENOMEM);
509 	}
510 	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
511 	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
512 	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
513 		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
514 		return (ENOMEM);
515 	}
516 	memset(sc->el_info_mem, 0, el_info_size);
517 	return (0);
518 }
519 
520 /*
521  * mrsas_free_evt_info_cmd:	Free memory for Event log info command
522  * input:					Adapter soft state
523  *
524  * Deallocates memory for the event log info internal command.
525  */
526 void
527 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
528 {
529 	if (sc->el_info_phys_addr)
530 		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
531 	if (sc->el_info_mem != NULL)
532 		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
533 	if (sc->el_info_tag != NULL)
534 		bus_dma_tag_destroy(sc->el_info_tag);
535 }
536 
537 /*
538  *  mrsas_get_seq_num:	Get latest event sequence number
539  *  @sc:				Adapter soft state
540  *  @eli:				Firmware event log sequence number information.
541  *
542  * Firmware maintains a log of all events in a non-volatile area.
543  * Driver get the sequence number using DCMD
544  * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
545  */
546 
/*
 *  mrsas_get_seq_num:	Get latest event sequence number
 *  @sc:				Adapter soft state
 *  @eli:				Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * Driver get the sequence number using DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 *
 * On a DCMD timeout the MFI command is deliberately NOT released: the
 * firmware may still own it, so instead an OCR (online controller reset)
 * is requested via do_timedout_reset and the OCR path reclaims it.
 */

static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;	/* assume OCR needed until DCMD completes */

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer that the firmware fills with the event log info. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the MR_DCMD_CTRL_EVENT_GET_INFO frame (single read SGE). */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;	/* command completed; safe to release it below */
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
600 
601 
602 /*
603  *  mrsas_register_aen:		Register for asynchronous event notification
604  *  @sc:			Adapter soft state
605  *  @seq_num:			Starting sequence number
606  *  @class_locale:		Class of the event
607  *
608  *  This function subscribes for events beyond the @seq_num
609  *  and type @class_locale.
610  *
611  */
/*
 *  mrsas_register_aen:		Register for asynchronous event notification
 *  @sc:			Adapter soft state
 *  @seq_num:			Starting sequence number
 *  @class_locale_word:		Class/locale of the event (packed word)
 *
 *  This function subscribes for events beyond @seq_num
 *  and of the given class/locale.
 *
 *  Returns 0 on success (or when an existing registration already covers
 *  the request), ENOMEM when no MFI command is available, 1 when issuing
 *  the DCMD fails, or the abort error code when a superseding
 *  registration cannot cancel the previous one.
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		/* mbox.w[1] of the pending AEN holds its class/locale word. */
		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset: union of locales, lower class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	/* Clear the buffer the firmware will fill with the event detail. */
	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;	/* start delivering events after this */
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/* Another registration raced in while we were preparing; back off. */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
719 
720 /*
721  * mrsas_start_aen:	Subscribes to AEN during driver load time
722  * @instance:		Adapter soft state
723  */
724 static int
725 mrsas_start_aen(struct mrsas_softc *sc)
726 {
727 	struct mrsas_evt_log_info eli;
728 	union mrsas_evt_class_locale class_locale;
729 
730 
731 	/* Get the latest sequence number from FW */
732 
733 	memset(&eli, 0, sizeof(eli));
734 
735 	if (mrsas_get_seq_num(sc, &eli))
736 		return -1;
737 
738 	/* Register AEN with FW for latest sequence number plus 1 */
739 	class_locale.members.reserved = 0;
740 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
741 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
742 
743 	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
744 	    class_locale.word);
745 
746 }
747 
748 /*
749  * mrsas_setup_msix:	Allocate MSI-x vectors
750  * @sc:					adapter soft state
751  */
/*
 * mrsas_setup_msix:	Allocate MSI-x vectors
 * @sc:					adapter soft state
 *
 * For each previously allocated MSI-X vector, claims the IRQ resource
 * and installs mrsas_isr with a per-vector context.  On any failure the
 * partially established interrupts are torn down.
 *
 * Returns SUCCESS or FAIL.
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		sc->irq_id[i] = i + 1;	/* MSI-X rids are 1-based */
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	/* Release whatever vectors were set up before the failure. */
	mrsas_teardown_intr(sc);
	return (FAIL);
}
784 
785 /*
786  * mrsas_allocate_msix:		Setup MSI-x vectors
787  * @sc:						adapter soft state
788  */
789 static int
790 mrsas_allocate_msix(struct mrsas_softc *sc)
791 {
792 	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
793 		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
794 		    " of vectors\n", sc->msix_vectors);
795 	} else {
796 		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
797 		goto irq_alloc_failed;
798 	}
799 	return SUCCESS;
800 
801 irq_alloc_failed:
802 	mrsas_teardown_intr(sc);
803 	return (FAIL);
804 }
805 
806 /*
807  * mrsas_attach:	PCI entry point
808  * input:			pointer to device struct
809  *
810  * Performs setup of PCI and registers, initializes mutexes and linked lists,
811  * registers interrupts and CAM, and initializes   the adapter/controller to
812  * its proper state.
813  */
/*
 * mrsas_attach:	PCI entry point
 * input:			pointer to device struct
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes the adapter/controller to
 * its proper state.  The tail of the function is an unwind ladder: each
 * attach_fail_* label releases everything acquired before its matching
 * step failed.
 *
 * Returns SUCCESS, or ENXIO on any failure.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, bar, error;

	/* Start from a clean softc; tunables/sysctl fill it in below. */
	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Gen-3 controllers share a different fast-path programming model. */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY) ||
	    (sc->device_id == MRSAS_INTRUDER) ||
	    (sc->device_id == MRSAS_INTRUDER_24) ||
	    (sc->device_id == MRSAS_CUTLASS_52) ||
	    (sc->device_id == MRSAS_CUTLASS_53)) {
		sc->mrsas_gen3_ctrl = 1;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Intialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);

	/* Intialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Failure unwind: each label releases everything acquired above it. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
942 
943 /*
944  * Interrupt config hook
945  */
static void
mrsas_ich_startup(void *arg)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Initialize a counting semaphore that bounds the number of
	 * concurrent passthrough IOCTLs (MRSAS_MAX_IOCTL_CMDS).
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	/*
	 * Unit 0 additionally exposes a Linux megaraid_sas-compatible
	 * ioctl node used by management applications.
	 */
	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	/* Stash the softc on the cdev so ioctl/poll can find it. */
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications.  Unit 0 resets the table;
	 * every unit then takes the next free slot (max_index is never
	 * decremented, so the array may become sparse on detach).
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* Startup work is done; remove the config hook so boot can proceed. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}
998 
999 /*
1000  * mrsas_detach:	De-allocates and teardown resources
1001  * input:			pointer to device struct
1002  *
1003  * This function is the entry point for device disconnect and detach.
1004  * It performs memory de-allocations, shutdown of the controller and various
1005  * teardown and destroy resource functions.
1006  */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Reject new IOCTLs / work; other paths poll this flag. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Nudge the OCR thread so it notices remove_in_progress and exits. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Wait (1s ticks) for any in-flight controller reset to finish. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Then wait for the OCR kernel thread itself to terminate. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Flush cache and shut the firmware down before tearing down DMA. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1085 
1086 /*
1087  * mrsas_free_mem:		Frees allocated memory
1088  * input:				Adapter instance soft state
1089  *
1090  * This function is called from mrsas_detach() to free previously allocated
1091  * memory.
1092  */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory.  Two copies exist so one can be updated
	 * while the other is in use; each follows the standard teardown
	 * order: unload map, free memory, destroy tag.
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	/* Free the two JBOD map copies the same way. */
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);


	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free MFI frames (DMA frames hanging off each MFI command; the
	 * command structures themselves are freed further below).
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list.
	 * NOTE(review): list entries are dereferenced without a NULL check;
	 * this presumes allocation either fully succeeded or the list is
	 * NULL — confirm against the command-list allocation path.
	 */
	max_cmd = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_cmd; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory (free(NULL) is a no-op)
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag (last: all child tags are gone by now)
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}
1238 
1239 /*
1240  * mrsas_teardown_intr:	Teardown interrupt
1241  * input:				Adapter instance soft state
1242  *
1243  * This function is called from mrsas_detach() to teardown and release bus
1244  * interrupt resourse.
1245  */
1246 void
1247 mrsas_teardown_intr(struct mrsas_softc *sc)
1248 {
1249 	int i;
1250 
1251 	if (!sc->msix_enable) {
1252 		if (sc->intr_handle[0])
1253 			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1254 		if (sc->mrsas_irq[0] != NULL)
1255 			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1256 			    sc->irq_id[0], sc->mrsas_irq[0]);
1257 		sc->intr_handle[0] = NULL;
1258 	} else {
1259 		for (i = 0; i < sc->msix_vectors; i++) {
1260 			if (sc->intr_handle[i])
1261 				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1262 				    sc->intr_handle[i]);
1263 
1264 			if (sc->mrsas_irq[i] != NULL)
1265 				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1266 				    sc->irq_id[i], sc->mrsas_irq[i]);
1267 
1268 			sc->intr_handle[i] = NULL;
1269 		}
1270 		pci_release_msi(sc->mrsas_dev);
1271 	}
1272 
1273 }
1274 
1275 /*
1276  * mrsas_suspend:	Suspend entry point
1277  * input:			Device struct pointer
1278  *
1279  * This function is the entry point for system suspend from the OS.
1280  */
1281 static int
1282 mrsas_suspend(device_t dev)
1283 {
1284 	/* This will be filled when the driver will have hibernation support */
1285 	return (0);
1286 }
1287 
1288 /*
1289  * mrsas_resume:	Resume entry point
1290  * input:			Device struct pointer
1291  *
1292  * This function is the entry point for system resume from the OS.
1293  */
1294 static int
1295 mrsas_resume(device_t dev)
1296 {
1297 	/* This will be filled when the driver will have hibernation support */
1298 	return (0);
1299 }
1300 
1301 /**
1302  * mrsas_get_softc_instance:    Find softc instance based on cmd type
1303  *
1304  * This function will return softc instance based on cmd type.
1305  * In some case, application fire ioctl on required management instance and
1306  * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1307  * case, else get the softc instance from host_no provided by application in
1308  * user data.
1309  */
1310 
1311 static struct mrsas_softc *
1312 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1313 {
1314 	struct mrsas_softc *sc = NULL;
1315 	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1316 
1317 	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1318 		sc = dev->si_drv1;
1319 	} else {
1320 		/*
1321 		 * get the Host number & the softc from data sent by the
1322 		 * Application
1323 		 */
1324 		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1325 		if (sc == NULL)
1326 			printf("There is no Controller number %d\n",
1327 			    user_ioc->host_no);
1328 		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1329 			mrsas_dprint(sc, MRSAS_FAULT,
1330 			    "Invalid Controller number %d\n", user_ioc->host_no);
1331 	}
1332 
1333 	return sc;
1334 }
1335 
1336 /*
1337  * mrsas_ioctl:	IOCtl commands entry point.
1338  *
1339  * This function is the entry point for IOCtls from the OS.  It calls the
1340  * appropriate function for processing depending on the command received.
1341  */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	/* Resolve the softc from the cdev or the user-supplied host_no. */
	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	/* Refuse IOCTLs while detaching or after an unrecoverable HW error. */
	if (sc->remove_in_progress ||
		(sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Either driver remove or shutdown called or "
			"HW is in unrecoverable critical error state.\n");
		return ENOENT;
	}
	/*
	 * Fast path: if no OCR (online controller reset) is running,
	 * dispatch immediately; otherwise poll until the reset finishes.
	 */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command; this bounds concurrent passthrough commands.
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report this controller's PCI location back to userland. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1420 
1421 /*
1422  * mrsas_poll:	poll entry point for mrsas driver fd
1423  *
1424  * This function is the entry point for poll from the OS.  It waits for some AEN
1425  * events to be triggered from the controller and notifies back.
1426  */
1427 static int
1428 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1429 {
1430 	struct mrsas_softc *sc;
1431 	int revents = 0;
1432 
1433 	sc = dev->si_drv1;
1434 
1435 	if (poll_events & (POLLIN | POLLRDNORM)) {
1436 		if (sc->mrsas_aen_triggered) {
1437 			revents |= poll_events & (POLLIN | POLLRDNORM);
1438 		}
1439 	}
1440 	if (revents == 0) {
1441 		if (poll_events & (POLLIN | POLLRDNORM)) {
1442 			mtx_lock(&sc->aen_lock);
1443 			sc->mrsas_poll_waiting = 1;
1444 			selrecord(td, &sc->mrsas_select);
1445 			mtx_unlock(&sc->aen_lock);
1446 		}
1447 	}
1448 	return revents;
1449 }
1450 
1451 /*
1452  * mrsas_setup_irq:	Set up interrupt
1453  * input:			Adapter instance soft state
1454  *
1455  * This function sets up interrupts as a bus resource, with flags indicating
1456  * resource permitting contemporaneous sharing and for resource to activate
1457  * atomically.
1458  */
1459 static int
1460 mrsas_setup_irq(struct mrsas_softc *sc)
1461 {
1462 	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1463 		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1464 
1465 	else {
1466 		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1467 		sc->irq_context[0].sc = sc;
1468 		sc->irq_context[0].MSIxIndex = 0;
1469 		sc->irq_id[0] = 0;
1470 		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1471 		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1472 		if (sc->mrsas_irq[0] == NULL) {
1473 			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1474 			    "interrupt\n");
1475 			return (FAIL);
1476 		}
1477 		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1478 		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1479 		    &sc->irq_context[0], &sc->intr_handle[0])) {
1480 			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1481 			    "interrupt\n");
1482 			return (FAIL);
1483 		}
1484 	}
1485 	return (0);
1486 }
1487 
1488 /*
1489  * mrsas_isr:	ISR entry point
1490  * input:		argument pointer
1491  *
1492  * This function is the interrupt service routine entry point.  There are two
1493  * types of interrupts, state change interrupt and response interrupt.  If an
1494  * interrupt is not ours, we just return.
1495  */
1496 void
1497 mrsas_isr(void *arg)
1498 {
1499 	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1500 	struct mrsas_softc *sc = irq_context->sc;
1501 	int status = 0;
1502 
1503 	if (sc->mask_interrupts)
1504 		return;
1505 
1506 	if (!sc->msix_vectors) {
1507 		status = mrsas_clear_intr(sc);
1508 		if (!status)
1509 			return;
1510 	}
1511 	/* If we are resetting, bail */
1512 	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1513 		printf(" Entered into ISR when OCR is going active. \n");
1514 		mrsas_clear_intr(sc);
1515 		return;
1516 	}
1517 	/* Process for reply request and clear response interrupt */
1518 	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1519 		mrsas_clear_intr(sc);
1520 
1521 	return;
1522 }
1523 
1524 /*
1525  * mrsas_complete_cmd:	Process reply request
1526  * input:				Adapter instance soft state
1527  *
1528  * This function is called from mrsas_isr() to process reply request and clear
1529  * response interrupt. Processing of the reply request entails walking
1530  * through the reply descriptor array for the command request  pended from
1531  * Firmware.  We look at the Function field to determine the command type and
1532  * perform the appropriate action.  Before we return, we clear the response
1533  * interrupt.
1534  */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;
#if TM_DEBUG
	MR_TASK_MANAGE_REQUEST *mr_tm_req;
	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
#endif

	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/* Position at this MSI-x queue's section of the reply ring. */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Find our reply descriptor for the command and process.
	 * An all-0xFF descriptor means the slot has not been posted by FW.
	 */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		/* SMIDs are 1-based: SMID n maps to mpt_cmd_list[n - 1]. */
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_TASK_MGMT:
#if TM_DEBUG
			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
			    &mr_tm_req->TmRequest;
			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
#endif
            wakeup_one((void *)&sc->ocr_chan);
            break;
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* Undo the load-balance accounting done at submit. */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			mrsas_atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/*
			 * Make sure NOT TO release the mfi command from the called
			 * function's context if it is fired with issue_polled call.
			 * And also make sure that the issue_polled call should only be
			 * used if INTERRUPT IS DISABLED.
			 */
			if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
				mrsas_release_mfi_cmd(cmd_mfi);
			else
				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			break;
		}

		/* Advance this queue's consumer index, wrapping at depth. */
		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if (sc->mrsas_gen3_ctrl)
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
			if (sc->mrsas_gen3_ctrl) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
1685 
1686 /*
1687  * mrsas_map_mpt_cmd_status:	Allocate DMAable memory.
1688  * input:						Adapter instance soft state
1689  *
1690  * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1691  * It checks the command status and maps the appropriate CAM status for the
1692  * CCB.
1693  */
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		/* sense_data is the address of an embedded CCB member, so
		 * the NULL check below can never fail in practice. */
		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, cmd->sense, 18);
			cmd->ccb_ptr->csio.sense_len = 18;
			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		/*
		 * NOTE(review): these cases OR into ccb_h.status without
		 * assigning a base value first — presumably relying on the
		 * status the CCB carried when queued; confirm against the
		 * submit path before changing.
		 */
		if (cmd->ccb_ptr->ccb_h.target_lun)
			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		/* Config changed underneath us; ask CAM to requeue. */
		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		/* Unknown FW status: surface it as a generic CAM error. */
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		cmd->ccb_ptr->csio.scsi_status = status;
	}
	return;
}
1732 
1733 /*
1734  * mrsas_alloc_mem:	Allocate DMAable memory
1735  * input:			Adapter instance soft state
1736  *
1737  * This function creates the parent DMA tag and allocates DMAable memory. DMA
1738  * tag describes constraints of DMA mapping. Memory allocated is mapped into
1739  * Kernel virtual address. Callback argument is physical memory address.
1740  */
1741 static int
1742 mrsas_alloc_mem(struct mrsas_softc *sc)
1743 {
1744 	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1745 	          chain_frame_size, evt_detail_size, count;
1746 
1747 	/*
1748 	 * Allocate parent DMA tag
1749 	 */
1750 	if (bus_dma_tag_create(NULL,	/* parent */
1751 	    1,				/* alignment */
1752 	    0,				/* boundary */
1753 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1754 	    BUS_SPACE_MAXADDR,		/* highaddr */
1755 	    NULL, NULL,			/* filter, filterarg */
1756 	    MAXPHYS,			/* maxsize */
1757 	    sc->max_num_sge,		/* nsegments */
1758 	    MAXPHYS,			/* maxsegsize */
1759 	    0,				/* flags */
1760 	    NULL, NULL,			/* lockfunc, lockarg */
1761 	    &sc->mrsas_parent_tag	/* tag */
1762 	    )) {
1763 		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1764 		return (ENOMEM);
1765 	}
1766 	/*
1767 	 * Allocate for version buffer
1768 	 */
1769 	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1770 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1771 	    1, 0,
1772 	    BUS_SPACE_MAXADDR_32BIT,
1773 	    BUS_SPACE_MAXADDR,
1774 	    NULL, NULL,
1775 	    verbuf_size,
1776 	    1,
1777 	    verbuf_size,
1778 	    BUS_DMA_ALLOCNOW,
1779 	    NULL, NULL,
1780 	    &sc->verbuf_tag)) {
1781 		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1782 		return (ENOMEM);
1783 	}
1784 	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1785 	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1786 		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1787 		return (ENOMEM);
1788 	}
1789 	bzero(sc->verbuf_mem, verbuf_size);
1790 	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1791 	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1792 	    BUS_DMA_NOWAIT)) {
1793 		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1794 		return (ENOMEM);
1795 	}
1796 	/*
1797 	 * Allocate IO Request Frames
1798 	 */
1799 	io_req_size = sc->io_frames_alloc_sz;
1800 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1801 	    16, 0,
1802 	    BUS_SPACE_MAXADDR_32BIT,
1803 	    BUS_SPACE_MAXADDR,
1804 	    NULL, NULL,
1805 	    io_req_size,
1806 	    1,
1807 	    io_req_size,
1808 	    BUS_DMA_ALLOCNOW,
1809 	    NULL, NULL,
1810 	    &sc->io_request_tag)) {
1811 		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1812 		return (ENOMEM);
1813 	}
1814 	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1815 	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1816 		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1817 		return (ENOMEM);
1818 	}
1819 	bzero(sc->io_request_mem, io_req_size);
1820 	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1821 	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
1822 	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1823 		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1824 		return (ENOMEM);
1825 	}
1826 	/*
1827 	 * Allocate Chain Frames
1828 	 */
1829 	chain_frame_size = sc->chain_frames_alloc_sz;
1830 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1831 	    4, 0,
1832 	    BUS_SPACE_MAXADDR_32BIT,
1833 	    BUS_SPACE_MAXADDR,
1834 	    NULL, NULL,
1835 	    chain_frame_size,
1836 	    1,
1837 	    chain_frame_size,
1838 	    BUS_DMA_ALLOCNOW,
1839 	    NULL, NULL,
1840 	    &sc->chain_frame_tag)) {
1841 		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1842 		return (ENOMEM);
1843 	}
1844 	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1845 	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1846 		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1847 		return (ENOMEM);
1848 	}
1849 	bzero(sc->chain_frame_mem, chain_frame_size);
1850 	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1851 	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1852 	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1853 		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1854 		return (ENOMEM);
1855 	}
1856 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
1857 	/*
1858 	 * Allocate Reply Descriptor Array
1859 	 */
1860 	reply_desc_size = sc->reply_alloc_sz * count;
1861 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1862 	    16, 0,
1863 	    BUS_SPACE_MAXADDR_32BIT,
1864 	    BUS_SPACE_MAXADDR,
1865 	    NULL, NULL,
1866 	    reply_desc_size,
1867 	    1,
1868 	    reply_desc_size,
1869 	    BUS_DMA_ALLOCNOW,
1870 	    NULL, NULL,
1871 	    &sc->reply_desc_tag)) {
1872 		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1873 		return (ENOMEM);
1874 	}
1875 	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1876 	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1877 		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1878 		return (ENOMEM);
1879 	}
1880 	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1881 	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1882 	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1883 		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1884 		return (ENOMEM);
1885 	}
1886 	/*
1887 	 * Allocate Sense Buffer Array.  Keep in lower 4GB
1888 	 */
1889 	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1890 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1891 	    64, 0,
1892 	    BUS_SPACE_MAXADDR_32BIT,
1893 	    BUS_SPACE_MAXADDR,
1894 	    NULL, NULL,
1895 	    sense_size,
1896 	    1,
1897 	    sense_size,
1898 	    BUS_DMA_ALLOCNOW,
1899 	    NULL, NULL,
1900 	    &sc->sense_tag)) {
1901 		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1902 		return (ENOMEM);
1903 	}
1904 	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1905 	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1906 		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1907 		return (ENOMEM);
1908 	}
1909 	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1910 	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1911 	    BUS_DMA_NOWAIT)) {
1912 		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1913 		return (ENOMEM);
1914 	}
1915 	/*
1916 	 * Allocate for Event detail structure
1917 	 */
1918 	evt_detail_size = sizeof(struct mrsas_evt_detail);
1919 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1920 	    1, 0,
1921 	    BUS_SPACE_MAXADDR_32BIT,
1922 	    BUS_SPACE_MAXADDR,
1923 	    NULL, NULL,
1924 	    evt_detail_size,
1925 	    1,
1926 	    evt_detail_size,
1927 	    BUS_DMA_ALLOCNOW,
1928 	    NULL, NULL,
1929 	    &sc->evt_detail_tag)) {
1930 		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1931 		return (ENOMEM);
1932 	}
1933 	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1934 	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1935 		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1936 		return (ENOMEM);
1937 	}
1938 	bzero(sc->evt_detail_mem, evt_detail_size);
1939 	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1940 	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1941 	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1942 		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1943 		return (ENOMEM);
1944 	}
1945 	/*
1946 	 * Create a dma tag for data buffers; size will be the maximum
1947 	 * possible I/O size (280kB).
1948 	 */
1949 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1950 	    1,
1951 	    0,
1952 	    BUS_SPACE_MAXADDR,
1953 	    BUS_SPACE_MAXADDR,
1954 	    NULL, NULL,
1955 	    MAXPHYS,
1956 	    sc->max_num_sge,		/* nsegments */
1957 	    MAXPHYS,
1958 	    BUS_DMA_ALLOCNOW,
1959 	    busdma_lock_mutex,
1960 	    &sc->io_lock,
1961 	    &sc->data_tag)) {
1962 		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1963 		return (ENOMEM);
1964 	}
1965 	return (0);
1966 }
1967 
1968 /*
1969  * mrsas_addr_cb:	Callback function of bus_dmamap_load()
1970  * input:			callback argument, machine dependent type
1971  * 					that describes DMA segments, number of segments, error code
1972  *
1973  * This function is for the driver to receive mapping information resultant of
1974  * the bus_dmamap_load(). The information is actually not being used, but the
1975  * address is saved anyway.
1976  */
1977 void
1978 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1979 {
1980 	bus_addr_t *addr;
1981 
1982 	addr = arg;
1983 	*addr = segs[0].ds_addr;
1984 }
1985 
1986 /*
1987  * mrsas_setup_raidmap:	Set up RAID map.
1988  * input:				Adapter instance soft state
1989  *
1990  * Allocate DMA memory for the RAID maps and perform setup.
1991  */
1992 static int
1993 mrsas_setup_raidmap(struct mrsas_softc *sc)
1994 {
1995 	int i;
1996 
1997 	for (i = 0; i < 2; i++) {
1998 		sc->ld_drv_map[i] =
1999 		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2000 		/* Do Error handling */
2001 		if (!sc->ld_drv_map[i]) {
2002 			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
2003 
2004 			if (i == 1)
2005 				free(sc->ld_drv_map[0], M_MRSAS);
2006 			/* ABORT driver initialization */
2007 			goto ABORT;
2008 		}
2009 	}
2010 
2011 	for (int i = 0; i < 2; i++) {
2012 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2013 		    4, 0,
2014 		    BUS_SPACE_MAXADDR_32BIT,
2015 		    BUS_SPACE_MAXADDR,
2016 		    NULL, NULL,
2017 		    sc->max_map_sz,
2018 		    1,
2019 		    sc->max_map_sz,
2020 		    BUS_DMA_ALLOCNOW,
2021 		    NULL, NULL,
2022 		    &sc->raidmap_tag[i])) {
2023 			device_printf(sc->mrsas_dev,
2024 			    "Cannot allocate raid map tag.\n");
2025 			return (ENOMEM);
2026 		}
2027 		if (bus_dmamem_alloc(sc->raidmap_tag[i],
2028 		    (void **)&sc->raidmap_mem[i],
2029 		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2030 			device_printf(sc->mrsas_dev,
2031 			    "Cannot allocate raidmap memory.\n");
2032 			return (ENOMEM);
2033 		}
2034 		bzero(sc->raidmap_mem[i], sc->max_map_sz);
2035 
2036 		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2037 		    sc->raidmap_mem[i], sc->max_map_sz,
2038 		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2039 		    BUS_DMA_NOWAIT)) {
2040 			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2041 			return (ENOMEM);
2042 		}
2043 		if (!sc->raidmap_mem[i]) {
2044 			device_printf(sc->mrsas_dev,
2045 			    "Cannot allocate memory for raid map.\n");
2046 			return (ENOMEM);
2047 		}
2048 	}
2049 
2050 	if (!mrsas_get_map_info(sc))
2051 		mrsas_sync_map_info(sc);
2052 
2053 	return (0);
2054 
2055 ABORT:
2056 	return (1);
2057 }
2058 
2059 /**
2060  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
2061  * @sc:				Adapter soft state
2062  *
2063  * Return 0 on success.
2064  */
2065 void
2066 megasas_setup_jbod_map(struct mrsas_softc *sc)
2067 {
2068 	int i;
2069 	uint32_t pd_seq_map_sz;
2070 
2071 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2072 	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2073 
2074 	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2075 		sc->use_seqnum_jbod_fp = 0;
2076 		return;
2077 	}
2078 	if (sc->jbodmap_mem[0])
2079 		goto skip_alloc;
2080 
2081 	for (i = 0; i < 2; i++) {
2082 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2083 		    4, 0,
2084 		    BUS_SPACE_MAXADDR_32BIT,
2085 		    BUS_SPACE_MAXADDR,
2086 		    NULL, NULL,
2087 		    pd_seq_map_sz,
2088 		    1,
2089 		    pd_seq_map_sz,
2090 		    BUS_DMA_ALLOCNOW,
2091 		    NULL, NULL,
2092 		    &sc->jbodmap_tag[i])) {
2093 			device_printf(sc->mrsas_dev,
2094 			    "Cannot allocate jbod map tag.\n");
2095 			return;
2096 		}
2097 		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2098 		    (void **)&sc->jbodmap_mem[i],
2099 		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2100 			device_printf(sc->mrsas_dev,
2101 			    "Cannot allocate jbod map memory.\n");
2102 			return;
2103 		}
2104 		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2105 
2106 		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2107 		    sc->jbodmap_mem[i], pd_seq_map_sz,
2108 		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2109 		    BUS_DMA_NOWAIT)) {
2110 			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2111 			return;
2112 		}
2113 		if (!sc->jbodmap_mem[i]) {
2114 			device_printf(sc->mrsas_dev,
2115 			    "Cannot allocate memory for jbod map.\n");
2116 			sc->use_seqnum_jbod_fp = 0;
2117 			return;
2118 		}
2119 	}
2120 
2121 skip_alloc:
2122 	if (!megasas_sync_pd_seq_num(sc, false) &&
2123 	    !megasas_sync_pd_seq_num(sc, true))
2124 		sc->use_seqnum_jbod_fp = 1;
2125 	else
2126 		sc->use_seqnum_jbod_fp = 0;
2127 
2128 	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2129 }
2130 
2131 /*
2132  * mrsas_init_fw:	Initialize Firmware
2133  * input:			Adapter soft state
2134  *
2135  * Calls transition_to_ready() to make sure Firmware is in operational state and
2136  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
2137  * issues internal commands to get the controller info after the IOC_INIT
2138  * command response is received by Firmware.  Note:  code relating to
2139  * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2140  * is left here as placeholder.
2141  */
2142 static int
2143 mrsas_init_fw(struct mrsas_softc *sc)
2144 {
2145 
2146 	int ret, loop, ocr = 0;
2147 	u_int32_t max_sectors_1;
2148 	u_int32_t max_sectors_2;
2149 	u_int32_t tmp_sectors;
2150 	u_int32_t scratch_pad_2;
2151 	int msix_enable = 0;
2152 	int fw_msix_count = 0;
2153 
2154 	/* Make sure Firmware is ready */
2155 	ret = mrsas_transition_to_ready(sc, ocr);
2156 	if (ret != SUCCESS) {
2157 		return (ret);
2158 	}
2159 	/* MSI-x index 0- reply post host index register */
2160 	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2161 	/* Check if MSI-X is supported while in ready state */
2162 	msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2163 
2164 	if (msix_enable) {
2165 		scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2166 		    outbound_scratch_pad_2));
2167 
2168 		/* Check max MSI-X vectors */
2169 		if (sc->device_id == MRSAS_TBOLT) {
2170 			sc->msix_vectors = (scratch_pad_2
2171 			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2172 			fw_msix_count = sc->msix_vectors;
2173 		} else {
2174 			/* Invader/Fury supports 96 MSI-X vectors */
2175 			sc->msix_vectors = ((scratch_pad_2
2176 			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2177 			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2178 			fw_msix_count = sc->msix_vectors;
2179 
2180 			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2181 			    loop++) {
2182 				sc->msix_reg_offset[loop] =
2183 				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2184 				    (loop * 0x10);
2185 			}
2186 		}
2187 
2188 		/* Don't bother allocating more MSI-X vectors than cpus */
2189 		sc->msix_vectors = min(sc->msix_vectors,
2190 		    mp_ncpus);
2191 
2192 		/* Allocate MSI-x vectors */
2193 		if (mrsas_allocate_msix(sc) == SUCCESS)
2194 			sc->msix_enable = 1;
2195 		else
2196 			sc->msix_enable = 0;
2197 
2198 		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2199 		    "Online CPU %d Current MSIX <%d>\n",
2200 		    fw_msix_count, mp_ncpus, sc->msix_vectors);
2201 	}
2202 	if (mrsas_init_adapter(sc) != SUCCESS) {
2203 		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2204 		return (1);
2205 	}
2206 	/* Allocate internal commands for pass-thru */
2207 	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2208 		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2209 		return (1);
2210 	}
2211 	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2212 	if (!sc->ctrl_info) {
2213 		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2214 		return (1);
2215 	}
2216 	/*
2217 	 * Get the controller info from FW, so that the MAX VD support
2218 	 * availability can be decided.
2219 	 */
2220 	if (mrsas_get_ctrl_info(sc)) {
2221 		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2222 		return (1);
2223 	}
2224 	sc->secure_jbod_support =
2225 	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2226 
2227 	if (sc->secure_jbod_support)
2228 		device_printf(sc->mrsas_dev, "FW supports SED \n");
2229 
2230 	if (sc->use_seqnum_jbod_fp)
2231 		device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2232 
2233 	if (mrsas_setup_raidmap(sc) != SUCCESS) {
2234 		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2235 		    "There seems to be some problem in the controller\n"
2236 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
2237 	}
2238 	megasas_setup_jbod_map(sc);
2239 
2240 	/* For pass-thru, get PD/LD list and controller info */
2241 	memset(sc->pd_list, 0,
2242 	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2243 	if (mrsas_get_pd_list(sc) != SUCCESS) {
2244 		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2245 		return (1);
2246 	}
2247 	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2248 	if (mrsas_get_ld_list(sc) != SUCCESS) {
2249 		device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2250 		return (1);
2251 	}
2252 	/*
2253 	 * Compute the max allowed sectors per IO: The controller info has
2254 	 * two limits on max sectors. Driver should use the minimum of these
2255 	 * two.
2256 	 *
2257 	 * 1 << stripe_sz_ops.min = max sectors per strip
2258 	 *
2259 	 * Note that older firmwares ( < FW ver 30) didn't report information to
2260 	 * calculate max_sectors_1. So the number ended up as zero always.
2261 	 */
2262 	tmp_sectors = 0;
2263 	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2264 	    sc->ctrl_info->max_strips_per_io;
2265 	max_sectors_2 = sc->ctrl_info->max_request_size;
2266 	tmp_sectors = min(max_sectors_1, max_sectors_2);
2267 	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2268 
2269 	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2270 		sc->max_sectors_per_req = tmp_sectors;
2271 
2272 	sc->disableOnlineCtrlReset =
2273 	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2274 	sc->UnevenSpanSupport =
2275 	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2276 	if (sc->UnevenSpanSupport) {
2277 		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2278 		    sc->UnevenSpanSupport);
2279 
2280 		if (MR_ValidateMapInfo(sc))
2281 			sc->fast_path_io = 1;
2282 		else
2283 			sc->fast_path_io = 0;
2284 	}
2285 	return (0);
2286 }
2287 
2288 /*
2289  * mrsas_init_adapter:	Initializes the adapter/controller
2290  * input:				Adapter soft state
2291  *
2292  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2293  * ROC/controller.  The FW register is read to determined the number of
2294  * commands that is supported.  All memory allocations for IO is based on
2295  * max_cmd.  Appropriate calculations are performed in this function.
2296  */
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t max_cmd, scratch_pad_2;
	int ret;
	int i = 0;

	/* Read FW status register */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	/* Get operational params from status register */
	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds - 1;
	max_cmd = sc->max_fw_cmds;

	/*
	 * Determine allocation size of command frames.  The reply queue
	 * depth is (max_cmd + 1) rounded up to a multiple of 16, doubled.
	 */
	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	/* One extra IO frame beyond max_cmd, plus one leading frame of slack. */
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
	scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
	    outbound_scratch_pad_2));
	/*
	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
	 * Firmware support extended IO chain frame which is 4 time more
	 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
	 * 1K 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
	 */
	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_1MB_IO;
	else
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_256K_IO;

	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
	/* SGEs that fit in the main message frame after the request header. */
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
	/* -2 accounts for the chain element in the main frame and one spare. */
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
	    sc->max_num_sge, sc->max_chain_frame_sz);

	/* Used for pass thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION)) / 16;

	/* One last-reply index per MSI-X vector (at least one). */
	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;

	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return (ret);

	/* Send IOC_INIT to firmware using the buffers set up above. */
	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return (ret);

	return (0);
}
2372 
2373 /*
2374  * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2375  * input:				Adapter soft state
2376  *
2377  * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2378  */
2379 int
2380 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2381 {
2382 	int ioc_init_size;
2383 
2384 	/* Allocate IOC INIT command */
2385 	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2386 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2387 	    1, 0,
2388 	    BUS_SPACE_MAXADDR_32BIT,
2389 	    BUS_SPACE_MAXADDR,
2390 	    NULL, NULL,
2391 	    ioc_init_size,
2392 	    1,
2393 	    ioc_init_size,
2394 	    BUS_DMA_ALLOCNOW,
2395 	    NULL, NULL,
2396 	    &sc->ioc_init_tag)) {
2397 		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2398 		return (ENOMEM);
2399 	}
2400 	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2401 	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2402 		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2403 		return (ENOMEM);
2404 	}
2405 	bzero(sc->ioc_init_mem, ioc_init_size);
2406 	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2407 	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2408 	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2409 		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2410 		return (ENOMEM);
2411 	}
2412 	return (0);
2413 }
2414 
2415 /*
2416  * mrsas_free_ioc_cmd:	Allocates memory for IOC Init command
2417  * input:				Adapter soft state
2418  *
2419  * Deallocates memory of the IOC Init cmd.
2420  */
2421 void
2422 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2423 {
2424 	if (sc->ioc_init_phys_mem)
2425 		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2426 	if (sc->ioc_init_mem != NULL)
2427 		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2428 	if (sc->ioc_init_tag != NULL)
2429 		bus_dma_tag_destroy(sc->ioc_init_tag);
2430 }
2431 
2432 /*
2433  * mrsas_ioc_init:	Sends IOC Init command to FW
2434  * input:			Adapter soft state
2435  *
2436  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2437  */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	/* Probe FW's SYNC_CACHE capability unless administratively blocked. */
	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	/* MPI2 IOC INIT message lives at offset 1024 in the DMA buffer. */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	/* MFI init frame at offset 0 wraps the MPI2 message above. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;	/* sentinel: FW overwrites on completion */
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
		if (sc->mrsas_gen3_ctrl) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Pass the driver version string's bus address to firmware, if set up. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	/* Advertise driver capabilities to firmware. */
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
	/* Point the MFI frame at the embedded MPI2 IOC INIT message. */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	/* cmd_status 0 = success; 0xFF = timed out; anything else = FW error. */
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* The IOC INIT buffers are only needed for this one command. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2533 
2534 /*
2535  * mrsas_alloc_mpt_cmds:	Allocates the command packets
2536  * input:					Adapter instance soft state
2537  *
2538  * This function allocates the internal commands for IOs. Each command that is
2539  * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2540  * array is allocated with mrsas_mpt_cmd context.  The free commands are
2541  * maintained in a linked list (cmd pool). SMID value range is from 1 to
2542  * max_fw_cmds.
2543  */
2544 int
2545 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2546 {
2547 	int i, j;
2548 	u_int32_t max_cmd, count;
2549 	struct mrsas_mpt_cmd *cmd;
2550 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2551 	u_int32_t offset, chain_offset, sense_offset;
2552 	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2553 	u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2554 
2555 	max_cmd = sc->max_fw_cmds;
2556 
2557 	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2558 	if (!sc->req_desc) {
2559 		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2560 		return (ENOMEM);
2561 	}
2562 	memset(sc->req_desc, 0, sc->request_alloc_sz);
2563 
2564 	/*
2565 	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2566 	 * Allocate the dynamic array first and then allocate individual
2567 	 * commands.
2568 	 */
2569 	sc->mpt_cmd_list = mallocarray(max_cmd, sizeof(struct mrsas_mpt_cmd *),
2570 	    M_MRSAS, M_NOWAIT);
2571 	if (!sc->mpt_cmd_list) {
2572 		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2573 		return (ENOMEM);
2574 	}
2575 	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
2576 	for (i = 0; i < max_cmd; i++) {
2577 		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2578 		    M_MRSAS, M_NOWAIT);
2579 		if (!sc->mpt_cmd_list[i]) {
2580 			for (j = 0; j < i; j++)
2581 				free(sc->mpt_cmd_list[j], M_MRSAS);
2582 			free(sc->mpt_cmd_list, M_MRSAS);
2583 			sc->mpt_cmd_list = NULL;
2584 			return (ENOMEM);
2585 		}
2586 	}
2587 
2588 	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2589 	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2590 	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2591 	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2592 	sense_base = (u_int8_t *)sc->sense_mem;
2593 	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2594 	for (i = 0; i < max_cmd; i++) {
2595 		cmd = sc->mpt_cmd_list[i];
2596 		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2597 		chain_offset = sc->max_chain_frame_sz * i;
2598 		sense_offset = MRSAS_SENSE_LEN * i;
2599 		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2600 		cmd->index = i + 1;
2601 		cmd->ccb_ptr = NULL;
2602 		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
2603 		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2604 		cmd->sc = sc;
2605 		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2606 		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2607 		cmd->io_request_phys_addr = io_req_base_phys + offset;
2608 		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2609 		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2610 		cmd->sense = sense_base + sense_offset;
2611 		cmd->sense_phys_addr = sense_base_phys + sense_offset;
2612 		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2613 			return (FAIL);
2614 		}
2615 		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2616 	}
2617 
2618 	/* Initialize reply descriptor array to 0xFFFFFFFF */
2619 	reply_desc = sc->reply_desc_mem;
2620 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2621 	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2622 		reply_desc->Words = MRSAS_ULONG_MAX;
2623 	}
2624 	return (0);
2625 }
2626 
2627 /*
2628  * mrsas_fire_cmd:	Sends command to FW
2629  * input:			Adapter softstate
2630  * 					request descriptor address low
2631  * 					request descriptor address high
2632  *
2633  * This functions fires the command to Firmware by writing to the
2634  * inbound_low_queue_port and inbound_high_queue_port.
2635  */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * The 64-bit descriptor is posted as two 32-bit writes; pci_lock
	 * keeps the low/high pair atomic with respect to other submitters.
	 * The low half must be written first.
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}
2647 
2648 /*
2649  * mrsas_transition_to_ready:  Move FW to Ready state input:
2650  * Adapter instance soft state
2651  *
2652  * During the initialization, FW passes can potentially be in any one of several
2653  * possible states. If the FW in operational, waiting-for-handshake states,
2654  * driver must take steps to bring it to ready state. Otherwise, it has to
2655  * wait for the ready state.
2656  */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* FW state lives in the low bits of the outbound scratch pad. */
	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	/* Drive the FW state machine one transition at a time. */
	while (fw_state != MFI_STATE_READY) {
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* During OCR, a FAULTed FW may still recover; wait it out. */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* Acknowledge the boot message via the doorbell. */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Poll until the doorbell reset bit clears. */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			/* Compare full register values to catch sub-state changes. */
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
2762 
2763 /*
2764  * mrsas_get_mfi_cmd:	Get a cmd from free command pool
2765  * input:				Adapter soft state
2766  *
2767  * This function removes an MFI command from the command list.
2768  */
2769 struct mrsas_mfi_cmd *
2770 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2771 {
2772 	struct mrsas_mfi_cmd *cmd = NULL;
2773 
2774 	mtx_lock(&sc->mfi_cmd_pool_lock);
2775 	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2776 		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2777 		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2778 	}
2779 	mtx_unlock(&sc->mfi_cmd_pool_lock);
2780 
2781 	return cmd;
2782 }
2783 
2784 /*
2785  * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
2786  * input:				Adapter Context.
2787  *
2788  * This function will check FW status register and flag do_timeout_reset flag.
2789  * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2790  * trigger reset.
2791  */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;
	u_int8_t tm_target_reset_failed = 0;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/*
		 * Sleep for mrsas_fw_fault_check_delay seconds (or until
		 * woken via ocr_chan), then re-evaluate the FW state.
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		/* Exit the thread on driver detach or an unrecoverable HBA. */
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
			mrsas_atomic_read(&sc->target_reset_outstanding)) {

			/* First, freeze further IOs to come to the SIM */
			mrsas_xpt_freeze(sc);

			/* If this is an IO timeout then go for target reset */
			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
				device_printf(sc->mrsas_dev, "Initiating Target RESET "
				    "because of SCSI IO timeout!\n");

				/* Let the remaining IOs to complete */
				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
				      "mrsas_reset_targets", 5 * hz);

				/* Try to reset the target device */
				if (mrsas_reset_targets(sc) == FAIL)
					tm_target_reset_failed = 1;
			}

			/* If this is a DCMD timeout or FW fault,
			 * then go for controller reset
			 */
			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
				if (tm_target_reset_failed)
					device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
					    "TM FAILURE!\n");
				else
					device_printf(sc->mrsas_dev, "Initiaiting OCR "
						"because of %s!\n", sc->do_timedout_reset ?
						"DCMD IO Timeout" : "FW fault");

				/*
				 * reset_in_progress is read by the ioctl path;
				 * it is updated under the spin lock so those
				 * readers see a consistent value.
				 */
				mtx_lock_spin(&sc->ioctl_lock);
				sc->reset_in_progress = 1;
				mtx_unlock_spin(&sc->ioctl_lock);
				sc->reset_count++;

				/*
				 * Wait for the AEN task to be completed if it is running.
				 * sim_lock must be dropped: the AEN task may need it,
				 * so draining while holding it could deadlock.
				 */
				mtx_unlock(&sc->sim_lock);
				taskqueue_drain(sc->ev_tq, &sc->ev_task);
				mtx_lock(&sc->sim_lock);

				/* Block new AEN work for the duration of the reset. */
				taskqueue_block(sc->ev_tq);
				/* Try to reset the controller */
				mrsas_reset_ctrl(sc, sc->do_timedout_reset);

				/* Clear all reset bookkeeping for the next cycle. */
				sc->do_timedout_reset = 0;
				sc->reset_in_progress = 0;
				tm_target_reset_failed = 0;
				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
				memset(sc->target_reset_pool, 0,
				    sizeof(sc->target_reset_pool));
				taskqueue_unblock(sc->ev_tq);
			}

			/* Now allow IOs to come to the SIM */
			 mrsas_xpt_release(sc);
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
2886 
2887 /*
2888  * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
2889  * input:					Adapter Context.
2890  *
2891  * This function will clear reply descriptor so that post OCR driver and FW will
2892  * lost old history.
2893  */
2894 void
2895 mrsas_reset_reply_desc(struct mrsas_softc *sc)
2896 {
2897 	int i, count;
2898 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2899 
2900 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2901 	for (i = 0; i < count; i++)
2902 		sc->last_reply_idx[i] = 0;
2903 
2904 	reply_desc = sc->reply_desc_mem;
2905 	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2906 		reply_desc->Words = MRSAS_ULONG_MAX;
2907 	}
2908 }
2909 
2910 /*
2911  * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
2912  * input:				Adapter Context.
2913  *
2914  * This function will run from thread context so that it can sleep. 1. Do not
2915  * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2916  * to complete for 180 seconds. 3. If #2 does not find any outstanding
2917  * command Controller is in working state, so skip OCR. Otherwise, do
2918  * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2919  * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2920  * OCR, Re-fire Management command and move Controller to Operation state.
2921  */
int
mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
{
	int retval = SUCCESS, i, j, retry = 0;
	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
	union ccb *ccb;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;
	union mrsas_evt_class_locale class_locale;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	/* A dead HBA cannot be recovered by OCR; bail out immediately. */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
		device_printf(sc->mrsas_dev,
		    "mrsas: Hardware critical error, returning FAIL.\n");
		return FAIL;
	}
	/* Advertise the reset to the rest of the driver and quiesce interrupts. */
	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
	mrsas_disable_intr(sc);
	/* Brief settle time before probing outstanding-command state. */
	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
	    sc->mrsas_fw_fault_check_delay * hz);

	/* First try waiting for commands to complete */
	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "resetting adapter from %s.\n",
		    __func__);
		/* Now return commands back to the CAM layer */
		mtx_unlock(&sc->sim_lock);
		for (i = 0; i < sc->max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			if (mpt_cmd->ccb_ptr) {
				/* Report a bus reset so CAM retries the I/O. */
				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
				mrsas_cmd_done(sc, mpt_cmd);
				mrsas_atomic_dec(&sc->fw_outstanding);
			}
		}
		mtx_lock(&sc->sim_lock);

		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		abs_state = status_reg & MFI_STATE_MASK;
		reset_adapter = status_reg & MFI_RESET_ADAPTER;
		if (sc->disableOnlineCtrlReset ||
		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
			/* Reset not supported, kill adapter */
			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
			mrsas_kill_hba(sc);
			retval = FAIL;
			goto out;
		}
		/* Now try to reset the chip */
		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
			/*
			 * Write the MPI2 diagnostic unlock key sequence; the
			 * exact order of these seven writes is mandated by the
			 * hardware.
			 */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_1ST_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_2ND_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_3RD_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_4TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_5TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_6TH_KEY_VALUE);

			/* Check that the diag write enable (DRWE) bit is on */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			/* Poll up to ~10 seconds (100 * 100ms) for DRWE. */
			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 100) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Host diag unlock failed!\n");
					break;
				}
			}
			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
				continue;

			/* Send chip reset command */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
			    host_diag | HOST_DIAG_RESET_ADAPTER);
			DELAY(3000 * 1000);

			/* Make sure reset adapter bit is cleared */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			/* Poll up to ~100 seconds (1000 * 100ms) for the bit to drop. */
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 1000) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Diag reset adapter never cleared!\n");
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			/* Wait for FW to progress past its early init states. */
			abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK;
			retry = 0;

			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
				DELAY(100 * 1000);
				abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    outbound_scratch_pad)) & MFI_STATE_MASK;
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
				    " state = 0x%x\n", abs_state);
				continue;
			}
			/* Wait for FW to become ready */
			if (mrsas_transition_to_ready(sc, 1)) {
				mrsas_dprint(sc, MRSAS_OCR,
				    "mrsas: Failed to transition controller to ready.\n");
				continue;
			}
			/* Drop all pre-reset completion history, then re-init the IOC. */
			mrsas_reset_reply_desc(sc);
			if (mrsas_ioc_init(sc)) {
				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
				continue;
			}
			/*
			 * Walk every MPT slot that was carrying an MFI
			 * passthru: plain internal commands are returned to
			 * the pool, while IOCTL-originated ones are re-fired
			 * so the waiting caller eventually gets an answer.
			 */
			for (j = 0; j < sc->max_fw_cmds; j++) {
				mpt_cmd = sc->mpt_cmd_list[j];
				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
					/* If not an IOCTL then release the command else re-fire */
					if (!mfi_cmd->sync_cmd) {
						mrsas_release_mfi_cmd(mfi_cmd);
					} else {
						req_desc = mrsas_get_request_desc(sc,
						    mfi_cmd->cmd_id.context.smid - 1);
						mrsas_dprint(sc, MRSAS_OCR,
						    "Re-fire command DCMD opcode 0x%x index %d\n ",
						    mfi_cmd->frame->dcmd.opcode, j);
						if (!req_desc)
							device_printf(sc->mrsas_dev,
							    "Cannot build MPT cmd.\n");
						else
							mrsas_fire_cmd(sc, req_desc->addr.u.low,
							    req_desc->addr.u.high);
					}
				}
			}

			/* Reset load balance info */
			memset(sc->load_balance_info, 0,
			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);

			/* Without fresh controller info the HBA is unusable. */
			if (mrsas_get_ctrl_info(sc)) {
				mrsas_kill_hba(sc);
				retval = FAIL;
				goto out;
			}
			if (!mrsas_get_map_info(sc))
				mrsas_sync_map_info(sc);

			megasas_setup_jbod_map(sc);

			/* Reset complete: re-enable interrupts and mark operational. */
			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
			mrsas_enable_intr(sc);
			sc->adprecovery = MRSAS_HBA_OPERATIONAL;

			/* Register AEN with FW for last sequence number */
			class_locale.members.reserved = 0;
			class_locale.members.locale = MR_EVT_LOCALE_ALL;
			class_locale.members.class = MR_EVT_CLASS_DEBUG;

			/* AEN registration may sleep; do it without sim_lock held. */
			mtx_unlock(&sc->sim_lock);
			if (mrsas_register_aen(sc, sc->last_seq_num,
			    class_locale.word)) {
				device_printf(sc->mrsas_dev,
				    "ERROR: AEN registration FAILED from OCR !!! "
				    "Further events from the controller cannot be notified."
				    "Either there is some problem in the controller"
				    "or the controller does not support AEN.\n"
				    "Please contact to the SUPPORT TEAM if the problem persists\n");
			}
			mtx_lock(&sc->sim_lock);

			/* Adapter reset completed successfully */
			device_printf(sc->mrsas_dev, "Reset successful\n");
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
		mrsas_kill_hba(sc);
		retval = FAIL;
	} else {
		/* All commands drained on their own: no reset needed. */
		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
		mrsas_enable_intr(sc);
		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	}
out:
	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	mrsas_dprint(sc, MRSAS_OCR,
	    "Reset Exit with %d.\n", retval);
	return retval;
}
3133 
3134 /*
3135  * mrsas_kill_hba:	Kill HBA when OCR is not supported
3136  * input:			Adapter Context.
3137  *
3138  * This function will kill HBA when OCR is not supported.
3139  */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter dead first so no other path attempts recovery. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* 1s pause before stopping the adapter — presumably to let in-flight
	 * register traffic settle; TODO confirm against FW requirements. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	/* Tell the firmware to stop the adapter via the doorbell register. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail any IOCTLs still waiting so their callers are not stuck forever. */
	mrsas_complete_outstanding_ioctls(sc);
}
3152 
3153 /**
3154  * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
3155  * input:			Controller softc
3156  *
3157  * Returns void
3158  */
3159 void
3160 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3161 {
3162 	int i;
3163 	struct mrsas_mpt_cmd *cmd_mpt;
3164 	struct mrsas_mfi_cmd *cmd_mfi;
3165 	u_int32_t count, MSIxIndex;
3166 
3167 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3168 	for (i = 0; i < sc->max_fw_cmds; i++) {
3169 		cmd_mpt = sc->mpt_cmd_list[i];
3170 
3171 		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3172 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3173 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3174 				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3175 					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3176 					    cmd_mpt->io_request->RaidContext.status);
3177 			}
3178 		}
3179 	}
3180 }
3181 
3182 /*
3183  * mrsas_wait_for_outstanding:	Wait for outstanding commands
3184  * input:						Adapter Context.
3185  *
3186  * This function will wait for 180 seconds for outstanding commands to be
3187  * completed.
3188  */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;


	/* Poll once a second, up to MRSAS_RESET_WAIT_TIME seconds. */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			/* Drain each reply queue; sim_lock dropped because
			 * mrsas_complete_cmd completes CCBs back into CAM. */
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
			retval = 1;
			goto out;
		}
		/* A DCMD timeout always warrants a reset — no point waiting. */
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		/* retval stays 0 when everything has drained: no reset needed. */
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		/* Periodically log progress and reap any finished commands. */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
		}
		DELAY(1000 * 1000);
	}

	/* Timed out with commands still in flight: caller must reset. */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}
3248 
3249 /*
3250  * mrsas_release_mfi_cmd:	Return a cmd to free command pool
3251  * input:					Command packet for return to free cmd pool
3252  *
3253  * This function returns the MFI & MPT command to the command list.
3254  */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
{
	struct mrsas_softc *sc = cmd_mfi->sc;
	struct mrsas_mpt_cmd *cmd_mpt;


	/* Lock order: mfi pool lock first, then mpt pool lock nested inside. */
	mtx_lock(&sc->mfi_cmd_pool_lock);
	/*
	 * Release the mpt command (if at all it is allocated)
	 * associated with the mfi command
	 */
	if (cmd_mfi->cmd_id.context.smid) {
		mtx_lock(&sc->mpt_cmd_pool_lock);
		/* Get the mpt cmd from mfi cmd frame's smid value */
		cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
		cmd_mpt->flags = 0;
		/* MRSAS_ULONG_MAX marks the slot as carrying no MFI command. */
		cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
		mtx_unlock(&sc->mpt_cmd_pool_lock);
	}
	/* Release the mfi command */
	cmd_mfi->ccb_ptr = NULL;
	cmd_mfi->cmd_id.frame_count = 0;
	TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}
3284 
3285 /*
3286  * mrsas_get_controller_info:	Returns FW's controller structure
3287  * input:						Adapter soft state
3288  * 								Controller information structure
3289  *
3290  * Issues an internal command (DCMD) to get the FW's controller structure. This
3291  * information is mainly used to find out the maximum IO transfer per command
3292  * supported by the FW.
3293  */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	/* Default to scheduling an OCR; cleared only after a good response. */
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMAable bounce buffer for the controller-info payload. */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the MR_DCMD_CTRL_GET_INFO frame: single read SGE. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

	/* Blocked (sleeping) issue normally; polled while interrupts are masked. */
	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	/* Any non-timeout outcome falls through and copies the DMA buffer. */
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	else
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));

	do_ocr = 0;
	mrsas_update_ext_vd_details(sc);

	/* Cache FW capability bits the I/O and reset paths consult. */
	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;

dcmd_timeout:
	mrsas_free_ctlr_info_cmd(sc);

	/* On timeout, flag the OCR thread to reset the adapter. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	/* A timed-out polled cmd stays owned by FW; don't return it to the pool. */
	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
3357 
3358 /*
3359  * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3360  * input:
3361  *	sc - Controller's softc
3362 */
3363 static void
3364 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3365 {
3366 	sc->max256vdSupport =
3367 	sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3368 	/* Below is additional check to address future FW enhancement */
3369 	if (sc->ctrl_info->max_lds > 64)
3370 		sc->max256vdSupport = 1;
3371 
3372 	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3373 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3374 	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3375 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3376 	if (sc->max256vdSupport) {
3377 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3378 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3379 	} else {
3380 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3381 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3382 	}
3383 
3384 	sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3385 	    (sizeof(MR_LD_SPAN_MAP) *
3386 	    (sc->fw_supported_vd_count - 1));
3387 	sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3388 	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3389 	    (sizeof(MR_LD_SPAN_MAP) *
3390 	    (sc->drv_supported_vd_count - 1));
3391 
3392 	sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3393 
3394 	if (sc->max256vdSupport)
3395 		sc->current_map_sz = sc->new_map_sz;
3396 	else
3397 		sc->current_map_sz = sc->old_map_sz;
3398 }
3399 
3400 /*
3401  * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3402  * input:						Adapter soft state
3403  *
3404  * Allocates DMAable memory for the controller info internal command.
3405  */
3406 int
3407 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3408 {
3409 	int ctlr_info_size;
3410 
3411 	/* Allocate get controller info command */
3412 	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3413 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3414 	    1, 0,
3415 	    BUS_SPACE_MAXADDR_32BIT,
3416 	    BUS_SPACE_MAXADDR,
3417 	    NULL, NULL,
3418 	    ctlr_info_size,
3419 	    1,
3420 	    ctlr_info_size,
3421 	    BUS_DMA_ALLOCNOW,
3422 	    NULL, NULL,
3423 	    &sc->ctlr_info_tag)) {
3424 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3425 		return (ENOMEM);
3426 	}
3427 	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3428 	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3429 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3430 		return (ENOMEM);
3431 	}
3432 	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3433 	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3434 	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3435 		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3436 		return (ENOMEM);
3437 	}
3438 	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3439 	return (0);
3440 }
3441 
3442 /*
3443  * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3444  * input:						Adapter soft state
3445  *
3446  * Deallocates memory of the get controller info cmd.
3447  */
3448 void
3449 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3450 {
3451 	if (sc->ctlr_info_phys_addr)
3452 		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3453 	if (sc->ctlr_info_mem != NULL)
3454 		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3455 	if (sc->ctlr_info_tag != NULL)
3456 		bus_dma_tag_destroy(sc->ctlr_info_tag);
3457 }
3458 
3459 /*
3460  * mrsas_issue_polled:	Issues a polling command
3461  * inputs:				Adapter soft state
3462  * 						Command packet to be issued
3463  *
3464  * This function is for posting of internal commands to Firmware.  MFI requires
3465  * the cmd_status to be set to 0xFF before posting.  The maximun wait time of
3466  * the poll response timer is 180 seconds.
3467  */
3468 int
3469 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3470 {
3471 	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3472 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3473 	int i, retcode = SUCCESS;
3474 
3475 	frame_hdr->cmd_status = 0xFF;
3476 	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3477 
3478 	/* Issue the frame using inbound queue port */
3479 	if (mrsas_issue_dcmd(sc, cmd)) {
3480 		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3481 		return (1);
3482 	}
3483 	/*
3484 	 * Poll response timer to wait for Firmware response.  While this
3485 	 * timer with the DELAY call could block CPU, the time interval for
3486 	 * this is only 1 millisecond.
3487 	 */
3488 	if (frame_hdr->cmd_status == 0xFF) {
3489 		for (i = 0; i < (max_wait * 1000); i++) {
3490 			if (frame_hdr->cmd_status == 0xFF)
3491 				DELAY(1000);
3492 			else
3493 				break;
3494 		}
3495 	}
3496 	if (frame_hdr->cmd_status == 0xFF) {
3497 		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3498 		    "seconds from %s\n", max_wait, __func__);
3499 		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3500 		    cmd->frame->dcmd.opcode);
3501 		retcode = ETIMEDOUT;
3502 	}
3503 	return (retcode);
3504 }
3505 
3506 /*
3507  * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3508  * input:				Adapter soft state mfi cmd pointer
3509  *
3510  * This function is called by mrsas_issued_blocked_cmd() and
3511  * mrsas_issued_polled(), to build the MPT command and then fire the command
3512  * to Firmware.
3513  */
3514 int
3515 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3516 {
3517 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3518 
3519 	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3520 	if (!req_desc) {
3521 		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3522 		return (1);
3523 	}
3524 	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3525 
3526 	return (0);
3527 }
3528 
3529 /*
3530  * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3531  * input:				Adapter soft state mfi cmd to build
3532  *
3533  * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3534  * command and prepares the MPT command to send to Firmware.
3535  */
3536 MRSAS_REQUEST_DESCRIPTOR_UNION *
3537 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3538 {
3539 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3540 	u_int16_t index;
3541 
3542 	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3543 		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3544 		return NULL;
3545 	}
3546 	index = cmd->cmd_id.context.smid;
3547 
3548 	req_desc = mrsas_get_request_desc(sc, index - 1);
3549 	if (!req_desc)
3550 		return NULL;
3551 
3552 	req_desc->addr.Words = 0;
3553 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3554 
3555 	req_desc->SCSIIO.SMID = index;
3556 
3557 	return (req_desc);
3558 }
3559 
3560 /*
3561  * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3562  * input:						Adapter soft state mfi cmd pointer
3563  *
3564  * The MPT command and the io_request are setup as a passthru command. The SGE
3565  * chain address is set to frame_phys_addr of the MFI command.
3566  */
3567 u_int8_t
3568 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
3569 {
3570 	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3571 	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
3572 	struct mrsas_mpt_cmd *mpt_cmd;
3573 	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
3574 
3575 	mpt_cmd = mrsas_get_mpt_cmd(sc);
3576 	if (!mpt_cmd)
3577 		return (1);
3578 
3579 	/* Save the smid. To be used for returning the cmd */
3580 	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
3581 
3582 	mpt_cmd->sync_cmd_idx = mfi_cmd->index;
3583 
3584 	/*
3585 	 * For cmds where the flag is set, store the flag and check on
3586 	 * completion. For cmds with this flag, don't call
3587 	 * mrsas_complete_cmd.
3588 	 */
3589 
3590 	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
3591 		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3592 
3593 	io_req = mpt_cmd->io_request;
3594 
3595 		if (sc->mrsas_gen3_ctrl) {
3596 		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
3597 
3598 		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
3599 		sgl_ptr_end->Flags = 0;
3600 	}
3601 	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
3602 
3603 	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3604 	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
3605 	io_req->ChainOffset = sc->chain_offset_mfi_pthru;
3606 
3607 	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
3608 
3609 	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3610 	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3611 
3612 	mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
3613 
3614 	return (0);
3615 }
3616 
3617 /*
3618  * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3619  * input:					Adapter soft state Command to be issued
3620  *
3621  * This function waits on an event for the command to be returned from the ISR.
3622  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3623  * internal and ioctl commands.
3624  */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status; 0xFF means "no completion yet". */
	cmd->cmd_status = 0xFF;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/* NOTE(review): stores the address of the local 'cmd' pointer, but the
	 * sleep below waits on &sc->chan itself, so the stored value appears
	 * unused for wakeup matching — confirm against the wakeup side. */
	sc->chan = (void *)&cmd;

	/* Sleep in 1-second slices until the ISR updates cmd_status. */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		/* Internal commands are bounded; IOCTLs (sync_cmd) wait forever. */
		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}

	/* Still unanswered: report the timeout with the DCMD opcode. */
	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
3669 
3670 /*
3671  * mrsas_complete_mptmfi_passthru:	Completes a command
3672  * input:	@sc:					Adapter soft state
3673  * 			@cmd:					Command to be completed
3674  * 			@status:				cmd completion status
3675  *
3676  * This function is called from mrsas_complete_cmd() after an interrupt is
3677  * received from Firmware, and io_request->Function is
3678  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3679  */
3680 void
3681 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
3682     u_int8_t status)
3683 {
3684 	struct mrsas_header *hdr = &cmd->frame->hdr;
3685 	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
3686 
3687 	/* Reset the retry counter for future re-tries */
3688 	cmd->retry_for_fw_reset = 0;
3689 
3690 	if (cmd->ccb_ptr)
3691 		cmd->ccb_ptr = NULL;
3692 
3693 	switch (hdr->cmd) {
3694 	case MFI_CMD_INVALID:
3695 		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
3696 		break;
3697 	case MFI_CMD_PD_SCSI_IO:
3698 	case MFI_CMD_LD_SCSI_IO:
3699 		/*
3700 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3701 		 * issued either through an IO path or an IOCTL path. If it
3702 		 * was via IOCTL, we will send it to internal completion.
3703 		 */
3704 		if (cmd->sync_cmd) {
3705 			cmd->sync_cmd = 0;
3706 			mrsas_wakeup(sc, cmd);
3707 			break;
3708 		}
3709 	case MFI_CMD_SMP:
3710 	case MFI_CMD_STP:
3711 	case MFI_CMD_DCMD:
3712 		/* Check for LD map update */
3713 		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
3714 		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
3715 			sc->fast_path_io = 0;
3716 			mtx_lock(&sc->raidmap_lock);
3717 			sc->map_update_cmd = NULL;
3718 			if (cmd_status != 0) {
3719 				if (cmd_status != MFI_STAT_NOT_FOUND)
3720 					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
3721 				else {
3722 					mrsas_release_mfi_cmd(cmd);
3723 					mtx_unlock(&sc->raidmap_lock);
3724 					break;
3725 				}
3726 			} else
3727 				sc->map_id++;
3728 			mrsas_release_mfi_cmd(cmd);
3729 			if (MR_ValidateMapInfo(sc))
3730 				sc->fast_path_io = 0;
3731 			else
3732 				sc->fast_path_io = 1;
3733 			mrsas_sync_map_info(sc);
3734 			mtx_unlock(&sc->raidmap_lock);
3735 			break;
3736 		}
3737 		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3738 		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
3739 			sc->mrsas_aen_triggered = 0;
3740 		}
3741 		/* FW has an updated PD sequence */
3742 		if ((cmd->frame->dcmd.opcode ==
3743 		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3744 		    (cmd->frame->dcmd.mbox.b[0] == 1)) {
3745 
3746 			mtx_lock(&sc->raidmap_lock);
3747 			sc->jbod_seq_cmd = NULL;
3748 			mrsas_release_mfi_cmd(cmd);
3749 
3750 			if (cmd_status == MFI_STAT_OK) {
3751 				sc->pd_seq_map_id++;
3752 				/* Re-register a pd sync seq num cmd */
3753 				if (megasas_sync_pd_seq_num(sc, true))
3754 					sc->use_seqnum_jbod_fp = 0;
3755 			} else {
3756 				sc->use_seqnum_jbod_fp = 0;
3757 				device_printf(sc->mrsas_dev,
3758 				    "Jbod map sync failed, status=%x\n", cmd_status);
3759 			}
3760 			mtx_unlock(&sc->raidmap_lock);
3761 			break;
3762 		}
3763 		/* See if got an event notification */
3764 		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
3765 			mrsas_complete_aen(sc, cmd);
3766 		else
3767 			mrsas_wakeup(sc, cmd);
3768 		break;
3769 	case MFI_CMD_ABORT:
3770 		/* Command issued to abort another cmd return */
3771 		mrsas_complete_abort(sc, cmd);
3772 		break;
3773 	default:
3774 		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
3775 		break;
3776 	}
3777 }
3778 
3779 /*
3780  * mrsas_wakeup:	Completes an internal command
3781  * input:			Adapter soft state
3782  * 					Command to be completed
3783  *
3784  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3785  * timer is started.  This function is called from
3786  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3787  * from the command wait.
3788  */
3789 void
3790 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3791 {
3792 	cmd->cmd_status = cmd->frame->io.cmd_status;
3793 
3794 	if (cmd->cmd_status == 0xFF)
3795 		cmd->cmd_status = 0;
3796 
3797 	sc->chan = (void *)&cmd;
3798 	wakeup_one((void *)&sc->chan);
3799 	return;
3800 }
3801 
3802 /*
3803  * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
3804  * Adapter soft state Shutdown/Hibernate
3805  *
3806  * This function issues a DCMD internal command to Firmware to initiate shutdown
3807  * of the controller.
3808  */
3809 static void
3810 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3811 {
3812 	struct mrsas_mfi_cmd *cmd;
3813 	struct mrsas_dcmd_frame *dcmd;
3814 
3815 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3816 		return;
3817 
3818 	cmd = mrsas_get_mfi_cmd(sc);
3819 	if (!cmd) {
3820 		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
3821 		return;
3822 	}
3823 	if (sc->aen_cmd)
3824 		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3825 	if (sc->map_update_cmd)
3826 		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3827 	if (sc->jbod_seq_cmd)
3828 		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
3829 
3830 	dcmd = &cmd->frame->dcmd;
3831 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3832 
3833 	dcmd->cmd = MFI_CMD_DCMD;
3834 	dcmd->cmd_status = 0x0;
3835 	dcmd->sge_count = 0;
3836 	dcmd->flags = MFI_FRAME_DIR_NONE;
3837 	dcmd->timeout = 0;
3838 	dcmd->pad_0 = 0;
3839 	dcmd->data_xfer_len = 0;
3840 	dcmd->opcode = opcode;
3841 
3842 	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3843 
3844 	mrsas_issue_blocked_cmd(sc, cmd);
3845 	mrsas_release_mfi_cmd(cmd);
3846 
3847 	return;
3848 }
3849 
3850 /*
3851  * mrsas_flush_cache:         Requests FW to flush all its caches input:
3852  * Adapter soft state
3853  *
3854  * This function is issues a DCMD internal command to Firmware to initiate
3855  * flushing of all caches.
3856  */
3857 static void
3858 mrsas_flush_cache(struct mrsas_softc *sc)
3859 {
3860 	struct mrsas_mfi_cmd *cmd;
3861 	struct mrsas_dcmd_frame *dcmd;
3862 
3863 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3864 		return;
3865 
3866 	cmd = mrsas_get_mfi_cmd(sc);
3867 	if (!cmd) {
3868 		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3869 		return;
3870 	}
3871 	dcmd = &cmd->frame->dcmd;
3872 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3873 
3874 	dcmd->cmd = MFI_CMD_DCMD;
3875 	dcmd->cmd_status = 0x0;
3876 	dcmd->sge_count = 0;
3877 	dcmd->flags = MFI_FRAME_DIR_NONE;
3878 	dcmd->timeout = 0;
3879 	dcmd->pad_0 = 0;
3880 	dcmd->data_xfer_len = 0;
3881 	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3882 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3883 
3884 	mrsas_issue_blocked_cmd(sc, cmd);
3885 	mrsas_release_mfi_cmd(cmd);
3886 
3887 	return;
3888 }
3889 
/*
 * megasas_sync_pd_seq_num:	Obtain or re-arm the JBOD (PD) sequence-number map
 * input:	@sc:	Adapter soft state
 * 			@pend:	true  = register an asynchronous "notify me on change"
 * 					        command (completed later via
 * 					        mrsas_complete_mptmfi_passthru)
 * 					false = fetch the current map synchronously (polled)
 *
 * Returns 0 on success; non-zero errno-style value on failure.  A polled
 * timeout schedules an online controller reset (OCR) via do_timedout_reset.
 */
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/* Size of the sync header plus one MR_PD_CFG_SEQ per physical device
	 * (the struct already contains the first entry). */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Double-buffered JBOD map: select the buffer for the current id. */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = (pd_seq_map_sz);
	dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
	dcmd->sgl.sge32[0].length = (pd_seq_map_sz);

	if (pend) {
		/* Async registration: FW completes this DCMD only when the
		 * PD sequence map changes; the cmd stays outstanding and is
		 * tracked in sc->jbod_seq_cmd (released on completion). */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = (MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			/* NOTE(review): cmd appears to be leaked here (no
			 * mrsas_release_mfi_cmd) and jbod_seq_cmd stays set —
			 * verify intended ownership on this failure path. */
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = MFI_FRAME_DIR_READ;

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* Sanity-check the FW-reported device count against driver limits. */
	if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	/* Only a timeout (do_ocr still set) triggers a controller reset. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	/* NOTE(review): the polled path never releases cmd; this matches the
	 * sibling mrsas_get_ld_map_info() but looks like a frame leak —
	 * confirm against the MFI command pool lifecycle. */
	return (retcode);
}
3966 
3967 /*
3968  * mrsas_get_map_info:        Load and validate RAID map input:
3969  * Adapter instance soft state
3970  *
3971  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3972  * and validate RAID map.  It returns 0 if successful, 1 other- wise.
3973  */
3974 static int
3975 mrsas_get_map_info(struct mrsas_softc *sc)
3976 {
3977 	uint8_t retcode = 0;
3978 
3979 	sc->fast_path_io = 0;
3980 	if (!mrsas_get_ld_map_info(sc)) {
3981 		retcode = MR_ValidateMapInfo(sc);
3982 		if (retcode == 0) {
3983 			sc->fast_path_io = 1;
3984 			return 0;
3985 		}
3986 	}
3987 	return 1;
3988 }
3989 
3990 /*
3991  * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
3992  * Adapter instance soft state
3993  *
3994  * Issues an internal command (DCMD) to get the FW's controller PD list
3995  * structure.
3996  */
3997 static int
3998 mrsas_get_ld_map_info(struct mrsas_softc *sc)
3999 {
4000 	int retcode = 0;
4001 	struct mrsas_mfi_cmd *cmd;
4002 	struct mrsas_dcmd_frame *dcmd;
4003 	void *map;
4004 	bus_addr_t map_phys_addr = 0;
4005 
4006 	cmd = mrsas_get_mfi_cmd(sc);
4007 	if (!cmd) {
4008 		device_printf(sc->mrsas_dev,
4009 		    "Cannot alloc for ld map info cmd.\n");
4010 		return 1;
4011 	}
4012 	dcmd = &cmd->frame->dcmd;
4013 
4014 	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4015 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4016 	if (!map) {
4017 		device_printf(sc->mrsas_dev,
4018 		    "Failed to alloc mem for ld map info.\n");
4019 		mrsas_release_mfi_cmd(cmd);
4020 		return (ENOMEM);
4021 	}
4022 	memset(map, 0, sizeof(sc->max_map_sz));
4023 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4024 
4025 	dcmd->cmd = MFI_CMD_DCMD;
4026 	dcmd->cmd_status = 0xFF;
4027 	dcmd->sge_count = 1;
4028 	dcmd->flags = MFI_FRAME_DIR_READ;
4029 	dcmd->timeout = 0;
4030 	dcmd->pad_0 = 0;
4031 	dcmd->data_xfer_len = sc->current_map_sz;
4032 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4033 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4034 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
4035 
4036 	retcode = mrsas_issue_polled(sc, cmd);
4037 	if (retcode == ETIMEDOUT)
4038 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4039 
4040 	return (retcode);
4041 }
4042 
4043 /*
4044  * mrsas_sync_map_info:        Get FW's ld_map structure input:
4045  * Adapter instance soft state
4046  *
4047  * Issues an internal command (DCMD) to get the FW's controller PD list
4048  * structure.
4049  */
4050 static int
4051 mrsas_sync_map_info(struct mrsas_softc *sc)
4052 {
4053 	int retcode = 0, i;
4054 	struct mrsas_mfi_cmd *cmd;
4055 	struct mrsas_dcmd_frame *dcmd;
4056 	uint32_t size_sync_info, num_lds;
4057 	MR_LD_TARGET_SYNC *target_map = NULL;
4058 	MR_DRV_RAID_MAP_ALL *map;
4059 	MR_LD_RAID *raid;
4060 	MR_LD_TARGET_SYNC *ld_sync;
4061 	bus_addr_t map_phys_addr = 0;
4062 
4063 	cmd = mrsas_get_mfi_cmd(sc);
4064 	if (!cmd) {
4065 		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
4066 		return ENOMEM;
4067 	}
4068 	map = sc->ld_drv_map[sc->map_id & 1];
4069 	num_lds = map->raidMap.ldCount;
4070 
4071 	dcmd = &cmd->frame->dcmd;
4072 	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
4073 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4074 
4075 	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
4076 	memset(target_map, 0, sc->max_map_sz);
4077 
4078 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
4079 
4080 	ld_sync = (MR_LD_TARGET_SYNC *) target_map;
4081 
4082 	for (i = 0; i < num_lds; i++, ld_sync++) {
4083 		raid = MR_LdRaidGet(i, map);
4084 		ld_sync->targetId = MR_GetLDTgtId(i, map);
4085 		ld_sync->seqNum = raid->seqNum;
4086 	}
4087 
4088 	dcmd->cmd = MFI_CMD_DCMD;
4089 	dcmd->cmd_status = 0xFF;
4090 	dcmd->sge_count = 1;
4091 	dcmd->flags = MFI_FRAME_DIR_WRITE;
4092 	dcmd->timeout = 0;
4093 	dcmd->pad_0 = 0;
4094 	dcmd->data_xfer_len = sc->current_map_sz;
4095 	dcmd->mbox.b[0] = num_lds;
4096 	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
4097 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4098 	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4099 	dcmd->sgl.sge32[0].length = sc->current_map_sz;
4100 
4101 	sc->map_update_cmd = cmd;
4102 	if (mrsas_issue_dcmd(sc, cmd)) {
4103 		device_printf(sc->mrsas_dev,
4104 		    "Fail to send sync map info command.\n");
4105 		return (1);
4106 	}
4107 	return (retcode);
4108 }
4109 
4110 /*
4111  * mrsas_get_pd_list:           Returns FW's PD list structure input:
4112  * Adapter soft state
4113  *
4114  * Issues an internal command (DCMD) to get the FW's controller PD list
4115  * structure.  This information is mainly used to find out about system
4116  * supported by Firmware.
4117  */
4118 static int
4119 mrsas_get_pd_list(struct mrsas_softc *sc)
4120 {
4121 	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4122 	u_int8_t do_ocr = 1;
4123 	struct mrsas_mfi_cmd *cmd;
4124 	struct mrsas_dcmd_frame *dcmd;
4125 	struct MR_PD_LIST *pd_list_mem;
4126 	struct MR_PD_ADDRESS *pd_addr;
4127 	bus_addr_t pd_list_phys_addr = 0;
4128 	struct mrsas_tmp_dcmd *tcmd;
4129 
4130 	cmd = mrsas_get_mfi_cmd(sc);
4131 	if (!cmd) {
4132 		device_printf(sc->mrsas_dev,
4133 		    "Cannot alloc for get PD list cmd\n");
4134 		return 1;
4135 	}
4136 	dcmd = &cmd->frame->dcmd;
4137 
4138 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4139 	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4140 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4141 		device_printf(sc->mrsas_dev,
4142 		    "Cannot alloc dmamap for get PD list cmd\n");
4143 		mrsas_release_mfi_cmd(cmd);
4144 		mrsas_free_tmp_dcmd(tcmd);
4145 		free(tcmd, M_MRSAS);
4146 		return (ENOMEM);
4147 	} else {
4148 		pd_list_mem = tcmd->tmp_dcmd_mem;
4149 		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4150 	}
4151 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4152 
4153 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4154 	dcmd->mbox.b[1] = 0;
4155 	dcmd->cmd = MFI_CMD_DCMD;
4156 	dcmd->cmd_status = 0xFF;
4157 	dcmd->sge_count = 1;
4158 	dcmd->flags = MFI_FRAME_DIR_READ;
4159 	dcmd->timeout = 0;
4160 	dcmd->pad_0 = 0;
4161 	dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4162 	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4163 	dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4164 	dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4165 
4166 	if (!sc->mask_interrupts)
4167 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4168 	else
4169 		retcode = mrsas_issue_polled(sc, cmd);
4170 
4171 	if (retcode == ETIMEDOUT)
4172 		goto dcmd_timeout;
4173 
4174 	/* Get the instance PD list */
4175 	pd_count = MRSAS_MAX_PD;
4176 	pd_addr = pd_list_mem->addr;
4177 	if (pd_list_mem->count < pd_count) {
4178 		memset(sc->local_pd_list, 0,
4179 		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4180 		for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4181 			sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4182 			sc->local_pd_list[pd_addr->deviceId].driveType =
4183 			    pd_addr->scsiDevType;
4184 			sc->local_pd_list[pd_addr->deviceId].driveState =
4185 			    MR_PD_STATE_SYSTEM;
4186 			pd_addr++;
4187 		}
4188 		/*
4189 		 * Use mutext/spinlock if pd_list component size increase more than
4190 		 * 32 bit.
4191 		 */
4192 		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4193 		do_ocr = 0;
4194 	}
4195 dcmd_timeout:
4196 	mrsas_free_tmp_dcmd(tcmd);
4197 	free(tcmd, M_MRSAS);
4198 
4199 	if (do_ocr)
4200 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4201 
4202 	if (!sc->mask_interrupts)
4203 		mrsas_release_mfi_cmd(cmd);
4204 
4205 	return (retcode);
4206 }
4207 
4208 /*
4209  * mrsas_get_ld_list:           Returns FW's LD list structure input:
4210  * Adapter soft state
4211  *
4212  * Issues an internal command (DCMD) to get the FW's controller PD list
4213  * structure.  This information is mainly used to find out about supported by
4214  * the FW.
4215  */
4216 static int
4217 mrsas_get_ld_list(struct mrsas_softc *sc)
4218 {
4219 	int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
4220 	u_int8_t do_ocr = 1;
4221 	struct mrsas_mfi_cmd *cmd;
4222 	struct mrsas_dcmd_frame *dcmd;
4223 	struct MR_LD_LIST *ld_list_mem;
4224 	bus_addr_t ld_list_phys_addr = 0;
4225 	struct mrsas_tmp_dcmd *tcmd;
4226 
4227 	cmd = mrsas_get_mfi_cmd(sc);
4228 	if (!cmd) {
4229 		device_printf(sc->mrsas_dev,
4230 		    "Cannot alloc for get LD list cmd\n");
4231 		return 1;
4232 	}
4233 	dcmd = &cmd->frame->dcmd;
4234 
4235 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4236 	ld_list_size = sizeof(struct MR_LD_LIST);
4237 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4238 		device_printf(sc->mrsas_dev,
4239 		    "Cannot alloc dmamap for get LD list cmd\n");
4240 		mrsas_release_mfi_cmd(cmd);
4241 		mrsas_free_tmp_dcmd(tcmd);
4242 		free(tcmd, M_MRSAS);
4243 		return (ENOMEM);
4244 	} else {
4245 		ld_list_mem = tcmd->tmp_dcmd_mem;
4246 		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4247 	}
4248 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4249 
4250 	if (sc->max256vdSupport)
4251 		dcmd->mbox.b[0] = 1;
4252 
4253 	dcmd->cmd = MFI_CMD_DCMD;
4254 	dcmd->cmd_status = 0xFF;
4255 	dcmd->sge_count = 1;
4256 	dcmd->flags = MFI_FRAME_DIR_READ;
4257 	dcmd->timeout = 0;
4258 	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4259 	dcmd->opcode = MR_DCMD_LD_GET_LIST;
4260 	dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4261 	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
4262 	dcmd->pad_0 = 0;
4263 
4264 	if (!sc->mask_interrupts)
4265 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4266 	else
4267 		retcode = mrsas_issue_polled(sc, cmd);
4268 
4269 	if (retcode == ETIMEDOUT)
4270 		goto dcmd_timeout;
4271 
4272 #if VD_EXT_DEBUG
4273 	printf("Number of LDs %d\n", ld_list_mem->ldCount);
4274 #endif
4275 
4276 	/* Get the instance LD list */
4277 	if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4278 		sc->CurLdCount = ld_list_mem->ldCount;
4279 		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4280 		for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4281 			if (ld_list_mem->ldList[ld_index].state != 0) {
4282 				ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4283 				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4284 			}
4285 		}
4286 		do_ocr = 0;
4287 	}
4288 dcmd_timeout:
4289 	mrsas_free_tmp_dcmd(tcmd);
4290 	free(tcmd, M_MRSAS);
4291 
4292 	if (do_ocr)
4293 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4294 	if (!sc->mask_interrupts)
4295 		mrsas_release_mfi_cmd(cmd);
4296 
4297 	return (retcode);
4298 }
4299 
4300 /*
4301  * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
4302  * Adapter soft state Temp command Size of alloction
4303  *
4304  * Allocates DMAable memory for a temporary internal command. The allocated
4305  * memory is initialized to all zeros upon successful loading of the dma
4306  * mapped memory.
4307  */
4308 int
4309 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4310     struct mrsas_tmp_dcmd *tcmd, int size)
4311 {
4312 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
4313 	    1, 0,
4314 	    BUS_SPACE_MAXADDR_32BIT,
4315 	    BUS_SPACE_MAXADDR,
4316 	    NULL, NULL,
4317 	    size,
4318 	    1,
4319 	    size,
4320 	    BUS_DMA_ALLOCNOW,
4321 	    NULL, NULL,
4322 	    &tcmd->tmp_dcmd_tag)) {
4323 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4324 		return (ENOMEM);
4325 	}
4326 	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4327 	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4328 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4329 		return (ENOMEM);
4330 	}
4331 	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4332 	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4333 	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4334 		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4335 		return (ENOMEM);
4336 	}
4337 	memset(tcmd->tmp_dcmd_mem, 0, size);
4338 	return (0);
4339 }
4340 
4341 /*
4342  * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
4343  * temporary dcmd pointer
4344  *
4345  * Deallocates memory of the temporary command for use in the construction of
4346  * the internal DCMD.
4347  */
4348 void
4349 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4350 {
4351 	if (tmp->tmp_dcmd_phys_addr)
4352 		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4353 	if (tmp->tmp_dcmd_mem != NULL)
4354 		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4355 	if (tmp->tmp_dcmd_tag != NULL)
4356 		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4357 }
4358 
4359 /*
4360  * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
4361  * Adapter soft state Previously issued cmd to be aborted
4362  *
4363  * This function is used to abort previously issued commands, such as AEN and
4364  * RAID map sync map commands.  The abort command is sent as a DCMD internal
4365  * command and subsequently the driver will wait for a return status.  The
4366  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4367  */
4368 static int
4369 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4370     struct mrsas_mfi_cmd *cmd_to_abort)
4371 {
4372 	struct mrsas_mfi_cmd *cmd;
4373 	struct mrsas_abort_frame *abort_fr;
4374 	u_int8_t retcode = 0;
4375 	unsigned long total_time = 0;
4376 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4377 
4378 	cmd = mrsas_get_mfi_cmd(sc);
4379 	if (!cmd) {
4380 		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4381 		return (1);
4382 	}
4383 	abort_fr = &cmd->frame->abort;
4384 
4385 	/* Prepare and issue the abort frame */
4386 	abort_fr->cmd = MFI_CMD_ABORT;
4387 	abort_fr->cmd_status = 0xFF;
4388 	abort_fr->flags = 0;
4389 	abort_fr->abort_context = cmd_to_abort->index;
4390 	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4391 	abort_fr->abort_mfi_phys_addr_hi = 0;
4392 
4393 	cmd->sync_cmd = 1;
4394 	cmd->cmd_status = 0xFF;
4395 
4396 	if (mrsas_issue_dcmd(sc, cmd)) {
4397 		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4398 		return (1);
4399 	}
4400 	/* Wait for this cmd to complete */
4401 	sc->chan = (void *)&cmd;
4402 	while (1) {
4403 		if (cmd->cmd_status == 0xFF) {
4404 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4405 		} else
4406 			break;
4407 		total_time++;
4408 		if (total_time >= max_wait) {
4409 			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4410 			retcode = 1;
4411 			break;
4412 		}
4413 	}
4414 
4415 	cmd->sync_cmd = 0;
4416 	mrsas_release_mfi_cmd(cmd);
4417 	return (retcode);
4418 }
4419 
4420 /*
4421  * mrsas_complete_abort:      Completes aborting a command input:
4422  * Adapter soft state Cmd that was issued to abort another cmd
4423  *
4424  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4425  * change after sending the command.  This function is called from
4426  * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4427  */
4428 void
4429 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4430 {
4431 	if (cmd->sync_cmd) {
4432 		cmd->sync_cmd = 0;
4433 		cmd->cmd_status = 0;
4434 		sc->chan = (void *)&cmd;
4435 		wakeup_one((void *)&sc->chan);
4436 	}
4437 	return;
4438 }
4439 
4440 /*
4441  * mrsas_aen_handler:	AEN processing callback function from thread context
4442  * input:				Adapter soft state
4443  *
4444  * Asynchronous event handler
4445  */
4446 void
4447 mrsas_aen_handler(struct mrsas_softc *sc)
4448 {
4449 	union mrsas_evt_class_locale class_locale;
4450 	int doscan = 0;
4451 	u_int32_t seq_num;
4452  	int error, fail_aen = 0;
4453 
4454 	if (sc == NULL) {
4455 		printf("invalid instance!\n");
4456 		return;
4457 	}
4458 	if (sc->remove_in_progress || sc->reset_in_progress) {
4459 		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
4460 			__func__, __LINE__);
4461 		return;
4462 	}
4463 	if (sc->evt_detail_mem) {
4464 		switch (sc->evt_detail_mem->code) {
4465 		case MR_EVT_PD_INSERTED:
4466 			fail_aen = mrsas_get_pd_list(sc);
4467 			if (!fail_aen)
4468 				mrsas_bus_scan_sim(sc, sc->sim_1);
4469 			else
4470 				goto skip_register_aen;
4471 			break;
4472 		case MR_EVT_PD_REMOVED:
4473 			fail_aen = mrsas_get_pd_list(sc);
4474 			if (!fail_aen)
4475 				mrsas_bus_scan_sim(sc, sc->sim_1);
4476 			else
4477 				goto skip_register_aen;
4478 			break;
4479 		case MR_EVT_LD_OFFLINE:
4480 		case MR_EVT_CFG_CLEARED:
4481 		case MR_EVT_LD_DELETED:
4482 			mrsas_bus_scan_sim(sc, sc->sim_0);
4483 			break;
4484 		case MR_EVT_LD_CREATED:
4485 			fail_aen = mrsas_get_ld_list(sc);
4486 			if (!fail_aen)
4487 				mrsas_bus_scan_sim(sc, sc->sim_0);
4488 			else
4489 				goto skip_register_aen;
4490 			break;
4491 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4492 		case MR_EVT_FOREIGN_CFG_IMPORTED:
4493 		case MR_EVT_LD_STATE_CHANGE:
4494 			doscan = 1;
4495 			break;
4496 		case MR_EVT_CTRL_PROP_CHANGED:
4497 			fail_aen = mrsas_get_ctrl_info(sc);
4498 			if (fail_aen)
4499 				goto skip_register_aen;
4500 			break;
4501 		default:
4502 			break;
4503 		}
4504 	} else {
4505 		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
4506 		return;
4507 	}
4508 	if (doscan) {
4509 		fail_aen = mrsas_get_pd_list(sc);
4510 		if (!fail_aen) {
4511 			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4512 			mrsas_bus_scan_sim(sc, sc->sim_1);
4513 		} else
4514 			goto skip_register_aen;
4515 
4516 		fail_aen = mrsas_get_ld_list(sc);
4517 		if (!fail_aen) {
4518 			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4519 			mrsas_bus_scan_sim(sc, sc->sim_0);
4520 		} else
4521 			goto skip_register_aen;
4522 	}
4523 	seq_num = sc->evt_detail_mem->seq_num + 1;
4524 
4525 	/* Register AEN with FW for latest sequence number plus 1 */
4526 	class_locale.members.reserved = 0;
4527 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
4528 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
4529 
4530 	if (sc->aen_cmd != NULL)
4531 		return;
4532 
4533 	mtx_lock(&sc->aen_lock);
4534 	error = mrsas_register_aen(sc, seq_num,
4535 	    class_locale.word);
4536 	mtx_unlock(&sc->aen_lock);
4537 
4538 	if (error)
4539 		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
4540 
4541 skip_register_aen:
4542 	return;
4543 
4544 }
4545 
4546 
4547 /*
4548  * mrsas_complete_aen:	Completes AEN command
4549  * input:				Adapter soft state
4550  * 						Cmd that was issued to abort another cmd
4551  *
4552  * This function will be called from ISR and will continue event processing from
4553  * thread context by enqueuing task in ev_tq (callback function
4554  * "mrsas_aen_handler").
4555  */
4556 void
4557 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4558 {
4559 	/*
4560 	 * Don't signal app if it is just an aborted previously registered
4561 	 * aen
4562 	 */
4563 	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4564 		sc->mrsas_aen_triggered = 1;
4565 		mtx_lock(&sc->aen_lock);
4566 		if (sc->mrsas_poll_waiting) {
4567 			sc->mrsas_poll_waiting = 0;
4568 			selwakeup(&sc->mrsas_select);
4569 		}
4570 		mtx_unlock(&sc->aen_lock);
4571 	} else
4572 		cmd->abort_aen = 0;
4573 
4574 	sc->aen_cmd = NULL;
4575 	mrsas_release_mfi_cmd(cmd);
4576 
4577 	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
4578 
4579 	return;
4580 }
4581 
/* Newbus method table wiring the generic device framework to this driver. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}			/* terminator */
};
4592 
/* Driver declaration: name, method table, and per-instance softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};
4598 
static devclass_t mrsas_devclass;

/* Register the driver on the PCI bus; the CAM module is required at load. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
4603