xref: /freebsd/sys/dev/mfi/mfi_tbolt.c (revision 6486b015fc84e96725fef22b0e3363351399ae83)
1  /*-
2  * Redistribution and use in source and binary forms, with or without
3  * modification, are permitted provided that the following conditions
4  * are met:
5  *
6  *            Copyright 1994-2009 The FreeBSD Project.
7  *            All rights reserved.
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
 *    THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 */
30  */
31 
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_mfi.h"
37 
38 #include <sys/param.h>
39 #include <sys/types.h>
40 #include <sys/kernel.h>
41 #include <sys/selinfo.h>
42 #include <sys/bus.h>
43 #include <sys/conf.h>
44 #include <sys/bio.h>
45 #include <sys/ioccom.h>
46 #include <sys/eventhandler.h>
47 #include <sys/callout.h>
48 #include <sys/uio.h>
49 #include <machine/bus.h>
50 #include <sys/sysctl.h>
51 #include <sys/systm.h>
52 #include <sys/malloc.h>
53 
54 #include <dev/mfi/mfireg.h>
55 #include <dev/mfi/mfi_ioctl.h>
56 #include <dev/mfi/mfivar.h>
57 
58 struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc);
59 union mfi_mpi2_request_descriptor *
60 mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
61 void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
62 int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
63     struct mfi_cmd_tbolt *cmd);
64 static inline void mfi_tbolt_return_cmd(struct mfi_softc *sc,
65     struct mfi_cmd_tbolt *cmd);
66 union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
67     *sc, struct mfi_command *cmd);
68 uint8_t
69 mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
70 union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
71     *sc, struct mfi_command *mfi_cmd);
72 int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd);
73 void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
74     struct mfi_cmd_tbolt *cmd);
75 static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
76     *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
77 static int mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command
78     *mfi_cmd, uint8_t *cdb);
79 void
80 map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
81      uint8_t ext_status);
82 static void mfi_issue_pending_cmds_again (struct mfi_softc *sc);
83 static void mfi_kill_hba (struct mfi_softc *sc);
84 static void mfi_process_fw_state_chg_isr(void *arg);
85 uint8_t mfi_tbolt_get_map_info(struct mfi_softc *sc);
86 
87 #define MFI_FUSION_ENABLE_INTERRUPT_MASK	(0x00000008)
88 
/*
 * Enable the Thunderbolt doorbell interrupt by unmasking only that bit
 * in the outbound interrupt mask register.  The read back of MFI_OMSK
 * flushes the posted write so the new mask takes effect immediately.
 */
void
mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
	MFI_READ4(sc, MFI_OMSK);
}
95 
/*
 * Mask all outbound interrupts.  The read back of MFI_OMSK flushes the
 * posted write so no further interrupts are raised after return.
 */
void
mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
	MFI_READ4(sc, MFI_OMSK);
}
102 
/*
 * Return the raw firmware status word from outbound scratchpad 0.
 * Callers decode the MFI_FWSTATE_* bits themselves.
 */
int32_t
mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}
108 
/*
 * Check and acknowledge a pending adapter interrupt.
 *
 * Returns 1 when the interrupt is not ours (caller should ignore it),
 * 0 when a Thunderbolt doorbell interrupt was claimed, or a mask with
 * MFI_FIRMWARE_STATE_CHANGE set when the firmware signalled a state
 * change via bit 0 of the outbound status register.
 */
int32_t
mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status, mfi_status = 0;

	status = MFI_READ4(sc, MFI_OSTS);

	if (status & 1) {
		/* Ack by writing the status back; the read flushes the
		 * posted write before we return. */
		MFI_WRITE4(sc, MFI_OSTS, status);
		MFI_READ4(sc, MFI_OSTS);
		if (status & MFI_STATE_CHANGE_INTERRUPT) {
			mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
		}

		return mfi_status;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 1;	/* not our interrupt */

	MFI_READ4(sc, MFI_OSTS);
	return 0;
}
131 
132 
/*
 * Post a command to the adapter: tag the frame bus address as an MFA
 * (MFI frame address) request descriptor and write the 64-bit value to
 * the inbound queue port, low word first.  frame_cnt is unused by this
 * variant (the body never references it).
 */
void
mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
   uint32_t frame_cnt)
{
	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
	MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
}
142 
143 /**
144  * mfi_tbolt_adp_reset - For controller reset
145  * @regs: MFI register set
146  */
147 int mfi_tbolt_adp_reset(struct mfi_softc *sc)
148 {
149 	int retry = 0, i = 0;
150 	int HostDiag;
151 
152 	MFI_WRITE4(sc, MFI_WSR, 0xF);
153 	MFI_WRITE4(sc, MFI_WSR, 4);
154 	MFI_WRITE4(sc, MFI_WSR, 0xB);
155 	MFI_WRITE4(sc, MFI_WSR, 2);
156 	MFI_WRITE4(sc, MFI_WSR, 7);
157 	MFI_WRITE4(sc, MFI_WSR, 0xD);
158 
159 	for (i = 0; i < 10000; i++) ;
160 
161 	HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
162 
163 	while (!( HostDiag & DIAG_WRITE_ENABLE)) {
164 		for (i = 0; i < 1000; i++);
165 		HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
166 		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
167 		    "hostdiag=%x\n", retry, HostDiag);
168 
169 		if (retry++ >= 100)
170 			return 1;
171 	}
172 
173 	device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%x\n", HostDiag);
174 
175 	MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));
176 
177 	for (i=0; i < 10; i++) {
178 		for (i = 0; i < 10000; i++);
179 	}
180 
181 	HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
182 	while (HostDiag & DIAG_RESET_ADAPTER) {
183 		for (i = 0; i < 1000; i++) ;
184 		HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
185 		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
186 		    "hostdiag=%x\n", retry, HostDiag);
187 
188 		if (retry++ >= 1000)
189 			return 1;
190 	}
191 	return 0;
192 }
193 
194 /*
195  *******************************************************************************************
196  * Description:
197  *      This routine initialize Thunderbolt specific device information
198  *******************************************************************************************
199  */
200 void mfi_tbolt_init_globals(struct mfi_softc *sc)
201 {
202 	/* Initialize single reply size and Message size */
203 	sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
204 	sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
205 
206 	/*
207 	 * Calculating how many SGEs allowed in a allocated main message
208 	 * (size of the Message - Raid SCSI IO message size(except SGE))
209 	 * / size of SGE
210 	 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
211 	 */
212 	sc->max_SGEs_in_main_message =
213 	    (uint8_t)((sc->raid_io_msg_size
214 	    - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
215 	    - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
216 	/*
217 	 * (Command frame size allocaed in SRB ext - Raid SCSI IO message size)
218 	 * / size of SGL ;
219 	 * (1280 - 256) / 16 = 64
220 	 */
221 	sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
222 	    - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
223 	/*
224 	 * (0x08-1) + 0x40 = 0x47 - 0x01 = 0x46  one is left for command
225 	 * colscing
226 	*/
227 	sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
228 	    + sc->max_SGEs_in_chain_message - 1;
229 	/*
230 	* This is the offset in number of 4 * 32bit words to the next chain
231 	* (0x100 - 0x10)/0x10 = 0xF(15)
232 	*/
233 	sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
234 	    - sizeof(MPI2_SGE_IO_UNION))/16;
235 	sc->chain_offset_value_for_mpt_ptmsg
236 	    = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL)/16;
237 	sc->mfi_cmd_pool_tbolt = NULL;
238 	sc->request_desc_pool = NULL;
239 }
240 
241 /*
242  ****************************************************************************
243  * Description:
244  *      This function calculates the memory requirement for Thunderbolt
245  *      controller
246  * Return Value:
247  *      Total required memory in bytes
248  ****************************************************************************
249  */
250 
251 uint32_t mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
252 {
253 	uint32_t size;
254 	size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;	/* for Alignment */
255 	size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
256 	size += sc->reply_size * sc->mfi_max_fw_cmds;
257 	/* this is for SGL's */
258 	size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
259 	return size;
260 }
261 
/*
 ****************************************************************************
 * Description:
 *      This function will prepare message pools for the Thunderbolt
 *      controller, carving the contiguous allocation into: aligned IO
 *      frames, aligned reply frames, then chain (SGL) frames, and
 *      recording the matching bus addresses in the softc.
 * Arguments:
 *      DevExt - HBA miniport driver's adapter data storage structure
 *      pMemLocation - start of the memory allocated for Thunderbolt.
 * Return Value:
 *      TRUE if successful
 *      FALSE if failed
 ****************************************************************************
 */
int mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
    uint32_t tbolt_contg_length)
{
	uint32_t     offset = 0;
	uint8_t      *addr = mem_location;

	/* Request Descriptor Base physical Address */

	/* For Request Decriptors Virtual Memory */
	/* Initialise the aligned IO Frames Virtual Memory Pointer */
	/*
	 * Round the IO frame pool up to a 256-byte boundary by skipping
	 * one message and masking off the low byte (assumes
	 * raid_io_msg_size >= 0x100 — it is 0x100 per the comment in
	 * mfi_tbolt_init_globals(); TODO confirm).
	 */
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->raid_io_msg_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
		sc->request_message_pool_align = addr;
	} else
		sc->request_message_pool_align = addr;

	/* Bus address of the aligned request message pool. */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;

	/* DJA XXX should this be bus dma ??? */
	/* Skip request message pool */
	addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
	/* Reply Frame Pool is initialized */
	sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
	/* Align the reply frame pool to 256 bytes the same way. */
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->reply_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
	}
	sc->reply_frame_pool_align
		    = (struct mfi_mpi2_reply_header *)addr;

	/* Bus address of the aligned reply frame pool. */
	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;

	/* Skip Reply Frame Pool; chain (SGL) frames start here. */
	addr += sc->reply_size * sc->mfi_max_fw_cmds;
	sc->reply_pool_limit = addr;

	/* initializing reply address to 0xFFFFFFFF */
	memset((uint8_t *)sc->reply_frame_pool, 0xFF,
	       (sc->reply_size * sc->mfi_max_fw_cmds));

	/* Bus address of the chain (SGL) frame region. */
	offset = sc->reply_size * sc->mfi_max_fw_cmds;
	sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
	/* initialize the last_reply_idx to 0 */
	sc->last_reply_idx = 0;
	/* Sanity check: the laid-out regions must fit the allocation. */
	offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
	    sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
	if (offset > tbolt_contg_length)
		device_printf(sc->mfi_dev, "Error:Initialized more than "
		    "allocated\n");
	return 0;
}
329 
/*
 ****************************************************************************
 * Description:
 *   This routine prepares and issues the IOC INIT2 frame to the firmware,
 *   switching the adapter into MFA (Thunderbolt) mode on success.
 * Return Value:
 *   0 on success; 1 if already initialized or the firmware rejected the
 *   command; an errno from mfi_mapcmd() if the frame could not be sent.
 ****************************************************************************
 */

int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
	struct MPI2_IOC_INIT_REQUEST   *mpi2IocInit;
	struct mfi_init_frame	*mfi_init;
	uintptr_t			offset = 0;
	bus_addr_t			phyAddress;
	MFI_ADDRESS			*mfiAddressTemp;
	struct mfi_command *cm;
	int error;

	mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
	/* Check if initialization is already completed */
	if (sc->MFA_enabled) {
		return 1;
	}

	mtx_lock(&sc->mfi_io_lock);
	/* Borrow a free MFI command to carry the INIT frame. */
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}
	/* Repoint the command at the preallocated Thunderbolt INIT frame. */
	cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
	cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
	cm->cm_dmamap = sc->mfi_tb_init_dmamap;
	cm->cm_frame->header.context = 0;
	cm->cm_sc = sc;
	cm->cm_index = 0;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object;
	 */
	mfi_init = &cm->cm_frame->init;

	/* Build the MPI2 IOC INIT request the MFI frame will point at. */
	bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
	mpi2IocInit->Function  = MPI2_FUNCTION_IOC_INIT;
	mpi2IocInit->WhoInit   = MPI2_WHOINIT_HOST_DRIVER;

	/* set MsgVersion and HeaderVersion host driver was built with */
	mpi2IocInit->MsgVersion = MPI2_VERSION;
	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
	/* Frame size and queue depth are expressed to the FW in 32-bit
	 * words and entries respectively. */
	mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size/4;
	mpi2IocInit->ReplyDescriptorPostQueueDepth
	    = (uint16_t)sc->mfi_max_fw_cmds;
	mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */

	/* Get physical address of reply frame pool */
	offset = (uintptr_t) sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp =
	    (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	/* Get physical address of request message pool */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	phyAddress =  sc->mfi_tb_busaddr + offset;
	mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
	mpi2IocInit->ReplyFreeQueueAddress =  0; /* Not supported by MR. */
	mpi2IocInit->TimeStamp = time_uptime;

	/* Report the driver version string to the firmware, if present. */
	if (sc->verbuf) {
		snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
                MEGASAS_VERSION);
		mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
		mfi_init->driver_ver_hi =
		    (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
	}
	/* Get the physical address of the mpi2 ioc init command */
	phyAddress =  sc->mfi_tb_ioc_init_busaddr;
	mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
	mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
	mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_init->header.cmd = MFI_CMD_INIT;
	mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
	mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;

	/* Issue the INIT frame synchronously (polled, no data phase). */
	cm->cm_data = NULL;
	cm->cm_flags |= MFI_CMD_POLLED;
	cm->cm_timestamp = time_uptime;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send IOC init2 "
		    "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
		mfi_release_command(cm);
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	/* The firmware wrote its completion status into the frame. */
	if (mfi_init->header.cmd_status == 0) {
		sc->MFA_enabled = 1;
	}
	else {
		device_printf(sc->mfi_dev, "Init command Failed %x\n",
		    mfi_init->header.cmd_status);
		return 1;
	}

	return 0;

}
444 
445 int mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
446 {
447 	struct mfi_cmd_tbolt *cmd;
448 	bus_addr_t io_req_base_phys;
449 	uint8_t *io_req_base;
450 	int i = 0, j = 0, offset = 0;
451 
452 	/*
453 	 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
454 	 * Allocate the dynamic array first and then allocate individual
455 	 * commands.
456 	 */
457 	sc->request_desc_pool = malloc(sizeof(
458 	    union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
459 	    M_MFIBUF, M_NOWAIT|M_ZERO);
460 	sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
461 	    * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);
462 
463 	if (!sc->mfi_cmd_pool_tbolt) {
464 		device_printf(sc->mfi_dev, "out of memory. Could not alloc "
465 		    "memory for cmd_list_fusion\n");
466 		return 1;
467 	}
468 
469 	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
470 		sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
471 		    struct mfi_cmd_tbolt),M_MFIBUF, M_NOWAIT|M_ZERO);
472 
473 		if (!sc->mfi_cmd_pool_tbolt[i]) {
474 			device_printf(sc->mfi_dev, "Could not alloc cmd list "
475 			    "fusion\n");
476 
477 			for (j = 0; j < i; j++)
478 				free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);
479 
480 			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
481 			sc->mfi_cmd_pool_tbolt = NULL;
482 		}
483 	}
484 
485 	/*
486 	 * The first 256 bytes (SMID 0) is not used. Don't add to the cmd
487 	 *list
488 	 */
489 	io_req_base = sc->request_message_pool_align
490 		+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
491 	io_req_base_phys = sc->request_msg_busaddr
492 		+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
493 
494 	/*
495 	 * Add all the commands to command pool (instance->cmd_pool)
496 	 */
497 	/* SMID 0 is reserved. Set SMID/index from 1 */
498 
499 	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
500 		cmd = sc->mfi_cmd_pool_tbolt[i];
501 		offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
502 		cmd->index = i + 1;
503 		cmd->request_desc = (union mfi_mpi2_request_descriptor *)
504 		    (sc->request_desc_pool + i);
505 		cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
506 		    (io_req_base + offset);
507 		cmd->io_request_phys_addr = io_req_base_phys + offset;
508 		cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
509 		    + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
510 		cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
511 		    * MEGASAS_MAX_SZ_CHAIN_FRAME;
512 
513 		TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
514 	}
515 	return 0;
516 }
517 
518 int mfi_tbolt_reset(struct mfi_softc *sc)
519 {
520 	uint32_t fw_state;
521 
522 	mtx_lock(&sc->mfi_io_lock);
523 	if (sc->hw_crit_error) {
524 		device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
525 		mtx_unlock(&sc->mfi_io_lock);
526 		return 1;
527 	}
528 
529 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
530 		fw_state = sc->mfi_read_fw_status(sc);
531 		if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT) {
532 			if ((sc->disableOnlineCtrlReset == 0)
533 			    && (sc->adpreset == 0)) {
534 				device_printf(sc->mfi_dev, "Adapter RESET "
535 				    "condition is detected\n");
536 				sc->adpreset = 1;
537 				sc->issuepend_done = 0;
538 				sc->MFA_enabled = 0;
539 				sc->last_reply_idx = 0;
540 				mfi_process_fw_state_chg_isr((void *) sc);
541 			}
542 			mtx_unlock(&sc->mfi_io_lock);
543 			return 0;
544 		}
545 	}
546 	mtx_unlock(&sc->mfi_io_lock);
547 	return 1;
548 }
549 
550 /*
551  * mfi_intr_tbolt - isr entry point
552  */
553 void mfi_intr_tbolt(void *arg)
554 {
555 	struct mfi_softc *sc = (struct mfi_softc *)arg;
556 
557 	if (sc->mfi_check_clear_intr(sc) == 1) {
558 		return;
559 	}
560 	if (sc->mfi_detaching)
561 		return;
562 	mtx_lock(&sc->mfi_io_lock);
563 	mfi_tbolt_complete_cmd(sc);
564 	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
565 		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
566 	mfi_startio(sc);
567 	mtx_unlock(&sc->mfi_io_lock);
568 	return;
569 }
570 
571 /**
572  * map_cmd_status -	Maps FW cmd status to OS cmd status
573  * @cmd :		Pointer to cmd
574  * @status :		status of cmd returned by FW
575  * @ext_status :	ext status of cmd returned by FW
576  */
577 
578 void
579 map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
580     uint8_t ext_status)
581 {
582 
583 	switch (status) {
584 
585 		case MFI_STAT_OK:
586 			mfi_cmd->cm_frame->header.cmd_status = 0;
587 			mfi_cmd->cm_frame->dcmd.header.cmd_status = 0;
588 			break;
589 
590 		case MFI_STAT_SCSI_IO_FAILED:
591 		case MFI_STAT_LD_INIT_IN_PROGRESS:
592 			mfi_cmd->cm_frame->header.cmd_status = status;
593 			mfi_cmd->cm_frame->header.scsi_status = ext_status;
594 			mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
595 			mfi_cmd->cm_frame->dcmd.header.scsi_status
596 			    = ext_status;
597 			break;
598 
599 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
600 			mfi_cmd->cm_frame->header.cmd_status = ext_status;
601 			mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
602 			break;
603 
604 		case MFI_STAT_LD_OFFLINE:
605 		case MFI_STAT_DEVICE_NOT_FOUND:
606 			mfi_cmd->cm_frame->header.cmd_status = status;
607 			mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
608 			break;
609 
610 		default:
611 			mfi_cmd->cm_frame->header.cmd_status = status;
612 			mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
613 			break;
614 		}
615 }
616 
/**
 * mfi_tbolt_return_cmd -	Return a cmd to free command pool
 * @sc:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 *
 * The free list is protected by the io lock, which must be held.
 */
static inline void
mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *cmd)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, cmd, next);
}
629 
630 void mfi_tbolt_complete_cmd(struct mfi_softc *sc)
631 {
632 	struct mfi_mpi2_reply_header *desc, *reply_desc;
633 	struct mfi_command *cmd_mfi;	/* For MFA Cmds */
634 	struct mfi_cmd_tbolt *cmd_tbolt;
635 	uint16_t smid;
636 	uint8_t reply_descript_type;
637 	struct mfi_mpi2_request_raid_scsi_io  *scsi_io_req;
638 	uint32_t status, extStatus;
639 	uint16_t num_completed;
640 	union desc_value val;
641 
642 	desc = (struct mfi_mpi2_reply_header *)
643 		((uintptr_t)sc->reply_frame_pool_align
644 		+ sc->last_reply_idx * sc->reply_size);
645 	reply_desc = desc;
646 
647 	if (!reply_desc)
648 		device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
649 
650 	reply_descript_type = reply_desc->ReplyFlags
651 	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
652 	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
653 		return;
654 
655 	num_completed = 0;
656 	val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
657 
658 	/* Read Reply descriptor */
659 	while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
660 
661 		smid = reply_desc->SMID;
662 		if (!smid || smid > sc->mfi_max_fw_cmds + 1) {
663 			device_printf(sc->mfi_dev, "smid is %x. Cannot "
664 			    "proceed. Returning \n", smid);
665 			return;
666 		}
667 
668 		cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
669 		cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
670 		scsi_io_req = cmd_tbolt->io_request;
671 
672 		/* Check if internal commands */
673 		status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
674 		extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
675 
676 		switch (scsi_io_req->Function) {
677 		case MPI2_FUNCTION_LD_IO_REQUEST:
678 			/* Regular Path IO. */
679 			/* Map the Fw Error Status. */
680 			map_tbolt_cmd_status(cmd_mfi, status,
681 			    extStatus);
682 			if ((cmd_mfi->cm_frame->dcmd.opcode
683 			    == MFI_DCMD_LD_MAP_GET_INFO)
684 			    && (cmd_mfi->cm_frame->dcmd.mbox[1] == 1)) {
685 					if (cmd_mfi->cm_frame->header.cmd_status
686 					    != 0)
687 						device_printf(sc->mfi_dev,
688 						    "map sync failed\n");
689 					else {
690 						sc->map_id++;
691 						device_printf(sc->mfi_dev,
692 						    "map sync completed\n");
693 						mfi_release_command(cmd_mfi);
694 					}
695 				}
696 			if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY)
697 			    == MFI_ON_MFIQ_BUSY
698 			    && (cmd_mfi->cm_flags & MFI_CMD_POLLED) == 0) {
699 				/* BHARAT poll workaround */
700 				mfi_remove_busy(cmd_mfi);
701 				cmd_mfi->cm_error = 0;
702 				mfi_complete(sc, cmd_mfi);
703 			}
704 			mfi_tbolt_return_cmd(sc, cmd_tbolt);
705 			break;
706 		case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:
707 			map_tbolt_cmd_status(cmd_mfi, status, extStatus);
708 			if ((cmd_mfi->cm_frame->dcmd.opcode
709 			    == MFI_DCMD_LD_MAP_GET_INFO)
710 			    && (cmd_mfi->cm_frame->dcmd.mbox[1] == 1)) {
711 				if (cmd_mfi->cm_frame->header.cmd_status != 0)
712 					device_printf(sc->mfi_dev,
713 					    "map sync failed\n");
714 				else {
715 					sc->map_id++;
716 					device_printf(sc->mfi_dev,
717 					    "map sync completed\n");
718 					mfi_release_command(cmd_mfi);
719 				}
720 			}
721 			if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY)
722 			    == MFI_ON_MFIQ_BUSY
723 			    && (cmd_mfi->cm_flags & MFI_CMD_POLLED) == 0) {
724 				/* BHARAT poll workaround */
725 				mfi_remove_busy(cmd_mfi);
726 				cmd_mfi->cm_error = 0;
727 				mfi_complete(sc, cmd_mfi);
728 			}
729 			mfi_tbolt_return_cmd(sc, cmd_tbolt);
730 			break;
731 		}
732 
733 		sc->last_reply_idx++;
734 		if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
735 			MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
736 			sc->last_reply_idx = 0;
737 		}
738 		/*set it back to all 0xfff.*/
739 		((union mfi_mpi2_reply_descriptor*)desc)->words =
740 			~((uint64_t)0x00);
741 
742 		num_completed++;
743 
744 		/* Get the next reply descriptor */
745 		desc = (struct mfi_mpi2_reply_header *)
746 		    ((uintptr_t)sc->reply_frame_pool_align
747 		    + sc->last_reply_idx * sc->reply_size);
748 		reply_desc = desc;
749 		val.word = ((union mfi_mpi2_reply_descriptor*)desc)->words;
750 		reply_descript_type = reply_desc->ReplyFlags
751 		    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
752 		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
753 			break;
754 	}
755 
756 	if (!num_completed)
757 		return;
758 
759 	/* update replyIndex to FW */
760 	if (sc->last_reply_idx)
761 		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
762 
763 	return;
764 }
765 
766 /**
767  * mfi_get_cmd -	Get a command from the free pool
768  * @instance:		Adapter soft state
769  *
770  * Returns a free command from the pool
771  */
772 
773 struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc
774 						  *sc)
775 {
776 	struct mfi_cmd_tbolt *cmd = NULL;
777 
778 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
779 
780 	cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh);
781 	TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
782 	memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
783 	memset((uint8_t *)cmd->io_request, 0,
784 	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);
785 	return cmd;
786 }
787 
788 union mfi_mpi2_request_descriptor *
789 mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
790 {
791 	uint8_t *p;
792 
793 	if (index >= sc->mfi_max_fw_cmds) {
794 		device_printf(sc->mfi_dev, "Invalid SMID (0x%x)request "
795 		    "for descriptor\n", index);
796 		return NULL;
797 	}
798 	p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
799 	    * index;
800 	memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
801 	return (union mfi_mpi2_request_descriptor *)p;
802 }
803 
804 
/* Used to build IOCTL cmd */
/*
 * Wrap an MFI frame (IOCTL / internal command) in an MPT pass-through
 * request: take a Thunderbolt command from the free pool, cross-link it
 * with mfi_cmd (SMID in cm_extra_frames, MFI index in sync_cmd_idx) and
 * point a single IEEE SGE chain element at the MFI frame's bus address.
 * Returns 0 on success, EBUSY when the Thunderbolt pool is empty.
 */
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct mfi_mpi2_request_raid_scsi_io *io_req;
	struct mfi_cmd_tbolt *cmd;

	cmd = mfi_tbolt_get_cmd(sc);
	if (!cmd)
		return EBUSY;
	mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */
	cmd->sync_cmd_idx = mfi_cmd->cm_index;
	io_req = cmd->io_request;
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	/* SGL offset is expressed to the FW in 32-bit words. */
	io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
	    SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;

	/* The chain element points at the original MFI frame. */
	mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;

	/*
	  In MFI pass thru, nextChainOffset will always be zero to
	  indicate the end of the chain.
	*/
	mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
		| MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	/* setting the length to the maximum length */
	mpi25_ieee_chain->Length = 1024;

	return 0;
}
840 
841 void
842 mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
843     struct mfi_cmd_tbolt *cmd)
844 {
845 	uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
846 	struct mfi_mpi2_request_raid_scsi_io	*io_request;
847 	struct IO_REQUEST_INFO io_info;
848 
849 	device_id = mfi_cmd->cm_frame->io.header.target_id;
850 	io_request = cmd->io_request;
851 	io_request->RaidContext.TargetID = device_id;
852 	io_request->RaidContext.Status = 0;
853 	io_request->RaidContext.exStatus =0;
854 
855 	start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
856 	start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;
857 
858 	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
859 	io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
860 	io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
861 	io_info.ldTgtId = device_id;
862 	if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
863 	    MFI_FRAME_DIR_READ)
864 		io_info.isRead = 1;
865 
866 		io_request->RaidContext.timeoutValue
867 		     = MFI_FUSION_FP_DEFAULT_TIMEOUT;
868 		io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
869 		io_request->DevHandle = device_id;
870 		cmd->request_desc->header.RequestFlags
871 		    = (MFI_REQ_DESCRIPT_FLAGS_LD_IO
872 		    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
873 	if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
874 		io_request->RaidContext.RegLockLength = 0x100;
875 	io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
876 	    * MFI_SECTOR_LEN;
877 }
878 
879 int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd)
880 {
881 	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
882 	    || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
883 		return 1;
884 	else
885 		return 0;
886 }
887 
/*
 * Convert a queued MFI logical-drive read/write (mfi_cmd) into the
 * Thunderbolt MPT RAID SCSI IO request held in cmd: build the CDB, fill
 * the IO fields via mfi_tbolt_build_ldio(), then construct the SGL.
 * Returns 0 on success, 1 on failure (bad CDB, non-LD command, or too
 * many SGEs).
 */
int
mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd, struct mfi_cmd_tbolt *cmd)
{
	uint32_t device_id;
	uint32_t sge_count;
	uint8_t cdb[32], cdb_len;

	memset(cdb, 0, 32);
	struct mfi_mpi2_request_raid_scsi_io *io_request = cmd->io_request;

	/* NOTE(review): device_id is assigned but never used here;
	 * mfi_tbolt_build_ldio() re-reads the target id itself. */
	device_id = mfi_cmd->cm_frame->header.target_id;

	/* Have to build CDB here for TB as BSD don't have a scsi layer */
	if ((cdb_len = mfi_tbolt_build_cdb(sc, mfi_cmd, cdb)) == 1)
		return 1;

	/* Just the CDB length,rest of the Flags are zero */
	io_request->IoFlags = cdb_len;
	memcpy(io_request->CDB.CDB32, cdb, 32);

	/* Only logical-drive reads/writes are handled on this path. */
	if (mfi_tbolt_is_ldio(mfi_cmd))
		mfi_tbolt_build_ldio(sc, mfi_cmd , cmd);
	else
		return 1;

	/*
	 * Construct SGL
	 */
	sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
	    (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
		    "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
		return 1;
	}
	io_request->RaidContext.numSGE = sge_count;
	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;

	/* Transfer direction follows the MFI opcode. */
	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		io_request->Control = MPI2_SCSIIO_CONTROL_READ;

	/* SGL offset is expressed in 32-bit words. */
	io_request->SGLOffset0 = offsetof(
	    struct mfi_mpi2_request_raid_scsi_io, SGL)/4;

	/* Sense data lands directly in the MFI command's sense buffer. */
	io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
	io_request->SenseBufferLength = MFI_SENSE_LEN;
	return 0;
}
938 
939 static int
940 mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
941     uint8_t *cdb)
942 {
943 	uint32_t lba_lo, lba_hi, num_lba;
944 	uint8_t cdb_len;
945 
946 	if (mfi_cmd == NULL || cdb == NULL)
947 		return 1;
948 	num_lba = mfi_cmd->cm_frame->io.header.data_len;
949 	lba_lo = mfi_cmd->cm_frame->io.lba_lo;
950 	lba_hi = mfi_cmd->cm_frame->io.lba_hi;
951 
952 	if ((num_lba <= 0xFF) && (lba_lo <= 0x1FFFFF)) {
953 		if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
954 			/* Read 6 or Write 6 */
955 			cdb[0] = (uint8_t) (0x0A);
956 		else
957 			cdb[0] = (uint8_t) (0x08);
958 
959 		cdb[4] = (uint8_t) num_lba;
960 		cdb[3] = (uint8_t) (lba_lo & 0xFF);
961 		cdb[2] = (uint8_t) (lba_lo >> 8);
962 		cdb[1] = (uint8_t) ((lba_lo >> 16) & 0x1F);
963 		cdb_len = 6;
964 	}
965 	else if ((num_lba <= 0xFFFF) && (lba_lo <= 0xFFFFFFFF)) {
966 		if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
967 			/* Read 10 or Write 10 */
968 			cdb[0] = (uint8_t) (0x2A);
969 		else
970 			cdb[0] = (uint8_t) (0x28);
971 		cdb[8] = (uint8_t) (num_lba & 0xFF);
972 		cdb[7] = (uint8_t) (num_lba >> 8);
973 		cdb[5] = (uint8_t) (lba_lo & 0xFF);
974 		cdb[4] = (uint8_t) (lba_lo >> 8);
975 		cdb[3] = (uint8_t) (lba_lo >> 16);
976 		cdb[2] = (uint8_t) (lba_lo >> 24);
977 		cdb_len = 10;
978 	}
979 	else if ((num_lba > 0xFFFF) && (lba_hi == 0)) {
980 		if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
981 			/* Read 12 or Write 12 */
982 			cdb[0] = (uint8_t) (0xAA);
983 		else
984 			cdb[0] = (uint8_t) (0xA8);
985 		cdb[9] = (uint8_t) (num_lba & 0xFF);
986 		cdb[8] = (uint8_t) (num_lba >> 8);
987 		cdb[7] = (uint8_t) (num_lba >> 16);
988 		cdb[6] = (uint8_t) (num_lba >> 24);
989 		cdb[5] = (uint8_t) (lba_lo & 0xFF);
990 		cdb[4] = (uint8_t) (lba_lo >> 8);
991 		cdb[3] = (uint8_t) (lba_lo >> 16);
992 		cdb[2] = (uint8_t) (lba_lo >> 24);
993 		cdb_len = 12;
994 	} else {
995 		if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
996 			cdb[0] = (uint8_t) (0x8A);
997 		else
998 			cdb[0] = (uint8_t) (0x88);
999 		cdb[13] = (uint8_t) (num_lba & 0xFF);
1000 		cdb[12] = (uint8_t) (num_lba >> 8);
1001 		cdb[11] = (uint8_t) (num_lba >> 16);
1002 		cdb[10] = (uint8_t) (num_lba >> 24);
1003 		cdb[9] = (uint8_t) (lba_lo & 0xFF);
1004 		cdb[8] = (uint8_t) (lba_lo >> 8);
1005 		cdb[7] = (uint8_t) (lba_lo >> 16);
1006 		cdb[6] = (uint8_t) (lba_lo >> 24);
1007 		cdb[5] = (uint8_t) (lba_hi & 0xFF);
1008 		cdb[4] = (uint8_t) (lba_hi >> 8);
1009 		cdb[3] = (uint8_t) (lba_hi >> 16);
1010 		cdb[2] = (uint8_t) (lba_hi >> 24);
1011 		cdb_len = 16;
1012 	}
1013 	return cdb_len;
1014 }
1015 
1016 static int
1017 mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
1018 		   pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
1019 {
1020 	uint8_t i, sg_processed, sg_to_process;
1021 	uint8_t sge_count, sge_idx;
1022 	union mfi_sgl *os_sgl;
1023 
1024 	/*
1025 	 * Return 0 if there is no data transfer
1026 	 */
1027 	if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
1028 	 	device_printf(sc->mfi_dev, "Buffer empty \n");
1029 		return 0;
1030 	}
1031 	os_sgl = mfi_cmd->cm_sg;
1032 	sge_count = mfi_cmd->cm_frame->header.sg_count;
1033 
1034 	if (sge_count > sc->mfi_max_sge) {
1035 		device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d \n",
1036 		    os_sgl, sge_count);
1037 		return sge_count;
1038 	}
1039 
1040 	if (sge_count > sc->max_SGEs_in_main_message)
1041 		/* One element to store the chain info */
1042 		sge_idx = sc->max_SGEs_in_main_message - 1;
1043 	else
1044 		sge_idx = sge_count;
1045 
1046 	for (i = 0; i < sge_idx; i++) {
1047 		/*
1048 		 * For 32bit BSD we are getting 32 bit SGL's from OS
1049 		 * but FW only take 64 bit SGL's so copying from 32 bit
1050 		 * SGL's to 64.
1051 		 */
1052 		if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
1053 			sgl_ptr->Length = os_sgl->sg_skinny[i].len;
1054 			sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
1055 		} else {
1056 			sgl_ptr->Length = os_sgl->sg32[i].len;
1057 			sgl_ptr->Address = os_sgl->sg32[i].addr;
1058 		}
1059 		sgl_ptr->Flags = 0;
1060 		sgl_ptr++;
1061 		cmd->io_request->ChainOffset = 0;
1062 	}
1063 
1064 	sg_processed = i;
1065 
1066 	if (sg_processed < sge_count) {
1067 		pMpi25IeeeSgeChain64_t sg_chain;
1068 		sg_to_process = sge_count - sg_processed;
1069 		cmd->io_request->ChainOffset =
1070 		    sc->chain_offset_value_for_main_message;
1071 		sg_chain = sgl_ptr;
1072 		/* Prepare chain element */
1073 		sg_chain->NextChainOffset = 0;
1074 		sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1075 		    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1076 		sg_chain->Length =  (sizeof(MPI2_SGE_IO_UNION) *
1077 		    (sge_count - sg_processed));
1078 		sg_chain->Address = cmd->sg_frame_phys_addr;
1079 		sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
1080 		for (; i < sge_count; i++) {
1081 			if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
1082 				sgl_ptr->Length = os_sgl->sg_skinny[i].len;
1083 				sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
1084 			} else {
1085 				sgl_ptr->Length = os_sgl->sg32[i].len;
1086 				sgl_ptr->Address = os_sgl->sg32[i].addr;
1087 			}
1088 			sgl_ptr->Flags = 0;
1089 			sgl_ptr++;
1090 		}
1091 	}
1092 	return sge_count;
1093 }
1094 
1095 union mfi_mpi2_request_descriptor *
1096 mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
1097 {
1098 	struct mfi_cmd_tbolt *cmd;
1099 	union mfi_mpi2_request_descriptor *req_desc = NULL;
1100 	uint16_t index;
1101 	cmd = mfi_tbolt_get_cmd(sc);
1102 	if (!cmd)
1103 		return NULL;
1104 	mfi_cmd->cm_extra_frames = cmd->index;
1105 	cmd->sync_cmd_idx = mfi_cmd->cm_index;
1106 
1107 	index = cmd->index;
1108 	req_desc = mfi_tbolt_get_request_descriptor(sc, index-1);
1109 	if (mfi_tbolt_build_io(sc, mfi_cmd, cmd))
1110 		return NULL;
1111 	req_desc->header.SMID = index;
1112 	return req_desc;
1113 }
1114 
1115 union mfi_mpi2_request_descriptor *
1116 mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
1117 {
1118 	union mfi_mpi2_request_descriptor *req_desc = NULL;
1119 	uint16_t index;
1120 	if (mfi_build_mpt_pass_thru(sc, cmd)) {
1121 		device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
1122 		    "cmd\n");
1123 		return NULL;
1124 	}
1125 	/* For fusion the frame_count variable is used for SMID */
1126 	index = cmd->cm_extra_frames;
1127 
1128 	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
1129 	if (!req_desc)
1130 		return NULL;
1131 
1132 	bzero(req_desc, sizeof(req_desc));
1133 	req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1134 	    MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1135 	req_desc->header.SMID = index;
1136 	return req_desc;
1137 }
1138 
1139 int
1140 mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1141 {
1142 	struct mfi_frame_header *hdr;
1143 	uint8_t *cdb;
1144 	union mfi_mpi2_request_descriptor *req_desc = NULL;
1145 	int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1146 
1147 	hdr = &cm->cm_frame->header;
1148 	cdb = cm->cm_frame->pass.cdb;
1149 	if (sc->adpreset)
1150 		return 1;
1151 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1152 		cm->cm_timestamp = time_uptime;
1153 		mfi_enqueue_busy(cm);
1154 	}
1155 	else {
1156 		hdr->cmd_status = 0xff;
1157 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1158 	}
1159 
1160 	if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
1161 		/* check for inquiry commands coming from CLI */
1162 		if (cdb[0] != 0x28 || cdb[0] != 0x2A) {
1163 			if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
1164 			    NULL) {
1165 				device_printf(sc->mfi_dev, "Mapping from MFI "
1166 				    "to MPT Failed \n");
1167 				return 1;
1168 			}
1169 		}
1170 		else
1171 			device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
1172 	}
1173 	else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
1174 	    hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
1175 		if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
1176 			device_printf(sc->mfi_dev, "LDIO Failed \n");
1177 			return 1;
1178 		}
1179 	} else
1180 		if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
1181 			device_printf(sc->mfi_dev, "Mapping from MFI to MPT "
1182 			    "Failed\n");
1183 			return 1;
1184 		}
1185 	MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
1186 	MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >>0x20));
1187 
1188 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1189 		return 0;
1190 
1191 	/* This is a polled command, so busy-wait for it to complete. */
1192 	while (hdr->cmd_status == 0xff) {
1193 		DELAY(1000);
1194 		tm -= 1;
1195 		if (tm <= 0)
1196 			break;
1197 	}
1198 
1199 	if (hdr->cmd_status == 0xff) {
1200 		device_printf(sc->mfi_dev, "Frame %p timed out "
1201 		      "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
1202 		return (ETIMEDOUT);
1203 	}
1204 	return 0;
1205 }
1206 
/*
 * After an adapter reset, walk the busy queue (oldest first, hence the
 * reverse traversal) and re-queue each outstanding command so it gets
 * re-issued to the firmware.  A command that has already survived two
 * resets is taken as evidence the adapter is wedged and the HBA is shut
 * down.  Caller must hold mfi_io_lock.
 */
static void mfi_issue_pending_cmds_again (struct mfi_softc *sc)
{
	struct mfi_command *cm, *tmp;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {

		cm->retry_for_fw_reset++;

		/*
		 * If a command has continuously been tried multiple times
		 * and causing a FW reset condition, no further recoveries
		 * should be performed on the controller
		 */
		if (cm->retry_for_fw_reset == 3) {
			device_printf(sc->mfi_dev, "megaraid_sas: command %d "
			    "was tried multiple times during adapter reset"
			    "Shutting down the HBA\n", cm->cm_index);
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) {
			struct mfi_cmd_tbolt *cmd;
			/* Release the tbolt command paired with this MFI
			 * command (cm_extra_frames holds its 1-based index). */
			mfi_remove_busy(cm);
			cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames -
			    1 ];
			mfi_tbolt_return_cmd(sc, cmd);
			/* Requeue only commands not already on some queue;
			 * event-wait DCMDs are re-armed elsewhere, so they
			 * are released rather than requeued. */
			if ((cm->cm_flags & MFI_ON_MFIQ_MASK) == 0) {
				if (cm->cm_frame->dcmd.opcode !=
				    MFI_DCMD_CTRL_EVENT_WAIT) {
					device_printf(sc->mfi_dev,
					    "APJ ****requeue command %d \n",
					    cm->cm_index);
					mfi_requeue_ready(cm);
				}
			}
			else
				mfi_release_command(cm);
		}
	}
	/* Kick the state machine to start issuing the ready queue again. */
	mfi_startio(sc);
}
1251 
1252 static void mfi_kill_hba (struct mfi_softc *sc)
1253 {
1254 	if (sc->mfi_flags & MFI_FLAGS_TBOLT)
1255 		MFI_WRITE4 (sc, 0x00,MFI_STOP_ADP);
1256 	else
1257 		MFI_WRITE4 (sc, MFI_IDB,MFI_STOP_ADP);
1258 }
1259 
1260 static void mfi_process_fw_state_chg_isr(void *arg)
1261 {
1262 	struct mfi_softc *sc= (struct mfi_softc *)arg;
1263 	struct mfi_cmd_tbolt *cmd;
1264 	int error, status;
1265 
1266 	if (sc->adpreset == 1) {
1267 		device_printf(sc->mfi_dev, "First stage of FW reset "
1268 		     "initiated...\n");
1269 
1270 		sc->mfi_adp_reset(sc);
1271 		sc->mfi_enable_intr(sc);
1272 
1273 		device_printf(sc->mfi_dev, "First stage of reset complete, "
1274 		    "second stage initiated...\n");
1275 
1276 		sc->adpreset = 2;
1277 
1278 		/* waiting for about 20 second before start the second init */
1279 		for (int wait = 0; wait < 20000; wait++)
1280 			DELAY(1000);
1281 		device_printf(sc->mfi_dev, "Second stage of FW reset "
1282 		     "initiated...\n");
1283 		while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04);
1284 
1285 		sc->mfi_disable_intr(sc);
1286 
1287 		/* We expect the FW state to be READY */
1288 		if (mfi_transition_firmware(sc)) {
1289 			device_printf(sc->mfi_dev, "controller is not in "
1290 			    "ready state\n");
1291 			mfi_kill_hba(sc);
1292 			sc->hw_crit_error= 1;
1293 			return ;
1294 		}
1295 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0)
1296 				return;
1297 
1298 		mtx_lock(&sc->mfi_io_lock);
1299 
1300 		sc->mfi_enable_intr(sc);
1301 		sc->adpreset = 0;
1302 		free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
1303 		mfi_remove_busy(sc->mfi_aen_cm);
1304 		cmd = sc->mfi_cmd_pool_tbolt[sc->mfi_aen_cm->cm_extra_frames
1305 		    - 1];
1306 		mfi_tbolt_return_cmd(sc, cmd);
1307 		if (sc->mfi_aen_cm) {
1308 			mfi_release_command(sc->mfi_aen_cm);
1309 			sc->mfi_aen_cm = NULL;
1310 		}
1311 		if (sc->map_update_cmd) {
1312 			mfi_release_command(sc->map_update_cmd);
1313 			sc->map_update_cmd = NULL;
1314 		}
1315 		mfi_issue_pending_cmds_again(sc);
1316 
1317 		/*
1318 		 * Issue pending command can result in adapter being marked
1319 		 * dead because of too many re-tries. Check for that
1320 		 * condition before clearing the reset condition on the FW
1321 		 */
1322 		if (!sc->hw_crit_error) {
1323 			/*
1324 			 * Initiate AEN (Asynchronous Event Notification)
1325 			 */
1326 			mfi_aen_setup(sc, sc->last_seq_num);
1327 			sc->issuepend_done = 1;
1328 			device_printf(sc->mfi_dev, "second stage of reset "
1329 			    "complete, FW is ready now.\n");
1330 		} else {
1331 			device_printf(sc->mfi_dev, "second stage of reset "
1332 			     "never completed, hba was marked offline.\n");
1333 		}
1334 	} else {
1335 		device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
1336 		    "called with unhandled value:%d\n", sc->adpreset);
1337 	}
1338 	mtx_unlock(&sc->mfi_io_lock);
1339 }
1340