xref: /freebsd/sys/dev/mfi/mfi.c (revision ea1764e5fcf0fc11680dc104e663ae3d1d0fd7be)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2006 IronPort Systems
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2007 LSI Corp.
30  * Copyright (c) 2007 Rajesh Prabhakaran.
31  * All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52  * SUCH DAMAGE.
53  */
54 
55 #include "opt_mfi.h"
56 
57 #include <sys/systm.h>
58 #include <sys/abi_compat.h>
59 #include <sys/sysctl.h>
60 #include <sys/malloc.h>
61 #include <sys/kernel.h>
62 #include <sys/poll.h>
63 #include <sys/selinfo.h>
64 #include <sys/bus.h>
65 #include <sys/conf.h>
66 #include <sys/eventhandler.h>
67 #include <sys/rman.h>
68 #include <sys/bio.h>
69 #include <sys/ioccom.h>
70 #include <sys/uio.h>
71 #include <sys/proc.h>
72 #include <sys/signalvar.h>
73 #include <sys/sysent.h>
74 #include <sys/taskqueue.h>
75 
76 #include <machine/bus.h>
77 #include <machine/resource.h>
78 
79 #include <dev/mfi/mfireg.h>
80 #include <dev/mfi/mfi_ioctl.h>
81 #include <dev/mfi/mfivar.h>
82 #include <sys/interrupt.h>
83 #include <sys/priority.h>
84 
85 static int	mfi_alloc_commands(struct mfi_softc *);
86 static int	mfi_comms_init(struct mfi_softc *);
87 static int	mfi_get_controller_info(struct mfi_softc *);
88 static int	mfi_get_log_state(struct mfi_softc *,
89 		    struct mfi_evt_log_state **);
90 static int	mfi_parse_entries(struct mfi_softc *, int, int);
91 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
92 static void	mfi_startup(void *arg);
93 static void	mfi_intr(void *arg);
94 static void	mfi_ldprobe(struct mfi_softc *sc);
95 static void	mfi_syspdprobe(struct mfi_softc *sc);
96 static void	mfi_handle_evt(void *context, int pending);
97 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
98 static void	mfi_aen_complete(struct mfi_command *);
99 static int	mfi_add_ld(struct mfi_softc *sc, int);
100 static void	mfi_add_ld_complete(struct mfi_command *);
101 static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
102 static void	mfi_add_sys_pd_complete(struct mfi_command *);
103 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
104 static void	mfi_bio_complete(struct mfi_command *);
105 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
106 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
107 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
108 static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
109 static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
110 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
111 static void	mfi_timeout(void *);
112 static int	mfi_user_command(struct mfi_softc *,
113 		    struct mfi_ioc_passthru *);
114 static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
115 static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
116 static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
117 static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
118 static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
119 static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
120 static void 	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
121 		    uint32_t frame_cnt);
122 static void 	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
123 		    uint32_t frame_cnt);
124 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
125 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
126 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
127 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
128 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
129 
130 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
131     "MFI driver parameters");
132 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
133 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
134            0, "event message locale");
135 
136 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
137 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
138            0, "event message class");
139 
140 static int	mfi_max_cmds = 128;
141 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
142 	   0, "Max commands limit (-1 = controller limit)");
143 
144 static int	mfi_detect_jbod_change = 1;
145 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
146 	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
147 
148 int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
149 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
150 	   &mfi_polled_cmd_timeout, 0,
151 	   "Polled command timeout - used for firmware flash etc (in seconds)");
152 
153 static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
154 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
155 	   0, "Command timeout (in seconds)");
156 
/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

/*
 * Character-device switch for the per-controller management node
 * (/dev/mfi%d); ioctl is the main entry point used by management
 * tools, with poll for AEN delivery.
 */
static struct cdevsw mfi_cdevsw = {
	.d_version = 	D_VERSION,
	.d_flags =	0,
	.d_open = 	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
/* DMA bookkeeping shared with the SKINNY-specific code. */
struct mfi_skinny_dma_info mfi_skinny;
177 
/*
 * Enable interrupt delivery on xscale (1064R) controllers via the
 * outbound interrupt mask register.
 */
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}
183 
184 static void
mfi_enable_intr_ppc(struct mfi_softc * sc)185 mfi_enable_intr_ppc(struct mfi_softc *sc)
186 {
187 	if (sc->mfi_flags & MFI_FLAGS_1078) {
188 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
189 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
190 	}
191 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
192 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
193 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
194 	}
195 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
196 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
197 	}
198 }
199 
/*
 * Return the raw firmware status word (state in the bits covered by
 * MFI_FWSTATE_MASK) from the xscale outbound message register 0.
 */
static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}
205 
/*
 * Return the raw firmware status word from the PPC-family outbound
 * scratchpad register 0.
 */
static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}
211 
212 static int
mfi_check_clear_intr_xscale(struct mfi_softc * sc)213 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
214 {
215 	int32_t status;
216 
217 	status = MFI_READ4(sc, MFI_OSTS);
218 	if ((status & MFI_OSTS_INTR_VALID) == 0)
219 		return 1;
220 
221 	MFI_WRITE4(sc, MFI_OSTS, status);
222 	return 0;
223 }
224 
225 static int
mfi_check_clear_intr_ppc(struct mfi_softc * sc)226 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
227 {
228 	int32_t status;
229 
230 	status = MFI_READ4(sc, MFI_OSTS);
231 	if (sc->mfi_flags & MFI_FLAGS_1078) {
232 		if (!(status & MFI_1078_RM)) {
233 			return 1;
234 		}
235 	}
236 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
237 		if (!(status & MFI_GEN2_RM)) {
238 			return 1;
239 		}
240 	}
241 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
242 		if (!(status & MFI_SKINNY_RM)) {
243 			return 1;
244 		}
245 	}
246 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
247 		MFI_WRITE4(sc, MFI_OSTS, status);
248 	else
249 		MFI_WRITE4(sc, MFI_ODCR0, status);
250 	return 0;
251 }
252 
/*
 * Post a command to an xscale controller: the frame bus address
 * (in the hardware's shifted format) and the frame count share the
 * inbound queue port register.
 */
static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
}
258 
259 static void
mfi_issue_cmd_ppc(struct mfi_softc * sc,bus_addr_t bus_add,uint32_t frame_cnt)260 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
261 {
262 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
263 	    MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
264 	    MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
265 	} else {
266 	    MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
267 	}
268 }
269 
/*
 * Drive the firmware through its boot-state machine until it reports
 * READY, issuing the inbound-doorbell acknowledgements the various
 * states require.  Polls the status register every 100ms for up to
 * max_wait seconds per state.  Returns 0 on success, or ENXIO if the
 * firmware faults, enters an unknown state, or stops making progress.
 */
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			"become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			/* SKINNY/TBOLT parts use a different doorbell register. */
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			/* Ask an already-running firmware to go back to READY. */
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			/* Remember the status word so scan progress can be detected. */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		/* Poll every 100ms until the state changes or max_wait expires. */
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}
352 
353 static void
mfi_addr_cb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)354 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
355 {
356 	bus_addr_t *addr;
357 
358 	addr = arg;
359 	*addr = segs[0].ds_addr;
360 }
361 
362 int
mfi_attach(struct mfi_softc * sc)363 mfi_attach(struct mfi_softc *sc)
364 {
365 	uint32_t status;
366 	int error, commsz, framessz, sensesz;
367 	int frames, unit, max_fw_sge, max_fw_cmds;
368 	uint32_t tb_mem_size = 0;
369 	struct cdev *dev_t;
370 
371 	if (sc == NULL)
372 		return EINVAL;
373 
374 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
375 	sx_init(&sc->mfi_config_lock, "MFI config");
376 	TAILQ_INIT(&sc->mfi_ld_tqh);
377 	TAILQ_INIT(&sc->mfi_syspd_tqh);
378 	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
379 	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
380 	TAILQ_INIT(&sc->mfi_evt_queue);
381 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
382 	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
383 	TAILQ_INIT(&sc->mfi_aen_pids);
384 	TAILQ_INIT(&sc->mfi_cam_ccbq);
385 
386 	mfi_initq_free(sc);
387 	mfi_initq_ready(sc);
388 	mfi_initq_busy(sc);
389 	mfi_initq_bio(sc);
390 
391 	sc->adpreset = 0;
392 	sc->last_seq_num = 0;
393 	sc->disableOnlineCtrlReset = 1;
394 	sc->issuepend_done = 1;
395 	sc->hw_crit_error = 0;
396 
397 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
398 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
399 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
400 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
401 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
402 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
403 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
404 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
405 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
406 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
407 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
408 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
409 		sc->mfi_tbolt = 1;
410 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
411 	} else {
412 		sc->mfi_enable_intr =  mfi_enable_intr_ppc;
413 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
414 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
415 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
416 	}
417 
418 	/* Before we get too far, see if the firmware is working */
419 	if ((error = mfi_transition_firmware(sc)) != 0) {
420 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
421 		    "error %d\n", error);
422 		return (ENXIO);
423 	}
424 
425 	/* Start: LSIP200113393 */
426 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
427 				1, 0,			/* algnmnt, boundary */
428 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
429 				BUS_SPACE_MAXADDR,	/* highaddr */
430 				NULL, NULL,		/* filter, filterarg */
431 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsize */
432 				1,			/* msegments */
433 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsegsize */
434 				0,			/* flags */
435 				NULL, NULL,		/* lockfunc, lockarg */
436 				&sc->verbuf_h_dmat)) {
437 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
438 		return (ENOMEM);
439 	}
440 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
441 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
442 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
443 		return (ENOMEM);
444 	}
445 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
446 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
447 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
448 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
449 	/* End: LSIP200113393 */
450 
451 	/*
452 	 * Get information needed for sizing the contiguous memory for the
453 	 * frame pool.  Size down the sgl parameter since we know that
454 	 * we will never need more than what's required for MFI_MAXPHYS.
455 	 * It would be nice if these constants were available at runtime
456 	 * instead of compile time.
457 	 */
458 	status = sc->mfi_read_fw_status(sc);
459 	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
460 	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
461 		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
462 		    max_fw_cmds, mfi_max_cmds);
463 		sc->mfi_max_fw_cmds = mfi_max_cmds;
464 	} else {
465 		sc->mfi_max_fw_cmds = max_fw_cmds;
466 	}
467 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
468 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
469 
470 	/* ThunderBolt Support get the contiguous memory */
471 
472 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
473 		mfi_tbolt_init_globals(sc);
474 		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
475 		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
476 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
477 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
478 
479 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
480 				1, 0,			/* algnmnt, boundary */
481 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
482 				BUS_SPACE_MAXADDR,	/* highaddr */
483 				NULL, NULL,		/* filter, filterarg */
484 				tb_mem_size,		/* maxsize */
485 				1,			/* msegments */
486 				tb_mem_size,		/* maxsegsize */
487 				0,			/* flags */
488 				NULL, NULL,		/* lockfunc, lockarg */
489 				&sc->mfi_tb_dmat)) {
490 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
491 			return (ENOMEM);
492 		}
493 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
494 		BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
495 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
496 			return (ENOMEM);
497 		}
498 		bzero(sc->request_message_pool, tb_mem_size);
499 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
500 		sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
501 
502 		/* For ThunderBolt memory init */
503 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
504 				0x100, 0,		/* alignmnt, boundary */
505 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
506 				BUS_SPACE_MAXADDR,	/* highaddr */
507 				NULL, NULL,		/* filter, filterarg */
508 				MFI_FRAME_SIZE,		/* maxsize */
509 				1,			/* msegments */
510 				MFI_FRAME_SIZE,		/* maxsegsize */
511 				0,			/* flags */
512 				NULL, NULL,		/* lockfunc, lockarg */
513 				&sc->mfi_tb_init_dmat)) {
514 			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
515 			return (ENOMEM);
516 		}
517 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
518 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
519 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
520 			return (ENOMEM);
521 		}
522 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
523 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
524 		sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
525 		    &sc->mfi_tb_init_busaddr, 0);
526 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
527 		    tb_mem_size)) {
528 			device_printf(sc->mfi_dev,
529 			    "Thunderbolt pool preparation error\n");
530 			return 0;
531 		}
532 
533 		/*
534 		  Allocate DMA memory mapping for MPI2 IOC Init descriptor,
535 		  we are taking it different from what we have allocated for Request
536 		  and reply descriptors to avoid confusion later
537 		*/
538 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
539 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
540 				1, 0,			/* algnmnt, boundary */
541 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
542 				BUS_SPACE_MAXADDR,	/* highaddr */
543 				NULL, NULL,		/* filter, filterarg */
544 				tb_mem_size,		/* maxsize */
545 				1,			/* msegments */
546 				tb_mem_size,		/* maxsegsize */
547 				0,			/* flags */
548 				NULL, NULL,		/* lockfunc, lockarg */
549 				&sc->mfi_tb_ioc_init_dmat)) {
550 			device_printf(sc->mfi_dev,
551 			    "Cannot allocate comms DMA tag\n");
552 			return (ENOMEM);
553 		}
554 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
555 		    (void **)&sc->mfi_tb_ioc_init_desc,
556 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
557 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
558 			return (ENOMEM);
559 		}
560 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
561 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
562 		sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
563 		    &sc->mfi_tb_ioc_init_busaddr, 0);
564 	}
565 	/*
566 	 * Create the dma tag for data buffers.  Used both for block I/O
567 	 * and for various internal data queries.
568 	 */
569 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
570 				1, 0,			/* algnmnt, boundary */
571 				BUS_SPACE_MAXADDR,	/* lowaddr */
572 				BUS_SPACE_MAXADDR,	/* highaddr */
573 				NULL, NULL,		/* filter, filterarg */
574 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
575 				sc->mfi_max_sge,	/* nsegments */
576 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
577 				BUS_DMA_ALLOCNOW,	/* flags */
578 				busdma_lock_mutex,	/* lockfunc */
579 				&sc->mfi_io_lock,	/* lockfuncarg */
580 				&sc->mfi_buffer_dmat)) {
581 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
582 		return (ENOMEM);
583 	}
584 
585 	/*
586 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
587 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
588 	 * entry, so the calculated size here will be will be 1 more than
589 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
590 	 */
591 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
592 	    sizeof(struct mfi_hwcomms);
593 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
594 				1, 0,			/* algnmnt, boundary */
595 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
596 				BUS_SPACE_MAXADDR,	/* highaddr */
597 				NULL, NULL,		/* filter, filterarg */
598 				commsz,			/* maxsize */
599 				1,			/* msegments */
600 				commsz,			/* maxsegsize */
601 				0,			/* flags */
602 				NULL, NULL,		/* lockfunc, lockarg */
603 				&sc->mfi_comms_dmat)) {
604 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
605 		return (ENOMEM);
606 	}
607 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
608 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
609 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
610 		return (ENOMEM);
611 	}
612 	bzero(sc->mfi_comms, commsz);
613 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
614 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
615 	/*
616 	 * Allocate DMA memory for the command frames.  Keep them in the
617 	 * lower 4GB for efficiency.  Calculate the size of the commands at
618 	 * the same time; each command is one 64 byte frame plus a set of
619          * additional frames for holding sg lists or other data.
620 	 * The assumption here is that the SG list will start at the second
621 	 * frame and not use the unused bytes in the first frame.  While this
622 	 * isn't technically correct, it simplifies the calculation and allows
623 	 * for command frames that might be larger than an mfi_io_frame.
624 	 */
625 	if (sizeof(bus_addr_t) == 8) {
626 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
627 		sc->mfi_flags |= MFI_FLAGS_SG64;
628 	} else {
629 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
630 	}
631 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
632 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
633 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
634 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
635 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
636 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
637 				64, 0,			/* algnmnt, boundary */
638 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
639 				BUS_SPACE_MAXADDR,	/* highaddr */
640 				NULL, NULL,		/* filter, filterarg */
641 				framessz,		/* maxsize */
642 				1,			/* nsegments */
643 				framessz,		/* maxsegsize */
644 				0,			/* flags */
645 				NULL, NULL,		/* lockfunc, lockarg */
646 				&sc->mfi_frames_dmat)) {
647 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
648 		return (ENOMEM);
649 	}
650 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
651 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
652 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
653 		return (ENOMEM);
654 	}
655 	bzero(sc->mfi_frames, framessz);
656 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
657 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
658 	/*
659 	 * Allocate DMA memory for the frame sense data.  Keep them in the
660 	 * lower 4GB for efficiency
661 	 */
662 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
663 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
664 				4, 0,			/* algnmnt, boundary */
665 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
666 				BUS_SPACE_MAXADDR,	/* highaddr */
667 				NULL, NULL,		/* filter, filterarg */
668 				sensesz,		/* maxsize */
669 				1,			/* nsegments */
670 				sensesz,		/* maxsegsize */
671 				0,			/* flags */
672 				NULL, NULL,		/* lockfunc, lockarg */
673 				&sc->mfi_sense_dmat)) {
674 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
675 		return (ENOMEM);
676 	}
677 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
678 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
679 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
680 		return (ENOMEM);
681 	}
682 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
683 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
684 	if ((error = mfi_alloc_commands(sc)) != 0)
685 		return (error);
686 
687 	/* Before moving the FW to operational state, check whether
688 	 * hostmemory is required by the FW or not
689 	 */
690 
691 	/* ThunderBolt MFI_IOC2 INIT */
692 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
693 		sc->mfi_disable_intr(sc);
694 		mtx_lock(&sc->mfi_io_lock);
695 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
696 			device_printf(sc->mfi_dev,
697 			    "TB Init has failed with error %d\n",error);
698 			mtx_unlock(&sc->mfi_io_lock);
699 			return error;
700 		}
701 		mtx_unlock(&sc->mfi_io_lock);
702 
703 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
704 			return error;
705 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
706 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
707 		    &sc->mfi_intr)) {
708 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
709 			return (EINVAL);
710 		}
711 		sc->mfi_intr_ptr = mfi_intr_tbolt;
712 		sc->mfi_enable_intr(sc);
713 	} else {
714 		if ((error = mfi_comms_init(sc)) != 0)
715 			return (error);
716 
717 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
718 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
719 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
720 			return (EINVAL);
721 		}
722 		sc->mfi_intr_ptr = mfi_intr;
723 		sc->mfi_enable_intr(sc);
724 	}
725 	if ((error = mfi_get_controller_info(sc)) != 0)
726 		return (error);
727 	sc->disableOnlineCtrlReset = 0;
728 
729 	/* Register a config hook to probe the bus for arrays */
730 	sc->mfi_ich.ich_func = mfi_startup;
731 	sc->mfi_ich.ich_arg = sc;
732 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
733 		device_printf(sc->mfi_dev, "Cannot establish configuration "
734 		    "hook\n");
735 		return (EINVAL);
736 	}
737 	mtx_lock(&sc->mfi_io_lock);
738 	if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
739 		mtx_unlock(&sc->mfi_io_lock);
740 		return (error);
741 	}
742 	mtx_unlock(&sc->mfi_io_lock);
743 
744 	/*
745 	 * Register a shutdown handler.
746 	 */
747 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
748 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
749 		device_printf(sc->mfi_dev, "Warning: shutdown event "
750 		    "registration failed\n");
751 	}
752 
753 	/*
754 	 * Create the control device for doing management
755 	 */
756 	unit = device_get_unit(sc->mfi_dev);
757 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
758 	    0640, "mfi%d", unit);
759 	if (unit == 0)
760 		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
761 		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
762 	if (sc->mfi_cdev != NULL)
763 		sc->mfi_cdev->si_drv1 = sc;
764 	SYSCTL_ADD_STRING(device_get_sysctl_ctx(sc->mfi_dev),
765 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
766 	    OID_AUTO, "driver_version", CTLFLAG_RD, MEGASAS_VERSION,
767 	    strlen(MEGASAS_VERSION), "driver version");
768 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
769 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
770 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
771 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
772 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
773 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
774 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
775 	    &sc->mfi_keep_deleted_volumes, 0,
776 	    "Don't detach the mfid device for a busy volume that is deleted");
777 
778 	device_add_child(sc->mfi_dev, "mfip", DEVICE_UNIT_ANY);
779 	bus_attach_children(sc->mfi_dev);
780 
781 	/* Start the timeout watchdog */
782 	callout_init(&sc->mfi_watchdog_callout, 1);
783 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
784 	    mfi_timeout, sc);
785 
786 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
787 		mtx_lock(&sc->mfi_io_lock);
788 		mfi_tbolt_sync_map_info(sc);
789 		mtx_unlock(&sc->mfi_io_lock);
790 	}
791 
792 	return (0);
793 }
794 
795 static int
mfi_alloc_commands(struct mfi_softc * sc)796 mfi_alloc_commands(struct mfi_softc *sc)
797 {
798 	struct mfi_command *cm;
799 	int i, j;
800 
801 	/*
802 	 * XXX Should we allocate all the commands up front, or allocate on
803 	 * demand later like 'aac' does?
804 	 */
805 	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
806 	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
807 
808 	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
809 		cm = &sc->mfi_commands[i];
810 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
811 		    sc->mfi_cmd_size * i);
812 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
813 		    sc->mfi_cmd_size * i;
814 		cm->cm_frame->header.context = i;
815 		cm->cm_sense = &sc->mfi_sense[i];
816 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
817 		cm->cm_sc = sc;
818 		cm->cm_index = i;
819 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
820 		    &cm->cm_dmamap) == 0) {
821 			mtx_lock(&sc->mfi_io_lock);
822 			mfi_release_command(cm);
823 			mtx_unlock(&sc->mfi_io_lock);
824 		} else {
825 			device_printf(sc->mfi_dev, "Failed to allocate %d "
826 			   "command blocks, only allocated %d\n",
827 			    sc->mfi_max_fw_cmds, i - 1);
828 			for (j = 0; j < i; j++) {
829 				cm = &sc->mfi_commands[i];
830 				bus_dmamap_destroy(sc->mfi_buffer_dmat,
831 				    cm->cm_dmamap);
832 			}
833 			free(sc->mfi_commands, M_MFIBUF);
834 			sc->mfi_commands = NULL;
835 
836 			return (ENOMEM);
837 		}
838 	}
839 
840 	return (0);
841 }
842 
/*
 * Return a command to the controller's free pool.
 *
 * Clears the frame header fields while preserving the firmware context
 * tag, removes the command from any intermediate queue it may still be
 * on, releases the companion ThunderBolt command if one is attached,
 * resets the per-command bookkeeping, and enqueues it on the free list.
 * Caller must hold the io lock.
 */
void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * Command may be on other queues e.g. busy queue depending on the
	 * flow of a previous call to mfi_mapcmd, so ensure its dequeued
	 * properly
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	/* tbolt cleanup */
	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	/* Words 2 and 3 are deliberately skipped: they hold the context. */
	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}
902 
903 int
mfi_dcmd_command(struct mfi_softc * sc,struct mfi_command ** cmp,uint32_t opcode,void ** bufp,size_t bufsize)904 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
905     uint32_t opcode, void **bufp, size_t bufsize)
906 {
907 	struct mfi_command *cm;
908 	struct mfi_dcmd_frame *dcmd;
909 	void *buf = NULL;
910 	uint32_t context = 0;
911 
912 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
913 
914 	cm = mfi_dequeue_free(sc);
915 	if (cm == NULL)
916 		return (EBUSY);
917 
918 	/* Zero out the MFI frame */
919 	context = cm->cm_frame->header.context;
920 	bzero(cm->cm_frame, sizeof(union mfi_frame));
921 	cm->cm_frame->header.context = context;
922 
923 	if ((bufsize > 0) && (bufp != NULL)) {
924 		if (*bufp == NULL) {
925 			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
926 			if (buf == NULL) {
927 				mfi_release_command(cm);
928 				return (ENOMEM);
929 			}
930 			*bufp = buf;
931 		} else {
932 			buf = *bufp;
933 		}
934 	}
935 
936 	dcmd =  &cm->cm_frame->dcmd;
937 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
938 	dcmd->header.cmd = MFI_CMD_DCMD;
939 	dcmd->header.timeout = 0;
940 	dcmd->header.flags = 0;
941 	dcmd->header.data_len = bufsize;
942 	dcmd->header.scsi_status = 0;
943 	dcmd->opcode = opcode;
944 	cm->cm_sg = &dcmd->sgl;
945 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
946 	cm->cm_flags = 0;
947 	cm->cm_data = buf;
948 	cm->cm_private = buf;
949 	cm->cm_len = bufsize;
950 
951 	*cmp = cm;
952 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
953 		*bufp = buf;
954 	return (0);
955 }
956 
/*
 * Send the MFI_CMD_INIT frame that hands the firmware the physical
 * addresses of the shared reply queue and its producer/consumer
 * indices.  Issued polled; returns 0 on success or an errno.
 */
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame, preserving the firmware context tag. */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object;
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	/* Reply queue gets one extra slot beyond the command count. */
	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	/* Point the firmware at the qinfo we stashed after the frame. */
	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}
1006 
/*
 * Query MFI_DCMD_CTRL_GETINFO (polled) and derive the maximum I/O size
 * the driver will issue.  If the query fails, fall back to a size based
 * on the S/G segment limit and report success so attach can continue.
 */
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		/* Conservative fallback; deliberately not fatal to attach. */
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	/* Max I/O is the lesser of the stripe limit and the request limit. */
	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}
1048 
1049 static int
mfi_get_log_state(struct mfi_softc * sc,struct mfi_evt_log_state ** log_state)1050 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1051 {
1052 	struct mfi_command *cm = NULL;
1053 	int error;
1054 
1055 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1056 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1057 	    (void **)log_state, sizeof(**log_state));
1058 	if (error)
1059 		goto out;
1060 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1061 
1062 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1063 		device_printf(sc->mfi_dev, "Failed to get log state\n");
1064 		goto out;
1065 	}
1066 
1067 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1068 	    BUS_DMASYNC_POSTREAD);
1069 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1070 
1071 out:
1072 	if (cm)
1073 		mfi_release_command(cm);
1074 
1075 	return (error);
1076 }
1077 
/*
 * Arm asynchronous event notification.  When seq_start is 0, read the
 * firmware event-log state, replay events logged since the last clean
 * shutdown, and register starting at the newest sequence number;
 * otherwise register starting at seq_start.  Caller holds the io lock.
 */
int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class  = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	/* free(NULL) is a no-op, so this is safe on the early-error paths. */
	free(log_state, M_MFIBUF);

	return (error);
}
1113 
1114 int
mfi_wait_command(struct mfi_softc * sc,struct mfi_command * cm)1115 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1116 {
1117 
1118 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1119 	cm->cm_complete = NULL;
1120 
1121 	/*
1122 	 * MegaCli can issue a DCMD of 0.  In this case do nothing
1123 	 * and return 0 to it as status
1124 	 */
1125 	if (cm->cm_frame->dcmd.opcode == 0) {
1126 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1127 		cm->cm_error = 0;
1128 		return (cm->cm_error);
1129 	}
1130 	mfi_enqueue_ready(cm);
1131 	mfi_startio(sc);
1132 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1133 		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1134 	return (cm->cm_error);
1135 }
1136 
/*
 * Release every resource the driver owns: watchdog callout, control
 * device, command pool and per-command DMA maps, interrupt, the shared
 * DMA regions (sense, frames, comms), all ThunderBolt-only allocations,
 * and finally the DMA tags and locks.  Each resource is checked before
 * release, so this is safe to call on a partially attached softc.
 */
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	/* Each DMA region: unload map, free memory, then destroy the tag. */
	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	/* Lock initialization is used as the marker that both locks exist. */
	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}
1256 
1257 static void
mfi_startup(void * arg)1258 mfi_startup(void *arg)
1259 {
1260 	struct mfi_softc *sc;
1261 
1262 	sc = (struct mfi_softc *)arg;
1263 
1264 	sc->mfi_enable_intr(sc);
1265 	sx_xlock(&sc->mfi_config_lock);
1266 	mtx_lock(&sc->mfi_io_lock);
1267 	mfi_ldprobe(sc);
1268 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1269 	    mfi_syspdprobe(sc);
1270 	mtx_unlock(&sc->mfi_io_lock);
1271 	sx_xunlock(&sc->mfi_config_lock);
1272 
1273 	config_intrhook_disestablish(&sc->mfi_ich);
1274 }
1275 
/*
 * Interrupt handler: drain the shared reply queue, completing each
 * command whose context index the firmware posted, then unfreeze and
 * restart deferred I/O.  Re-checks the producer index afterwards to
 * catch completions that raced the drain.
 */
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	/* Not our interrupt, per the controller-specific check routine. */
	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		/* The reply queue holds mfi_max_fw_cmds + 1 entries; wrap. */
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}
1321 
/*
 * Quiesce the controller: abort any outstanding AEN and map-sync
 * commands, then issue a polled MFI_DCMD_CTRL_SHUTDOWN.  Returns 0 on
 * success or an errno from command allocation/submission.
 */
int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	/* Cancel the long-running AEN wait so it doesn't block shutdown. */
	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}
1358 
1359 static void
mfi_syspdprobe(struct mfi_softc * sc)1360 mfi_syspdprobe(struct mfi_softc *sc)
1361 {
1362 	struct mfi_frame_header *hdr;
1363 	struct mfi_command *cm = NULL;
1364 	struct mfi_pd_list *pdlist = NULL;
1365 	struct mfi_system_pd *syspd, *tmp;
1366 	struct mfi_system_pending *syspd_pend;
1367 	int error, i, found;
1368 
1369 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1370 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1371 	/* Add SYSTEM PD's */
1372 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1373 	    (void **)&pdlist, sizeof(*pdlist));
1374 	if (error) {
1375 		device_printf(sc->mfi_dev,
1376 		    "Error while forming SYSTEM PD list\n");
1377 		goto out;
1378 	}
1379 
1380 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1381 	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1382 	cm->cm_frame->dcmd.mbox[1] = 0;
1383 	if (mfi_mapcmd(sc, cm) != 0) {
1384 		device_printf(sc->mfi_dev,
1385 		    "Failed to get syspd device listing\n");
1386 		goto out;
1387 	}
1388 	bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1389 	    BUS_DMASYNC_POSTREAD);
1390 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1391 	hdr = &cm->cm_frame->header;
1392 	if (hdr->cmd_status != MFI_STAT_OK) {
1393 		device_printf(sc->mfi_dev,
1394 		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1395 		goto out;
1396 	}
1397 	/* Get each PD and add it to the system */
1398 	for (i = 0; i < pdlist->count; i++) {
1399 		if (pdlist->addr[i].device_id ==
1400 		    pdlist->addr[i].encl_device_id)
1401 			continue;
1402 		found = 0;
1403 		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1404 			if (syspd->pd_id == pdlist->addr[i].device_id)
1405 				found = 1;
1406 		}
1407 		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1408 			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1409 				found = 1;
1410 		}
1411 		if (found == 0)
1412 			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1413 	}
1414 	/* Delete SYSPD's whose state has been changed */
1415 	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1416 		found = 0;
1417 		for (i = 0; i < pdlist->count; i++) {
1418 			if (syspd->pd_id == pdlist->addr[i].device_id) {
1419 				found = 1;
1420 				break;
1421 			}
1422 		}
1423 		if (found == 0) {
1424 			printf("DELETE\n");
1425 			mtx_unlock(&sc->mfi_io_lock);
1426 			bus_topo_lock();
1427 			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1428 			bus_topo_unlock();
1429 			mtx_lock(&sc->mfi_io_lock);
1430 		}
1431 	}
1432 out:
1433 	if (pdlist)
1434 	    free(pdlist, M_MFIBUF);
1435 	if (cm)
1436 	    mfi_release_command(cm);
1437 
1438 	return;
1439 }
1440 
/*
 * Enumerate the controller's logical drives and attach an mfid child
 * for any LD that is neither attached nor already pending attach.
 * Called with the config lock exclusively held and the io lock held.
 */
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	/* Not polled: sleep until the firmware answers. */
	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	/* Attach only targets not already known or pending. */
	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}
1492 
1493 /*
1494  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1495  * the bits in 24-31 are all set, then it is the number of seconds since
1496  * boot.
1497  */
1498 static const char *
format_timestamp(uint32_t timestamp)1499 format_timestamp(uint32_t timestamp)
1500 {
1501 	static char buffer[32];
1502 
1503 	if ((timestamp & 0xff000000) == 0xff000000)
1504 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1505 		    0x00ffffff);
1506 	else
1507 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
1508 	return (buffer);
1509 }
1510 
1511 static const char *
format_class(int8_t class)1512 format_class(int8_t class)
1513 {
1514 	static char buffer[6];
1515 
1516 	switch (class) {
1517 	case MFI_EVT_CLASS_DEBUG:
1518 		return ("debug");
1519 	case MFI_EVT_CLASS_PROGRESS:
1520 		return ("progress");
1521 	case MFI_EVT_CLASS_INFO:
1522 		return ("info");
1523 	case MFI_EVT_CLASS_WARNING:
1524 		return ("WARN");
1525 	case MFI_EVT_CLASS_CRITICAL:
1526 		return ("CRIT");
1527 	case MFI_EVT_CLASS_FATAL:
1528 		return ("FATAL");
1529 	case MFI_EVT_CLASS_DEAD:
1530 		return ("DEAD");
1531 	default:
1532 		snprintf(buffer, sizeof(buffer), "%d", class);
1533 		return (buffer);
1534 	}
1535 }
1536 
/*
 * Log an AEN event to the console and react to topology changes:
 * rescan JBODs on host-bus-scan requests and PD insertions, detach the
 * child device of an LD that went offline or a SYSPD that was removed,
 * and notify the CAM rescan callback of PD arrival/departure.
 */
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

        /* Don't act on old AEN's or while shutting down */
        if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
                return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
		    device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and Delete
				 * invalid SYSPD's
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/* During load time driver reads all the events starting
		 * from the one that has been logged after shutdown. Avoid
		 * these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
			/* Remove the LD */
			struct mfi_disk *ld;
			/* ld is NULL after the loop if no match was found. */
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			Fix: for kernel panics when SSCD is removed
			KASSERT(ld != NULL, ("volume dissappeared"));
			*/
			if (ld != NULL) {
				bus_topo_lock();
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				bus_topo_unlock();
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						bus_topo_lock();
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						bus_topo_unlock();
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		/* Tell CAM (if attached) about PD arrival/departure. */
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}
1631 
1632 static void
mfi_queue_evt(struct mfi_softc * sc,struct mfi_evt_detail * detail)1633 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1634 {
1635 	struct mfi_evt_queue_elm *elm;
1636 
1637 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1638 	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1639 	if (elm == NULL)
1640 		return;
1641 	memcpy(&elm->detail, detail, sizeof(*detail));
1642 	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1643 	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1644 }
1645 
1646 static void
mfi_handle_evt(void * context,int pending)1647 mfi_handle_evt(void *context, int pending)
1648 {
1649 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1650 	struct mfi_softc *sc;
1651 	struct mfi_evt_queue_elm *elm;
1652 
1653 	sc = context;
1654 	TAILQ_INIT(&queue);
1655 	mtx_lock(&sc->mfi_io_lock);
1656 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1657 	mtx_unlock(&sc->mfi_io_lock);
1658 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1659 		TAILQ_REMOVE(&queue, elm, link);
1660 		mfi_decode_evt(sc, &elm->detail);
1661 		free(elm, M_MFIBUF);
1662 	}
1663 }
1664 
/*
 * Register the asynchronous event notification command with the
 * firmware.  If an AEN is already outstanding and its class/locale
 * already covers the request, this is a no-op; otherwise the old
 * command is aborted and a fresh registration is queued, completing
 * via mfi_aen_complete().  Caller must hold the io lock.
 *
 * NOTE(review): the merged locale/class computed below is never written
 * into the new request — mbox gets the caller's 'seq'/'locale' verbatim,
 * so the merge result appears unused.  Confirm intent before changing.
 */
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		/* mbox[1] of the outstanding AEN holds its class/locale. */
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^current_aen.members.locale)) {
			/* Existing registration already covers the request. */
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mfi_abort(sc, &sc->mfi_aen_cm);
		}
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	if (error)
		goto out;

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);

out:
	return (error);
}
1714 
/*
 * Completion handler for the outstanding AEN command.  For a genuine
 * event: queue it for decoding, wake poll/select waiters, signal
 * registered processes with SIGIO, then re-arm with the next sequence
 * number.  For an abort (or invalid status), just tear the command
 * down.  Runs with the io lock held.
 */
static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->mfi_aen_cm == NULL)
		return;

	hdr = &cm->cm_frame->header;

	if (sc->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		/* Wake anyone blocked in poll/select on the control device. */
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		/* Deliver SIGIO to every process registered for AENs. */
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
		    tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			kern_psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	/* Wake threads (e.g. mfi_abort callers) sleeping on the AEN slot. */
	wakeup(&sc->mfi_aen_cm);
	sc->mfi_aen_cm = NULL;
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted)
		mfi_aen_setup(sc, seq);
}
1765 
1766 #define MAX_EVENTS 15
1767 
/*
 * Fetch and queue logged events with sequence numbers from start_seq up
 * to stop_seq, reading from the firmware in batches of MAX_EVENTS via
 * polled MFI_DCMD_CTRL_EVENT_GET commands.  The firmware log is a
 * circular buffer, so stop_seq may be numerically below start_seq.
 * Returns 0 on success or an errno.  Caller must hold the io lock.
 */
static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_evt_list *el;
	union mfi_evt class_locale;
	int error, i, seq, size;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class  = mfi_event_class;

	/* mfi_evt_list already embeds one detail; add MAX_EVENTS - 1 more. */
	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
		* (MAX_EVENTS - 1);
	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
	if (el == NULL)
		return (ENOMEM);

	for (seq = start_seq;;) {
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			free(el, M_MFIBUF);
			return (EBUSY);
		}

		dcmd = &cm->cm_frame->dcmd;
		bzero(dcmd->mbox, MFI_MBOX_SIZE);
		dcmd->header.cmd = MFI_CMD_DCMD;
		dcmd->header.timeout = 0;
		dcmd->header.data_len = size;
		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
		((uint32_t *)&dcmd->mbox)[0] = seq;
		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
		cm->cm_sg = &dcmd->sgl;
		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
		cm->cm_data = el;
		cm->cm_len = size;

		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Failed to get controller entries\n");
			mfi_release_command(cm);
			break;
		}

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

		/* NOT_FOUND means the log has been drained; normal exit. */
		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
			mfi_release_command(cm);
			break;
		}
		if (dcmd->header.cmd_status != MFI_STAT_OK) {
			device_printf(sc->mfi_dev,
			    "Error %d fetching controller entries\n",
			    dcmd->header.cmd_status);
			mfi_release_command(cm);
			error = EIO;
			break;
		}
		mfi_release_command(cm);

		for (i = 0; i < el->count; i++) {
			/*
			 * If this event is newer than 'stop_seq' then
			 * break out of the loop.  Note that the log
			 * is a circular buffer so we have to handle
			 * the case that our stop point is earlier in
			 * the buffer than our start point.
			 */
			if (el->event[i].seq >= stop_seq) {
				if (start_seq <= stop_seq)
					break;
				else if (el->event[i].seq < start_seq)
					break;
			}
			mfi_queue_evt(sc, &el->event[i]);
		}
		/* Continue the next batch after the last event we saw. */
		seq = el->event[el->count - 1].seq + 1;
	}

	free(el, M_MFIBUF);
	return (error);
}
1856 
1857 static int
mfi_add_ld(struct mfi_softc * sc,int id)1858 mfi_add_ld(struct mfi_softc *sc, int id)
1859 {
1860 	struct mfi_command *cm;
1861 	struct mfi_dcmd_frame *dcmd = NULL;
1862 	struct mfi_ld_info *ld_info = NULL;
1863 	struct mfi_disk_pending *ld_pend;
1864 	int error;
1865 
1866 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1867 
1868 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1869 	if (ld_pend != NULL) {
1870 		ld_pend->ld_id = id;
1871 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1872 	}
1873 
1874 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1875 	    (void **)&ld_info, sizeof(*ld_info));
1876 	if (error) {
1877 		device_printf(sc->mfi_dev,
1878 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1879 		if (ld_info)
1880 			free(ld_info, M_MFIBUF);
1881 		return (error);
1882 	}
1883 	cm->cm_flags = MFI_CMD_DATAIN;
1884 	dcmd = &cm->cm_frame->dcmd;
1885 	dcmd->mbox[0] = id;
1886 	if (mfi_wait_command(sc, cm) != 0) {
1887 		device_printf(sc->mfi_dev,
1888 		    "Failed to get logical drive: %d\n", id);
1889 		free(ld_info, M_MFIBUF);
1890 		return (0);
1891 	}
1892 	if (ld_info->ld_config.params.isSSCD != 1)
1893 		mfi_add_ld_complete(cm);
1894 	else {
1895 		mfi_release_command(cm);
1896 		if (ld_info)		/* SSCD drives ld_info free here */
1897 			free(ld_info, M_MFIBUF);
1898 	}
1899 	return (0);
1900 }
1901 
/*
 * Completion handler for the LD_GET_INFO command issued by
 * mfi_add_ld(): releases the command and, on success, creates the
 * "mfid" child device, handing ownership of ld_info to it via ivars.
 * Runs with the io lock held; drops and reacquires it around the
 * bus-topology work.
 */
static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	/*
	 * Aborted map-sync or a failed DCMD: nobody will consume
	 * ld_info, so free it here.
	 */
	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
		free(ld_info, M_MFIBUF);
		wakeup(&sc->mfi_map_sync_cm);
		mfi_release_command(cm);
		return;
	}
	/*
	 * NOTE(review): the wakeup mirrors the error path above —
	 * presumably a map-sync waiter piggy-backs on this completion;
	 * confirm against the map-sync code.
	 */
	wakeup(&sc->mfi_map_sync_cm);
	mfi_release_command(cm);

	/* The topo lock must not be taken while holding the io mutex. */
	mtx_unlock(&sc->mfi_io_lock);
	bus_topo_lock();
	if ((child = device_add_child(sc->mfi_dev, "mfid",
	    DEVICE_UNIT_ANY)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		free(ld_info, M_MFIBUF);
		bus_topo_unlock();
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	/* The child takes ownership of ld_info through its ivars. */
	device_set_ivars(child, ld_info);
	device_set_desc(child, "MFI Logical Disk");
	bus_attach_children(sc->mfi_dev);
	bus_topo_unlock();
	mtx_lock(&sc->mfi_io_lock);
}
1940 
mfi_add_sys_pd(struct mfi_softc * sc,int id)1941 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1942 {
1943 	struct mfi_command *cm;
1944 	struct mfi_dcmd_frame *dcmd = NULL;
1945 	struct mfi_pd_info *pd_info = NULL;
1946 	struct mfi_system_pending *syspd_pend;
1947 	int error;
1948 
1949 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1950 
1951 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1952 	if (syspd_pend != NULL) {
1953 		syspd_pend->pd_id = id;
1954 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1955 	}
1956 
1957 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1958 		(void **)&pd_info, sizeof(*pd_info));
1959 	if (error) {
1960 		device_printf(sc->mfi_dev,
1961 		    "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1962 		    error);
1963 		if (pd_info)
1964 			free(pd_info, M_MFIBUF);
1965 		return (error);
1966 	}
1967 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1968 	dcmd = &cm->cm_frame->dcmd;
1969 	dcmd->mbox[0]=id;
1970 	dcmd->header.scsi_status = 0;
1971 	dcmd->header.pad0 = 0;
1972 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1973 		device_printf(sc->mfi_dev,
1974 		    "Failed to get physical drive info %d\n", id);
1975 		free(pd_info, M_MFIBUF);
1976 		mfi_release_command(cm);
1977 		return (error);
1978 	}
1979 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1980 	    BUS_DMASYNC_POSTREAD);
1981 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1982 	mfi_add_sys_pd_complete(cm);
1983 	return (0);
1984 }
1985 
/*
 * Completion handler for the PD_GET_INFO command issued by
 * mfi_add_sys_pd(): validates that the drive really is a SYSTEM
 * (JBOD) PD and, if so, creates the "mfisyspd" child device, handing
 * ownership of pd_info to it via ivars.  Runs with the io lock held;
 * drops and reacquires it around the bus-topology work.
 */
static void
mfi_add_sys_pd_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_pd_info *pd_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	pd_info = cm->cm_private;

	/* DCMD failed: nobody will consume pd_info, so free it here. */
	if (hdr->cmd_status != MFI_STAT_OK) {
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	/* Only drives in the SYSTEM state are exposed as mfisyspd. */
	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
		    pd_info->ref.v.device_id);
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	/* The topo lock must not be taken while holding the io mutex. */
	mtx_unlock(&sc->mfi_io_lock);
	bus_topo_lock();
	if ((child = device_add_child(sc->mfi_dev, "mfisyspd",
	    DEVICE_UNIT_ANY)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add system pd\n");
		free(pd_info, M_MFIBUF);
		bus_topo_unlock();
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	/* The child takes ownership of pd_info through its ivars. */
	device_set_ivars(child, pd_info);
	device_set_desc(child, "MFI System PD");
	bus_attach_children(sc->mfi_dev);
	bus_topo_unlock();
	mtx_lock(&sc->mfi_io_lock);
}
2029 
2030 static struct mfi_command *
mfi_bio_command(struct mfi_softc * sc)2031 mfi_bio_command(struct mfi_softc *sc)
2032 {
2033 	struct bio *bio;
2034 	struct mfi_command *cm = NULL;
2035 
2036 	/*reserving two commands to avoid starvation for IOCTL*/
2037 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2038 		return (NULL);
2039 	}
2040 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2041 		return (NULL);
2042 	}
2043 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2044 		cm = mfi_build_ldio(sc, bio);
2045 	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2046 		cm = mfi_build_syspdio(sc, bio);
2047 	}
2048 	if (!cm)
2049 	    mfi_enqueue_bio(sc, bio);
2050 	return cm;
2051 }
2052 
2053 /*
2054  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2055  */
2056 
2057 int
mfi_build_cdb(int readop,uint8_t byte2,u_int64_t lba,u_int32_t block_count,uint8_t * cdb)2058 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2059 {
2060 	int cdb_len;
2061 
2062 	if (((lba & 0x1fffff) == lba)
2063          && ((block_count & 0xff) == block_count)
2064          && (byte2 == 0)) {
2065 		/* We can fit in a 6 byte cdb */
2066 		struct scsi_rw_6 *scsi_cmd;
2067 
2068 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2069 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2070 		scsi_ulto3b(lba, scsi_cmd->addr);
2071 		scsi_cmd->length = block_count & 0xff;
2072 		scsi_cmd->control = 0;
2073 		cdb_len = sizeof(*scsi_cmd);
2074 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2075 		/* Need a 10 byte CDB */
2076 		struct scsi_rw_10 *scsi_cmd;
2077 
2078 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2079 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2080 		scsi_cmd->byte2 = byte2;
2081 		scsi_ulto4b(lba, scsi_cmd->addr);
2082 		scsi_cmd->reserved = 0;
2083 		scsi_ulto2b(block_count, scsi_cmd->length);
2084 		scsi_cmd->control = 0;
2085 		cdb_len = sizeof(*scsi_cmd);
2086 	} else if (((block_count & 0xffffffff) == block_count) &&
2087 	    ((lba & 0xffffffff) == lba)) {
2088 		/* Block count is too big for 10 byte CDB use a 12 byte CDB */
2089 		struct scsi_rw_12 *scsi_cmd;
2090 
2091 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2092 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2093 		scsi_cmd->byte2 = byte2;
2094 		scsi_ulto4b(lba, scsi_cmd->addr);
2095 		scsi_cmd->reserved = 0;
2096 		scsi_ulto4b(block_count, scsi_cmd->length);
2097 		scsi_cmd->control = 0;
2098 		cdb_len = sizeof(*scsi_cmd);
2099 	} else {
2100 		/*
2101 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2102 		 * than 2^32
2103 		 */
2104 		struct scsi_rw_16 *scsi_cmd;
2105 
2106 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2107 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2108 		scsi_cmd->byte2 = byte2;
2109 		scsi_u64to8b(lba, scsi_cmd->addr);
2110 		scsi_cmd->reserved = 0;
2111 		scsi_ulto4b(block_count, scsi_cmd->length);
2112 		scsi_cmd->control = 0;
2113 		cdb_len = sizeof(*scsi_cmd);
2114 	}
2115 
2116 	return cdb_len;
2117 }
2118 
2119 extern char *unmapped_buf;
2120 
/*
 * Build a SCSI pass-through command for I/O to a system (JBOD)
 * physical drive from the given bio.  The data buffer is mapped later
 * via bus_dmamap_load_bio() (cm_data is the unmapped_buf sentinel).
 * Returns the command, or NULL if none is free or the bio op is
 * unsupported (in which case the bio is completed with EOPNOTSUPP).
 */
static struct mfi_command *
mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	uint32_t context = 0;
	int flags = 0, blkcount = 0, readop;
	uint8_t cdb_len;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL)
	    return (NULL);

	/* Zero out the MFI frame, preserving the context tag */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
	switch (bio->bio_cmd) {
	case BIO_READ:
		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
		readop = 1;
		break;
	case BIO_WRITE:
		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
		readop = 0;
		break;
	default:
		/* TODO: what about BIO_DELETE??? */
		biofinish(bio, NULL, EOPNOTSUPP);
		mfi_enqueue_free(cm);
		return (NULL);
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
	/* Fill the LBA and Transfer length in CDB */
	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
	    pass->cdb);
	pass->header.target_id = (uintptr_t)bio->bio_driver1;
	pass->header.lun_id = 0;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = bio->bio_bcount;
	pass->header.cdb_len = cdb_len;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = unmapped_buf;
	cm->cm_len = bio->bio_bcount;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = flags;

	return (cm);
}
2183 
2184 static struct mfi_command *
mfi_build_ldio(struct mfi_softc * sc,struct bio * bio)2185 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2186 {
2187 	struct mfi_io_frame *io;
2188 	struct mfi_command *cm;
2189 	int flags;
2190 	uint32_t blkcount;
2191 	uint32_t context = 0;
2192 
2193 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2194 
2195 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2196 	    return (NULL);
2197 
2198 	/* Zero out the MFI frame */
2199 	context = cm->cm_frame->header.context;
2200 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2201 	cm->cm_frame->header.context = context;
2202 	io = &cm->cm_frame->io;
2203 	switch (bio->bio_cmd) {
2204 	case BIO_READ:
2205 		io->header.cmd = MFI_CMD_LD_READ;
2206 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2207 		break;
2208 	case BIO_WRITE:
2209 		io->header.cmd = MFI_CMD_LD_WRITE;
2210 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2211 		break;
2212 	default:
2213 		/* TODO: what about BIO_DELETE??? */
2214 		biofinish(bio, NULL, EOPNOTSUPP);
2215 		mfi_enqueue_free(cm);
2216 		return (NULL);
2217 	}
2218 
2219 	/* Cheat with the sector length to avoid a non-constant division */
2220 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2221 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2222 	io->header.timeout = 0;
2223 	io->header.flags = 0;
2224 	io->header.scsi_status = 0;
2225 	io->header.sense_len = MFI_SENSE_LEN;
2226 	io->header.data_len = blkcount;
2227 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2228 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2229 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2230 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2231 	cm->cm_complete = mfi_bio_complete;
2232 	cm->cm_private = bio;
2233 	cm->cm_data = unmapped_buf;
2234 	cm->cm_len = bio->bio_bcount;
2235 	cm->cm_sg = &io->sgl;
2236 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2237 	cm->cm_flags = flags;
2238 
2239 	return (cm);
2240 }
2241 
2242 static void
mfi_bio_complete(struct mfi_command * cm)2243 mfi_bio_complete(struct mfi_command *cm)
2244 {
2245 	struct bio *bio;
2246 	struct mfi_frame_header *hdr;
2247 	struct mfi_softc *sc;
2248 
2249 	bio = cm->cm_private;
2250 	hdr = &cm->cm_frame->header;
2251 	sc = cm->cm_sc;
2252 
2253 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2254 		bio->bio_flags |= BIO_ERROR;
2255 		bio->bio_error = EIO;
2256 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2257 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2258 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2259 	} else if (cm->cm_error != 0) {
2260 		bio->bio_flags |= BIO_ERROR;
2261 		bio->bio_error = cm->cm_error;
2262 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2263 		    cm, cm->cm_error);
2264 	}
2265 
2266 	mfi_release_command(cm);
2267 	mfi_disk_complete(bio);
2268 }
2269 
2270 void
mfi_startio(struct mfi_softc * sc)2271 mfi_startio(struct mfi_softc *sc)
2272 {
2273 	struct mfi_command *cm;
2274 	struct ccb_hdr *ccbh;
2275 
2276 	for (;;) {
2277 		/* Don't bother if we're short on resources */
2278 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2279 			break;
2280 
2281 		/* Try a command that has already been prepared */
2282 		cm = mfi_dequeue_ready(sc);
2283 
2284 		if (cm == NULL) {
2285 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2286 				cm = sc->mfi_cam_start(ccbh);
2287 		}
2288 
2289 		/* Nope, so look for work on the bioq */
2290 		if (cm == NULL)
2291 			cm = mfi_bio_command(sc);
2292 
2293 		/* No work available, so exit */
2294 		if (cm == NULL)
2295 			break;
2296 
2297 		/* Send the command to the controller */
2298 		if (mfi_mapcmd(sc, cm) != 0) {
2299 			device_printf(sc->mfi_dev, "Failed to startio\n");
2300 			mfi_requeue_ready(cm);
2301 			break;
2302 		}
2303 	}
2304 }
2305 
/*
 * Map a command's data buffer for DMA (choosing the CCB, bio, or
 * plain-buffer loader based on cm_flags) and submit it; commands with
 * no data, and STP frames whose data was mapped elsewhere, are sent
 * directly.  The DMA callback (mfi_data_cb) builds the SGL and calls
 * mfi_send_frame.  Returns 0 on success or submission deferral, else
 * an errno.  Called with the io lock held.
 */
int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error, polled;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
		/* Polled commands must not sleep waiting for DMA resources. */
		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
		if (cm->cm_flags & MFI_CMD_CCB)
			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
			    polled);
		else if (cm->cm_flags & MFI_CMD_BIO)
			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
			    polled);
		else
			error = bus_dmamap_load(sc->mfi_buffer_dmat,
			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
			    mfi_data_cb, cm, polled);
		if (error == EINPROGRESS) {
			/*
			 * DMA resources are exhausted; the callback will run
			 * later.  Freeze the queues until then.
			 */
			sc->mfi_flags |= MFI_FLAGS_QFRZN;
			return (0);
		}
	} else {
		error = mfi_send_frame(sc, cm);
	}

	return (error);
}
2337 
/*
 * busdma load callback: build the frame's scatter/gather list from
 * the DMA segments, set transfer-direction flags, compute the
 * compound-frame size, and submit the command.  On any error the
 * command is completed with cm_error set.
 */
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, j, first, dir;
	int sge_size, locked;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	/*
	 * We need to check if we have the lock as this is async
	 * callback so even though our caller mfi_mapcmd asserts
	 * it has the lock, there is no guarantee that hasn't been
	 * dropped if bus_dmamap_load returned prior to our
	 * completion.
	 */
	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
		mtx_lock(&sc->mfi_io_lock);

	if (error) {
		printf("error %d in callback\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
		goto out;
	}
	/* Use IEEE sgl only for IO's on a SKINNY controller
	 * For other commands on a SKINNY controller use either
	 * sg32 or sg64 based on the sizeof(bus_addr_t).
	 * Also calculate the total frame size based on the type
	 * of SGL used.
	 */
	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg_skinny[i].addr = segs[i].ds_addr;
			sgl->sg_skinny[i].len = segs[i].ds_len;
			sgl->sg_skinny[i].flag = 0;
		}
		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
		sge_size = sizeof(struct mfi_sg_skinny);
		hdr->sg_count = nsegs;
	} else {
		j = 0;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/*
			 * STP: the first cm_stp_len bytes of the buffer get
			 * their own leading SG entry; the first data entry
			 * below is then offset past them.
			 */
			first = cm->cm_stp_len;
			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
				sgl->sg32[j].addr = segs[0].ds_addr;
				sgl->sg32[j++].len = first;
			} else {
				sgl->sg64[j].addr = segs[0].ds_addr;
				sgl->sg64[j++].len = first;
			}
		} else
			first = 0;
		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
			for (i = 0; i < nsegs; i++) {
				sgl->sg32[j].addr = segs[i].ds_addr + first;
				sgl->sg32[j++].len = segs[i].ds_len - first;
				first = 0;
			}
		} else {
			for (i = 0; i < nsegs; i++) {
				sgl->sg64[j].addr = segs[i].ds_addr + first;
				sgl->sg64[j++].len = segs[i].ds_len - first;
				first = 0;
			}
			hdr->flags |= MFI_FRAME_SGL64;
		}
		hdr->sg_count = j;
		sge_size = sc->mfi_sge_size;
	}

	/* Tell the hardware (and busdma) which way the data flows. */
	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sge_size * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

	if ((error = mfi_send_frame(sc, cm)) != 0) {
		printf("error %d in callback from mfi_send_frame\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
		goto out;
	}

out:
	/* leave the lock in the state we found it */
	if (locked == 0)
		mtx_unlock(&sc->mfi_io_lock);

	return;
}
2453 
2454 static int
mfi_send_frame(struct mfi_softc * sc,struct mfi_command * cm)2455 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2456 {
2457 	int error;
2458 
2459 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2460 
2461 	if (sc->MFA_enabled)
2462 		error = mfi_tbolt_send_frame(sc, cm);
2463 	else
2464 		error = mfi_std_send_frame(sc, cm);
2465 
2466 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2467 		mfi_remove_busy(cm);
2468 
2469 	return (error);
2470 }
2471 
/*
 * Submit a frame to a standard (non-Thunderbolt) controller.  Polled
 * commands are busy-waited here for up to mfi_polled_cmd_timeout
 * seconds; other commands are queued busy and complete via interrupt.
 * Returns 0, or ETIMEDOUT if a polled command never completed.
 */
static int
mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	int tm = mfi_polled_cmd_timeout * 1000;	/* remaining wait, in ms */

	hdr = &cm->cm_frame->header;

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {
		/* Sentinel status lets the poll loop detect completion. */
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	/*
	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the least 6 bits as zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used as a prefetching
	 * hint for the hardware to predict how many frames need to be
	 * fetched across the bus.  If a command has more than 8 frames
	 * then the 3 bits are set to 0x7 and the firmware uses other
	 * information in the command to determine the total amount to fetch.
	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
	 * is enough for both 32bit and 64bit systems.
	 */
	if (cm->cm_extra_frames > 7)
		cm->cm_extra_frames = 7;

	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return (0);

	/* This is a polled command, so busy-wait for it to complete. */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
	}

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}

	return (0);
}
2524 
2525 void
mfi_complete(struct mfi_softc * sc,struct mfi_command * cm)2526 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2527 {
2528 	int dir;
2529 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2530 
2531 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2532 		dir = 0;
2533 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2534 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2535 			dir |= BUS_DMASYNC_POSTREAD;
2536 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2537 			dir |= BUS_DMASYNC_POSTWRITE;
2538 
2539 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2540 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2541 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2542 	}
2543 
2544 	cm->cm_flags |= MFI_CMD_COMPLETED;
2545 
2546 	if (cm->cm_complete != NULL)
2547 		cm->cm_complete(cm);
2548 	else
2549 		wakeup(cm);
2550 }
2551 
/*
 * Issue an MFI_CMD_ABORT for *cm_abort and wait up to 5 x 5 seconds
 * for the victim's completion to clear *cm_abort; if it never clears,
 * force its completion handler to run.  Called without the io lock.
 */
static int
mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;
	int i = 0, error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	abort->header.scsi_status = 0;
	/* Identify the victim by context tag and physical frame address. */
	abort->abort_context = (*cm_abort)->cm_frame->header.context;
	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
	abort->abort_mfi_addr_hi =
		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to abort command\n");
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	/*
	 * NOTE(review): presumably the victim's completion path NULLs
	 * *cm_abort and issues the wakeup on cm_abort — confirm against
	 * the caller that owns the pointer.
	 */
	while (i < 5 && *cm_abort != NULL) {
		tsleep(cm_abort, 0, "mfiabort",
		    5 * hz);
		i++;
	}
	if (*cm_abort != NULL) {
		/* Force a complete if command didn't abort */
		mtx_lock(&sc->mfi_io_lock);
		(*cm_abort)->cm_complete(*cm_abort);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (error);
}
2601 
2602 int
mfi_dump_blocks(struct mfi_softc * sc,int id,uint64_t lba,void * virt,int len)2603 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2604      int len)
2605 {
2606 	struct mfi_command *cm;
2607 	struct mfi_io_frame *io;
2608 	int error;
2609 	uint32_t context = 0;
2610 
2611 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2612 		return (EBUSY);
2613 
2614 	/* Zero out the MFI frame */
2615 	context = cm->cm_frame->header.context;
2616 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2617 	cm->cm_frame->header.context = context;
2618 
2619 	io = &cm->cm_frame->io;
2620 	io->header.cmd = MFI_CMD_LD_WRITE;
2621 	io->header.target_id = id;
2622 	io->header.timeout = 0;
2623 	io->header.flags = 0;
2624 	io->header.scsi_status = 0;
2625 	io->header.sense_len = MFI_SENSE_LEN;
2626 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2627 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2628 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2629 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2630 	io->lba_lo = lba & 0xffffffff;
2631 	cm->cm_data = virt;
2632 	cm->cm_len = len;
2633 	cm->cm_sg = &io->sgl;
2634 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2635 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2636 
2637 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2638 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2639 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2640 	    BUS_DMASYNC_POSTWRITE);
2641 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2642 	mfi_release_command(cm);
2643 
2644 	return (error);
2645 }
2646 
/*
 * Crash-dump path for system (JBOD) physical drives: write 'len'
 * bytes from 'virt' to drive 'id' at sector 'lba' using a polled
 * SCSI pass-through WRITE.  Returns 0 or an errno value.
 *
 * NOTE(review): unlike mfi_dump_blocks() this does not bzero the
 * whole frame or preserve the context tag, only the CDB — confirm
 * whether the frame is guaranteed clean on the dump path.
 */
int
mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	int error, readop, cdb_len;
	uint32_t blkcount;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;

	/* Always a write on the dump path. */
	readop = 0;
	blkcount = howmany(len, MFI_SECTOR_LEN);
	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
	pass->header.target_id = id;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = len;
	pass->header.cdb_len = cdb_len;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed dump blocks\n");
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
2690 
2691 static int
mfi_open(struct cdev * dev,int flags,int fmt,struct thread * td)2692 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2693 {
2694 	struct mfi_softc *sc;
2695 	int error;
2696 
2697 	sc = dev->si_drv1;
2698 
2699 	mtx_lock(&sc->mfi_io_lock);
2700 	if (sc->mfi_detaching)
2701 		error = ENXIO;
2702 	else {
2703 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2704 		error = 0;
2705 	}
2706 	mtx_unlock(&sc->mfi_io_lock);
2707 
2708 	return (error);
2709 }
2710 
2711 static int
mfi_close(struct cdev * dev,int flags,int fmt,struct thread * td)2712 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2713 {
2714 	struct mfi_softc *sc;
2715 	struct mfi_aen *mfi_aen_entry, *tmp;
2716 
2717 	sc = dev->si_drv1;
2718 
2719 	mtx_lock(&sc->mfi_io_lock);
2720 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2721 
2722 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2723 		if (mfi_aen_entry->p == curproc) {
2724 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2725 			    aen_link);
2726 			free(mfi_aen_entry, M_MFIBUF);
2727 		}
2728 	}
2729 	mtx_unlock(&sc->mfi_io_lock);
2730 	return (0);
2731 }
2732 
2733 static int
mfi_config_lock(struct mfi_softc * sc,uint32_t opcode)2734 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2735 {
2736 
2737 	switch (opcode) {
2738 	case MFI_DCMD_LD_DELETE:
2739 	case MFI_DCMD_CFG_ADD:
2740 	case MFI_DCMD_CFG_CLEAR:
2741 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2742 		sx_xlock(&sc->mfi_config_lock);
2743 		return (1);
2744 	default:
2745 		return (0);
2746 	}
2747 }
2748 
/*
 * Release the configuration lock if mfi_config_lock() reported that
 * it was taken ('locked' is its return value).
 */
static void
mfi_config_unlock(struct mfi_softc *sc, int locked)
{

	if (locked)
		sx_xunlock(&sc->mfi_config_lock);
}
2756 
2757 /*
2758  * Perform pre-issue checks on commands from userland and possibly veto
2759  * them.
2760  */
static int
mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ld2;
	int error;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = 0;
	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		/* Disable the target mfid disk before deleting the volume. */
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		if (ld == NULL)
			error = ENOENT;
		else
			error = mfi_disk_disable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		/* Disable every mfid disk; roll back on partial failure. */
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			error = mfi_disk_disable(ld);
			if (error)
				break;
		}
		if (error) {
			/* Re-enable the disks disabled before 'ld' failed. */
			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
				if (ld2 == ld)
					break;
				mfi_disk_enable(ld2);
			}
		}
		break;
	case MFI_DCMD_PD_STATE_SET:
		/* mbox[0] holds the PD id, mbox[2] the requested state. */
		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		}
		else
			break;
		/* Disable the mfisyspd disk before it leaves SYSTEM state. */
		if (syspd)
			error = mfi_syspd_disable(syspd);
		break;
	default:
		break;
	}
	return (error);
}
2816 
2817 /* Perform post-issue checks on commands from userland. */
static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ldn;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		/* On success detach the mfid child; otherwise re-enable it. */
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		KASSERT(ld != NULL, ("volume disappeared"));
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			/* Child deletion needs the topo lock, not the io lock. */
			mtx_unlock(&sc->mfi_io_lock);
			bus_topo_lock();
			device_delete_child(sc->mfi_dev, ld->ld_dev);
			bus_topo_unlock();
			mtx_lock(&sc->mfi_io_lock);
		} else
			mfi_disk_enable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		/* On success detach every mfid child; else re-enable all. */
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			mtx_unlock(&sc->mfi_io_lock);
			bus_topo_lock();
			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
				device_delete_child(sc->mfi_dev, ld->ld_dev);
			}
			bus_topo_unlock();
			mtx_lock(&sc->mfi_io_lock);
		} else {
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
				mfi_disk_enable(ld);
		}
		break;
	case MFI_DCMD_CFG_ADD:
		/* New volumes may exist now; rescan for them. */
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_PD_STATE_SET:
		/* mbox[0] holds the PD id, mbox[2] the requested state. */
		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		}
		else
			break;
		/* If the transition fails then enable the syspd again */
		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
			mfi_syspd_enable(syspd);
		break;
	}
}
2879 
2880 static int
mfi_check_for_sscd(struct mfi_softc * sc,struct mfi_command * cm)2881 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2882 {
2883 	struct mfi_config_data *conf_data;
2884 	struct mfi_command *ld_cm = NULL;
2885 	struct mfi_ld_info *ld_info = NULL;
2886 	struct mfi_ld_config *ld;
2887 	char *p;
2888 	int error = 0;
2889 
2890 	conf_data = (struct mfi_config_data *)cm->cm_data;
2891 
2892 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2893 		p = (char *)conf_data->array;
2894 		p += conf_data->array_size * conf_data->array_count;
2895 		ld = (struct mfi_ld_config *)p;
2896 		if (ld->params.isSSCD == 1)
2897 			error = 1;
2898 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2899 		error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2900 		    (void **)&ld_info, sizeof(*ld_info));
2901 		if (error) {
2902 			device_printf(sc->mfi_dev, "Failed to allocate"
2903 			    "MFI_DCMD_LD_GET_INFO %d", error);
2904 			if (ld_info)
2905 				free(ld_info, M_MFIBUF);
2906 			return 0;
2907 		}
2908 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2909 		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2910 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2911 		if (mfi_wait_command(sc, ld_cm) != 0) {
2912 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2913 			mfi_release_command(ld_cm);
2914 			free(ld_info, M_MFIBUF);
2915 			return 0;
2916 		}
2917 
2918 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2919 			free(ld_info, M_MFIBUF);
2920 			mfi_release_command(ld_cm);
2921 			return 0;
2922 		}
2923 		else
2924 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2925 
2926 		if (ld_info->ld_config.params.isSSCD == 1)
2927 			error = 1;
2928 
2929 		mfi_release_command(ld_cm);
2930 		free(ld_info, M_MFIBUF);
2931 	}
2932 	return error;
2933 }
2934 
static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
{
	uint8_t i;
	struct mfi_ioc_packet *ioc;
	ioc = (struct mfi_ioc_packet *)arg;
	int sge_size, error;
	struct megasas_sge *kern_sge;

	/*
	 * Prepare an MFI_CMD_STP frame: for each user SGL entry allocate a
	 * DMA-able kernel bounce buffer, copy the user data in, and fill
	 * both the megasas SGE array inside the frame and the stp.sgl
	 * entries with the bus addresses.  The per-entry DMA tag/map/
	 * buffer triples are stashed in the softc and torn down by the
	 * MFI_CMD_STP cleanup in mfi_ioctl()'s `out:` path, including on
	 * the error returns below.
	 */
	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
	/* The SGE array lives inside the frame at the user-given offset. */
	kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;

	if (sizeof(bus_addr_t) == 8) {
		/* 64-bit bus addresses need 64-bit SGEs and extra frames. */
		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
		cm->cm_extra_frames = 2;
		sge_size = sizeof(struct mfi_sg64);
	} else {
		cm->cm_extra_frames =  (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
		sge_size = sizeof(struct mfi_sg32);
	}

	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
	for (i = 0; i < ioc->mfi_sge_count; i++) {
			if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
			1, 0,			/* algnmnt, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			ioc->mfi_sgl[i].iov_len,/* maxsize */
			2,			/* nsegments */
			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&sc->mfi_kbuff_arr_dmat[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
		    &sc->mfi_kbuff_arr_dmamap[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
			return (ENOMEM);
		}

		/* mfi_addr_cb stores the single segment's bus address. */
		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
		    &sc->mfi_kbuff_arr_busaddr[i], 0);

		if (!sc->kbuff_arr[i]) {
			device_printf(sc->mfi_dev,
			    "Could not allocate memory for kbuff_arr info\n");
			return -1;
		}
		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;

		/* Mirror the SGE into the frame's native 32/64-bit SGL. */
		if (sizeof(bus_addr_t) == 8) {
			cm->cm_frame->stp.sgl.sg64[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg64[i].len =
			    ioc->mfi_sgl[i].iov_len;
		} else {
			cm->cm_frame->stp.sgl.sg32[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg32[i].len =
			    ioc->mfi_sgl[i].iov_len;
		}

		error = copyin(ioc->mfi_sgl[i].iov_base,
		    sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len);
		if (error != 0) {
			device_printf(sc->mfi_dev, "Copy in failed\n");
			return error;
		}
	}

	/* Mapping was done here, not by the generic SG path. */
	cm->cm_flags |=MFI_CMD_MAPPED;
	return 0;
}
3020 
/*
 * Execute a userland DCMD passthrough (MFIIO_PASSTHRU): copy in the
 * optional data buffer, send the frame to the firmware, then copy the
 * completed frame and buffer back out.  Returns 0 or an errno.
 */
static int
mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *ioc_buf = NULL;
	uint32_t context;
	int error = 0, locked;

	if (ioc->buf_size > 0) {
		/* Cap user data buffers at 1 MiB. */
		if (ioc->buf_size > 1024 * 1024)
			return (ENOMEM);
		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
		if (error) {
			device_printf(sc->mfi_dev, "failed to copyin\n");
			free(ioc_buf, M_MFIBUF);
			return (error);
		}
	}

	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);

	mtx_lock(&sc->mfi_io_lock);
	/* The function's own address merely serves as a unique wchan. */
	while ((cm = mfi_dequeue_free(sc)) == NULL)
		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);

	/* Save context for later */
	context = cm->cm_frame->header.context;

	dcmd = &cm->cm_frame->dcmd;
	/* User frame overwrites the whole DCMD frame, context included. */
	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));

	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_data = ioc_buf;
	cm->cm_len = ioc->buf_size;

	/* restore context */
	cm->cm_frame->header.context = context;

	/* Cheat since we don't know if we're writing or reading */
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;

	error = mfi_check_command_pre(sc, cm);
	if (error)
		goto out;

	error = mfi_wait_command(sc, cm);
	if (error) {
		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
		goto out;
	}
	/* Return the completed frame (status etc.) to the caller. */
	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
	mfi_check_command_post(sc, cm);
out:
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	mfi_config_unlock(sc, locked);
	/*
	 * NOTE(review): the copyout below runs even when the command
	 * failed, and its result replaces any earlier error value.
	 */
	if (ioc->buf_size > 0)
		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
	if (ioc_buf)
		free(ioc_buf, M_MFIBUF);
	return (error);
}
3086 
/*
 * Main ioctl entry point for the mfi control device.  Handles queue
 * statistics, disk queries, raw firmware-frame passthrough (native and
 * 32-bit compat), AEN registration, DCMD passthrough, and redirection
 * of the Linux ioctl shims.
 */
static int
mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mfi_softc *sc;
	union mfi_statrequest *ms;
	struct mfi_ioc_packet *ioc;
#ifdef COMPAT_FREEBSD32
	struct mfi_ioc_packet32 *ioc32;
#endif
	struct mfi_ioc_aen *aen;
	struct mfi_command *cm = NULL;
	uint32_t context = 0;
	union mfi_sense_ptr sense_ptr;
	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
	size_t len;
	int i, res;
	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
#ifdef COMPAT_FREEBSD32
	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
	struct mfi_ioc_passthru iop_swab;
#endif
	int error, locked;
	sc = dev->si_drv1;
	error = 0;

	/* Refuse ioctls while the adapter is resetting or wedged. */
	if (sc->adpreset)
		return EBUSY;

	if (sc->hw_crit_error)
		return EBUSY;

	if (sc->issuepend_done == 0)
		return EBUSY;

	switch (cmd) {
	case MFIIO_STATS:
		/* Copy out one of the driver's command-queue statistics. */
		ms = (union mfi_statrequest *)arg;
		switch (ms->ms_item) {
		case MFIQ_FREE:
		case MFIQ_BIO:
		case MFIQ_READY:
		case MFIQ_BUSY:
			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
			    sizeof(struct mfi_qstat));
			break;
		default:
			error = ENOIOCTL;
			break;
		}
		break;
	case MFIIO_QUERY_DISK:
	{
		/* Report presence/open state/devname of a logical disk. */
		struct mfi_query_disk *qd;
		struct mfi_disk *ld;

		qd = (struct mfi_query_disk *)arg;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == qd->array_id)
				break;
		}
		if (ld == NULL) {
			qd->present = 0;
			mtx_unlock(&sc->mfi_io_lock);
			return (0);
		}
		qd->present = 1;
		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
			qd->open = 1;
		bzero(qd->devname, SPECNAMELEN + 1);
		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
		mtx_unlock(&sc->mfi_io_lock);
		break;
	}
	case MFI_CMD:
#ifdef COMPAT_FREEBSD32
	case MFI_CMD32:
#endif
		{
		devclass_t devclass;
		ioc = (struct mfi_ioc_packet *)arg;
		int adapter;

		/*
		 * Management tools may address any adapter through the
		 * first unit; redirect to the requested softc.
		 * NOTE(review): devclass_get_softc() can return NULL for a
		 * bogus adapter number and sc is dereferenced right after -
		 * confirm callers cannot reach this with an invalid unit.
		 */
		adapter = ioc->mfi_adapter_no;
		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
			devclass = devclass_find("mfi");
			sc = devclass_get_softc(devclass, adapter);
		}
		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;
		cm->cm_frame->header.context = cm->cm_index;

		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
		    2 * MEGAMFI_FRAME_SIZE);
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (ioc->mfi_sge_count) {
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
		}
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		/* Legacy app shim */
		if (cm->cm_flags == 0)
			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/* STP adds the first SGL entry to the data length. */
#ifdef COMPAT_FREEBSD32
			if (cmd == MFI_CMD) {
#endif
				/* Native */
				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
#ifdef COMPAT_FREEBSD32
			} else {
				/* 32bit on 64bit */
				ioc32 = (struct mfi_ioc_packet32 *)ioc;
				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
			}
#endif
			cm->cm_len += cm->cm_stp_len;
		}
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/* STP builds its own DMA-mapped SG list. */
			res = mfi_stp_cmd(sc, cm, arg);
			if (res != 0)
				goto out;
		} else {
			/* Gather user SGL entries into the bounce buffer. */
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
					if (cmd == MFI_CMD) {
#endif
						/* Native */
						addr = ioc->mfi_sgl[i].iov_base;
						len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
					} else {
						/* 32bit on 64bit */
						ioc32 = (struct mfi_ioc_packet32 *)ioc;
						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
						len = ioc32->mfi_sgl[i].iov_len;
					}
#endif
					error = copyin(addr, temp, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy in failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc,
			     cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			/* Point the frame at the per-command sense buffer. */
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}
		mtx_lock(&sc->mfi_io_lock);
		/* SSCD operations bypass the pre/post disk bookkeeping. */
		skip_pre_post = mfi_check_for_sscd (sc, cm);
		if (!skip_pre_post) {
			error = mfi_check_command_pre(sc, cm);
			if (error) {
				mtx_unlock(&sc->mfi_io_lock);
				goto out;
			}
		}
		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}
		if (!skip_pre_post) {
			mfi_check_command_post(sc, cm);
		}
		mtx_unlock(&sc->mfi_io_lock);

		/* Scatter results back to the user SGL (non-STP only). */
		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
					if (cmd == MFI_CMD) {
#endif
						/* Native */
						addr = ioc->mfi_sgl[i].iov_base;
						len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
					} else {
						/* 32bit on 64bit */
						ioc32 = (struct mfi_ioc_packet32 *)ioc;
						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
						len = ioc32->mfi_sgl[i].iov_len;
					}
#endif
					error = copyout(temp, addr, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy out failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (ioc->mfi_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef COMPAT_FREEBSD32
			if (cmd != MFI_CMD) {
				/*
				 * not 64bit native so zero out any address
				 * over 32bit */
				sense_ptr.addr.high = 0;
			}
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    ioc->mfi_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
out:
		/* Common cleanup for all exits of the MFI_CMD case. */
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/* Tear down the per-SGE DMA state mfi_stp_cmd built. */
			for (i = 0; i < 2; i++) {
				if (sc->kbuff_arr[i]) {
					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
						bus_dmamap_unload(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->mfi_kbuff_arr_dmamap[i]
						    );
					if (sc->kbuff_arr[i] != NULL)
						bus_dmamem_free(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->kbuff_arr[i],
						    sc->mfi_kbuff_arr_dmamap[i]
						    );
					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
						bus_dma_tag_destroy(
						    sc->mfi_kbuff_arr_dmat[i]);
				}
			}
		}
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		break;
		}
	case MFI_SET_AEN:
		aen = (struct mfi_ioc_aen *)arg;
		mtx_lock(&sc->mfi_io_lock);
		error = mfi_aen_register(sc, aen->aen_seq_num,
		    aen->aen_class_locale);
		mtx_unlock(&sc->mfi_io_lock);

		break;
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		{
			devclass_t devclass;
			struct mfi_linux_ioc_packet l_ioc;
			int adapter;

			devclass = devclass_find("mfi");
			if (devclass == NULL)
				return (ENOENT);

			error = copyin(arg, &l_ioc, sizeof(l_ioc));
			if (error)
				return (error);
			adapter = l_ioc.lioc_adapter_no;
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag, td));
			break;
		}
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		{
			devclass_t devclass;
			struct mfi_linux_ioc_aen l_aen;
			int adapter;

			devclass = devclass_find("mfi");
			if (devclass == NULL)
				return (ENOENT);

			error = copyin(arg, &l_aen, sizeof(l_aen));
			if (error)
				return (error);
			adapter = l_aen.laen_adapter_no;
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag, td));
			break;
		}
#ifdef COMPAT_FREEBSD32
	case MFIIO_PASSTHRU32:
		if (!SV_CURPROC_FLAG(SV_ILP32)) {
			error = ENOTTY;
			break;
		}
		/* Widen the 32-bit request, then share the native path. */
		iop_swab.ioc_frame	= iop32->ioc_frame;
		iop_swab.buf_size	= iop32->buf_size;
		iop_swab.buf		= PTRIN(iop32->buf);
		iop			= &iop_swab;
		/* FALLTHROUGH */
#endif
	case MFIIO_PASSTHRU:
		error = mfi_user_command(sc, iop);
#ifdef COMPAT_FREEBSD32
		if (cmd == MFIIO_PASSTHRU32)
			iop32->ioc_frame = iop_swab.ioc_frame;
#endif
		break;
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOTTY;
		break;
	}

	return (error);
}
3461 
/*
 * Back end for the Linux-compat ioctls: executes a Linux-format
 * firmware frame (MFI_LINUX_CMD_2) or registers a Linux AEN listener
 * (MFI_LINUX_SET_AEN_2).  Mirrors the native MFI_CMD path in
 * mfi_ioctl() but uses the 32-bit Linux ioctl structures.
 */
static int
mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mfi_softc *sc;
	struct mfi_linux_ioc_packet l_ioc;
	struct mfi_linux_ioc_aen l_aen;
	struct mfi_command *cm = NULL;
	struct mfi_aen *mfi_aen_entry;
	union mfi_sense_ptr sense_ptr;
	uint32_t context = 0;
	uint8_t *data = NULL, *temp;
	int i;
	int error, locked;

	sc = dev->si_drv1;
	error = 0;
	switch (cmd) {
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error != 0)
			return (error);

		/* Bound the SG count before trusting it below. */
		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
			return (EINVAL);
		}

		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (l_ioc.lioc_sge_count)
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		/* Gather the user SGL into the bounce buffer. */
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				       temp,
				       l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			/* Point the frame at the per-command sense buffer. */
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		mtx_lock(&sc->mfi_io_lock);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);

		/* Scatter results back to the user SGL. */
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
					PTRIN(l_ioc.lioc_sgl[i].iov_base),
					l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet*)arg)
                            ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
			/*
			 * only 32bit Linux support so zero out any
			 * address over 32bit
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		/* Write the completion status directly into the user frame. */
		error = copyout(&cm->cm_frame->header.cmd_status,
			&((struct mfi_linux_ioc_packet*)arg)
			->lioc_frame.hdr.cmd_status,
			1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
				      "Copy out failed\n");
			goto out;
		}

out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		/* Track the registering process so it can be signalled. */
		mfi_aen_entry->p = curproc;
		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, aen_link);
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
3657 
3658 static int
mfi_poll(struct cdev * dev,int poll_events,struct thread * td)3659 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3660 {
3661 	struct mfi_softc *sc;
3662 	int revents = 0;
3663 
3664 	sc = dev->si_drv1;
3665 
3666 	if (poll_events & (POLLIN | POLLRDNORM)) {
3667 		if (sc->mfi_aen_triggered != 0) {
3668 			revents |= poll_events & (POLLIN | POLLRDNORM);
3669 			sc->mfi_aen_triggered = 0;
3670 		}
3671 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3672 			revents |= POLLERR;
3673 		}
3674 	}
3675 
3676 	if (revents == 0) {
3677 		if (poll_events & (POLLIN | POLLRDNORM)) {
3678 			sc->mfi_poll_waiting = 1;
3679 			selrecord(td, &sc->mfi_select);
3680 		}
3681 	}
3682 
3683 	return revents;
3684 }
3685 
3686 static void
mfi_dump_all(void)3687 mfi_dump_all(void)
3688 {
3689 	struct mfi_softc *sc;
3690 	struct mfi_command *cm;
3691 	devclass_t dc;
3692 	time_t deadline;
3693 	int timedout __unused;
3694 	int i;
3695 
3696 	dc = devclass_find("mfi");
3697 	if (dc == NULL) {
3698 		printf("No mfi dev class\n");
3699 		return;
3700 	}
3701 
3702 	for (i = 0; ; i++) {
3703 		sc = devclass_get_softc(dc, i);
3704 		if (sc == NULL)
3705 			break;
3706 		device_printf(sc->mfi_dev, "Dumping\n\n");
3707 		timedout = 0;
3708 		deadline = time_uptime - mfi_cmd_timeout;
3709 		mtx_lock(&sc->mfi_io_lock);
3710 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3711 			if (cm->cm_timestamp <= deadline) {
3712 				device_printf(sc->mfi_dev,
3713 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3714 				    cm, (int)(time_uptime - cm->cm_timestamp));
3715 				MFI_PRINT_CMD(cm);
3716 				timedout++;
3717 			}
3718 		}
3719 
3720 #if 0
3721 		if (timedout)
3722 			MFI_DUMP_CMDS(sc);
3723 #endif
3724 
3725 		mtx_unlock(&sc->mfi_io_lock);
3726 	}
3727 
3728 	return;
3729 }
3730 
/*
 * Watchdog callout: runs every mfi_cmd_timeout seconds, reporting busy
 * commands that have exceeded the timeout.  Commands are deliberately
 * never force-failed here (see comment below).
 */
static void
mfi_timeout(void *data)
{
	struct mfi_softc *sc = (struct mfi_softc *)data;
	struct mfi_command *cm, *tmp;
	time_t deadline;
	int timedout __unused = 0;

	deadline = time_uptime - mfi_cmd_timeout;
	if (sc->adpreset == 0) {
		/*
		 * Give the Thunderbolt reset logic first crack; a zero
		 * return re-arms the watchdog and skips this pass.
		 * NOTE(review): semantics of mfi_tbolt_reset()'s return
		 * value are defined elsewhere - confirm before changing.
		 */
		if (!mfi_tbolt_reset(sc)) {
			callout_reset(&sc->mfi_watchdog_callout,
			    mfi_cmd_timeout * hz, mfi_timeout, sc);
			return;
		}
	}
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
		/* AEN and map-sync commands legitimately stay busy. */
		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
			continue;
		if (cm->cm_timestamp <= deadline) {
			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
				/* Reset in progress: restart the clock. */
				cm->cm_timestamp = time_uptime;
			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				     cm, (int)(time_uptime - cm->cm_timestamp)
				     );
				MFI_PRINT_CMD(cm);
				MFI_VALIDATE_CMD(sc, cm);
				/*
				 * While commands can get stuck forever we do
				 * not fail them as there is no way to tell if
				 * the controller has actually processed them
				 * or not.
				 *
				 * In addition its very likely that force
				 * failing a command here would cause a panic
				 * e.g. in UFS.
				 */
				timedout++;
			}
		}
	}

#if 0
	if (timedout)
		MFI_DUMP_CMDS(sc);
#endif

	mtx_unlock(&sc->mfi_io_lock);

	/* Re-arm for the next watchdog pass. */
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (0)
		mfi_dump_all();
	return;
}
3790