1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2006 IronPort Systems
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28 /*-
29 * Copyright (c) 2007 LSI Corp.
30 * Copyright (c) 2007 Rajesh Prabhakaran.
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54
55 #include <sys/cdefs.h>
56 #include "opt_mfi.h"
57
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysctl.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
63 #include <sys/poll.h>
64 #include <sys/selinfo.h>
65 #include <sys/bus.h>
66 #include <sys/conf.h>
67 #include <sys/eventhandler.h>
68 #include <sys/rman.h>
69 #include <sys/bio.h>
70 #include <sys/ioccom.h>
71 #include <sys/uio.h>
72 #include <sys/proc.h>
73 #include <sys/signalvar.h>
74 #include <sys/sysent.h>
75 #include <sys/taskqueue.h>
76
77 #include <machine/bus.h>
78 #include <machine/resource.h>
79
80 #include <dev/mfi/mfireg.h>
81 #include <dev/mfi/mfi_ioctl.h>
82 #include <dev/mfi/mfivar.h>
83 #include <sys/interrupt.h>
84 #include <sys/priority.h>
85
86 static int mfi_alloc_commands(struct mfi_softc *);
87 static int mfi_comms_init(struct mfi_softc *);
88 static int mfi_get_controller_info(struct mfi_softc *);
89 static int mfi_get_log_state(struct mfi_softc *,
90 struct mfi_evt_log_state **);
91 static int mfi_parse_entries(struct mfi_softc *, int, int);
92 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
93 static void mfi_startup(void *arg);
94 static void mfi_intr(void *arg);
95 static void mfi_ldprobe(struct mfi_softc *sc);
96 static void mfi_syspdprobe(struct mfi_softc *sc);
97 static void mfi_handle_evt(void *context, int pending);
98 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
99 static void mfi_aen_complete(struct mfi_command *);
100 static int mfi_add_ld(struct mfi_softc *sc, int);
101 static void mfi_add_ld_complete(struct mfi_command *);
102 static int mfi_add_sys_pd(struct mfi_softc *sc, int);
103 static void mfi_add_sys_pd_complete(struct mfi_command *);
104 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
105 static void mfi_bio_complete(struct mfi_command *);
106 static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
107 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
108 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
109 static int mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
110 static int mfi_abort(struct mfi_softc *, struct mfi_command **);
111 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
112 static void mfi_timeout(void *);
113 static int mfi_user_command(struct mfi_softc *,
114 struct mfi_ioc_passthru *);
115 static void mfi_enable_intr_xscale(struct mfi_softc *sc);
116 static void mfi_enable_intr_ppc(struct mfi_softc *sc);
117 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
118 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
119 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
120 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
121 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
122 uint32_t frame_cnt);
123 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
124 uint32_t frame_cnt);
125 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
126 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
127 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
128 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
129 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
130
131 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
132 "MFI driver parameters");
133 static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
134 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
135 0, "event message locale");
136
137 static int mfi_event_class = MFI_EVT_CLASS_INFO;
138 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
139 0, "event message class");
140
141 static int mfi_max_cmds = 128;
142 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
143 0, "Max commands limit (-1 = controller limit)");
144
145 static int mfi_detect_jbod_change = 1;
146 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
147 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
148
149 int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
150 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
151 &mfi_polled_cmd_timeout, 0,
152 "Polled command timeout - used for firmware flash etc (in seconds)");
153
154 static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
155 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
156 0, "Command timeout (in seconds)");
157
158 /* Management interface */
159 static d_open_t mfi_open;
160 static d_close_t mfi_close;
161 static d_ioctl_t mfi_ioctl;
162 static d_poll_t mfi_poll;
163
164 static struct cdevsw mfi_cdevsw = {
165 .d_version = D_VERSION,
166 .d_flags = 0,
167 .d_open = mfi_open,
168 .d_close = mfi_close,
169 .d_ioctl = mfi_ioctl,
170 .d_poll = mfi_poll,
171 .d_name = "mfi",
172 };
173
174 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
175
176 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
177 struct mfi_skinny_dma_info mfi_skinny;
178
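/*
 * Register access methods for the supported controller generations.
 * mfi_attach() selects one set based on sc->mfi_flags: xscale for
 * 1064R-class parts, ppc for the 1078/GEN2/SKINNY parts.
 */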
179 static void
180 mfi_enable_intr_xscale(struct mfi_softc *sc)
181 {
182 MFI_WRITE4(sc, MFI_OMSK, 0x01);
183 }
184
185 static void
186 mfi_enable_intr_ppc(struct mfi_softc *sc)
187 {
188 if (sc->mfi_flags & MFI_FLAGS_1078) {
189 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
190 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
191 }
192 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
193 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
194 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
195 }
196 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
197 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
198 }
199 }
200
201 static int32_t
202 mfi_read_fw_status_xscale(struct mfi_softc *sc)
203 {
204 return MFI_READ4(sc, MFI_OMSG0);
205 }
206
207 static int32_t
208 mfi_read_fw_status_ppc(struct mfi_softc *sc)
209 {
210 return MFI_READ4(sc, MFI_OSP0);
211 }
212
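/*
 * Interrupt ack helpers: return 1 when the pending status does not
 * belong to this controller (the ISR then ignores the interrupt),
 * otherwise acknowledge it by writing the status back and return 0.
 */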
213 static int
214 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
215 {
216 int32_t status;
217
218 status = MFI_READ4(sc, MFI_OSTS);
219 if ((status & MFI_OSTS_INTR_VALID) == 0)
220 return 1;
221
222 MFI_WRITE4(sc, MFI_OSTS, status);
223 return 0;
224 }
225
226 static int
227 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
228 {
229 int32_t status;
230
231 status = MFI_READ4(sc, MFI_OSTS);
232 if (sc->mfi_flags & MFI_FLAGS_1078) {
233 if (!(status & MFI_1078_RM)) {
234 return 1;
235 }
236 }
237 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
238 if (!(status & MFI_GEN2_RM)) {
239 return 1;
240 }
241 }
242 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
243 if (!(status & MFI_SKINNY_RM)) {
244 return 1;
245 }
246 }
247 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
248 MFI_WRITE4(sc, MFI_OSTS, status);
249 else
250 MFI_WRITE4(sc, MFI_ODCR0, status);
251 return 0;
252 }
253
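/*
 * Post a command to the inbound queue port.  Frames are 64-byte
 * aligned, so on xscale the low bits of the shifted address are free
 * to carry the frame count; the skinny variant presumably has a
 * 64-bit port, written here as two 32-bit halves.
 */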
254 static void
255 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
256 {
257 MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
258 }
259
260 static void
261 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
262 {
263 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
264 MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
265 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
266 } else {
267 MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
268 }
269 }
270
271 int
272 mfi_transition_firmware(struct mfi_softc *sc)
273 {
274 uint32_t fw_state, cur_state;
275 int max_wait, i;
276 uint32_t cur_abs_reg_val = 0;
277 uint32_t prev_abs_reg_val = 0;
278
279 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
280 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
281 while (fw_state != MFI_FWSTATE_READY) {
282 if (bootverbose)
283 device_printf(sc->mfi_dev, "Waiting for firmware to "
284 "become ready\n");
285 cur_state = fw_state;
286 switch (fw_state) {
287 case MFI_FWSTATE_FAULT:
288 device_printf(sc->mfi_dev, "Firmware fault\n");
289 return (ENXIO);
290 case MFI_FWSTATE_WAIT_HANDSHAKE:
291 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
292 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
293 else
294 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
295 max_wait = MFI_RESET_WAIT_TIME;
296 break;
297 case MFI_FWSTATE_OPERATIONAL:
298 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
299 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
300 else
301 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
302 max_wait = MFI_RESET_WAIT_TIME;
303 break;
304 case MFI_FWSTATE_UNDEFINED:
305 case MFI_FWSTATE_BB_INIT:
306 max_wait = MFI_RESET_WAIT_TIME;
307 break;
308 case MFI_FWSTATE_FW_INIT_2:
309 max_wait = MFI_RESET_WAIT_TIME;
310 break;
311 case MFI_FWSTATE_FW_INIT:
312 case MFI_FWSTATE_FLUSH_CACHE:
313 max_wait = MFI_RESET_WAIT_TIME;
314 break;
315 case MFI_FWSTATE_DEVICE_SCAN:
316 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
317 prev_abs_reg_val = cur_abs_reg_val;
318 break;
319 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
320 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
321 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
322 else
323 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
324 max_wait = MFI_RESET_WAIT_TIME;
325 break;
326 default:
327 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
328 fw_state);
329 return (ENXIO);
330 }
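/* Poll at 100ms granularity for up to max_wait seconds. */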
331 for (i = 0; i < (max_wait * 10); i++) {
332 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
333 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
334 if (fw_state == cur_state)
335 DELAY(100000);
336 else
337 break;
338 }
339 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
340 /* Check the device scanning progress */
341 if (prev_abs_reg_val != cur_abs_reg_val) {
342 continue;
343 }
344 }
345 if (fw_state == cur_state) {
346 device_printf(sc->mfi_dev, "Firmware stuck in state "
347 "%#x\n", fw_state);
348 return (ENXIO);
349 }
350 }
351 return (0);
352 }
353
354 static void
355 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
356 {
357 bus_addr_t *addr;
358
359 addr = arg;
360 *addr = segs[0].ds_addr;
361 }
362
363 int
364 mfi_attach(struct mfi_softc *sc)
365 {
366 uint32_t status;
367 int error, commsz, framessz, sensesz;
368 int frames, unit, max_fw_sge, max_fw_cmds;
369 uint32_t tb_mem_size = 0;
370 struct cdev *dev_t;
371
372 if (sc == NULL)
373 return EINVAL;
374
375 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
376 MEGASAS_VERSION);
377
378 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
379 sx_init(&sc->mfi_config_lock, "MFI config");
380 TAILQ_INIT(&sc->mfi_ld_tqh);
381 TAILQ_INIT(&sc->mfi_syspd_tqh);
382 TAILQ_INIT(&sc->mfi_ld_pend_tqh);
383 TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
384 TAILQ_INIT(&sc->mfi_evt_queue);
385 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
386 TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
387 TAILQ_INIT(&sc->mfi_aen_pids);
388 TAILQ_INIT(&sc->mfi_cam_ccbq);
389
390 mfi_initq_free(sc);
391 mfi_initq_ready(sc);
392 mfi_initq_busy(sc);
393 mfi_initq_bio(sc);
394
395 sc->adpreset = 0;
396 sc->last_seq_num = 0;
397 sc->disableOnlineCtrlReset = 1;
398 sc->issuepend_done = 1;
399 sc->hw_crit_error = 0;
400
401 if (sc->mfi_flags & MFI_FLAGS_1064R) {
402 sc->mfi_enable_intr = mfi_enable_intr_xscale;
403 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
404 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
405 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
406 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
407 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
408 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
409 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
410 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
411 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
412 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
413 sc->mfi_tbolt = 1;
414 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
415 } else {
416 sc->mfi_enable_intr = mfi_enable_intr_ppc;
417 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
418 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
419 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
420 }
421
422 /* Before we get too far, see if the firmware is working */
423 if ((error = mfi_transition_firmware(sc)) != 0) {
424 device_printf(sc->mfi_dev, "Firmware not in READY state, "
425 "error %d\n", error);
426 return (ENXIO);
427 }
428
429 /* Start: LSIP200113393 */
430 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
431 1, 0, /* algnmnt, boundary */
432 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
433 BUS_SPACE_MAXADDR, /* highaddr */
434 NULL, NULL, /* filter, filterarg */
435 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
436 1, /* nsegments */
437 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
438 0, /* flags */
439 NULL, NULL, /* lockfunc, lockarg */
440 &sc->verbuf_h_dmat)) {
441 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
442 return (ENOMEM);
443 }
444 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
445 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
446 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
447 return (ENOMEM);
448 }
449 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
450 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
451 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
452 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
453 /* End: LSIP200113393 */
454
455 /*
456 * Get information needed for sizing the contiguous memory for the
457 * frame pool. Size down the sgl parameter since we know that
458 * we will never need more than what's required for MFI_MAXPHYS.
459 * It would be nice if these constants were available at runtime
460 * instead of compile time.
461 */
462 status = sc->mfi_read_fw_status(sc);
463 max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
464 if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
465 device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
466 max_fw_cmds, mfi_max_cmds);
467 sc->mfi_max_fw_cmds = mfi_max_cmds;
468 } else {
469 sc->mfi_max_fw_cmds = max_fw_cmds;
470 }
471 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
472 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
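/*
 * A transfer of MFI_MAXPHYS bytes needs at most
 * MFI_MAXPHYS / PAGE_SIZE + 1 segments when the buffer is not
 * page aligned, so more SGL entries than that are never used.
 */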
473
474 /* ThunderBolt support: get the contiguous memory */
475
476 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
477 mfi_tbolt_init_globals(sc);
478 device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
479 "MaxSgl = %d, state = %#x\n", max_fw_cmds,
480 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
481 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
482
483 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
484 1, 0, /* algnmnt, boundary */
485 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
486 BUS_SPACE_MAXADDR, /* highaddr */
487 NULL, NULL, /* filter, filterarg */
488 tb_mem_size, /* maxsize */
489 1, /* nsegments */
490 tb_mem_size, /* maxsegsize */
491 0, /* flags */
492 NULL, NULL, /* lockfunc, lockarg */
493 &sc->mfi_tb_dmat)) {
494 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
495 return (ENOMEM);
496 }
497 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
498 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
499 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
500 return (ENOMEM);
501 }
502 bzero(sc->request_message_pool, tb_mem_size);
503 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
504 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
505
506 /* For ThunderBolt memory init */
507 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
508 0x100, 0, /* alignmnt, boundary */
509 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
510 BUS_SPACE_MAXADDR, /* highaddr */
511 NULL, NULL, /* filter, filterarg */
512 MFI_FRAME_SIZE, /* maxsize */
513 1, /* nsegments */
514 MFI_FRAME_SIZE, /* maxsegsize */
515 0, /* flags */
516 NULL, NULL, /* lockfunc, lockarg */
517 &sc->mfi_tb_init_dmat)) {
518 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
519 return (ENOMEM);
520 }
521 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
522 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
523 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
524 return (ENOMEM);
525 }
526 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
527 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
528 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
529 &sc->mfi_tb_init_busaddr, 0);
530 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
531 tb_mem_size)) {
532 device_printf(sc->mfi_dev,
533 "Thunderbolt pool preparation error\n");
534 return (ENOMEM);
535 }
536
537 /*
538 * Allocate DMA memory for the MPI2 IOC Init descriptor in a mapping
539 * separate from the one used for the request and reply descriptors,
540 * to avoid confusion later.
541 */
542 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
543 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
544 1, 0, /* algnmnt, boundary */
545 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
546 BUS_SPACE_MAXADDR, /* highaddr */
547 NULL, NULL, /* filter, filterarg */
548 tb_mem_size, /* maxsize */
549 1, /* nsegments */
550 tb_mem_size, /* maxsegsize */
551 0, /* flags */
552 NULL, NULL, /* lockfunc, lockarg */
553 &sc->mfi_tb_ioc_init_dmat)) {
554 device_printf(sc->mfi_dev,
555 "Cannot allocate comms DMA tag\n");
556 return (ENOMEM);
557 }
558 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
559 (void **)&sc->mfi_tb_ioc_init_desc,
560 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
561 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
562 return (ENOMEM);
563 }
564 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
565 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
566 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
567 &sc->mfi_tb_ioc_init_busaddr, 0);
568 }
569 /*
570 * Create the dma tag for data buffers. Used both for block I/O
571 * and for various internal data queries.
572 */
573 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
574 1, 0, /* algnmnt, boundary */
575 BUS_SPACE_MAXADDR, /* lowaddr */
576 BUS_SPACE_MAXADDR, /* highaddr */
577 NULL, NULL, /* filter, filterarg */
578 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
579 sc->mfi_max_sge, /* nsegments */
580 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
581 BUS_DMA_ALLOCNOW, /* flags */
582 busdma_lock_mutex, /* lockfunc */
583 &sc->mfi_io_lock, /* lockfuncarg */
584 &sc->mfi_buffer_dmat)) {
585 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
586 return (ENOMEM);
587 }
588
589 /*
590 * Allocate DMA memory for the comms queues. Keep it under 4GB for
591 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
592 * entry, so the calculated size here will be 1 more than
593 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
594 */
595 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
596 sizeof(struct mfi_hwcomms);
597 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
598 1, 0, /* algnmnt, boundary */
599 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
600 BUS_SPACE_MAXADDR, /* highaddr */
601 NULL, NULL, /* filter, filterarg */
602 commsz, /* maxsize */
603 1, /* nsegments */
604 commsz, /* maxsegsize */
605 0, /* flags */
606 NULL, NULL, /* lockfunc, lockarg */
607 &sc->mfi_comms_dmat)) {
608 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
609 return (ENOMEM);
610 }
611 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
612 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
613 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
614 return (ENOMEM);
615 }
616 bzero(sc->mfi_comms, commsz);
617 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
618 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
619 /*
620 * Allocate DMA memory for the command frames. Keep them in the
621 * lower 4GB for efficiency. Calculate the size of the commands at
622 * the same time; each command is one 64 byte frame plus a set of
623 * additional frames for holding sg lists or other data.
624 * The assumption here is that the SG list will start at the second
625 * frame and not use the unused bytes in the first frame. While this
626 * isn't technically correct, it simplifies the calculation and allows
627 * for command frames that might be larger than an mfi_io_frame.
628 */
629 if (sizeof(bus_addr_t) == 8) {
630 sc->mfi_sge_size = sizeof(struct mfi_sg64);
631 sc->mfi_flags |= MFI_FLAGS_SG64;
632 } else {
633 sc->mfi_sge_size = sizeof(struct mfi_sg32);
634 }
635 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
636 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
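/*
 * One frame holds the command itself; the worst-case SG list is
 * rounded up to whole frames (the "- 1 ... + 2" below is a ceiling
 * division plus that one command frame).
 */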
637 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
638 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
639 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
640 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
641 64, 0, /* algnmnt, boundary */
642 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
643 BUS_SPACE_MAXADDR, /* highaddr */
644 NULL, NULL, /* filter, filterarg */
645 framessz, /* maxsize */
646 1, /* nsegments */
647 framessz, /* maxsegsize */
648 0, /* flags */
649 NULL, NULL, /* lockfunc, lockarg */
650 &sc->mfi_frames_dmat)) {
651 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
652 return (ENOMEM);
653 }
654 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
655 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
656 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
657 return (ENOMEM);
658 }
659 bzero(sc->mfi_frames, framessz);
660 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
661 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
662 /*
663 * Allocate DMA memory for the frame sense data. Keep them in the
664 * lower 4GB for efficiency
665 */
666 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
667 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
668 4, 0, /* algnmnt, boundary */
669 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
670 BUS_SPACE_MAXADDR, /* highaddr */
671 NULL, NULL, /* filter, filterarg */
672 sensesz, /* maxsize */
673 1, /* nsegments */
674 sensesz, /* maxsegsize */
675 0, /* flags */
676 NULL, NULL, /* lockfunc, lockarg */
677 &sc->mfi_sense_dmat)) {
678 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
679 return (ENOMEM);
680 }
681 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
682 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
683 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
684 return (ENOMEM);
685 }
686 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
687 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
688 if ((error = mfi_alloc_commands(sc)) != 0)
689 return (error);
690
691 /* Before moving the FW to operational state, check whether
692 * host memory is required by the FW or not
693 */
694
695 /* ThunderBolt MFI_IOC2 INIT */
696 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
697 sc->mfi_disable_intr(sc);
698 mtx_lock(&sc->mfi_io_lock);
699 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
700 device_printf(sc->mfi_dev,
701 "TB Init has failed with error %d\n",error);
702 mtx_unlock(&sc->mfi_io_lock);
703 return error;
704 }
705 mtx_unlock(&sc->mfi_io_lock);
706
707 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
708 return (error);
709 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
710 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
711 &sc->mfi_intr)) {
712 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
713 return (EINVAL);
714 }
715 sc->mfi_intr_ptr = mfi_intr_tbolt;
716 sc->mfi_enable_intr(sc);
717 } else {
718 if ((error = mfi_comms_init(sc)) != 0)
719 return (error);
720
721 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
722 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
723 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
724 return (EINVAL);
725 }
726 sc->mfi_intr_ptr = mfi_intr;
727 sc->mfi_enable_intr(sc);
728 }
729 if ((error = mfi_get_controller_info(sc)) != 0)
730 return (error);
731 sc->disableOnlineCtrlReset = 0;
732
733 /* Register a config hook to probe the bus for arrays */
734 sc->mfi_ich.ich_func = mfi_startup;
735 sc->mfi_ich.ich_arg = sc;
736 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
737 device_printf(sc->mfi_dev, "Cannot establish configuration "
738 "hook\n");
739 return (EINVAL);
740 }
741 mtx_lock(&sc->mfi_io_lock);
742 if ((error = mfi_aen_setup(sc, 0)) != 0) {
743 mtx_unlock(&sc->mfi_io_lock);
744 return (error);
745 }
746 mtx_unlock(&sc->mfi_io_lock);
747
748 /*
749 * Register a shutdown handler.
750 */
751 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
752 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
753 device_printf(sc->mfi_dev, "Warning: shutdown event "
754 "registration failed\n");
755 }
756
757 /*
758 * Create the control device for doing management
759 */
760 unit = device_get_unit(sc->mfi_dev);
761 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
762 0640, "mfi%d", unit);
763 if (unit == 0)
764 make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
765 sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
766 if (sc->mfi_cdev != NULL)
767 sc->mfi_cdev->si_drv1 = sc;
768 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
769 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
770 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
771 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
772 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
773 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
774 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
775 &sc->mfi_keep_deleted_volumes, 0,
776 "Don't detach the mfid device for a busy volume that is deleted");
777
778 device_add_child(sc->mfi_dev, "mfip", DEVICE_UNIT_ANY);
779 bus_attach_children(sc->mfi_dev);
780
781 /* Start the timeout watchdog */
782 callout_init(&sc->mfi_watchdog_callout, 1);
783 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
784 mfi_timeout, sc);
785
786 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
787 mtx_lock(&sc->mfi_io_lock);
788 mfi_tbolt_sync_map_info(sc);
789 mtx_unlock(&sc->mfi_io_lock);
790 }
791
792 return (0);
793 }
794
795 static int
796 mfi_alloc_commands(struct mfi_softc *sc)
797 {
798 struct mfi_command *cm;
799 int i, j;
800
801 /*
802 * XXX Should we allocate all the commands up front, or allocate on
803 * demand later like 'aac' does?
804 */
805 sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
806 sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
807
808 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
809 cm = &sc->mfi_commands[i];
810 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
811 sc->mfi_cmd_size * i);
812 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
813 sc->mfi_cmd_size * i;
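/*
 * The context field echoes the command index back through the
 * reply queue, letting mfi_intr() map a completion to its command.
 */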
814 cm->cm_frame->header.context = i;
815 cm->cm_sense = &sc->mfi_sense[i];
816 cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
817 cm->cm_sc = sc;
818 cm->cm_index = i;
819 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
820 &cm->cm_dmamap) == 0) {
821 mtx_lock(&sc->mfi_io_lock);
822 mfi_release_command(cm);
823 mtx_unlock(&sc->mfi_io_lock);
824 } else {
825 device_printf(sc->mfi_dev, "Failed to allocate %d "
826 "command blocks, only allocated %d\n",
827 sc->mfi_max_fw_cmds, i);
828 for (j = 0; j < i; j++) {
829 cm = &sc->mfi_commands[j];
830 bus_dmamap_destroy(sc->mfi_buffer_dmat,
831 cm->cm_dmamap);
832 }
833 free(sc->mfi_commands, M_MFIBUF);
834 sc->mfi_commands = NULL;
835
836 return (ENOMEM);
837 }
838 }
839
840 return (0);
841 }
842
843 void
844 mfi_release_command(struct mfi_command *cm)
845 {
846 struct mfi_frame_header *hdr;
847 uint32_t *hdr_data;
848
849 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
850
851 /*
852 * Zero out the important fields of the frame, but make sure the
853 * context field is preserved. For efficiency, handle the fields
854 * as 32 bit words. Clear out the first S/G entry too for safety.
855 */
856 hdr = &cm->cm_frame->header;
857 if (cm->cm_data != NULL && hdr->sg_count) {
858 cm->cm_sg->sg32[0].len = 0;
859 cm->cm_sg->sg32[0].addr = 0;
860 }
861
862 /*
863 * Command may be on other queues e.g. busy queue depending on the
864 * flow of a previous call to mfi_mapcmd, so ensure it's dequeued
865 * properly.
866 */
867 if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
868 mfi_remove_busy(cm);
869 if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
870 mfi_remove_ready(cm);
871
872 /* We're not expecting it to be on any other queue but check */
873 if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
874 panic("Command %p is still on another queue, flags = %#x",
875 cm, cm->cm_flags);
876 }
877
878 /* tbolt cleanup */
879 if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
880 mfi_tbolt_return_cmd(cm->cm_sc,
881 cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
882 cm);
883 }
884
885 hdr_data = (uint32_t *)cm->cm_frame;
886 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
887 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
888 hdr_data[4] = 0; /* flags, timeout */
889 hdr_data[5] = 0; /* data_len */
890
891 cm->cm_extra_frames = 0;
892 cm->cm_flags = 0;
893 cm->cm_complete = NULL;
894 cm->cm_private = NULL;
895 cm->cm_data = NULL;
896 cm->cm_sg = NULL;
897 cm->cm_total_frame_size = 0;
898 cm->retry_for_fw_reset = 0;
899
900 mfi_enqueue_free(cm);
901 }
902
903 int
904 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
905 uint32_t opcode, void **bufp, size_t bufsize)
906 {
907 struct mfi_command *cm;
908 struct mfi_dcmd_frame *dcmd;
909 void *buf = NULL;
910 uint32_t context = 0;
911
912 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
913
914 cm = mfi_dequeue_free(sc);
915 if (cm == NULL)
916 return (EBUSY);
917
918 /* Zero out the MFI frame */
919 context = cm->cm_frame->header.context;
920 bzero(cm->cm_frame, sizeof(union mfi_frame));
921 cm->cm_frame->header.context = context;
922
923 if ((bufsize > 0) && (bufp != NULL)) {
924 if (*bufp == NULL) {
925 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
926 if (buf == NULL) {
927 mfi_release_command(cm);
928 return (ENOMEM);
929 }
930 *bufp = buf;
931 } else {
932 buf = *bufp;
933 }
934 }
935
936 dcmd = &cm->cm_frame->dcmd;
937 bzero(dcmd->mbox, MFI_MBOX_SIZE);
938 dcmd->header.cmd = MFI_CMD_DCMD;
939 dcmd->header.timeout = 0;
940 dcmd->header.flags = 0;
941 dcmd->header.data_len = bufsize;
942 dcmd->header.scsi_status = 0;
943 dcmd->opcode = opcode;
944 cm->cm_sg = &dcmd->sgl;
945 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
946 cm->cm_flags = 0;
947 cm->cm_data = buf;
948 cm->cm_private = buf;
949 cm->cm_len = bufsize;
950
951 *cmp = cm;
952 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
953 *bufp = buf;
954 return (0);
955 }
956
957 static int
958 mfi_comms_init(struct mfi_softc *sc)
959 {
960 struct mfi_command *cm;
961 struct mfi_init_frame *init;
962 struct mfi_init_qinfo *qinfo;
963 int error;
964 uint32_t context = 0;
965
966 mtx_lock(&sc->mfi_io_lock);
967 if ((cm = mfi_dequeue_free(sc)) == NULL) {
968 mtx_unlock(&sc->mfi_io_lock);
969 return (EBUSY);
970 }
971
972 /* Zero out the MFI frame */
973 context = cm->cm_frame->header.context;
974 bzero(cm->cm_frame, sizeof(union mfi_frame));
975 cm->cm_frame->header.context = context;
976
977 /*
978 * Abuse the SG list area of the frame to hold the init_qinfo
979 * object.
980 */
981 init = &cm->cm_frame->init;
982 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
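/*
 * qinfo therefore lives in the frame's second 64-byte slot; its bus
 * address is handed to the firmware below via qinfo_new_addr_lo.
 */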
983
984 bzero(qinfo, sizeof(struct mfi_init_qinfo));
985 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
986 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
987 offsetof(struct mfi_hwcomms, hw_reply_q);
988 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
989 offsetof(struct mfi_hwcomms, hw_pi);
990 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
991 offsetof(struct mfi_hwcomms, hw_ci);
992
993 init->header.cmd = MFI_CMD_INIT;
994 init->header.data_len = sizeof(struct mfi_init_qinfo);
995 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
996 cm->cm_data = NULL;
997 cm->cm_flags = MFI_CMD_POLLED;
998
999 if ((error = mfi_mapcmd(sc, cm)) != 0)
1000 device_printf(sc->mfi_dev, "failed to send init command\n");
1001 mfi_release_command(cm);
1002 mtx_unlock(&sc->mfi_io_lock);
1003
1004 return (error);
1005 }
1006
1007 static int
1008 mfi_get_controller_info(struct mfi_softc *sc)
1009 {
1010 struct mfi_command *cm = NULL;
1011 struct mfi_ctrl_info *ci = NULL;
1012 uint32_t max_sectors_1, max_sectors_2;
1013 int error;
1014
1015 mtx_lock(&sc->mfi_io_lock);
1016 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1017 (void **)&ci, sizeof(*ci));
1018 if (error)
1019 goto out;
1020 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1021
1022 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1023 device_printf(sc->mfi_dev, "Failed to get controller info\n");
1024 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1025 MFI_SECTOR_LEN;
1026 error = 0;
1027 goto out;
1028 }
1029
1030 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1031 BUS_DMASYNC_POSTREAD);
1032 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1033
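/*
 * Cap the I/O size (in sectors) at the smaller of the stripe-based
 * limit and the firmware's advertised maximum request size.
 */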
1034 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1035 max_sectors_2 = ci->max_request_size;
1036 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1037 sc->disableOnlineCtrlReset =
1038 ci->properties.OnOffProperties.disableOnlineCtrlReset;
1039
1040 out:
1041 if (ci)
1042 free(ci, M_MFIBUF);
1043 if (cm)
1044 mfi_release_command(cm);
1045 mtx_unlock(&sc->mfi_io_lock);
1046 return (error);
1047 }
1048
1049 static int
1050 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1051 {
1052 struct mfi_command *cm = NULL;
1053 int error;
1054
1055 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1056 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1057 (void **)log_state, sizeof(**log_state));
1058 if (error)
1059 goto out;
1060 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1061
1062 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1063 device_printf(sc->mfi_dev, "Failed to get log state\n");
1064 goto out;
1065 }
1066
1067 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1068 BUS_DMASYNC_POSTREAD);
1069 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1070
1071 out:
1072 if (cm)
1073 mfi_release_command(cm);
1074
1075 return (error);
1076 }
1077
1078 int
1079 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1080 {
1081 struct mfi_evt_log_state *log_state = NULL;
1082 union mfi_evt class_locale;
1083 int error = 0;
1084 uint32_t seq;
1085
1086 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1087
1088 class_locale.members.reserved = 0;
1089 class_locale.members.locale = mfi_event_locale;
1090 class_locale.members.evt_class = mfi_event_class;
1091
1092 if (seq_start == 0) {
1093 if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1094 goto out;
1095 sc->mfi_boot_seq_num = log_state->boot_seq_num;
1096
1097 /*
1098 * Walk through any events that fired since the last
1099 * shutdown.
1100 */
1101 if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1102 log_state->newest_seq_num)) != 0)
1103 goto out;
1104 seq = log_state->newest_seq_num;
1105 } else
1106 seq = seq_start;
1107 error = mfi_aen_register(sc, seq, class_locale.word);
1108 out:
1109 free(log_state, M_MFIBUF);
1110
1111 return (error);
1112 }
1113
1114 int
1115 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1116 {
1117
1118 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1119 cm->cm_complete = NULL;
1120
1121 /*
1122 * MegaCli can issue a DCMD of 0. In this case do nothing
1123 * and return 0 to it as status
1124 */
1125 if (cm->cm_frame->dcmd.opcode == 0) {
1126 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1127 cm->cm_error = 0;
1128 return (cm->cm_error);
1129 }
1130 mfi_enqueue_ready(cm);
1131 mfi_startio(sc);
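/* Sleep until the completion path marks the command MFI_CMD_COMPLETED
 * and issues a wakeup on it. */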
1132 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1133 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1134 return (cm->cm_error);
1135 }
1136
1137 void
1138 mfi_free(struct mfi_softc *sc)
1139 {
1140 struct mfi_command *cm;
1141 int i;
1142
1143 callout_drain(&sc->mfi_watchdog_callout);
1144
1145 if (sc->mfi_cdev != NULL)
1146 destroy_dev(sc->mfi_cdev);
1147
1148 if (sc->mfi_commands != NULL) {
1149 for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1150 cm = &sc->mfi_commands[i];
1151 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1152 }
1153 free(sc->mfi_commands, M_MFIBUF);
1154 sc->mfi_commands = NULL;
1155 }
1156
1157 if (sc->mfi_intr)
1158 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1159 if (sc->mfi_irq != NULL)
1160 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1161 sc->mfi_irq);
1162
1163 if (sc->mfi_sense_busaddr != 0)
1164 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1165 if (sc->mfi_sense != NULL)
1166 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1167 sc->mfi_sense_dmamap);
1168 if (sc->mfi_sense_dmat != NULL)
1169 bus_dma_tag_destroy(sc->mfi_sense_dmat);
1170
1171 if (sc->mfi_frames_busaddr != 0)
1172 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1173 if (sc->mfi_frames != NULL)
1174 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1175 sc->mfi_frames_dmamap);
1176 if (sc->mfi_frames_dmat != NULL)
1177 bus_dma_tag_destroy(sc->mfi_frames_dmat);
1178
1179 if (sc->mfi_comms_busaddr != 0)
1180 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1181 if (sc->mfi_comms != NULL)
1182 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1183 sc->mfi_comms_dmamap);
1184 if (sc->mfi_comms_dmat != NULL)
1185 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1186
1187 /* ThunderBolt contiguous memory free here */
1188 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1189 if (sc->mfi_tb_busaddr != 0)
1190 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1191 if (sc->request_message_pool != NULL)
1192 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1193 sc->mfi_tb_dmamap);
1194 if (sc->mfi_tb_dmat != NULL)
1195 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1196
1197 /* Version buffer memory free */
1198 /* Start LSIP200113393 */
1199 if (sc->verbuf_h_busaddr != 0)
1200 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1201 if (sc->verbuf != NULL)
1202 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1203 sc->verbuf_h_dmamap);
1204 if (sc->verbuf_h_dmat != NULL)
1205 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1206
1207 /* End LSIP200113393 */
1208 /* ThunderBolt INIT packet memory Free */
1209 if (sc->mfi_tb_init_busaddr != 0)
1210 bus_dmamap_unload(sc->mfi_tb_init_dmat,
1211 sc->mfi_tb_init_dmamap);
1212 if (sc->mfi_tb_init != NULL)
1213 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1214 sc->mfi_tb_init_dmamap);
1215 if (sc->mfi_tb_init_dmat != NULL)
1216 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1217
1218 /* ThunderBolt IOC Init Desc memory free here */
1219 if (sc->mfi_tb_ioc_init_busaddr != 0)
1220 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1221 sc->mfi_tb_ioc_init_dmamap);
1222 if (sc->mfi_tb_ioc_init_desc != NULL)
1223 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1224 sc->mfi_tb_ioc_init_desc,
1225 sc->mfi_tb_ioc_init_dmamap);
1226 if (sc->mfi_tb_ioc_init_dmat != NULL)
1227 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1228 if (sc->mfi_cmd_pool_tbolt != NULL) {
1229 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1230 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1231 free(sc->mfi_cmd_pool_tbolt[i],
1232 M_MFIBUF);
1233 sc->mfi_cmd_pool_tbolt[i] = NULL;
1234 }
1235 }
1236 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1237 sc->mfi_cmd_pool_tbolt = NULL;
1238 }
1239 if (sc->request_desc_pool != NULL) {
1240 free(sc->request_desc_pool, M_MFIBUF);
1241 sc->request_desc_pool = NULL;
1242 }
1243 }
1244 if (sc->mfi_buffer_dmat != NULL)
1245 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1246 if (sc->mfi_parent_dmat != NULL)
1247 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1248
1249 if (mtx_initialized(&sc->mfi_io_lock)) {
1250 mtx_destroy(&sc->mfi_io_lock);
1251 sx_destroy(&sc->mfi_config_lock);
1252 }
1253
1254 return;
1255 }
1256
1257 static void
1258 mfi_startup(void *arg)
1259 {
1260 struct mfi_softc *sc;
1261
1262 sc = (struct mfi_softc *)arg;
1263
1264 sc->mfi_enable_intr(sc);
1265 sx_xlock(&sc->mfi_config_lock);
1266 mtx_lock(&sc->mfi_io_lock);
1267 mfi_ldprobe(sc);
1268 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1269 mfi_syspdprobe(sc);
1270 mtx_unlock(&sc->mfi_io_lock);
1271 sx_xunlock(&sc->mfi_config_lock);
1272
1273 config_intrhook_disestablish(&sc->mfi_ich);
1274 }
1275
1276 static void
1277 mfi_intr(void *arg)
1278 {
1279 struct mfi_softc *sc;
1280 struct mfi_command *cm;
1281 uint32_t pi, ci, context;
1282
1283 sc = (struct mfi_softc *)arg;
1284
1285 if (sc->mfi_check_clear_intr(sc))
1286 return;
1287
1288 restart:
1289 pi = sc->mfi_comms->hw_pi;
1290 ci = sc->mfi_comms->hw_ci;
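/*
 * Drain the reply queue: the firmware advances the producer index
 * (hw_pi) as commands complete, and we consume entries up to it.
 * The queue holds mfi_max_fw_cmds + 1 entries, hence the wrap below.
 */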
1291 mtx_lock(&sc->mfi_io_lock);
1292 while (ci != pi) {
1293 context = sc->mfi_comms->hw_reply_q[ci];
1294 if (context < sc->mfi_max_fw_cmds) {
1295 cm = &sc->mfi_commands[context];
1296 mfi_remove_busy(cm);
1297 cm->cm_error = 0;
1298 mfi_complete(sc, cm);
1299 }
1300 if (++ci == (sc->mfi_max_fw_cmds + 1))
1301 ci = 0;
1302 }
1303
1304 sc->mfi_comms->hw_ci = ci;
1305
1306 /* Give deferred I/O a chance to run */
1307 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1308 mfi_startio(sc);
1309 mtx_unlock(&sc->mfi_io_lock);
1310
1311 /*
1312 * Dummy read to flush the bus; this ensures that the indexes are up
1313 * to date. Restart processing if more commands have come in.
1314 */
1315 (void)sc->mfi_read_fw_status(sc);
1316 if (pi != sc->mfi_comms->hw_pi)
1317 goto restart;
1318
1319 return;
1320 }
1321
1322 int
1323 mfi_shutdown(struct mfi_softc *sc)
1324 {
1325 struct mfi_dcmd_frame *dcmd;
1326 struct mfi_command *cm;
1327 int error;
1328
1329 if (sc->mfi_aen_cm != NULL) {
1330 sc->cm_aen_abort = 1;
1331 mfi_abort(sc, &sc->mfi_aen_cm);
1332 }
1333
1334 if (sc->mfi_map_sync_cm != NULL) {
1335 sc->cm_map_abort = 1;
1336 mfi_abort(sc, &sc->mfi_map_sync_cm);
1337 }
1338
1339 mtx_lock(&sc->mfi_io_lock);
1340 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1341 if (error) {
1342 mtx_unlock(&sc->mfi_io_lock);
1343 return (error);
1344 }
1345
1346 dcmd = &cm->cm_frame->dcmd;
1347 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1348 cm->cm_flags = MFI_CMD_POLLED;
1349 cm->cm_data = NULL;
1350
1351 if ((error = mfi_mapcmd(sc, cm)) != 0)
1352 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1353
1354 mfi_release_command(cm);
1355 mtx_unlock(&sc->mfi_io_lock);
1356 return (error);
1357 }
1358
1359 static void
1360 mfi_syspdprobe(struct mfi_softc *sc)
1361 {
1362 struct mfi_frame_header *hdr;
1363 struct mfi_command *cm = NULL;
1364 struct mfi_pd_list *pdlist = NULL;
1365 struct mfi_system_pd *syspd, *tmp;
1366 struct mfi_system_pending *syspd_pend;
1367 int error, i, found;
1368
1369 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1370 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1371 /* Add SYSTEM PD's */
1372 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1373 (void **)&pdlist, sizeof(*pdlist));
1374 if (error) {
1375 device_printf(sc->mfi_dev,
1376 "Error while forming SYSTEM PD list\n");
1377 goto out;
1378 }
1379
1380 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1381 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1382 cm->cm_frame->dcmd.mbox[1] = 0;
1383 if (mfi_mapcmd(sc, cm) != 0) {
1384 device_printf(sc->mfi_dev,
1385 "Failed to get syspd device listing\n");
1386 goto out;
1387 }
1388 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1389 BUS_DMASYNC_POSTREAD);
1390 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1391 hdr = &cm->cm_frame->header;
1392 if (hdr->cmd_status != MFI_STAT_OK) {
1393 device_printf(sc->mfi_dev,
1394 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1395 goto out;
1396 }
1397 /* Get each PD and add it to the system */
1398 for (i = 0; i < pdlist->count; i++) {
1399 if (pdlist->addr[i].device_id ==
1400 pdlist->addr[i].encl_device_id)
1401 continue;
1402 found = 0;
1403 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1404 if (syspd->pd_id == pdlist->addr[i].device_id)
1405 found = 1;
1406 }
1407 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1408 if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1409 found = 1;
1410 }
1411 if (found == 0)
1412 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1413 }
1414 /* Delete SYSPD's whose state has been changed */
1415 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1416 found = 0;
1417 for (i = 0; i < pdlist->count; i++) {
1418 if (syspd->pd_id == pdlist->addr[i].device_id) {
1419 found = 1;
1420 break;
1421 }
1422 }
1423 if (found == 0) {
1424 printf("DELETE\n");
1425 mtx_unlock(&sc->mfi_io_lock);
1426 bus_topo_lock();
1427 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1428 bus_topo_unlock();
1429 mtx_lock(&sc->mfi_io_lock);
1430 }
1431 }
1432 out:
1433 if (pdlist)
1434 free(pdlist, M_MFIBUF);
1435 if (cm)
1436 mfi_release_command(cm);
1437
1438 return;
1439 }
1440
1441 static void
1442 mfi_ldprobe(struct mfi_softc *sc)
1443 {
1444 struct mfi_frame_header *hdr;
1445 struct mfi_command *cm = NULL;
1446 struct mfi_ld_list *list = NULL;
1447 struct mfi_disk *ld;
1448 struct mfi_disk_pending *ld_pend;
1449 int error, i;
1450
1451 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1452 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1453
1454 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1455 (void **)&list, sizeof(*list));
1456 if (error)
1457 goto out;
1458
1459 cm->cm_flags = MFI_CMD_DATAIN;
1460 if (mfi_wait_command(sc, cm) != 0) {
1461 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1462 goto out;
1463 }
1464
1465 hdr = &cm->cm_frame->header;
1466 if (hdr->cmd_status != MFI_STAT_OK) {
1467 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1468 hdr->cmd_status);
1469 goto out;
1470 }
1471
1472 for (i = 0; i < list->ld_count; i++) {
1473 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1474 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1475 goto skip_add;
1476 }
1477 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1478 if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1479 goto skip_add;
1480 }
1481 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1482 skip_add:;
1483 }
1484 out:
1485 if (list)
1486 free(list, M_MFIBUF);
1487 if (cm)
1488 mfi_release_command(cm);
1489
1490 return;
1491 }
1492
1493 /*
1494 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
1495 * bits 24-31 are all set, then it is the number of seconds since
1496 * boot.
1497 */
1498 static const char *
1499 format_timestamp(uint32_t timestamp)
1500 {
1501 static char buffer[32];
1502
1503 if ((timestamp & 0xff000000) == 0xff000000)
1504 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1505 0x00ffffff);
1506 else
1507 snprintf(buffer, sizeof(buffer), "%us", timestamp);
1508 return (buffer);
1509 }
1510
1511 static const char *
1512 format_class(int8_t class)
1513 {
1514 static char buffer[6];
1515
1516 switch (class) {
1517 case MFI_EVT_CLASS_DEBUG:
1518 return ("debug");
1519 case MFI_EVT_CLASS_PROGRESS:
1520 return ("progress");
1521 case MFI_EVT_CLASS_INFO:
1522 return ("info");
1523 case MFI_EVT_CLASS_WARNING:
1524 return ("WARN");
1525 case MFI_EVT_CLASS_CRITICAL:
1526 return ("CRIT");
1527 case MFI_EVT_CLASS_FATAL:
1528 return ("FATAL");
1529 case MFI_EVT_CLASS_DEAD:
1530 return ("DEAD");
1531 default:
1532 snprintf(buffer, sizeof(buffer), "%d", class);
1533 return (buffer);
1534 }
1535 }
1536
1537 static void
1538 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1539 {
1540 struct mfi_system_pd *syspd = NULL;
1541
1542 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1543 format_timestamp(detail->time), detail->evt_class.members.locale,
1544 format_class(detail->evt_class.members.evt_class),
1545 detail->description);
1546
1547 /* Don't act on old AEN's or while shutting down */
1548 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1549 return;
1550
1551 switch (detail->arg_type) {
1552 case MR_EVT_ARGS_NONE:
1553 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1554 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1555 if (mfi_detect_jbod_change) {
1556 /*
1557 * Probe for new SYSPD's and delete
1558 * invalid SYSPD's
1559 */
1560 sx_xlock(&sc->mfi_config_lock);
1561 mtx_lock(&sc->mfi_io_lock);
1562 mfi_syspdprobe(sc);
1563 mtx_unlock(&sc->mfi_io_lock);
1564 sx_xunlock(&sc->mfi_config_lock);
1565 }
1566 }
1567 break;
1568 case MR_EVT_ARGS_LD_STATE:
1569 /* At load time the driver reads all the events, starting
1570 * from the first one logged after the last shutdown. Avoid
1571 * acting on these old events.
1572 */
1573 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1574 /* Remove the LD */
1575 struct mfi_disk *ld;
1576 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1577 if (ld->ld_id ==
1578 detail->args.ld_state.ld.target_id)
1579 break;
1580 }
1581 /*
1582 * Fix for kernel panics when an SSCD is removed:
1583 * KASSERT(ld != NULL, ("volume disappeared"));
1584 */
1585 if (ld != NULL) {
1586 bus_topo_lock();
1587 device_delete_child(sc->mfi_dev, ld->ld_dev);
1588 bus_topo_unlock();
1589 }
1590 }
1591 break;
1592 case MR_EVT_ARGS_PD:
1593 if (detail->code == MR_EVT_PD_REMOVED) {
1594 if (mfi_detect_jbod_change) {
1595 /*
1596 * If the removed device is a SYSPD then
1597 * delete it
1598 */
1599 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1600 pd_link) {
1601 if (syspd->pd_id ==
1602 detail->args.pd.device_id) {
1603 bus_topo_lock();
1604 device_delete_child(
1605 sc->mfi_dev,
1606 syspd->pd_dev);
1607 bus_topo_unlock();
1608 break;
1609 }
1610 }
1611 }
1612 }
1613 if (detail->code == MR_EVT_PD_INSERTED) {
1614 if (mfi_detect_jbod_change) {
1615 /* Probe for new SYSPD's */
1616 sx_xlock(&sc->mfi_config_lock);
1617 mtx_lock(&sc->mfi_io_lock);
1618 mfi_syspdprobe(sc);
1619 mtx_unlock(&sc->mfi_io_lock);
1620 sx_xunlock(&sc->mfi_config_lock);
1621 }
1622 }
1623 if (sc->mfi_cam_rescan_cb != NULL &&
1624 (detail->code == MR_EVT_PD_INSERTED ||
1625 detail->code == MR_EVT_PD_REMOVED)) {
1626 sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
1627 }
1628 break;
1629 }
1630 }
1631
1632 static void
1633 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1634 {
1635 struct mfi_evt_queue_elm *elm;
1636
1637 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1638 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1639 if (elm == NULL)
1640 return;
1641 memcpy(&elm->detail, detail, sizeof(*detail));
1642 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1643 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1644 }
1645
1646 static void
1647 mfi_handle_evt(void *context, int pending)
1648 {
1649 TAILQ_HEAD(, mfi_evt_queue_elm) queue;
1650 struct mfi_softc *sc;
1651 struct mfi_evt_queue_elm *elm;
1652
1653 sc = context;
1654 TAILQ_INIT(&queue);
1655 mtx_lock(&sc->mfi_io_lock);
1656 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1657 mtx_unlock(&sc->mfi_io_lock);
1658 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1659 TAILQ_REMOVE(&queue, elm, link);
1660 mfi_decode_evt(sc, &elm->detail);
1661 free(elm, M_MFIBUF);
1662 }
1663 }
1664
1665 static int
1666 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1667 {
1668 struct mfi_command *cm;
1669 struct mfi_dcmd_frame *dcmd;
1670 union mfi_evt current_aen, prior_aen;
1671 struct mfi_evt_detail *ed = NULL;
1672 int error = 0;
1673
1674 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1675
1676 current_aen.word = locale;
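/*
 * If an AEN command is already outstanding and it already covers the
 * requested class and locale set, leave it alone.  Otherwise merge
 * the two requests and abort the old command so that a broader one
 * can be issued below.
 */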
1677 if (sc->mfi_aen_cm != NULL) {
1678 prior_aen.word =
1679 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1680 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1681 !((prior_aen.members.locale & current_aen.members.locale)
1682 ^ current_aen.members.locale)) {
1683 return (0);
1684 } else {
1685 prior_aen.members.locale |= current_aen.members.locale;
1686 if (prior_aen.members.evt_class
1687 < current_aen.members.evt_class)
1688 current_aen.members.evt_class =
1689 prior_aen.members.evt_class;
1690 mfi_abort(sc, &sc->mfi_aen_cm);
1691 }
1692 }
1693
1694 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1695 (void **)&ed, sizeof(*ed));
1696 if (error)
1697 goto out;
1698
1699 dcmd = &cm->cm_frame->dcmd;
1700 ((uint32_t *)&dcmd->mbox)[0] = seq;
1701 ((uint32_t *)&dcmd->mbox)[1] = locale;
1702 cm->cm_flags = MFI_CMD_DATAIN;
1703 cm->cm_complete = mfi_aen_complete;
1704
1705 sc->last_seq_num = seq;
1706 sc->mfi_aen_cm = cm;
1707
1708 mfi_enqueue_ready(cm);
1709 mfi_startio(sc);
1710
1711 out:
1712 return (error);
1713 }
1714
1715 static void
1716 mfi_aen_complete(struct mfi_command *cm)
1717 {
1718 struct mfi_frame_header *hdr;
1719 struct mfi_softc *sc;
1720 struct mfi_evt_detail *detail;
1721 struct mfi_aen *mfi_aen_entry, *tmp;
1722 int seq = 0, aborted = 0;
1723
1724 sc = cm->cm_sc;
1725 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1726
1727 if (sc->mfi_aen_cm == NULL)
1728 return;
1729
1730 hdr = &cm->cm_frame->header;
1731
1732 if (sc->cm_aen_abort ||
1733 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1734 sc->cm_aen_abort = 0;
1735 aborted = 1;
1736 } else {
1737 sc->mfi_aen_triggered = 1;
1738 if (sc->mfi_poll_waiting) {
1739 sc->mfi_poll_waiting = 0;
1740 selwakeup(&sc->mfi_select);
1741 }
1742 detail = cm->cm_data;
1743 mfi_queue_evt(sc, detail);
1744 seq = detail->seq + 1;
1745 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1746 tmp) {
1747 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1748 aen_link);
1749 PROC_LOCK(mfi_aen_entry->p);
1750 kern_psignal(mfi_aen_entry->p, SIGIO);
1751 PROC_UNLOCK(mfi_aen_entry->p);
1752 free(mfi_aen_entry, M_MFIBUF);
1753 }
1754 }
1755
1756 free(cm->cm_data, M_MFIBUF);
1757 wakeup(&sc->mfi_aen_cm);
1758 sc->mfi_aen_cm = NULL;
1759 mfi_release_command(cm);
1760
1761 /* set it up again so the driver can catch more events */
1762 if (!aborted)
1763 mfi_aen_setup(sc, seq);
1764 }
1765
1766 #define MAX_EVENTS 15
1767
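/*
 * Replay the firmware event log from start_seq up to stop_seq, fetching
 * at most MAX_EVENTS entries per polled DCMD and queueing each one.
 * The log is circular, so stop_seq may be numerically below start_seq.
 */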
1768 static int
1769 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1770 {
1771 struct mfi_command *cm;
1772 struct mfi_dcmd_frame *dcmd;
1773 struct mfi_evt_list *el;
1774 union mfi_evt class_locale;
1775 int error, i, seq, size;
1776
1777 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1778
1779 class_locale.members.reserved = 0;
1780 class_locale.members.locale = mfi_event_locale;
1781 class_locale.members.evt_class = mfi_event_class;
1782
1783 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1784 * (MAX_EVENTS - 1);
1785 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1786 if (el == NULL)
1787 return (ENOMEM);
1788
1789 for (seq = start_seq;;) {
1790 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1791 free(el, M_MFIBUF);
1792 return (EBUSY);
1793 }
1794
1795 dcmd = &cm->cm_frame->dcmd;
1796 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1797 dcmd->header.cmd = MFI_CMD_DCMD;
1798 dcmd->header.timeout = 0;
1799 dcmd->header.data_len = size;
1800 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1801 ((uint32_t *)&dcmd->mbox)[0] = seq;
1802 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1803 cm->cm_sg = &dcmd->sgl;
1804 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1805 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1806 cm->cm_data = el;
1807 cm->cm_len = size;
1808
1809 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1810 device_printf(sc->mfi_dev,
1811 "Failed to get controller entries\n");
1812 mfi_release_command(cm);
1813 break;
1814 }
1815
1816 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1817 BUS_DMASYNC_POSTREAD);
1818 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1819
1820 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1821 mfi_release_command(cm);
1822 break;
1823 }
1824 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1825 device_printf(sc->mfi_dev,
1826 "Error %d fetching controller entries\n",
1827 dcmd->header.cmd_status);
1828 mfi_release_command(cm);
1829 error = EIO;
1830 break;
1831 }
1832 mfi_release_command(cm);
1833
1834 for (i = 0; i < el->count; i++) {
1835 /*
1836 * If this event is newer than 'stop_seq' then
1837 * break out of the loop. Note that the log
1838 * is a circular buffer so we have to handle
1839 * the case that our stop point is earlier in
1840 * the buffer than our start point.
1841 */
1842 if (el->event[i].seq >= stop_seq) {
1843 if (start_seq <= stop_seq)
1844 break;
1845 else if (el->event[i].seq < start_seq)
1846 break;
1847 }
1848 mfi_queue_evt(sc, &el->event[i]);
1849 }
1850 seq = el->event[el->count - 1].seq + 1;
1851 }
1852
1853 free(el, M_MFIBUF);
1854 return (error);
1855 }
1856
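/*
 * Fetch the configuration of logical drive 'id' and, unless it is an
 * SSCD (SSD cache) volume, attach an mfid child device for it.
 */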
1857 static int
1858 mfi_add_ld(struct mfi_softc *sc, int id)
1859 {
1860 struct mfi_command *cm;
1861 struct mfi_dcmd_frame *dcmd = NULL;
1862 struct mfi_ld_info *ld_info = NULL;
1863 struct mfi_disk_pending *ld_pend;
1864 int error;
1865
1866 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1867
1868 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1869 if (ld_pend != NULL) {
1870 ld_pend->ld_id = id;
1871 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1872 }
1873
1874 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1875 (void **)&ld_info, sizeof(*ld_info));
1876 if (error) {
1877 device_printf(sc->mfi_dev,
1878 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1879 if (ld_info)
1880 free(ld_info, M_MFIBUF);
1881 return (error);
1882 }
1883 cm->cm_flags = MFI_CMD_DATAIN;
1884 dcmd = &cm->cm_frame->dcmd;
1885 dcmd->mbox[0] = id;
1886 if (mfi_wait_command(sc, cm) != 0) {
1887 device_printf(sc->mfi_dev,
1888 "Failed to get logical drive: %d\n", id);
1889 free(ld_info, M_MFIBUF);
1890 return (0);
1891 }
1892 if (ld_info->ld_config.params.isSSCD != 1)
1893 mfi_add_ld_complete(cm);
1894 else {
1895 mfi_release_command(cm);
1896 		if (ld_info) /* for SSCD drives, ld_info is freed here */
1897 free(ld_info, M_MFIBUF);
1898 }
1899 return (0);
1900 }
1901
1902 static void
1903 mfi_add_ld_complete(struct mfi_command *cm)
1904 {
1905 struct mfi_frame_header *hdr;
1906 struct mfi_ld_info *ld_info;
1907 struct mfi_softc *sc;
1908 device_t child;
1909
1910 sc = cm->cm_sc;
1911 hdr = &cm->cm_frame->header;
1912 ld_info = cm->cm_private;
1913
1914 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1915 free(ld_info, M_MFIBUF);
1916 wakeup(&sc->mfi_map_sync_cm);
1917 mfi_release_command(cm);
1918 return;
1919 }
1920 wakeup(&sc->mfi_map_sync_cm);
1921 mfi_release_command(cm);
1922
1923 mtx_unlock(&sc->mfi_io_lock);
1924 bus_topo_lock();
1925 if ((child = device_add_child(sc->mfi_dev, "mfid",
1926 DEVICE_UNIT_ANY)) == NULL) {
1927 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1928 free(ld_info, M_MFIBUF);
1929 bus_topo_unlock();
1930 mtx_lock(&sc->mfi_io_lock);
1931 return;
1932 }
1933
1934 device_set_ivars(child, ld_info);
1935 device_set_desc(child, "MFI Logical Disk");
1936 bus_attach_children(sc->mfi_dev);
1937 bus_topo_unlock();
1938 mtx_lock(&sc->mfi_io_lock);
1939 }
1940
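/*
 * Fetch the info for physical drive 'id' with a polled DCMD and attach
 * an mfisyspd child for it if the drive is in the SYSTEM (JBOD) state.
 */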
1941 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1942 {
1943 struct mfi_command *cm;
1944 struct mfi_dcmd_frame *dcmd = NULL;
1945 struct mfi_pd_info *pd_info = NULL;
1946 struct mfi_system_pending *syspd_pend;
1947 int error;
1948
1949 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1950
1951 syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1952 if (syspd_pend != NULL) {
1953 syspd_pend->pd_id = id;
1954 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1955 }
1956
1957 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1958 (void **)&pd_info, sizeof(*pd_info));
1959 if (error) {
1960 device_printf(sc->mfi_dev,
1961 		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1962 error);
1963 if (pd_info)
1964 free(pd_info, M_MFIBUF);
1965 return (error);
1966 }
1967 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1968 dcmd = &cm->cm_frame->dcmd;
1969 	dcmd->mbox[0] = id;
1970 dcmd->header.scsi_status = 0;
1971 dcmd->header.pad0 = 0;
1972 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1973 device_printf(sc->mfi_dev,
1974 "Failed to get physical drive info %d\n", id);
1975 free(pd_info, M_MFIBUF);
1976 mfi_release_command(cm);
1977 return (error);
1978 }
1979 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1980 BUS_DMASYNC_POSTREAD);
1981 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1982 mfi_add_sys_pd_complete(cm);
1983 return (0);
1984 }
1985
1986 static void
1987 mfi_add_sys_pd_complete(struct mfi_command *cm)
1988 {
1989 struct mfi_frame_header *hdr;
1990 struct mfi_pd_info *pd_info;
1991 struct mfi_softc *sc;
1992 device_t child;
1993
1994 sc = cm->cm_sc;
1995 hdr = &cm->cm_frame->header;
1996 pd_info = cm->cm_private;
1997
1998 if (hdr->cmd_status != MFI_STAT_OK) {
1999 free(pd_info, M_MFIBUF);
2000 mfi_release_command(cm);
2001 return;
2002 }
2003 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2004 		device_printf(sc->mfi_dev, "PD %x is not a SYSTEM PD\n",
2005 pd_info->ref.v.device_id);
2006 free(pd_info, M_MFIBUF);
2007 mfi_release_command(cm);
2008 return;
2009 }
2010 mfi_release_command(cm);
2011
2012 mtx_unlock(&sc->mfi_io_lock);
2013 bus_topo_lock();
2014 if ((child = device_add_child(sc->mfi_dev, "mfisyspd",
2015 DEVICE_UNIT_ANY)) == NULL) {
2016 device_printf(sc->mfi_dev, "Failed to add system pd\n");
2017 free(pd_info, M_MFIBUF);
2018 bus_topo_unlock();
2019 mtx_lock(&sc->mfi_io_lock);
2020 return;
2021 }
2022
2023 device_set_ivars(child, pd_info);
2024 device_set_desc(child, "MFI System PD");
2025 bus_attach_children(sc->mfi_dev);
2026 bus_topo_unlock();
2027 mtx_lock(&sc->mfi_io_lock);
2028 }
2029
2030 static struct mfi_command *
2031 mfi_bio_command(struct mfi_softc *sc)
2032 {
2033 struct bio *bio;
2034 struct mfi_command *cm = NULL;
2035
2036 	/* Reserve two commands to avoid starving ioctl requests */
2037 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2038 return (NULL);
2039 }
2040 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2041 return (NULL);
2042 }
2043 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2044 cm = mfi_build_ldio(sc, bio);
2045 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2046 cm = mfi_build_syspdio(sc, bio);
2047 }
2048 if (!cm)
2049 mfi_enqueue_bio(sc, bio);
2050 return cm;
2051 }
2052
2053 /*
2054 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2055 */
2056
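/*
 * Pick the smallest READ/WRITE CDB (6, 10, 12, or 16 bytes) that can
 * encode the given LBA and block count, and return its length.
 */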
2057 int
2058 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2059 {
2060 int cdb_len;
2061
2062 if (((lba & 0x1fffff) == lba)
2063 && ((block_count & 0xff) == block_count)
2064 && (byte2 == 0)) {
2065 /* We can fit in a 6 byte cdb */
2066 struct scsi_rw_6 *scsi_cmd;
2067
2068 scsi_cmd = (struct scsi_rw_6 *)cdb;
2069 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2070 scsi_ulto3b(lba, scsi_cmd->addr);
2071 scsi_cmd->length = block_count & 0xff;
2072 scsi_cmd->control = 0;
2073 cdb_len = sizeof(*scsi_cmd);
2074 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2075 /* Need a 10 byte CDB */
2076 struct scsi_rw_10 *scsi_cmd;
2077
2078 scsi_cmd = (struct scsi_rw_10 *)cdb;
2079 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2080 scsi_cmd->byte2 = byte2;
2081 scsi_ulto4b(lba, scsi_cmd->addr);
2082 scsi_cmd->reserved = 0;
2083 scsi_ulto2b(block_count, scsi_cmd->length);
2084 scsi_cmd->control = 0;
2085 cdb_len = sizeof(*scsi_cmd);
2086 } else if (((block_count & 0xffffffff) == block_count) &&
2087 ((lba & 0xffffffff) == lba)) {
2088 /* Block count is too big for 10 byte CDB use a 12 byte CDB */
2089 struct scsi_rw_12 *scsi_cmd;
2090
2091 scsi_cmd = (struct scsi_rw_12 *)cdb;
2092 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2093 scsi_cmd->byte2 = byte2;
2094 scsi_ulto4b(lba, scsi_cmd->addr);
2095 scsi_cmd->reserved = 0;
2096 scsi_ulto4b(block_count, scsi_cmd->length);
2097 scsi_cmd->control = 0;
2098 cdb_len = sizeof(*scsi_cmd);
2099 } else {
2100 /*
2101 * 16 byte CDB. We'll only get here if the LBA is larger
2102 * than 2^32
2103 */
2104 struct scsi_rw_16 *scsi_cmd;
2105
2106 scsi_cmd = (struct scsi_rw_16 *)cdb;
2107 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2108 scsi_cmd->byte2 = byte2;
2109 scsi_u64to8b(lba, scsi_cmd->addr);
2110 scsi_cmd->reserved = 0;
2111 scsi_ulto4b(block_count, scsi_cmd->length);
2112 scsi_cmd->control = 0;
2113 cdb_len = sizeof(*scsi_cmd);
2114 }
2115
2116 return cdb_len;
2117 }
2118
2119 extern char *unmapped_buf;
2120
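/*
 * Translate a bio aimed at a system PD into an MFI SCSI pass-through
 * frame.  The data itself is mapped later from the bio (MFI_CMD_BIO),
 * so cm_data is only a placeholder.
 */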
2121 static struct mfi_command *
2122 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2123 {
2124 struct mfi_command *cm;
2125 struct mfi_pass_frame *pass;
2126 uint32_t context = 0;
2127 int flags = 0, blkcount = 0, readop;
2128 uint8_t cdb_len;
2129
2130 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2131
2132 if ((cm = mfi_dequeue_free(sc)) == NULL)
2133 return (NULL);
2134
2135 /* Zero out the MFI frame */
2136 context = cm->cm_frame->header.context;
2137 bzero(cm->cm_frame, sizeof(union mfi_frame));
2138 cm->cm_frame->header.context = context;
2139 pass = &cm->cm_frame->pass;
2140 bzero(pass->cdb, 16);
2141 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2142 switch (bio->bio_cmd) {
2143 case BIO_READ:
2144 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2145 readop = 1;
2146 break;
2147 case BIO_WRITE:
2148 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2149 readop = 0;
2150 break;
2151 default:
2152 /* TODO: what about BIO_DELETE??? */
2153 biofinish(bio, NULL, EOPNOTSUPP);
2154 mfi_enqueue_free(cm);
2155 return (NULL);
2156 }
2157
2158 /* Cheat with the sector length to avoid a non-constant division */
2159 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2160 /* Fill the LBA and Transfer length in CDB */
2161 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2162 pass->cdb);
2163 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2164 pass->header.lun_id = 0;
2165 pass->header.timeout = 0;
2166 pass->header.flags = 0;
2167 pass->header.scsi_status = 0;
2168 pass->header.sense_len = MFI_SENSE_LEN;
2169 pass->header.data_len = bio->bio_bcount;
2170 pass->header.cdb_len = cdb_len;
2171 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2172 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2173 cm->cm_complete = mfi_bio_complete;
2174 cm->cm_private = bio;
2175 cm->cm_data = unmapped_buf;
2176 cm->cm_len = bio->bio_bcount;
2177 cm->cm_sg = &pass->sgl;
2178 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2179 cm->cm_flags = flags;
2180
2181 return (cm);
2182 }
2183
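/*
 * Translate a bio for a logical disk into an LD_READ/LD_WRITE frame;
 * the LBA is split across lba_hi/lba_lo and data_len is in blocks.
 */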
2184 static struct mfi_command *
2185 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2186 {
2187 struct mfi_io_frame *io;
2188 struct mfi_command *cm;
2189 int flags;
2190 uint32_t blkcount;
2191 uint32_t context = 0;
2192
2193 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2194
2195 if ((cm = mfi_dequeue_free(sc)) == NULL)
2196 return (NULL);
2197
2198 /* Zero out the MFI frame */
2199 context = cm->cm_frame->header.context;
2200 bzero(cm->cm_frame, sizeof(union mfi_frame));
2201 cm->cm_frame->header.context = context;
2202 io = &cm->cm_frame->io;
2203 switch (bio->bio_cmd) {
2204 case BIO_READ:
2205 io->header.cmd = MFI_CMD_LD_READ;
2206 flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2207 break;
2208 case BIO_WRITE:
2209 io->header.cmd = MFI_CMD_LD_WRITE;
2210 flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2211 break;
2212 default:
2213 /* TODO: what about BIO_DELETE??? */
2214 biofinish(bio, NULL, EOPNOTSUPP);
2215 mfi_enqueue_free(cm);
2216 return (NULL);
2217 }
2218
2219 /* Cheat with the sector length to avoid a non-constant division */
2220 blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2221 io->header.target_id = (uintptr_t)bio->bio_driver1;
2222 io->header.timeout = 0;
2223 io->header.flags = 0;
2224 io->header.scsi_status = 0;
2225 io->header.sense_len = MFI_SENSE_LEN;
2226 io->header.data_len = blkcount;
2227 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2228 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2229 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2230 io->lba_lo = bio->bio_pblkno & 0xffffffff;
2231 cm->cm_complete = mfi_bio_complete;
2232 cm->cm_private = bio;
2233 cm->cm_data = unmapped_buf;
2234 cm->cm_len = bio->bio_bcount;
2235 cm->cm_sg = &io->sgl;
2236 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2237 cm->cm_flags = flags;
2238
2239 return (cm);
2240 }
2241
2242 static void
2243 mfi_bio_complete(struct mfi_command *cm)
2244 {
2245 struct bio *bio;
2246 struct mfi_frame_header *hdr;
2247 struct mfi_softc *sc;
2248
2249 bio = cm->cm_private;
2250 hdr = &cm->cm_frame->header;
2251 sc = cm->cm_sc;
2252
2253 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2254 bio->bio_flags |= BIO_ERROR;
2255 bio->bio_error = EIO;
2256 device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2257 "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2258 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2259 } else if (cm->cm_error != 0) {
2260 bio->bio_flags |= BIO_ERROR;
2261 bio->bio_error = cm->cm_error;
2262 device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2263 cm, cm->cm_error);
2264 }
2265
2266 mfi_release_command(cm);
2267 mfi_disk_complete(bio);
2268 }
2269
2270 void
2271 mfi_startio(struct mfi_softc *sc)
2272 {
2273 struct mfi_command *cm;
2274 struct ccb_hdr *ccbh;
2275
2276 for (;;) {
2277 /* Don't bother if we're short on resources */
2278 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2279 break;
2280
2281 /* Try a command that has already been prepared */
2282 cm = mfi_dequeue_ready(sc);
2283
2284 if (cm == NULL) {
2285 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2286 cm = sc->mfi_cam_start(ccbh);
2287 }
2288
2289 /* Nope, so look for work on the bioq */
2290 if (cm == NULL)
2291 cm = mfi_bio_command(sc);
2292
2293 /* No work available, so exit */
2294 if (cm == NULL)
2295 break;
2296
2297 /* Send the command to the controller */
2298 if (mfi_mapcmd(sc, cm) != 0) {
2299 device_printf(sc->mfi_dev, "Failed to startio\n");
2300 mfi_requeue_ready(cm);
2301 break;
2302 }
2303 }
2304 }
2305
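/*
 * Map a command's data for DMA (from a CCB, a bio, or a plain buffer)
 * and let mfi_data_cb() finish the SGL and send the frame.  Commands
 * with no data, or STP commands whose SGLs were prebuilt, are sent
 * directly.
 */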
2306 int
2307 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2308 {
2309 int error, polled;
2310
2311 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2312
2313 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2314 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2315 if (cm->cm_flags & MFI_CMD_CCB)
2316 error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2317 cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2318 polled);
2319 else if (cm->cm_flags & MFI_CMD_BIO)
2320 error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2321 cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2322 polled);
2323 else
2324 error = bus_dmamap_load(sc->mfi_buffer_dmat,
2325 cm->cm_dmamap, cm->cm_data, cm->cm_len,
2326 mfi_data_cb, cm, polled);
2327 if (error == EINPROGRESS) {
2328 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2329 return (0);
2330 }
2331 } else {
2332 error = mfi_send_frame(sc, cm);
2333 }
2334
2335 return (error);
2336 }
2337
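/*
 * Busdma callback: fill in the frame's scatter/gather list (skinny
 * IEEE, 32-bit, or 64-bit format), set the DMA direction flags, account
 * for the extra frames the SGL occupies, and send the command.
 */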
2338 static void
2339 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2340 {
2341 struct mfi_frame_header *hdr;
2342 struct mfi_command *cm;
2343 union mfi_sgl *sgl;
2344 struct mfi_softc *sc;
2345 int i, j, first, dir;
2346 int sge_size, locked;
2347
2348 cm = (struct mfi_command *)arg;
2349 sc = cm->cm_sc;
2350 hdr = &cm->cm_frame->header;
2351 sgl = cm->cm_sg;
2352
2353 /*
2354 * We need to check if we have the lock as this is async
2355 * callback so even though our caller mfi_mapcmd asserts
2356 * it has the lock, there is no guarantee that hasn't been
2357 * dropped if bus_dmamap_load returned prior to our
2358 * completion.
2359 */
2360 if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2361 mtx_lock(&sc->mfi_io_lock);
2362
2363 if (error) {
2364 printf("error %d in callback\n", error);
2365 cm->cm_error = error;
2366 mfi_complete(sc, cm);
2367 goto out;
2368 }
2369 /* Use IEEE sgl only for IO's on a SKINNY controller
2370 * For other commands on a SKINNY controller use either
2371 * sg32 or sg64 based on the sizeof(bus_addr_t).
2372 * Also calculate the total frame size based on the type
2373 * of SGL used.
2374 */
2375 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2376 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2377 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2378 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2379 for (i = 0; i < nsegs; i++) {
2380 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2381 sgl->sg_skinny[i].len = segs[i].ds_len;
2382 sgl->sg_skinny[i].flag = 0;
2383 }
2384 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2385 sge_size = sizeof(struct mfi_sg_skinny);
2386 hdr->sg_count = nsegs;
2387 } else {
2388 j = 0;
2389 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2390 first = cm->cm_stp_len;
2391 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2392 sgl->sg32[j].addr = segs[0].ds_addr;
2393 sgl->sg32[j++].len = first;
2394 } else {
2395 sgl->sg64[j].addr = segs[0].ds_addr;
2396 sgl->sg64[j++].len = first;
2397 }
2398 } else
2399 first = 0;
2400 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2401 for (i = 0; i < nsegs; i++) {
2402 sgl->sg32[j].addr = segs[i].ds_addr + first;
2403 sgl->sg32[j++].len = segs[i].ds_len - first;
2404 first = 0;
2405 }
2406 } else {
2407 for (i = 0; i < nsegs; i++) {
2408 sgl->sg64[j].addr = segs[i].ds_addr + first;
2409 sgl->sg64[j++].len = segs[i].ds_len - first;
2410 first = 0;
2411 }
2412 hdr->flags |= MFI_FRAME_SGL64;
2413 }
2414 hdr->sg_count = j;
2415 sge_size = sc->mfi_sge_size;
2416 }
2417
2418 dir = 0;
2419 if (cm->cm_flags & MFI_CMD_DATAIN) {
2420 dir |= BUS_DMASYNC_PREREAD;
2421 hdr->flags |= MFI_FRAME_DIR_READ;
2422 }
2423 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2424 dir |= BUS_DMASYNC_PREWRITE;
2425 hdr->flags |= MFI_FRAME_DIR_WRITE;
2426 }
2427 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2428 cm->cm_flags |= MFI_CMD_MAPPED;
2429
2430 /*
2431 * Instead of calculating the total number of frames in the
2432 * compound frame, it's already assumed that there will be at
2433 * least 1 frame, so don't compensate for the modulo of the
2434 * following division.
2435 */
2436 cm->cm_total_frame_size += (sge_size * nsegs);
2437 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2438
2439 if ((error = mfi_send_frame(sc, cm)) != 0) {
2440 printf("error %d in callback from mfi_send_frame\n", error);
2441 cm->cm_error = error;
2442 mfi_complete(sc, cm);
2443 goto out;
2444 }
2445
2446 out:
2447 /* leave the lock in the state we found it */
2448 if (locked == 0)
2449 mtx_unlock(&sc->mfi_io_lock);
2450
2451 return;
2452 }
2453
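/*
 * Hand a frame to the controller via the Thunderbolt or standard path;
 * on failure the command is removed from the busy queue if it was added.
 */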
2454 static int
2455 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2456 {
2457 int error;
2458
2459 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2460
2461 if (sc->MFA_enabled)
2462 error = mfi_tbolt_send_frame(sc, cm);
2463 else
2464 error = mfi_std_send_frame(sc, cm);
2465
2466 if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2467 mfi_remove_busy(cm);
2468
2469 return (error);
2470 }
2471
2472 static int
2473 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2474 {
2475 struct mfi_frame_header *hdr;
2476 int tm = mfi_polled_cmd_timeout * 1000;
2477
2478 hdr = &cm->cm_frame->header;
2479
2480 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2481 cm->cm_timestamp = time_uptime;
2482 mfi_enqueue_busy(cm);
2483 } else {
2484 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2485 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2486 }
2487
2488 /*
2489 * The bus address of the command is aligned on a 64 byte boundary,
2490 * leaving the least 6 bits as zero. For whatever reason, the
2491 * hardware wants the address shifted right by three, leaving just
2492 * 3 zero bits. These three bits are then used as a prefetching
2493 * hint for the hardware to predict how many frames need to be
2494 * fetched across the bus. If a command has more than 8 frames
2495 * then the 3 bits are set to 0x7 and the firmware uses other
2496 * information in the command to determine the total amount to fetch.
2497 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2498 * is enough for both 32bit and 64bit systems.
2499 */
2500 if (cm->cm_extra_frames > 7)
2501 cm->cm_extra_frames = 7;
2502
2503 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2504
2505 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2506 return (0);
2507
2508 /* This is a polled command, so busy-wait for it to complete. */
2509 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2510 DELAY(1000);
2511 tm -= 1;
2512 if (tm <= 0)
2513 break;
2514 }
2515
2516 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2517 device_printf(sc->mfi_dev, "Frame %p timed out "
2518 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2519 return (ETIMEDOUT);
2520 }
2521
2522 return (0);
2523 }
2524
2525 void
2526 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2527 {
2528 int dir;
2529 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2530
2531 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2532 dir = 0;
2533 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2534 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2535 dir |= BUS_DMASYNC_POSTREAD;
2536 if (cm->cm_flags & MFI_CMD_DATAOUT)
2537 dir |= BUS_DMASYNC_POSTWRITE;
2538
2539 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2540 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2541 cm->cm_flags &= ~MFI_CMD_MAPPED;
2542 }
2543
2544 cm->cm_flags |= MFI_CMD_COMPLETED;
2545
2546 if (cm->cm_complete != NULL)
2547 cm->cm_complete(cm);
2548 else
2549 wakeup(cm);
2550 }
2551
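/*
 * Issue a polled MFI_CMD_ABORT for *cm_abort, then wait up to five
 * 5-second intervals for its completion handler to clear the pointer;
 * if it never does, force the completion by hand.
 */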
2552 static int
2553 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2554 {
2555 struct mfi_command *cm;
2556 struct mfi_abort_frame *abort;
2557 int i = 0, error;
2558 uint32_t context = 0;
2559
2560 mtx_lock(&sc->mfi_io_lock);
2561 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2562 mtx_unlock(&sc->mfi_io_lock);
2563 return (EBUSY);
2564 }
2565
2566 /* Zero out the MFI frame */
2567 context = cm->cm_frame->header.context;
2568 bzero(cm->cm_frame, sizeof(union mfi_frame));
2569 cm->cm_frame->header.context = context;
2570
2571 abort = &cm->cm_frame->abort;
2572 abort->header.cmd = MFI_CMD_ABORT;
2573 abort->header.flags = 0;
2574 abort->header.scsi_status = 0;
2575 abort->abort_context = (*cm_abort)->cm_frame->header.context;
2576 abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2577 abort->abort_mfi_addr_hi =
2578 (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2579 cm->cm_data = NULL;
2580 cm->cm_flags = MFI_CMD_POLLED;
2581
2582 if ((error = mfi_mapcmd(sc, cm)) != 0)
2583 device_printf(sc->mfi_dev, "failed to abort command\n");
2584 mfi_release_command(cm);
2585
2586 mtx_unlock(&sc->mfi_io_lock);
2587 while (i < 5 && *cm_abort != NULL) {
2588 tsleep(cm_abort, 0, "mfiabort",
2589 5 * hz);
2590 i++;
2591 }
2592 if (*cm_abort != NULL) {
2593 /* Force a complete if command didn't abort */
2594 mtx_lock(&sc->mfi_io_lock);
2595 (*cm_abort)->cm_complete(*cm_abort);
2596 mtx_unlock(&sc->mfi_io_lock);
2597 }
2598
2599 return (error);
2600 }
2601
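/*
 * Write a buffer to logical disk 'id' with a polled LD_WRITE frame.
 * Polling makes this usable from the kernel crash dump path, where
 * sleeping is not an option.
 */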
2602 int
2603 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2604 int len)
2605 {
2606 struct mfi_command *cm;
2607 struct mfi_io_frame *io;
2608 int error;
2609 uint32_t context = 0;
2610
2611 if ((cm = mfi_dequeue_free(sc)) == NULL)
2612 return (EBUSY);
2613
2614 /* Zero out the MFI frame */
2615 context = cm->cm_frame->header.context;
2616 bzero(cm->cm_frame, sizeof(union mfi_frame));
2617 cm->cm_frame->header.context = context;
2618
2619 io = &cm->cm_frame->io;
2620 io->header.cmd = MFI_CMD_LD_WRITE;
2621 io->header.target_id = id;
2622 io->header.timeout = 0;
2623 io->header.flags = 0;
2624 io->header.scsi_status = 0;
2625 io->header.sense_len = MFI_SENSE_LEN;
2626 io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2627 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2628 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2629 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2630 io->lba_lo = lba & 0xffffffff;
2631 cm->cm_data = virt;
2632 cm->cm_len = len;
2633 cm->cm_sg = &io->sgl;
2634 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2635 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2636
2637 if ((error = mfi_mapcmd(sc, cm)) != 0)
2638 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2639 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2640 BUS_DMASYNC_POSTWRITE);
2641 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2642 mfi_release_command(cm);
2643
2644 return (error);
2645 }
2646
2647 int
2648 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2649 int len)
2650 {
2651 struct mfi_command *cm;
2652 struct mfi_pass_frame *pass;
2653 int error, readop, cdb_len;
2654 uint32_t blkcount;
2655
2656 if ((cm = mfi_dequeue_free(sc)) == NULL)
2657 return (EBUSY);
2658
2659 pass = &cm->cm_frame->pass;
2660 bzero(pass->cdb, 16);
2661 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2662
2663 readop = 0;
2664 blkcount = howmany(len, MFI_SECTOR_LEN);
2665 cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2666 pass->header.target_id = id;
2667 pass->header.timeout = 0;
2668 pass->header.flags = 0;
2669 pass->header.scsi_status = 0;
2670 pass->header.sense_len = MFI_SENSE_LEN;
2671 pass->header.data_len = len;
2672 pass->header.cdb_len = cdb_len;
2673 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2674 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2675 cm->cm_data = virt;
2676 cm->cm_len = len;
2677 cm->cm_sg = &pass->sgl;
2678 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2679 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2680
2681 if ((error = mfi_mapcmd(sc, cm)) != 0)
2682 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2683 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2684 BUS_DMASYNC_POSTWRITE);
2685 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2686 mfi_release_command(cm);
2687
2688 return (error);
2689 }
2690
2691 static int
2692 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2693 {
2694 struct mfi_softc *sc;
2695 int error;
2696
2697 sc = dev->si_drv1;
2698
2699 mtx_lock(&sc->mfi_io_lock);
2700 if (sc->mfi_detaching)
2701 error = ENXIO;
2702 else {
2703 sc->mfi_flags |= MFI_FLAGS_OPEN;
2704 error = 0;
2705 }
2706 mtx_unlock(&sc->mfi_io_lock);
2707
2708 return (error);
2709 }
2710
2711 static int
2712 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2713 {
2714 struct mfi_softc *sc;
2715 struct mfi_aen *mfi_aen_entry, *tmp;
2716
2717 sc = dev->si_drv1;
2718
2719 mtx_lock(&sc->mfi_io_lock);
2720 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2721
2722 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2723 if (mfi_aen_entry->p == curproc) {
2724 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2725 aen_link);
2726 free(mfi_aen_entry, M_MFIBUF);
2727 }
2728 }
2729 mtx_unlock(&sc->mfi_io_lock);
2730 return (0);
2731 }
2732
2733 static int
2734 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2735 {
2736
2737 switch (opcode) {
2738 case MFI_DCMD_LD_DELETE:
2739 case MFI_DCMD_CFG_ADD:
2740 case MFI_DCMD_CFG_CLEAR:
2741 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2742 sx_xlock(&sc->mfi_config_lock);
2743 return (1);
2744 default:
2745 return (0);
2746 }
2747 }
2748
2749 static void
2750 mfi_config_unlock(struct mfi_softc *sc, int locked)
2751 {
2752
2753 if (locked)
2754 sx_xunlock(&sc->mfi_config_lock);
2755 }
2756
2757 /*
2758 * Perform pre-issue checks on commands from userland and possibly veto
2759 * them.
2760 */
2761 static int
2762 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2763 {
2764 struct mfi_disk *ld, *ld2;
2765 int error;
2766 struct mfi_system_pd *syspd = NULL;
2767 uint16_t syspd_id;
2768 uint16_t *mbox;
2769
2770 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2771 error = 0;
2772 switch (cm->cm_frame->dcmd.opcode) {
2773 case MFI_DCMD_LD_DELETE:
2774 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2775 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2776 break;
2777 }
2778 if (ld == NULL)
2779 error = ENOENT;
2780 else
2781 error = mfi_disk_disable(ld);
2782 break;
2783 case MFI_DCMD_CFG_CLEAR:
2784 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2785 error = mfi_disk_disable(ld);
2786 if (error)
2787 break;
2788 }
2789 if (error) {
2790 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2791 if (ld2 == ld)
2792 break;
2793 mfi_disk_enable(ld2);
2794 }
2795 }
2796 break;
2797 case MFI_DCMD_PD_STATE_SET:
2798 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2799 syspd_id = mbox[0];
2800 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2801 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2802 if (syspd->pd_id == syspd_id)
2803 break;
2804 }
2805 		} else
2807 break;
2808 if (syspd)
2809 error = mfi_syspd_disable(syspd);
2810 break;
2811 default:
2812 break;
2813 }
2814 return (error);
2815 }
2816
2817 /* Perform post-issue checks on commands from userland. */
2818 static void
2819 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2820 {
2821 struct mfi_disk *ld, *ldn;
2822 struct mfi_system_pd *syspd = NULL;
2823 uint16_t syspd_id;
2824 uint16_t *mbox;
2825
2826 switch (cm->cm_frame->dcmd.opcode) {
2827 case MFI_DCMD_LD_DELETE:
2828 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2829 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2830 break;
2831 }
2832 		KASSERT(ld != NULL, ("volume disappeared"));
2833 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2834 mtx_unlock(&sc->mfi_io_lock);
2835 bus_topo_lock();
2836 device_delete_child(sc->mfi_dev, ld->ld_dev);
2837 bus_topo_unlock();
2838 mtx_lock(&sc->mfi_io_lock);
2839 } else
2840 mfi_disk_enable(ld);
2841 break;
2842 case MFI_DCMD_CFG_CLEAR:
2843 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2844 mtx_unlock(&sc->mfi_io_lock);
2845 bus_topo_lock();
2846 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2847 device_delete_child(sc->mfi_dev, ld->ld_dev);
2848 }
2849 bus_topo_unlock();
2850 mtx_lock(&sc->mfi_io_lock);
2851 } else {
2852 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2853 mfi_disk_enable(ld);
2854 }
2855 break;
2856 case MFI_DCMD_CFG_ADD:
2857 mfi_ldprobe(sc);
2858 break;
2859 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2860 mfi_ldprobe(sc);
2861 break;
2862 case MFI_DCMD_PD_STATE_SET:
2863 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2864 syspd_id = mbox[0];
2865 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2866 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2867 if (syspd->pd_id == syspd_id)
2868 break;
2869 }
2870 }
2871 		} else
2873 /* If the transition fails then enable the syspd again */
2874 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2875 mfi_syspd_enable(syspd);
2876 break;
2877 }
2878 }
2879
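/*
 * Return nonzero if a userland command creates or deletes an SSCD (SSD
 * cache) volume; those commands must skip the usual pre/post checks
 * that attach and detach mfid children.
 */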
2880 static int
2881 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2882 {
2883 struct mfi_config_data *conf_data;
2884 struct mfi_command *ld_cm = NULL;
2885 struct mfi_ld_info *ld_info = NULL;
2886 struct mfi_ld_config *ld;
2887 char *p;
2888 int error = 0;
2889
2890 conf_data = (struct mfi_config_data *)cm->cm_data;
2891
2892 if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2893 p = (char *)conf_data->array;
2894 p += conf_data->array_size * conf_data->array_count;
2895 ld = (struct mfi_ld_config *)p;
2896 if (ld->params.isSSCD == 1)
2897 error = 1;
2898 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2899 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2900 (void **)&ld_info, sizeof(*ld_info));
2901 if (error) {
2902 			device_printf(sc->mfi_dev, "Failed to allocate "
2903 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2904 if (ld_info)
2905 free(ld_info, M_MFIBUF);
2906 return 0;
2907 }
2908 ld_cm->cm_flags = MFI_CMD_DATAIN;
2909 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2910 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2911 if (mfi_wait_command(sc, ld_cm) != 0) {
2912 device_printf(sc->mfi_dev, "failed to get log drv\n");
2913 mfi_release_command(ld_cm);
2914 free(ld_info, M_MFIBUF);
2915 return 0;
2916 }
2917
2918 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2919 free(ld_info, M_MFIBUF);
2920 mfi_release_command(ld_cm);
2921 return 0;
2922 		} else
2924 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2925
2926 if (ld_info->ld_config.params.isSSCD == 1)
2927 error = 1;
2928
2929 mfi_release_command(ld_cm);
2930 free(ld_info, M_MFIBUF);
2931 }
2932 return error;
2933 }
2934
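/*
 * Prepare an MFI_CMD_STP pass-through: allocate a DMA-able kernel
 * bounce buffer for each user iovec, copy the user data in, and point
 * both the kernel SGE array and the frame's SGL at the bounce buffers.
 */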
2935 static int
2936 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2937 {
2938 	uint8_t i;
2939 	struct mfi_ioc_packet *ioc;
2940 	int sge_size, error;
2941 	struct megasas_sge *kern_sge;
2942 
2943 	ioc = (struct mfi_ioc_packet *)arg;
2944 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2945 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2946 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2947
2948 if (sizeof(bus_addr_t) == 8) {
2949 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2950 cm->cm_extra_frames = 2;
2951 sge_size = sizeof(struct mfi_sg64);
2952 } else {
2953 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2954 sge_size = sizeof(struct mfi_sg32);
2955 }
2956
2957 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2958 for (i = 0; i < ioc->mfi_sge_count; i++) {
2959 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2960 1, 0, /* algnmnt, boundary */
2961 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2962 BUS_SPACE_MAXADDR, /* highaddr */
2963 NULL, NULL, /* filter, filterarg */
2964 ioc->mfi_sgl[i].iov_len,/* maxsize */
2965 2, /* nsegments */
2966 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2967 BUS_DMA_ALLOCNOW, /* flags */
2968 NULL, NULL, /* lockfunc, lockarg */
2969 &sc->mfi_kbuff_arr_dmat[i])) {
2970 device_printf(sc->mfi_dev,
2971 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2972 return (ENOMEM);
2973 }
2974
2975 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2976 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2977 &sc->mfi_kbuff_arr_dmamap[i])) {
2978 device_printf(sc->mfi_dev,
2979 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2980 return (ENOMEM);
2981 }
2982
2983 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2984 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2985 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2986 &sc->mfi_kbuff_arr_busaddr[i], 0);
2987
2988 if (!sc->kbuff_arr[i]) {
2989 device_printf(sc->mfi_dev,
2990 "Could not allocate memory for kbuff_arr info\n");
2991 return -1;
2992 }
2993 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2994 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2995
2996 if (sizeof(bus_addr_t) == 8) {
2997 cm->cm_frame->stp.sgl.sg64[i].addr =
2998 kern_sge[i].phys_addr;
2999 cm->cm_frame->stp.sgl.sg64[i].len =
3000 ioc->mfi_sgl[i].iov_len;
3001 } else {
3002 cm->cm_frame->stp.sgl.sg32[i].addr =
3003 kern_sge[i].phys_addr;
3004 cm->cm_frame->stp.sgl.sg32[i].len =
3005 ioc->mfi_sgl[i].iov_len;
3006 }
3007
3008 error = copyin(ioc->mfi_sgl[i].iov_base,
3009 sc->kbuff_arr[i],
3010 ioc->mfi_sgl[i].iov_len);
3011 if (error != 0) {
3012 device_printf(sc->mfi_dev, "Copy in failed\n");
3013 return error;
3014 }
3015 }
3016
3017 	cm->cm_flags |= MFI_CMD_MAPPED;
3018 return 0;
3019 }
3020
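/*
 * Service the MFIIO_PASSTHRU ioctl: copy in the user buffer (capped at
 * 1MB), run the DCMD through the normal pre/post checks, and copy the
 * frame and data back out.
 */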
3021 static int
3022 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3023 {
3024 struct mfi_command *cm;
3025 struct mfi_dcmd_frame *dcmd;
3026 void *ioc_buf = NULL;
3027 uint32_t context;
3028 int error = 0, locked;
3029
3030 if (ioc->buf_size > 0) {
3031 if (ioc->buf_size > 1024 * 1024)
3032 return (ENOMEM);
3033 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3034 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3035 if (error) {
3036 device_printf(sc->mfi_dev, "failed to copyin\n");
3037 free(ioc_buf, M_MFIBUF);
3038 return (error);
3039 }
3040 }
3041
3042 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3043
3044 mtx_lock(&sc->mfi_io_lock);
3045 while ((cm = mfi_dequeue_free(sc)) == NULL)
3046 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3047
3048 /* Save context for later */
3049 context = cm->cm_frame->header.context;
3050
3051 dcmd = &cm->cm_frame->dcmd;
3052 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3053
3054 cm->cm_sg = &dcmd->sgl;
3055 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3056 cm->cm_data = ioc_buf;
3057 cm->cm_len = ioc->buf_size;
3058
3059 /* restore context */
3060 cm->cm_frame->header.context = context;
3061
3062 /* Cheat since we don't know if we're writing or reading */
3063 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3064
3065 error = mfi_check_command_pre(sc, cm);
3066 if (error)
3067 goto out;
3068
3069 error = mfi_wait_command(sc, cm);
3070 if (error) {
3071 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3072 goto out;
3073 }
3074 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3075 mfi_check_command_post(sc, cm);
3076 out:
3077 mfi_release_command(cm);
3078 mtx_unlock(&sc->mfi_io_lock);
3079 mfi_config_unlock(sc, locked);
3080 if (ioc->buf_size > 0)
3081 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3082 if (ioc_buf)
3083 free(ioc_buf, M_MFIBUF);
3084 return (error);
3085 }
3086
3087 #define PTRIN(p) ((void *)(uintptr_t)(p))
3088
3089 static int
3090 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3091 {
3092 struct mfi_softc *sc;
3093 union mfi_statrequest *ms;
3094 struct mfi_ioc_packet *ioc;
3095 #ifdef COMPAT_FREEBSD32
3096 struct mfi_ioc_packet32 *ioc32;
3097 #endif
3098 struct mfi_ioc_aen *aen;
3099 struct mfi_command *cm = NULL;
3100 uint32_t context = 0;
3101 union mfi_sense_ptr sense_ptr;
3102 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3103 size_t len;
3104 int i, res;
3105 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3106 #ifdef COMPAT_FREEBSD32
3107 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3108 struct mfi_ioc_passthru iop_swab;
3109 #endif
3110 int error, locked;
3111 sc = dev->si_drv1;
3112 error = 0;
3113
3114 if (sc->adpreset)
3115 return EBUSY;
3116
3117 if (sc->hw_crit_error)
3118 return EBUSY;
3119
3120 if (sc->issuepend_done == 0)
3121 return EBUSY;
3122
3123 switch (cmd) {
3124 case MFIIO_STATS:
3125 ms = (union mfi_statrequest *)arg;
3126 switch (ms->ms_item) {
3127 case MFIQ_FREE:
3128 case MFIQ_BIO:
3129 case MFIQ_READY:
3130 case MFIQ_BUSY:
3131 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3132 sizeof(struct mfi_qstat));
3133 break;
3134 default:
3135 error = ENOIOCTL;
3136 break;
3137 }
3138 break;
3139 case MFIIO_QUERY_DISK:
3140 {
3141 struct mfi_query_disk *qd;
3142 struct mfi_disk *ld;
3143
3144 qd = (struct mfi_query_disk *)arg;
3145 mtx_lock(&sc->mfi_io_lock);
3146 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3147 if (ld->ld_id == qd->array_id)
3148 break;
3149 }
3150 if (ld == NULL) {
3151 qd->present = 0;
3152 mtx_unlock(&sc->mfi_io_lock);
3153 return (0);
3154 }
3155 qd->present = 1;
3156 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3157 qd->open = 1;
3158 bzero(qd->devname, SPECNAMELEN + 1);
3159 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3160 mtx_unlock(&sc->mfi_io_lock);
3161 break;
3162 }
3163 case MFI_CMD:
3164 #ifdef COMPAT_FREEBSD32
3165 case MFI_CMD32:
3166 #endif
3167 {
3168 devclass_t devclass;
3169 ioc = (struct mfi_ioc_packet *)arg;
3170 int adapter;
3171
3172 adapter = ioc->mfi_adapter_no;
3173 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3174 devclass = devclass_find("mfi");
3175 sc = devclass_get_softc(devclass, adapter);
3176 }
3177 mtx_lock(&sc->mfi_io_lock);
3178 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3179 mtx_unlock(&sc->mfi_io_lock);
3180 return (EBUSY);
3181 }
3182 mtx_unlock(&sc->mfi_io_lock);
3183 locked = 0;
3184
3185 /*
3186 * save off original context since copying from user
3187 * will clobber some data
3188 */
3189 context = cm->cm_frame->header.context;
3190 cm->cm_frame->header.context = cm->cm_index;
3191
3192 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3193 2 * MEGAMFI_FRAME_SIZE);
3194 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3195 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3196 cm->cm_frame->header.scsi_status = 0;
3197 cm->cm_frame->header.pad0 = 0;
3198 if (ioc->mfi_sge_count) {
3199 cm->cm_sg =
3200 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3201 }
3202 cm->cm_flags = 0;
3203 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3204 cm->cm_flags |= MFI_CMD_DATAIN;
3205 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3206 cm->cm_flags |= MFI_CMD_DATAOUT;
3207 /* Legacy app shim */
3208 if (cm->cm_flags == 0)
3209 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3210 cm->cm_len = cm->cm_frame->header.data_len;
3211 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3212 #ifdef COMPAT_FREEBSD32
3213 if (cmd == MFI_CMD) {
3214 #endif
3215 /* Native */
3216 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3217 #ifdef COMPAT_FREEBSD32
3218 } else {
3219 /* 32bit on 64bit */
3220 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3221 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3222 }
3223 #endif
3224 cm->cm_len += cm->cm_stp_len;
3225 }
3226 if (cm->cm_len &&
3227 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3228 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3229 M_WAITOK | M_ZERO);
3230 } else {
3231 			cm->cm_data = NULL;
3232 }
3233
3234 /* restore header context */
3235 cm->cm_frame->header.context = context;
3236
3237 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3238 res = mfi_stp_cmd(sc, cm, arg);
3239 if (res != 0)
3240 goto out;
3241 } else {
3242 temp = data;
3243 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3244 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3245 for (i = 0; i < ioc->mfi_sge_count; i++) {
3246 #ifdef COMPAT_FREEBSD32
3247 if (cmd == MFI_CMD) {
3248 #endif
3249 /* Native */
3250 addr = ioc->mfi_sgl[i].iov_base;
3251 len = ioc->mfi_sgl[i].iov_len;
3252 #ifdef COMPAT_FREEBSD32
3253 } else {
3254 /* 32bit on 64bit */
3255 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3256 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3257 len = ioc32->mfi_sgl[i].iov_len;
3258 }
3259 #endif
3260 error = copyin(addr, temp, len);
3261 if (error != 0) {
3262 device_printf(sc->mfi_dev,
3263 "Copy in failed\n");
3264 goto out;
3265 }
3266 temp = &temp[len];
3267 }
3268 }
3269 }
3270
3271 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3272 locked = mfi_config_lock(sc,
3273 cm->cm_frame->dcmd.opcode);
3274
3275 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3276 cm->cm_frame->pass.sense_addr_lo =
3277 (uint32_t)cm->cm_sense_busaddr;
3278 cm->cm_frame->pass.sense_addr_hi =
3279 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3280 }
3281 mtx_lock(&sc->mfi_io_lock);
3282 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3283 if (!skip_pre_post) {
3284 error = mfi_check_command_pre(sc, cm);
3285 if (error) {
3286 mtx_unlock(&sc->mfi_io_lock);
3287 goto out;
3288 }
3289 }
3290 if ((error = mfi_wait_command(sc, cm)) != 0) {
3291 device_printf(sc->mfi_dev,
3292 			    "Controller poll failed\n");
3293 mtx_unlock(&sc->mfi_io_lock);
3294 goto out;
3295 }
3296 if (!skip_pre_post) {
3297 mfi_check_command_post(sc, cm);
3298 }
3299 mtx_unlock(&sc->mfi_io_lock);
3300
3301 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3302 temp = data;
3303 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3304 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3305 for (i = 0; i < ioc->mfi_sge_count; i++) {
3306 #ifdef COMPAT_FREEBSD32
3307 if (cmd == MFI_CMD) {
3308 #endif
3309 /* Native */
3310 addr = ioc->mfi_sgl[i].iov_base;
3311 len = ioc->mfi_sgl[i].iov_len;
3312 #ifdef COMPAT_FREEBSD32
3313 } else {
3314 /* 32bit on 64bit */
3315 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3316 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3317 len = ioc32->mfi_sgl[i].iov_len;
3318 }
3319 #endif
3320 error = copyout(temp, addr, len);
3321 if (error != 0) {
3322 device_printf(sc->mfi_dev,
3323 "Copy out failed\n");
3324 goto out;
3325 }
3326 temp = &temp[len];
3327 }
3328 }
3329 }
3330
3331 if (ioc->mfi_sense_len) {
3332 /* get user-space sense ptr then copy out sense */
3333 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3334 &sense_ptr.sense_ptr_data[0],
3335 sizeof(sense_ptr.sense_ptr_data));
3336 #ifdef COMPAT_FREEBSD32
3337 if (cmd != MFI_CMD) {
3338 /*
3339 * not 64bit native so zero out any address
3340 * over 32bit */
3341 sense_ptr.addr.high = 0;
3342 }
3343 #endif
3344 error = copyout(cm->cm_sense, sense_ptr.user_space,
3345 ioc->mfi_sense_len);
3346 if (error != 0) {
3347 device_printf(sc->mfi_dev,
3348 "Copy out failed\n");
3349 goto out;
3350 }
3351 }
3352
3353 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3354 out:
3355 mfi_config_unlock(sc, locked);
3356 if (data)
3357 free(data, M_MFIBUF);
3358 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3359 for (i = 0; i < 2; i++) {
3360 if (sc->kbuff_arr[i]) {
3361 if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3362 bus_dmamap_unload(
3363 sc->mfi_kbuff_arr_dmat[i],
3364 sc->mfi_kbuff_arr_dmamap[i]
3365 );
3366 if (sc->kbuff_arr[i] != NULL)
3367 bus_dmamem_free(
3368 sc->mfi_kbuff_arr_dmat[i],
3369 sc->kbuff_arr[i],
3370 sc->mfi_kbuff_arr_dmamap[i]
3371 );
3372 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3373 bus_dma_tag_destroy(
3374 sc->mfi_kbuff_arr_dmat[i]);
3375 }
3376 }
3377 }
3378 if (cm) {
3379 mtx_lock(&sc->mfi_io_lock);
3380 mfi_release_command(cm);
3381 mtx_unlock(&sc->mfi_io_lock);
3382 }
3383
3384 break;
3385 }
3386 case MFI_SET_AEN:
3387 aen = (struct mfi_ioc_aen *)arg;
3388 mtx_lock(&sc->mfi_io_lock);
3389 error = mfi_aen_register(sc, aen->aen_seq_num,
3390 aen->aen_class_locale);
3391 mtx_unlock(&sc->mfi_io_lock);
3392
3393 break;
3394 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3395 {
3396 devclass_t devclass;
3397 struct mfi_linux_ioc_packet l_ioc;
3398 int adapter;
3399
3400 devclass = devclass_find("mfi");
3401 if (devclass == NULL)
3402 return (ENOENT);
3403
3404 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3405 if (error)
3406 return (error);
3407 adapter = l_ioc.lioc_adapter_no;
3408 sc = devclass_get_softc(devclass, adapter);
3409 if (sc == NULL)
3410 return (ENOENT);
3411 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3412 cmd, arg, flag, td));
3413 break;
3414 }
3415 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3416 {
3417 devclass_t devclass;
3418 struct mfi_linux_ioc_aen l_aen;
3419 int adapter;
3420
3421 devclass = devclass_find("mfi");
3422 if (devclass == NULL)
3423 return (ENOENT);
3424
3425 error = copyin(arg, &l_aen, sizeof(l_aen));
3426 if (error)
3427 return (error);
3428 adapter = l_aen.laen_adapter_no;
3429 sc = devclass_get_softc(devclass, adapter);
3430 if (sc == NULL)
3431 return (ENOENT);
3432 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3433 cmd, arg, flag, td));
3434 break;
3435 }
3436 #ifdef COMPAT_FREEBSD32
3437 case MFIIO_PASSTHRU32:
3438 if (!SV_CURPROC_FLAG(SV_ILP32)) {
3439 error = ENOTTY;
3440 break;
3441 }
3442 iop_swab.ioc_frame = iop32->ioc_frame;
3443 iop_swab.buf_size = iop32->buf_size;
3444 iop_swab.buf = PTRIN(iop32->buf);
3445 iop = &iop_swab;
3446 /* FALLTHROUGH */
3447 #endif
3448 case MFIIO_PASSTHRU:
3449 error = mfi_user_command(sc, iop);
3450 #ifdef COMPAT_FREEBSD32
3451 if (cmd == MFIIO_PASSTHRU32)
3452 iop32->ioc_frame = iop_swab.ioc_frame;
3453 #endif
3454 break;
3455 default:
3456 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3457 error = ENOTTY;
3458 break;
3459 }
3460
3461 return (error);
3462 }
3463
3464 static int
3465 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3466 {
3467 struct mfi_softc *sc;
3468 struct mfi_linux_ioc_packet l_ioc;
3469 struct mfi_linux_ioc_aen l_aen;
3470 struct mfi_command *cm = NULL;
3471 struct mfi_aen *mfi_aen_entry;
3472 union mfi_sense_ptr sense_ptr;
3473 uint32_t context = 0;
3474 uint8_t *data = NULL, *temp;
3475 int i;
3476 int error, locked;
3477
3478 sc = dev->si_drv1;
3479 error = 0;
3480 switch (cmd) {
3481 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3482 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3483 if (error != 0)
3484 return (error);
3485
3486 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3487 return (EINVAL);
3488 }
3489
3490 mtx_lock(&sc->mfi_io_lock);
3491 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3492 mtx_unlock(&sc->mfi_io_lock);
3493 return (EBUSY);
3494 }
3495 mtx_unlock(&sc->mfi_io_lock);
3496 locked = 0;
3497
3498 /*
3499 * save off original context since copying from user
3500 * will clobber some data
3501 */
3502 context = cm->cm_frame->header.context;
3503
3504 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3505 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3506 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3507 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3508 cm->cm_frame->header.scsi_status = 0;
3509 cm->cm_frame->header.pad0 = 0;
3510 if (l_ioc.lioc_sge_count)
3511 cm->cm_sg =
3512 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3513 cm->cm_flags = 0;
3514 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3515 cm->cm_flags |= MFI_CMD_DATAIN;
3516 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3517 cm->cm_flags |= MFI_CMD_DATAOUT;
3518 cm->cm_len = cm->cm_frame->header.data_len;
3519 if (cm->cm_len &&
3520 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3521 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3522 M_WAITOK | M_ZERO);
3523 } else {
3524 			cm->cm_data = NULL;
3525 }
3526
3527 /* restore header context */
3528 cm->cm_frame->header.context = context;
3529
3530 temp = data;
3531 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3532 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3533 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3534 temp,
3535 l_ioc.lioc_sgl[i].iov_len);
3536 if (error != 0) {
3537 device_printf(sc->mfi_dev,
3538 "Copy in failed\n");
3539 goto out;
3540 }
3541 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3542 }
3543 }
3544
3545 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3546 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3547
3548 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3549 cm->cm_frame->pass.sense_addr_lo =
3550 (uint32_t)cm->cm_sense_busaddr;
3551 cm->cm_frame->pass.sense_addr_hi =
3552 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3553 }
3554
3555 mtx_lock(&sc->mfi_io_lock);
3556 error = mfi_check_command_pre(sc, cm);
3557 if (error) {
3558 mtx_unlock(&sc->mfi_io_lock);
3559 goto out;
3560 }
3561
3562 if ((error = mfi_wait_command(sc, cm)) != 0) {
3563 device_printf(sc->mfi_dev,
3564 				    "Controller poll failed\n");
3565 mtx_unlock(&sc->mfi_io_lock);
3566 goto out;
3567 }
3568
3569 mfi_check_command_post(sc, cm);
3570 mtx_unlock(&sc->mfi_io_lock);
3571
3572 temp = data;
3573 if (cm->cm_flags & MFI_CMD_DATAIN) {
3574 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3575 error = copyout(temp,
3576 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3577 l_ioc.lioc_sgl[i].iov_len);
3578 if (error != 0) {
3579 device_printf(sc->mfi_dev,
3580 "Copy out failed\n");
3581 goto out;
3582 }
3583 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3584 }
3585 }
3586
3587 if (l_ioc.lioc_sense_len) {
3588 /* get user-space sense ptr then copy out sense */
3589 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3590 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3591 &sense_ptr.sense_ptr_data[0],
3592 sizeof(sense_ptr.sense_ptr_data));
3593 #ifdef __amd64__
3594 /*
3595 * only 32bit Linux support so zero out any
3596 * address over 32bit
3597 */
3598 sense_ptr.addr.high = 0;
3599 #endif
3600 error = copyout(cm->cm_sense, sense_ptr.user_space,
3601 l_ioc.lioc_sense_len);
3602 if (error != 0) {
3603 device_printf(sc->mfi_dev,
3604 "Copy out failed\n");
3605 goto out;
3606 }
3607 }
3608
3609 error = copyout(&cm->cm_frame->header.cmd_status,
3610 &((struct mfi_linux_ioc_packet*)arg)
3611 ->lioc_frame.hdr.cmd_status,
3612 1);
3613 if (error != 0) {
3614 device_printf(sc->mfi_dev,
3615 "Copy out failed\n");
3616 goto out;
3617 }
3618
3619 out:
3620 mfi_config_unlock(sc, locked);
3621 if (data)
3622 free(data, M_MFIBUF);
3623 if (cm) {
3624 mtx_lock(&sc->mfi_io_lock);
3625 mfi_release_command(cm);
3626 mtx_unlock(&sc->mfi_io_lock);
3627 }
3628
3629 return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("Registering AEN for pid %d\n", curproc->p_pid);
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		mfi_aen_entry->p = curproc;
		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, aen_link);
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}

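/*
 * poll(2) handler for the control device: report readability once an
 * AEN has fired.  If nothing is pending, record the thread in the
 * driver's selinfo so a later AEN can wake it; if no AEN command is
 * outstanding at all, report POLLERR instead.
 */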
static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mfi_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mfi_aen_triggered != 0) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
			sc->mfi_aen_triggered = 0;
		}
		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
			revents |= POLLERR;
		}
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mfi_poll_waiting = 1;
			selrecord(td, &sc->mfi_select);
		}
	}

	return (revents);
}

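/*
 * Debug helper: walk every mfi(4) instance and print any busy command
 * that has been outstanding longer than the command timeout.
 */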
static void
mfi_dump_all(void)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	devclass_t dc;
	time_t deadline;
	int timedout __unused;
	int i;

	dc = devclass_find("mfi");
	if (dc == NULL) {
		printf("No mfi dev class\n");
		return;
	}

	for (i = 0; ; i++) {
		sc = devclass_get_softc(dc, i);
		if (sc == NULL)
			break;
		device_printf(sc->mfi_dev, "Dumping\n\n");
		timedout = 0;
		deadline = time_uptime - mfi_cmd_timeout;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
			if (cm->cm_timestamp <= deadline) {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				timedout++;
			}
		}

#if 0
		if (timedout)
			MFI_DUMP_CMDS(sc);
#endif

		mtx_unlock(&sc->mfi_io_lock);
	}
}

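/*
 * Watchdog callout, rescheduled every mfi_cmd_timeout seconds.  The
 * Thunderbolt reset handler is given a chance to run first; after
 * that, every busy command older than the timeout is reported, but
 * deliberately never forced to fail (see the comment in the loop
 * below).
 */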
static void
mfi_timeout(void *data)
{
	struct mfi_softc *sc = (struct mfi_softc *)data;
	struct mfi_command *cm, *tmp;
	time_t deadline;
	int timedout __unused = 0;

	deadline = time_uptime - mfi_cmd_timeout;
	if (sc->adpreset == 0) {
		if (!mfi_tbolt_reset(sc)) {
			callout_reset(&sc->mfi_watchdog_callout,
			    mfi_cmd_timeout * hz, mfi_timeout, sc);
			return;
		}
	}
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
			continue;
		if (cm->cm_timestamp <= deadline) {
			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
				cm->cm_timestamp = time_uptime;
			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				MFI_VALIDATE_CMD(sc, cm);
				/*
				 * While commands can get stuck forever,
				 * we do not fail them, as there is no
				 * way to tell whether the controller
				 * has actually processed them.
				 *
				 * In addition, it is very likely that
				 * forcibly failing a command here would
				 * cause a panic, e.g. in UFS.
				 */
				timedout++;
			}
		}
	}

#if 0
	if (timedout)
		MFI_DUMP_CMDS(sc);
#endif

	mtx_unlock(&sc->mfi_io_lock);

	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	/* Change "if (0)" to "if (1)" to dump all adapters when debugging. */
	if (0)
		mfi_dump_all();
}