Lines Matching +full:mc +full:- +full:bus
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
42 #include <sys/bus.h>
47 #include <machine/bus.h>
66 * Per-interface accessor methods
68 static int mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
73 static int mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
78 static int mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
87 static void mlx_periodic_enquiry(struct mlx_command *mc);
89 static void mlx_periodic_eventlog_respond(struct mlx_command *mc);
90 static void mlx_periodic_rebuild(struct mlx_command *mc);
96 static void mlx_pause_done(struct mlx_command *mc);
102 void (*complete)(struct mlx_command *mc));
106 static int mlx_wait_command(struct mlx_command *mc);
107 static int mlx_poll_command(struct mlx_command *mc);
112 static void mlx_completeio(struct mlx_command *mc);
122 static void mlx_releasecmd(struct mlx_command *mc);
123 static void mlx_freecmd(struct mlx_command *mc);
128 static int mlx_getslot(struct mlx_command *mc);
129 static void mlx_setup_dmamap(struct mlx_command *mc,
132 static void mlx_unmapcmd(struct mlx_command *mc);
134 static int mlx_start(struct mlx_command *mc);
141 static char *mlx_diagnose_command(struct mlx_command *mc);
164 struct mlx_command *mc; in mlx_free() local
169 if (sc->mlx_dev_t != NULL) in mlx_free()
170 destroy_dev(sc->mlx_dev_t); in mlx_free()
172 if (sc->mlx_intr) in mlx_free()
173 bus_teardown_intr(sc->mlx_dev, sc->mlx_irq, sc->mlx_intr); in mlx_free()
177 callout_stop(&sc->mlx_timeout); in mlx_free()
180 while ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) { in mlx_free()
181 TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link); in mlx_free()
182 mlx_freecmd(mc); in mlx_free()
185 callout_drain(&sc->mlx_timeout); in mlx_free()
187 /* destroy data-transfer DMA tag */ in mlx_free()
188 if (sc->mlx_buffer_dmat) in mlx_free()
189 bus_dma_tag_destroy(sc->mlx_buffer_dmat); in mlx_free()
192 if (sc->mlx_sgbusaddr) in mlx_free()
193 bus_dmamap_unload(sc->mlx_sg_dmat, sc->mlx_sg_dmamap); in mlx_free()
194 if (sc->mlx_sgtable) in mlx_free()
195 bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap); in mlx_free()
196 if (sc->mlx_sg_dmat) in mlx_free()
197 bus_dma_tag_destroy(sc->mlx_sg_dmat); in mlx_free()
200 if (sc->mlx_irq != NULL) in mlx_free()
201 bus_release_resource(sc->mlx_dev, SYS_RES_IRQ, 0, sc->mlx_irq); in mlx_free()
204 if (sc->mlx_parent_dmat) in mlx_free()
205 bus_dma_tag_destroy(sc->mlx_parent_dmat); in mlx_free()
208 if (sc->mlx_mem != NULL) in mlx_free()
209 bus_release_resource(sc->mlx_dev, sc->mlx_mem_type, sc->mlx_mem_rid, sc->mlx_mem); in mlx_free()
212 if (sc->mlx_enq2 != NULL) in mlx_free()
213 free(sc->mlx_enq2, M_DEVBUF); in mlx_free()
215 sx_destroy(&sc->mlx_config_lock); in mlx_free()
216 mtx_destroy(&sc->mlx_io_lock); in mlx_free()
220 * Map the scatter/gather table into bus space
229 /* save base of s/g table's address in bus space */ in mlx_dma_map_sg()
230 sc->mlx_sgbusaddr = segs->ds_addr; in mlx_dma_map_sg()
242 if (sc->mlx_sgbusaddr) in mlx_sglist_map()
243 bus_dmamap_unload(sc->mlx_sg_dmat, sc->mlx_sg_dmamap); in mlx_sglist_map()
244 if (sc->mlx_sgtable) in mlx_sglist_map()
245 bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap); in mlx_sglist_map()
246 if (sc->mlx_sg_dmat) in mlx_sglist_map()
247 bus_dma_tag_destroy(sc->mlx_sg_dmat); in mlx_sglist_map()
248 sc->mlx_sgbusaddr = 0; in mlx_sglist_map()
249 sc->mlx_sgtable = NULL; in mlx_sglist_map()
250 sc->mlx_sg_dmat = NULL; in mlx_sglist_map()
258 if (sc->mlx_enq2 == NULL) { in mlx_sglist_map()
261 ncmd = sc->mlx_enq2->me_max_commands; in mlx_sglist_map()
264 error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */ in mlx_sglist_map()
273 &sc->mlx_sg_dmat); in mlx_sglist_map()
275 device_printf(sc->mlx_dev, "can't allocate scatter/gather DMA tag\n"); in mlx_sglist_map()
281 * controller-visible space. in mlx_sglist_map()
286 * to bus address. in mlx_sglist_map()
288 error = bus_dmamem_alloc(sc->mlx_sg_dmat, (void **)&sc->mlx_sgtable, in mlx_sglist_map()
289 BUS_DMA_NOWAIT, &sc->mlx_sg_dmamap); in mlx_sglist_map()
291 device_printf(sc->mlx_dev, "can't allocate s/g table\n"); in mlx_sglist_map()
294 (void)bus_dmamap_load(sc->mlx_sg_dmat, sc->mlx_sg_dmamap, sc->mlx_sgtable, in mlx_sglist_map()
311 * Initialise per-controller queues. in mlx_attach()
313 TAILQ_INIT(&sc->mlx_work); in mlx_attach()
314 TAILQ_INIT(&sc->mlx_freecmds); in mlx_attach()
315 bioq_init(&sc->mlx_bioq); in mlx_attach()
320 switch(sc->mlx_iftype) { in mlx_attach()
323 sc->mlx_tryqueue = mlx_v3_tryqueue; in mlx_attach()
324 sc->mlx_findcomplete = mlx_v3_findcomplete; in mlx_attach()
325 sc->mlx_intaction = mlx_v3_intaction; in mlx_attach()
326 sc->mlx_fw_handshake = mlx_v3_fw_handshake; in mlx_attach()
329 sc->mlx_tryqueue = mlx_v4_tryqueue; in mlx_attach()
330 sc->mlx_findcomplete = mlx_v4_findcomplete; in mlx_attach()
331 sc->mlx_intaction = mlx_v4_intaction; in mlx_attach()
332 sc->mlx_fw_handshake = mlx_v4_fw_handshake; in mlx_attach()
335 sc->mlx_tryqueue = mlx_v5_tryqueue; in mlx_attach()
336 sc->mlx_findcomplete = mlx_v5_findcomplete; in mlx_attach()
337 sc->mlx_intaction = mlx_v5_intaction; in mlx_attach()
338 sc->mlx_fw_handshake = mlx_v5_fw_handshake; in mlx_attach()
346 sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); in mlx_attach()
356 while ((hscode = sc->mlx_fw_handshake(sc, &hserror, &hsparam1, &hsparam2, in mlx_attach()
360 device_printf(sc->mlx_dev, "controller initialisation in progress...\n"); in mlx_attach()
373 device_printf(sc->mlx_dev, "initialisation complete.\n"); in mlx_attach()
379 sc->mlx_irq = bus_alloc_resource_any(sc->mlx_dev, SYS_RES_IRQ, &rid, in mlx_attach()
381 if (sc->mlx_irq == NULL) { in mlx_attach()
382 device_printf(sc->mlx_dev, "can't allocate interrupt\n"); in mlx_attach()
385 error = bus_setup_intr(sc->mlx_dev, sc->mlx_irq, INTR_TYPE_BIO | in mlx_attach()
386 INTR_ENTROPY | INTR_MPSAFE, NULL, mlx_intr, sc, &sc->mlx_intr); in mlx_attach()
388 device_printf(sc->mlx_dev, "can't set up interrupt\n"); in mlx_attach()
393 * Create DMA tag for mapping buffers into controller-addressable space. in mlx_attach()
395 error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */ in mlx_attach()
405 &sc->mlx_io_lock, /* lockarg */ in mlx_attach()
406 &sc->mlx_buffer_dmat); in mlx_attach()
408 device_printf(sc->mlx_dev, "can't allocate buffer DMA tag\n"); in mlx_attach()
418 device_printf(sc->mlx_dev, "can't make initial s/g list mapping\n"); in mlx_attach()
425 sc->mlx_currevent = -1; in mlx_attach()
431 …if ((sc->mlx_enq2 = mlx_enquire(sc, MLX_CMD_ENQUIRY2, sizeof(struct mlx_enquiry2), NULL)) == NULL)… in mlx_attach()
433 device_printf(sc->mlx_dev, "ENQUIRY2 failed\n"); in mlx_attach()
440 fwminor = (sc->mlx_enq2->me_firmware_id >> 8) & 0xff; in mlx_attach()
441 switch(sc->mlx_iftype) { in mlx_attach()
446 device_printf(sc->mlx_dev, "ENQUIRY_OLD failed\n"); in mlx_attach()
449 sc->mlx_enq2->me_firmware_id = ('0' << 24) | (0 << 16) | (meo->me_fwminor << 8) | meo->me_fwmajor; in mlx_attach()
452 if (meo->me_fwminor < 42) { in mlx_attach()
453 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); in mlx_attach()
454 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 2.42 or later\n"); in mlx_attach()
461 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); in mlx_attach()
462 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 3.51 or later\n"); in mlx_attach()
468 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); in mlx_attach()
469 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 4.06 or later\n"); in mlx_attach()
474 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); in mlx_attach()
475 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 5.07 or later\n"); in mlx_attach()
489 device_printf(sc->mlx_dev, "can't make final s/g list mapping\n"); in mlx_attach()
494 * No user-requested background operation is in progress. in mlx_attach()
496 sc->mlx_background = 0; in mlx_attach()
497 sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE; in mlx_attach()
502 sc->mlx_dev_t = make_dev(&mlx_cdevsw, 0, UID_ROOT, GID_OPERATOR, in mlx_attach()
503 S_IRUSR | S_IWUSR, "mlx%d", device_get_unit(sc->mlx_dev)); in mlx_attach()
504 sc->mlx_dev_t->si_drv1 = sc; in mlx_attach()
509 callout_reset(&sc->mlx_timeout, hz, mlx_periodic, sc); in mlx_attach()
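A standalone sketch of how the packed 32-bit me_firmware_id is built and read back: the ENQUIRY_OLD path above packs letter/build/minor/major into one word, mlx_attach() pulls the minor revision out with (id >> 8) & 0xff for the firmware warnings, and mlx_describe_controller() later prints the same fields as "%d.%02d-%c-%02d". The sample value is invented for illustration.

#include <stdio.h>

int
main(void)
{
    /* hypothetical id, packed as the ENQUIRY_OLD handling does: letter|build|minor|major */
    unsigned int id = ('D' << 24) | (7 << 16) | (51 << 8) | 3;

    printf("firmware %d.%02d-%c-%02d\n",
        id & 0xff,              /* major revision */
        (id >> 8) & 0xff,       /* minor revision (the fwminor check above) */
        (id >> 24) & 0xff,      /* revision letter */
        (id >> 16) & 0xff);     /* build/turn number */
    return (0);
}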
537 device_printf(sc->mlx_dev, "error fetching drive status\n"); in mlx_startup()
543 for (i = 0, dr = &sc->mlx_sysdrive[0]; in mlx_startup()
547 if (dr->ms_disk == 0) { in mlx_startup()
549 dr->ms_size = mes[i].sd_size; in mlx_startup()
550 dr->ms_raidlevel = mes[i].sd_raidlevel & 0xf; in mlx_startup()
551 dr->ms_state = mes[i].sd_state; in mlx_startup()
554 if (sc->mlx_geom == MLX_GEOM_128_32) { in mlx_startup()
555 dr->ms_heads = 128; in mlx_startup()
556 dr->ms_sectors = 32; in mlx_startup()
557 dr->ms_cylinders = dr->ms_size / (128 * 32); in mlx_startup()
559 dr->ms_heads = 255; in mlx_startup()
560 dr->ms_sectors = 63; in mlx_startup()
561 dr->ms_cylinders = dr->ms_size / (255 * 63); in mlx_startup()
563 dr->ms_disk = device_add_child(sc->mlx_dev, /*"mlxd"*/NULL, DEVICE_UNIT_ANY); in mlx_startup()
564 if (dr->ms_disk == 0) in mlx_startup()
565 device_printf(sc->mlx_dev, "device_add_child failed\n"); in mlx_startup()
566 device_set_ivars(dr->ms_disk, dr); in mlx_startup()
570 bus_attach_children(sc->mlx_dev); in mlx_startup()
574 sc->mlx_state &= ~MLX_STATE_SHUTDOWN; in mlx_startup()
577 sc->mlx_intaction(sc, MLX_INTACTION_ENABLE); in mlx_startup()
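As a quick illustration of the geometry selection in mlx_startup() above, this self-contained sketch derives the fake C/H/S values the same way the driver does for the two supported geometries; the drive size is made up.

#include <stdio.h>

int
main(void)
{
    unsigned long size = 17928192UL;    /* hypothetical system drive size in sectors */
    int geom_128_32 = 0;                /* stands in for sc->mlx_geom == MLX_GEOM_128_32 */
    int heads, sectors;
    unsigned long cylinders;

    if (geom_128_32) {
        heads = 128;
        sectors = 32;
    } else {
        heads = 255;
        sectors = 63;
    }
    cylinders = size / (heads * sectors);
    printf("%lu sectors -> C/H/S %lu/%d/%d\n", size, cylinders, heads, sectors);
    return (0);
}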
596 if (sc->mlx_state & MLX_STATE_OPEN) in mlx_detach()
600 if (sc->mlx_sysdrive[i].ms_disk != 0) { in mlx_detach()
601 mlxd = device_get_softc(sc->mlx_sysdrive[i].ms_disk); in mlx_detach()
602 if (mlxd->mlxd_flags & MLXD_OPEN) { /* drive is mounted, abort detach */ in mlx_detach()
603 device_printf(sc->mlx_sysdrive[i].ms_disk, "still open, can't detach\n"); in mlx_detach()
652 sc->mlx_state |= MLX_STATE_SHUTDOWN; in mlx_shutdown_locked()
653 sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); in mlx_shutdown_locked()
656 device_printf(sc->mlx_dev, "flushing cache..."); in mlx_shutdown_locked()
665 error = bus_generic_detach(sc->mlx_dev); in mlx_shutdown_locked()
681 sc->mlx_state |= MLX_STATE_SUSPEND; in mlx_suspend()
684 device_printf(sc->mlx_dev, "flushing cache..."); in mlx_suspend()
687 sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); in mlx_suspend()
704 sc->mlx_state &= ~MLX_STATE_SUSPEND; in mlx_resume()
705 sc->mlx_intaction(sc, MLX_INTACTION_ENABLE); in mlx_resume()
712 * Take an interrupt, or be poked by other code to look for interrupt-worthy
739 bioq_insert_tail(&sc->mlx_bioq, bp); in mlx_submit_buf()
740 sc->mlx_waitbufs++; in mlx_submit_buf()
751 struct mlx_softc *sc = dev->si_drv1; in mlx_open()
755 sc->mlx_state |= MLX_STATE_OPEN; in mlx_open()
767 struct mlx_softc *sc = dev->si_drv1; in mlx_close()
771 sc->mlx_state &= ~MLX_STATE_OPEN; in mlx_close()
778 * Handle controller-specific control operations.
783 struct mlx_softc *sc = dev->si_drv1; in mlx_ioctl()
795 * unit number if *arg is -1, or the next unit after *arg if it's in mlx_ioctl()
803 if (sc->mlx_sysdrive[i].ms_disk != 0) { in mlx_ioctl()
805 if (*arg == -1) { in mlx_ioctl()
806 *arg = device_get_unit(sc->mlx_sysdrive[i].ms_disk); in mlx_ioctl()
811 if (*arg == device_get_unit(sc->mlx_sysdrive[i].ms_disk)) in mlx_ioctl()
812 *arg = -1; in mlx_ioctl()
834 ((mlxd = device_get_softc(dr->ms_disk)) == NULL)) { in mlx_ioctl()
839 device_printf(dr->ms_disk, "detaching..."); in mlx_ioctl()
841 if (mlxd->mlxd_flags & MLXD_OPEN) { in mlx_ioctl()
856 if ((error = device_delete_child(sc->mlx_dev, dr->ms_disk)) != 0) in mlx_ioctl()
858 dr->ms_disk = 0; in mlx_ioctl()
871 * in the process of hot-swapping devices. in mlx_ioctl()
878 if (!(sc->mlx_feature & MLX_FEAT_PAUSEWORKS)) in mlx_ioctl()
883 if ((mp->mp_when < 0) || (mp->mp_when > 3600)) in mlx_ioctl()
885 if ((mp->mp_howlong < 1) || (mp->mp_howlong > (0xf * 30))) in mlx_ioctl()
889 if ((mp->mp_which == MLX_PAUSE_CANCEL) && (sc->mlx_pause.mp_when != 0)) { in mlx_ioctl()
891 sc->mlx_pause.mp_which = 0; in mlx_ioctl()
894 mp->mp_which &= ((1 << sc->mlx_enq2->me_actual_channels) -1); in mlx_ioctl()
897 if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when == 0)) { in mlx_ioctl()
903 sc->mlx_pause.mp_which = mp->mp_which; in mlx_ioctl()
904 sc->mlx_pause.mp_when = time_second + mp->mp_when; in mlx_ioctl()
905 sc->mlx_pause.mp_howlong = sc->mlx_pause.mp_when + mp->mp_howlong; in mlx_ioctl()
911 * Accept a command passthrough-style. in mlx_ioctl()
921 if (sc->mlx_background != 0) { in mlx_ioctl()
923 rb->rr_status = 0x0106; in mlx_ioctl()
926 rb->rr_status = mlx_rebuild(sc, rb->rr_channel, rb->rr_target); in mlx_ioctl()
927 switch (rb->rr_status) { in mlx_ioctl()
951 sc->mlx_background = MLX_BACKGROUND_REBUILD; in mlx_ioctl()
960 *rs = sc->mlx_rebuildstat; in mlx_ioctl()
965 * Return the per-controller system drive number matching the in mlx_ioctl()
974 if ((mlxd != NULL) && (mlxd->mlxd_drive >= sc->mlx_sysdrive) && in mlx_ioctl()
975 (mlxd->mlxd_drive < (sc->mlx_sysdrive + MLX_MAXDRIVES))) { in mlx_ioctl()
977 *arg = mlxd->mlxd_drive - sc->mlx_sysdrive; in mlx_ioctl()
1003 *arg = drive->ms_state; in mlx_submit_ioctl()
1012 if (sc->mlx_background != 0) { in mlx_submit_ioctl()
1017 result = mlx_check(sc, drive - &sc->mlx_sysdrive[0]); in mlx_submit_ioctl()
1039 sc->mlx_background = MLX_BACKGROUND_CHECK; in mlx_submit_ioctl()
1067 * Run a bus pause? in mlx_periodic()
1069 if ((sc->mlx_pause.mp_which != 0) && in mlx_periodic()
1070 (sc->mlx_pause.mp_when > 0) && in mlx_periodic()
1071 (time_second >= sc->mlx_pause.mp_when)){ in mlx_periodic()
1074 sc->mlx_pause.mp_when = 0; in mlx_periodic()
1078 * Bus pause still running? in mlx_periodic()
1080 } else if ((sc->mlx_pause.mp_which != 0) && in mlx_periodic()
1081 (sc->mlx_pause.mp_when == 0)) { in mlx_periodic()
1083 /* time to stop bus pause? */ in mlx_periodic()
1084 if (time_second >= sc->mlx_pause.mp_howlong) { in mlx_periodic()
1086 sc->mlx_pause.mp_which = 0; /* pause is complete */ in mlx_periodic()
1095 } else if (time_second > (sc->mlx_lastpoll + 10)) { in mlx_periodic()
1096 sc->mlx_lastpoll = time_second; in mlx_periodic()
1103 mlx_enquire(sc, (sc->mlx_iftype == MLX_IFTYPE_2) ? MLX_CMD_ENQUIRY_OLD : MLX_CMD_ENQUIRY, in mlx_periodic()
1109 * XXX This might be better left to event-driven detection, e.g. I/O to an offline in mlx_periodic()
1119 /* XXX should check sc->mlx_background if this is only valid while in progress */ in mlx_periodic()
1122 /* deal with possibly-missed interrupts and timed-out commands */ in mlx_periodic()
1126 callout_reset(&sc->mlx_timeout, hz, mlx_periodic, sc); in mlx_periodic()
1133 mlx_periodic_enquiry(struct mlx_command *mc) in mlx_periodic_enquiry() argument
1135 struct mlx_softc *sc = mc->mc_sc; in mlx_periodic_enquiry()
1141 if (mc->mc_status != 0) { in mlx_periodic_enquiry()
1142 device_printf(sc->mlx_dev, "periodic enquiry failed - %s\n", mlx_diagnose_command(mc)); in mlx_periodic_enquiry()
1147 switch(mc->mc_mailbox[0]) { in mlx_periodic_enquiry()
1154 struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data; in mlx_periodic_enquiry()
1155 struct mlx_enquiry_old *meo = (struct mlx_enquiry_old *)mc->mc_data; in mlx_periodic_enquiry()
1158 /* convert data in-place to new format */ in mlx_periodic_enquiry()
1159 for (i = (sizeof(me->me_dead) / sizeof(me->me_dead[0])) - 1; i >= 0; i--) { in mlx_periodic_enquiry()
1160 me->me_dead[i].dd_chan = meo->me_dead[i].dd_chan; in mlx_periodic_enquiry()
1161 me->me_dead[i].dd_targ = meo->me_dead[i].dd_targ; in mlx_periodic_enquiry()
1163 me->me_misc_flags = 0; in mlx_periodic_enquiry()
1164 me->me_rebuild_count = meo->me_rebuild_count; in mlx_periodic_enquiry()
1165 me->me_dead_count = meo->me_dead_count; in mlx_periodic_enquiry()
1166 me->me_critical_sd_count = meo->me_critical_sd_count; in mlx_periodic_enquiry()
1167 me->me_event_log_seq_num = 0; in mlx_periodic_enquiry()
1168 me->me_offline_sd_count = meo->me_offline_sd_count; in mlx_periodic_enquiry()
1169 me->me_max_commands = meo->me_max_commands; in mlx_periodic_enquiry()
1170 me->me_rebuild_flag = meo->me_rebuild_flag; in mlx_periodic_enquiry()
1171 me->me_fwmajor = meo->me_fwmajor; in mlx_periodic_enquiry()
1172 me->me_fwminor = meo->me_fwminor; in mlx_periodic_enquiry()
1173 me->me_status_flags = meo->me_status_flags; in mlx_periodic_enquiry()
1174 me->me_flash_age = meo->me_flash_age; in mlx_periodic_enquiry()
1175 for (i = (sizeof(me->me_drvsize) / sizeof(me->me_drvsize[0])) - 1; i >= 0; i--) { in mlx_periodic_enquiry()
1176 if (i > ((sizeof(meo->me_drvsize) / sizeof(meo->me_drvsize[0])) - 1)) { in mlx_periodic_enquiry()
1177 me->me_drvsize[i] = 0; /* drive beyond supported range */ in mlx_periodic_enquiry()
1179 me->me_drvsize[i] = meo->me_drvsize[i]; in mlx_periodic_enquiry()
1182 me->me_num_sys_drvs = meo->me_num_sys_drvs; in mlx_periodic_enquiry()
1192 struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data; in mlx_periodic_enquiry()
1194 if (sc->mlx_currevent == -1) { in mlx_periodic_enquiry()
1196 sc->mlx_currevent = sc->mlx_lastevent = me->me_event_log_seq_num; in mlx_periodic_enquiry()
1197 …} else if ((me->me_event_log_seq_num != sc->mlx_lastevent) && !(sc->mlx_flags & MLX_EVENTLOG_BUSY)… in mlx_periodic_enquiry()
1199 sc->mlx_currevent = me->me_event_log_seq_num; in mlx_periodic_enquiry()
1200 debug(1, "event log pointer was %d, now %d\n", sc->mlx_lastevent, sc->mlx_currevent); in mlx_periodic_enquiry()
1203 sc->mlx_flags |= MLX_EVENTLOG_BUSY; in mlx_periodic_enquiry()
1212 struct mlx_enq_sys_drive *mes = (struct mlx_enq_sys_drive *)mc->mc_data; in mlx_periodic_enquiry()
1216 for (i = 0, dr = &sc->mlx_sysdrive[0]; in mlx_periodic_enquiry()
1221 if (dr->ms_state != mes[i].sd_state) { in mlx_periodic_enquiry()
1224 device_printf(dr->ms_disk, "drive offline\n"); in mlx_periodic_enquiry()
1227 device_printf(dr->ms_disk, "drive online\n"); in mlx_periodic_enquiry()
1230 device_printf(dr->ms_disk, "drive critical\n"); in mlx_periodic_enquiry()
1234 dr->ms_state = mes[i].sd_state; in mlx_periodic_enquiry()
1240 device_printf(sc->mlx_dev, "%s: unknown command 0x%x", __func__, mc->mc_mailbox[0]); in mlx_periodic_enquiry()
1245 free(mc->mc_data, M_DEVBUF); in mlx_periodic_enquiry()
1246 mlx_releasecmd(mc); in mlx_periodic_enquiry()
1252 struct mlx_command *mc; in mlx_eventlog_cb() local
1254 mc = (struct mlx_command *)arg; in mlx_eventlog_cb()
1255 mlx_setup_dmamap(mc, segs, nsegments, error); in mlx_eventlog_cb()
1258 mlx_make_type3(mc, MLX_CMD_LOGOP, MLX_LOGOP_GET, 1, in mlx_eventlog_cb()
1259 mc->mc_sc->mlx_lastevent, 0, 0, mc->mc_dataphys, 0); in mlx_eventlog_cb()
1260 mc->mc_complete = mlx_periodic_eventlog_respond; in mlx_eventlog_cb()
1261 mc->mc_private = mc; in mlx_eventlog_cb()
1264 if (mlx_start(mc) != 0) { in mlx_eventlog_cb()
1265 mlx_releasecmd(mc); in mlx_eventlog_cb()
1266 free(mc->mc_data, M_DEVBUF); in mlx_eventlog_cb()
1267 mc->mc_data = NULL; in mlx_eventlog_cb()
1279 struct mlx_command *mc; in mlx_periodic_eventlog_poll() local
1288 if ((mc = mlx_alloccmd(sc)) == NULL) in mlx_periodic_eventlog_poll()
1297 if (mlx_getslot(mc)) in mlx_periodic_eventlog_poll()
1301 mc->mc_data = result; in mlx_periodic_eventlog_poll()
1302 mc->mc_length = /*sizeof(struct mlx_eventlog_entry)*/1024; in mlx_periodic_eventlog_poll()
1303 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, in mlx_periodic_eventlog_poll()
1304 mc->mc_length, mlx_eventlog_cb, mc, BUS_DMA_NOWAIT); in mlx_periodic_eventlog_poll()
1308 if (mc != NULL) in mlx_periodic_eventlog_poll()
1309 mlx_releasecmd(mc); in mlx_periodic_eventlog_poll()
1310 if ((result != NULL) && (mc->mc_data != NULL)) in mlx_periodic_eventlog_poll()
1321 "because of SCSI bus reset failure",
1336 mlx_periodic_eventlog_respond(struct mlx_command *mc) in mlx_periodic_eventlog_respond() argument
1338 struct mlx_softc *sc = mc->mc_sc; in mlx_periodic_eventlog_respond()
1339 struct mlx_eventlog_entry *el = (struct mlx_eventlog_entry *)mc->mc_data; in mlx_periodic_eventlog_respond()
1345 sc->mlx_lastevent++; /* next message... */ in mlx_periodic_eventlog_respond()
1346 if (mc->mc_status == 0) { in mlx_periodic_eventlog_respond()
1349 switch(el->el_type) { in mlx_periodic_eventlog_respond()
1355 /* Mylex vendor-specific message indicating a drive was killed? */ in mlx_periodic_eventlog_respond()
1356 if ((el->el_sensekey == 9) && in mlx_periodic_eventlog_respond()
1357 (el->el_asc == 0x80)) { in mlx_periodic_eventlog_respond()
1358 if (el->el_asq < nitems(mlx_sense_messages)) { in mlx_periodic_eventlog_respond()
1359 reason = mlx_sense_messages[el->el_asq]; in mlx_periodic_eventlog_respond()
1363 device_printf(sc->mlx_dev, "physical drive %d:%d killed %s\n", in mlx_periodic_eventlog_respond()
1364 el->el_channel, el->el_target, reason); in mlx_periodic_eventlog_respond()
1367 if ((el->el_sensekey == 6) && (el->el_asc == 0x29)) { in mlx_periodic_eventlog_respond()
1368 device_printf(sc->mlx_dev, "physical drive %d:%d reset\n", in mlx_periodic_eventlog_respond()
1369 el->el_channel, el->el_target); in mlx_periodic_eventlog_respond()
1372 if (!((el->el_sensekey == 0) || in mlx_periodic_eventlog_respond()
1373 ((el->el_sensekey == 2) && in mlx_periodic_eventlog_respond()
1374 (el->el_asc == 0x04) && in mlx_periodic_eventlog_respond()
1375 ((el->el_asq == 0x01) || in mlx_periodic_eventlog_respond()
1376 (el->el_asq == 0x02))))) { in mlx_periodic_eventlog_respond()
1377 device_printf(sc->mlx_dev, "physical drive %d:%d error log: sense = %d asc = %x asq = %x\n", in mlx_periodic_eventlog_respond()
1378 el->el_channel, el->el_target, el->el_sensekey, el->el_asc, el->el_asq); in mlx_periodic_eventlog_respond()
1379 device_printf(sc->mlx_dev, " info %4D csi %4D\n", el->el_information, ":", el->el_csi, ":"); in mlx_periodic_eventlog_respond()
1384 device_printf(sc->mlx_dev, "unknown log message type 0x%x\n", el->el_type); in mlx_periodic_eventlog_respond()
1388 device_printf(sc->mlx_dev, "error reading message log - %s\n", mlx_diagnose_command(mc)); in mlx_periodic_eventlog_respond()
1390 sc->mlx_lastevent = sc->mlx_currevent; in mlx_periodic_eventlog_respond()
1394 free(mc->mc_data, M_DEVBUF); in mlx_periodic_eventlog_respond()
1395 mlx_releasecmd(mc); in mlx_periodic_eventlog_respond()
1398 if (sc->mlx_lastevent != sc->mlx_currevent) { in mlx_periodic_eventlog_respond()
1401 /* clear log-busy status */ in mlx_periodic_eventlog_respond()
1402 sc->mlx_flags &= ~MLX_EVENTLOG_BUSY; in mlx_periodic_eventlog_respond()
1410 mlx_periodic_rebuild(struct mlx_command *mc) in mlx_periodic_rebuild() argument
1412 struct mlx_softc *sc = mc->mc_sc; in mlx_periodic_rebuild()
1413 struct mlx_rebuild_status *mr = (struct mlx_rebuild_status *)mc->mc_data; in mlx_periodic_rebuild()
1416 switch(mc->mc_status) { in mlx_periodic_rebuild()
1418 sc->mlx_rebuildstat = *mr; in mlx_periodic_rebuild()
1421 if (sc->mlx_background == 0) { in mlx_periodic_rebuild()
1422 sc->mlx_background = MLX_BACKGROUND_SPONTANEOUS; in mlx_periodic_rebuild()
1423 device_printf(sc->mlx_dev, "background check/rebuild operation started\n"); in mlx_periodic_rebuild()
1428 switch(sc->mlx_background) { in mlx_periodic_rebuild()
1430 device_printf(sc->mlx_dev, "consistency check completed\n"); /* XXX print drive? */ in mlx_periodic_rebuild()
1433 device_printf(sc->mlx_dev, "drive rebuild completed\n"); /* XXX print channel/target? */ in mlx_periodic_rebuild()
1437 /* if we have previously been non-idle, report the transition */ in mlx_periodic_rebuild()
1438 if (sc->mlx_rebuildstat.rs_code != MLX_REBUILDSTAT_IDLE) { in mlx_periodic_rebuild()
1439 device_printf(sc->mlx_dev, "background check/rebuild operation completed\n"); in mlx_periodic_rebuild()
1442 sc->mlx_background = 0; in mlx_periodic_rebuild()
1443 sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE; in mlx_periodic_rebuild()
1446 free(mc->mc_data, M_DEVBUF); in mlx_periodic_rebuild()
1447 mlx_releasecmd(mc); in mlx_periodic_rebuild()
1463 struct mlx_command *mc; in mlx_pause_action() local
1469 if (sc->mlx_pause.mp_when == 0) { in mlx_pause_action()
1481 failsafe = ((sc->mlx_pause.mp_howlong - time_second) + 5) / 30; in mlx_pause_action()
1484 sc->mlx_pause.mp_howlong = time_second + (0xf * 30) - 5; in mlx_pause_action()
1489 for (i = 0; i < sc->mlx_enq2->me_actual_channels; i++) { in mlx_pause_action()
1490 if ((1 << i) & sc->mlx_pause.mp_which) { in mlx_pause_action()
1493 if ((mc = mlx_alloccmd(sc)) == NULL) in mlx_pause_action()
1496 mc->mc_flags |= MLX_CMD_PRIORITY; in mlx_pause_action()
1497 if (mlx_getslot(mc)) in mlx_pause_action()
1501 mlx_make_type2(mc, command, (failsafe << 4) | i, 0, 0, 0, 0, 0, 0, 0); in mlx_pause_action()
1502 mc->mc_complete = mlx_pause_done; in mlx_pause_action()
1503 mc->mc_private = sc; /* XXX not needed */ in mlx_pause_action()
1504 if (mlx_start(mc)) in mlx_pause_action()
1510 device_printf(sc->mlx_dev, "%s failed for channel %d\n", in mlx_pause_action()
1512 if (mc != NULL) in mlx_pause_action()
1513 mlx_releasecmd(mc); in mlx_pause_action()
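A minimal sketch of the failsafe arithmetic in mlx_pause_action() above: the remaining pause time is encoded in 30-second units into a 4-bit field and capped at 0xf, with mp_howlong pulled back to match. The timestamps are illustrative stand-ins for time_second.

#include <stdio.h>

int
main(void)
{
    long now = 1000;                /* stand-in for time_second */
    long howlong = now + 600;       /* pause scheduled to end 600s from now */
    int failsafe = ((howlong - now) + 5) / 30;

    if (failsafe > 0xf) {
        failsafe = 0xf;
        howlong = now + (0xf * 30) - 5;
    }
    printf("failsafe field = 0x%x, pause ends in %ld seconds\n", failsafe, howlong - now);
    return (0);
}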
1519 mlx_pause_done(struct mlx_command *mc) in mlx_pause_done() argument
1521 struct mlx_softc *sc = mc->mc_sc; in mlx_pause_done()
1522 int command = mc->mc_mailbox[0]; in mlx_pause_done()
1523 int channel = mc->mc_mailbox[2] & 0xf; in mlx_pause_done()
1526 if (mc->mc_status != 0) { in mlx_pause_done()
1527 device_printf(sc->mlx_dev, "%s command failed - %s\n", in mlx_pause_done()
1528 command == MLX_CMD_STOPCHANNEL ? "pause" : "resume", mlx_diagnose_command(mc)); in mlx_pause_done()
1530 device_printf(sc->mlx_dev, "channel %d pausing for %ld seconds\n", in mlx_pause_done()
1531 channel, (long)(sc->mlx_pause.mp_howlong - time_second)); in mlx_pause_done()
1533 device_printf(sc->mlx_dev, "channel %d resuming\n", channel); in mlx_pause_done()
1535 mlx_releasecmd(mc); in mlx_pause_done()
1548 struct mlx_command *mc; in mlx_enquire_cb() local
1550 mc = (struct mlx_command *)arg; in mlx_enquire_cb()
1554 mlx_setup_dmamap(mc, segs, nsegments, error); in mlx_enquire_cb()
1557 sc = mc->mc_sc; in mlx_enquire_cb()
1558 mlx_make_type2(mc, mc->mc_command, 0, 0, 0, 0, 0, 0, mc->mc_dataphys, 0); in mlx_enquire_cb()
1561 if (mc->mc_complete != NULL) { in mlx_enquire_cb()
1562 if ((error = mlx_start(mc)) != 0) in mlx_enquire_cb()
1566 if ((sc->mlx_state & MLX_STATE_INTEN) ? mlx_wait_command(mc) : in mlx_enquire_cb()
1567 mlx_poll_command(mc)) in mlx_enquire_cb()
1571 if (mc->mc_status != 0) { in mlx_enquire_cb()
1572 device_printf(sc->mlx_dev, "ENQUIRY failed - %s\n", in mlx_enquire_cb()
1573 mlx_diagnose_command(mc)); in mlx_enquire_cb()
1580 * Perform an Enquiry command using a type-2 command buffer and return a single in mlx_enquire()
1583 * valid until that point). Otherwise, the command will either be busy-waited
1587 …quire(struct mlx_softc *sc, int command, size_t bufsize, void (* complete)(struct mlx_command *mc)) in mlx_enquire() argument
1589 struct mlx_command *mc; in mlx_enquire() local
1599 if ((mc = mlx_alloccmd(sc)) == NULL) in mlx_enquire()
1605 mc->mc_flags |= MLX_CMD_PRIORITY | MLX_CMD_DATAOUT; in mlx_enquire()
1606 if (mlx_getslot(mc)) in mlx_enquire()
1610 mc->mc_data = result; in mlx_enquire()
1611 mc->mc_length = bufsize; in mlx_enquire()
1612 mc->mc_command = command; in mlx_enquire()
1615 mc->mc_complete = complete; in mlx_enquire()
1616 mc->mc_private = mc; in mlx_enquire()
1619 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, in mlx_enquire()
1620 mc->mc_length, mlx_enquire_cb, mc, BUS_DMA_NOWAIT); in mlx_enquire()
1624 if ((mc != NULL) && (mc->mc_complete == NULL)) in mlx_enquire()
1625 mlx_releasecmd(mc); in mlx_enquire()
1644 struct mlx_command *mc; in mlx_flush() local
1652 if ((mc = mlx_alloccmd(sc)) == NULL) in mlx_flush()
1655 if (mlx_getslot(mc)) in mlx_flush()
1659 mlx_make_type2(mc, MLX_CMD_FLUSH, 0, 0, 0, 0, 0, 0, 0, 0); in mlx_flush()
1662 if (mlx_poll_command(mc)) in mlx_flush()
1666 if (mc->mc_status != 0) { in mlx_flush()
1667 device_printf(sc->mlx_dev, "FLUSH failed - %s\n", mlx_diagnose_command(mc)); in mlx_flush()
1673 if (mc != NULL) in mlx_flush()
1674 mlx_releasecmd(mc); in mlx_flush()
1687 struct mlx_command *mc; in mlx_check() local
1695 if ((mc = mlx_alloccmd(sc)) == NULL) in mlx_check()
1698 if (mlx_getslot(mc)) in mlx_check()
1702 mlx_make_type2(mc, MLX_CMD_CHECKASYNC, 0, 0, 0, 0, 0, drive | 0x80, 0, 0); in mlx_check()
1705 if (mlx_wait_command(mc)) in mlx_check()
1709 if (mc->mc_status != 0) { in mlx_check()
1710 device_printf(sc->mlx_dev, "CHECK ASYNC failed - %s\n", mlx_diagnose_command(mc)); in mlx_check()
1712 device_printf(sc->mlx_sysdrive[drive].ms_disk, "consistency check started"); in mlx_check()
1714 error = mc->mc_status; in mlx_check()
1717 if (mc != NULL) in mlx_check()
1718 mlx_releasecmd(mc); in mlx_check()
1731 struct mlx_command *mc; in mlx_rebuild() local
1739 if ((mc = mlx_alloccmd(sc)) == NULL) in mlx_rebuild()
1742 if (mlx_getslot(mc)) in mlx_rebuild()
1746 mlx_make_type2(mc, MLX_CMD_REBUILDASYNC, channel, target, 0, 0, 0, 0, 0, 0); in mlx_rebuild()
1749 if (mlx_wait_command(mc)) in mlx_rebuild()
1753 if (mc->mc_status != 0) { in mlx_rebuild()
1754 device_printf(sc->mlx_dev, "REBUILD ASYNC failed - %s\n", mlx_diagnose_command(mc)); in mlx_rebuild()
1756 device_printf(sc->mlx_dev, "drive rebuild started for %d:%d\n", channel, target); in mlx_rebuild()
1758 error = mc->mc_status; in mlx_rebuild()
1761 if (mc != NULL) in mlx_rebuild()
1762 mlx_releasecmd(mc); in mlx_rebuild()
1767 * Run the command (mc) and return when it completes.
1772 mlx_wait_command(struct mlx_command *mc) in mlx_wait_command() argument
1774 struct mlx_softc *sc = mc->mc_sc; in mlx_wait_command()
1780 mc->mc_complete = NULL; in mlx_wait_command()
1781 mc->mc_private = mc; /* wake us when you're done */ in mlx_wait_command()
1782 if ((error = mlx_start(mc)) != 0) in mlx_wait_command()
1787 while ((mc->mc_status == MLX_STATUS_BUSY) && (count < 30)) { in mlx_wait_command()
1788 mtx_sleep(mc->mc_private, &sc->mlx_io_lock, PRIBIO | PCATCH, "mlxwcmd", hz); in mlx_wait_command()
1791 if (mc->mc_status != 0) { in mlx_wait_command()
1792 device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc)); in mlx_wait_command()
1800 * Start the command (mc) and busy-wait for it to complete.
1807 mlx_poll_command(struct mlx_command *mc) in mlx_poll_command() argument
1809 struct mlx_softc *sc = mc->mc_sc; in mlx_poll_command()
1815 mc->mc_complete = NULL; in mlx_poll_command()
1816 mc->mc_private = NULL; /* we will poll for it */ in mlx_poll_command()
1817 if ((error = mlx_start(mc)) != 0) in mlx_poll_command()
1823 mlx_done(mc->mc_sc, 1); in mlx_poll_command()
1825 } while ((mc->mc_status == MLX_STATUS_BUSY) && (count++ < 15000000)); in mlx_poll_command()
1826 if (mc->mc_status != MLX_STATUS_BUSY) { in mlx_poll_command()
1827 TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); in mlx_poll_command()
1830 device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc)); in mlx_poll_command()
1837 struct mlx_command *mc; in mlx_startio_cb() local
1845 mc = (struct mlx_command *)arg; in mlx_startio_cb()
1846 mlx_setup_dmamap(mc, segs, nsegments, error); in mlx_startio_cb()
1848 sc = mc->mc_sc; in mlx_startio_cb()
1849 bp = mc->mc_private; in mlx_startio_cb()
1851 if (bp->bio_cmd == BIO_READ) { in mlx_startio_cb()
1852 mc->mc_flags |= MLX_CMD_DATAIN; in mlx_startio_cb()
1855 mc->mc_flags |= MLX_CMD_DATAOUT; in mlx_startio_cb()
1859 /* build a suitable I/O command (assumes 512-byte rounded transfers) */ in mlx_startio_cb()
1860 mlxd = bp->bio_disk->d_drv1; in mlx_startio_cb()
1861 driveno = mlxd->mlxd_drive - sc->mlx_sysdrive; in mlx_startio_cb()
1862 blkcount = howmany(bp->bio_bcount, MLX_BLKSIZE); in mlx_startio_cb()
1864 if ((bp->bio_pblkno + blkcount) > sc->mlx_sysdrive[driveno].ms_size) in mlx_startio_cb()
1865 device_printf(sc->mlx_dev, in mlx_startio_cb()
1867 (long long)bp->bio_pblkno, blkcount, in mlx_startio_cb()
1868 (u_long)sc->mlx_sysdrive[driveno].ms_size); in mlx_startio_cb()
1874 if (sc->mlx_iftype == MLX_IFTYPE_2) { in mlx_startio_cb()
1875 mlx_make_type1(mc, (cmd == MLX_CMD_WRITESG) ? MLX_CMD_WRITESG_OLD : in mlx_startio_cb()
1878 bp->bio_pblkno, /* physical block number */ in mlx_startio_cb()
1880 mc->mc_sgphys, /* location of SG list */ in mlx_startio_cb()
1881 mc->mc_nsgent & 0x3f); /* size of SG list */ in mlx_startio_cb()
1883 mlx_make_type5(mc, cmd, in mlx_startio_cb()
1887 bp->bio_pblkno, /* physical block number */ in mlx_startio_cb()
1888 mc->mc_sgphys, /* location of SG list */ in mlx_startio_cb()
1889 mc->mc_nsgent & 0x3f); /* size of SG list */ in mlx_startio_cb()
1893 if (mlx_start(mc) != 0) { in mlx_startio_cb()
1895 mc->mc_status = MLX_STATUS_WEDGED; in mlx_startio_cb()
1896 mlx_completeio(mc); in mlx_startio_cb()
1899 sc->mlx_state &= ~MLX_STATE_QFROZEN; in mlx_startio_cb()
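A small sketch of the block-count arithmetic in mlx_startio_cb(): bio_bcount is converted to 512-byte blocks with howmany(), which rounds up (hence the "512-byte rounded transfers" note). howmany() comes from sys/param.h; it is redefined here only so the sketch compiles on its own.

#include <stdio.h>

#define MLX_BLKSIZE     512
#define howmany(x, y)   (((x) + ((y) - 1)) / (y))

int
main(void)
{
    long bcount = 66000;    /* hypothetical bio_bcount in bytes */
    long blkcount = howmany(bcount, MLX_BLKSIZE);

    printf("%ld bytes -> %ld blocks of %d\n", bcount, blkcount, MLX_BLKSIZE);
    return (0);
}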
1909 struct mlx_command *mc; in mlx_startio() local
1917 if (sc->mlx_state & MLX_STATE_QFROZEN) in mlx_startio()
1921 if ((bp = bioq_first(&sc->mlx_bioq)) == NULL) in mlx_startio()
1924 if ((mc = mlx_alloccmd(sc)) == NULL) in mlx_startio()
1927 if (mlx_getslot(mc) != 0) { in mlx_startio()
1928 mlx_releasecmd(mc); in mlx_startio()
1932 bioq_remove(&sc->mlx_bioq, bp); in mlx_startio()
1933 sc->mlx_waitbufs--; in mlx_startio()
1936 mc->mc_complete = mlx_completeio; in mlx_startio()
1937 mc->mc_private = bp; in mlx_startio()
1938 mc->mc_data = bp->bio_data; in mlx_startio()
1939 mc->mc_length = bp->bio_bcount; in mlx_startio()
1942 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, in mlx_startio()
1943 mc->mc_length, mlx_startio_cb, mc, 0); in mlx_startio()
1945 sc->mlx_state |= MLX_STATE_QFROZEN; in mlx_startio()
1955 mlx_completeio(struct mlx_command *mc) in mlx_completeio() argument
1957 struct mlx_softc *sc = mc->mc_sc; in mlx_completeio()
1958 struct bio *bp = mc->mc_private; in mlx_completeio()
1959 struct mlxd_softc *mlxd = bp->bio_disk->d_drv1; in mlx_completeio()
1962 if (mc->mc_status != MLX_STATUS_OK) { /* could be more verbose here? */ in mlx_completeio()
1963 bp->bio_error = EIO; in mlx_completeio()
1964 bp->bio_flags |= BIO_ERROR; in mlx_completeio()
1966 switch(mc->mc_status) { in mlx_completeio()
1968 device_printf(mlxd->mlxd_dev, "drive offline\n"); in mlx_completeio()
1970 mlxd->mlxd_drive->ms_state = MLX_SYSD_OFFLINE; in mlx_completeio()
1974 device_printf(sc->mlx_dev, "I/O error - %s\n", mlx_diagnose_command(mc)); in mlx_completeio()
1976 device_printf(sc->mlx_dev, " b_bcount %ld blkcount %ld b_pblkno %d\n", in mlx_completeio()
1977 bp->bio_bcount, bp->bio_bcount / MLX_BLKSIZE, bp->bio_pblkno); in mlx_completeio()
1978 device_printf(sc->mlx_dev, " %13D\n", mc->mc_mailbox, " "); in mlx_completeio()
1983 mlx_releasecmd(mc); in mlx_completeio()
1991 struct mlx_command *mc; in mlx_user_cb() local
1994 mc = (struct mlx_command *)arg; in mlx_user_cb()
1998 mlx_setup_dmamap(mc, segs, nsegments, error); in mlx_user_cb()
2000 mu = (struct mlx_usercommand *)mc->mc_private; in mlx_user_cb()
2009 if (mc->mc_mailbox[0] == MLX_CMD_DIRECT_CDB) { in mlx_user_cb()
2010 dcdb = (struct mlx_dcdb *)mc->mc_data; in mlx_user_cb()
2011 dcdb->dcdb_physaddr = mc->mc_dataphys + sizeof(*dcdb); in mlx_user_cb()
2012 mu->mu_bufptr = 8; in mlx_user_cb()
2018 if (mu->mu_datasize > 0) { in mlx_user_cb()
2019 mc->mc_mailbox[mu->mu_bufptr ] = mc->mc_dataphys & 0xff; in mlx_user_cb()
2020 mc->mc_mailbox[mu->mu_bufptr + 1] = (mc->mc_dataphys >> 8) & 0xff; in mlx_user_cb()
2021 mc->mc_mailbox[mu->mu_bufptr + 2] = (mc->mc_dataphys >> 16) & 0xff; in mlx_user_cb()
2022 mc->mc_mailbox[mu->mu_bufptr + 3] = (mc->mc_dataphys >> 24) & 0xff; in mlx_user_cb()
2027 if (mlx_wait_command(mc) != 0) in mlx_user_cb()
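A sketch of the byte packing mlx_user_cb() performs above: the 32-bit bus address of the data buffer is written into four consecutive mailbox bytes, least-significant byte first, starting at mu_bufptr (8 for DCDB commands). The address value is invented.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    uint8_t mailbox[16] = { 0 };
    uint32_t dataphys = 0x12345678;     /* hypothetical buffer bus address */
    int bufptr = 8;                     /* offset used for MLX_CMD_DIRECT_CDB above */

    mailbox[bufptr]     = dataphys & 0xff;
    mailbox[bufptr + 1] = (dataphys >> 8) & 0xff;
    mailbox[bufptr + 2] = (dataphys >> 16) & 0xff;
    mailbox[bufptr + 3] = (dataphys >> 24) & 0xff;

    printf("mailbox[%d..%d] = %02x %02x %02x %02x\n", bufptr, bufptr + 3,
        mailbox[bufptr], mailbox[bufptr + 1], mailbox[bufptr + 2], mailbox[bufptr + 3]);
    return (0);
}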
2033 * Take a command from user-space and try to run it.
2042 struct mlx_command *mc; in mlx_user_command() local
2049 mc = NULL; in mlx_user_command()
2054 if ((mc = mlx_alloccmd(sc)) == NULL) { in mlx_user_command()
2058 bcopy(mu->mu_command, mc->mc_mailbox, sizeof(mc->mc_mailbox)); in mlx_user_command()
2065 if (mu->mu_datasize > 0) { in mlx_user_command()
2066 if (mu->mu_datasize > MLX_MAXPHYS) { in mlx_user_command()
2071 kbuf = malloc(mu->mu_datasize, M_DEVBUF, M_WAITOK); in mlx_user_command()
2072 if ((error = copyin(mu->mu_buf, kbuf, mu->mu_datasize))) { in mlx_user_command()
2081 if (mlx_getslot(mc)) in mlx_user_command()
2085 if (mu->mu_datasize > 0) { in mlx_user_command()
2088 if ((mu->mu_bufptr < 0) || (mu->mu_bufptr > (sizeof(mu->mu_command) - in mlx_user_command()
2096 mc->mc_data = kbuf; in mlx_user_command()
2097 mc->mc_length = mu->mu_datasize; in mlx_user_command()
2098 mc->mc_private = mu; in mlx_user_command()
2099 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, in mlx_user_command()
2100 mc->mc_length, mlx_user_cb, mc, BUS_DMA_NOWAIT); in mlx_user_command()
2105 mu->mu_status = mc->mc_status; in mlx_user_command()
2106 if (mu->mu_datasize > 0) { in mlx_user_command()
2108 error = copyout(kbuf, mu->mu_buf, mu->mu_datasize); in mlx_user_command()
2113 mlx_releasecmd(mc); in mlx_user_command()
2127 * Find a free command slot for (mc).
2129 * Don't hand out a slot to a normal-priority command unless there are at least
2133 mlx_getslot(struct mlx_command *mc) in mlx_getslot() argument
2135 struct mlx_softc *sc = mc->mc_sc; in mlx_getslot()
2143 * Enforce slot-usage limit, if we have the required information. in mlx_getslot()
2145 if (sc->mlx_enq2 != NULL) { in mlx_getslot()
2146 limit = sc->mlx_enq2->me_max_commands; in mlx_getslot()
2150 if (sc->mlx_busycmds >= ((mc->mc_flags & MLX_CMD_PRIORITY) ? limit : limit - 4)) in mlx_getslot()
2160 if (sc->mlx_busycmd[slot] == NULL) in mlx_getslot()
2164 sc->mlx_busycmd[slot] = mc; in mlx_getslot()
2165 sc->mlx_busycmds++; in mlx_getslot()
2173 mc->mc_slot = slot; in mlx_getslot()
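A sketch of the admission test in mlx_getslot() above: priority commands may consume every slot, while ordinary commands are refused once fewer than four slots remain free (the limit - 4 check). The counts are illustrative.

#include <stdio.h>

int
main(void)
{
    int limit = 32;     /* hypothetical me_max_commands */
    int busy = 29;      /* commands already outstanding */
    int priority = 0;   /* 1 if MLX_CMD_PRIORITY is set */

    if (busy >= (priority ? limit : limit - 4))
        printf("no slot: %d of %d busy\n", busy, limit);
    else
        printf("slot granted: %d of %d busy\n", busy, limit);
    return (0);
}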
2178 * Map/unmap (mc)'s data in the controller's addressable space.
2181 mlx_setup_dmamap(struct mlx_command *mc, bus_dma_segment_t *segs, int nsegments, in mlx_setup_dmamap() argument
2184 struct mlx_softc *sc = mc->mc_sc; in mlx_setup_dmamap()
2191 if (sc->mlx_enq2 && (nsegments > sc->mlx_enq2->me_max_sg)) in mlx_setup_dmamap()
2193 sc->mlx_enq2->me_max_sg); in mlx_setup_dmamap()
2196 sg = sc->mlx_sgtable + (mc->mc_slot * MLX_NSEG); in mlx_setup_dmamap()
2199 mc->mc_nsgent = nsegments; in mlx_setup_dmamap()
2200 mc->mc_sgphys = sc->mlx_sgbusaddr + in mlx_setup_dmamap()
2201 (mc->mc_slot * MLX_NSEG * sizeof(struct mlx_sgentry)); in mlx_setup_dmamap()
2202 mc->mc_dataphys = segs[0].ds_addr; in mlx_setup_dmamap()
2206 sg->sg_addr = segs[i].ds_addr; in mlx_setup_dmamap()
2207 sg->sg_count = segs[i].ds_len; in mlx_setup_dmamap()
2210 /* Make sure the buffers are visible on the bus. */ in mlx_setup_dmamap()
2211 if (mc->mc_flags & MLX_CMD_DATAIN) in mlx_setup_dmamap()
2212 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, in mlx_setup_dmamap()
2214 if (mc->mc_flags & MLX_CMD_DATAOUT) in mlx_setup_dmamap()
2215 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, in mlx_setup_dmamap()
2220 mlx_unmapcmd(struct mlx_command *mc) in mlx_unmapcmd() argument
2222 struct mlx_softc *sc = mc->mc_sc; in mlx_unmapcmd()
2227 if (mc->mc_data != NULL) { in mlx_unmapcmd()
2229 if (mc->mc_flags & MLX_CMD_DATAIN) in mlx_unmapcmd()
2230 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTREAD); in mlx_unmapcmd()
2231 if (mc->mc_flags & MLX_CMD_DATAOUT) in mlx_unmapcmd()
2232 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTWRITE); in mlx_unmapcmd()
2234 bus_dmamap_unload(sc->mlx_buffer_dmat, mc->mc_dmamap); in mlx_unmapcmd()
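A sketch of the per-slot scatter/gather layout used by mlx_setup_dmamap(): each command slot owns a fixed run of MLX_NSEG entries, so a slot's list sits at the table base plus slot * MLX_NSEG * sizeof(struct mlx_sgentry). MLX_NSEG, the entry field types, and the addresses here are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>

#define MLX_NSEG    64          /* assumed value; the real one lives in the driver headers */

struct mlx_sgentry {            /* assumed 32-bit address/count layout */
    uint32_t    sg_addr;
    uint32_t    sg_count;
};

int
main(void)
{
    uint32_t sgbusaddr = 0x10000000;    /* hypothetical table base in bus space */
    int slot = 5;
    uint32_t sgphys;

    sgphys = sgbusaddr + slot * MLX_NSEG * (uint32_t)sizeof(struct mlx_sgentry);
    printf("slot %d S/G list at bus address 0x%08x\n", slot, sgphys);
    return (0);
}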
2239 * Try to deliver (mc) to the controller.
2244 mlx_start(struct mlx_command *mc) in mlx_start() argument
2246 struct mlx_softc *sc = mc->mc_sc; in mlx_start()
2252 mc->mc_mailbox[0x1] = mc->mc_slot; in mlx_start()
2255 mc->mc_status = MLX_STATUS_BUSY; in mlx_start()
2257 /* set a default 60-second timeout XXX tunable? XXX not currently used */ in mlx_start()
2258 mc->mc_timeout = time_second + 60; in mlx_start()
2261 for (i = 100000; i > 0; i--) { in mlx_start()
2262 if (sc->mlx_tryqueue(sc, mc)) { in mlx_start()
2264 TAILQ_INSERT_TAIL(&sc->mlx_work, mc, mc_link); in mlx_start()
2274 sc->mlx_busycmd[mc->mc_slot] = NULL; in mlx_start()
2275 device_printf(sc->mlx_dev, "controller wedged (not taking commands)\n"); in mlx_start()
2276 mc->mc_status = MLX_STATUS_WEDGED; in mlx_start()
2291 struct mlx_command *mc; in mlx_done() local
2304 if (sc->mlx_findcomplete(sc, &slot, &status)) { in mlx_done()
2306 mc = sc->mlx_busycmd[slot]; /* find command */ in mlx_done()
2307 if (mc != NULL) { /* paranoia */ in mlx_done()
2308 if (mc->mc_status == MLX_STATUS_BUSY) { in mlx_done()
2309 mc->mc_status = status; /* save status */ in mlx_done()
2312 sc->mlx_busycmd[slot] = NULL; in mlx_done()
2313 sc->mlx_busycmds--; in mlx_done()
2315 device_printf(sc->mlx_dev, "duplicate done event for slot %d\n", slot); in mlx_done()
2318 device_printf(sc->mlx_dev, "done event for nonbusy slot %d\n", slot); in mlx_done()
2336 * Perform post-completion processing for commands on (sc).
2341 struct mlx_command *mc, *nc; in mlx_complete() local
2347 mc = TAILQ_FIRST(&sc->mlx_work); in mlx_complete()
2348 while (mc != NULL) { in mlx_complete()
2349 nc = TAILQ_NEXT(mc, mc_link); in mlx_complete()
2352 if (mc->mc_status != MLX_STATUS_BUSY) { in mlx_complete()
2355 mlx_unmapcmd(mc); in mlx_complete()
2359 if (mc->mc_complete != NULL) { in mlx_complete()
2361 TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); in mlx_complete()
2362 mc->mc_complete(mc); in mlx_complete()
2367 } else if (mc->mc_private != NULL) { /* sleeping caller wants to know about it */ in mlx_complete()
2370 TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); in mlx_complete()
2371 wakeup_one(mc->mc_private); in mlx_complete()
2379 mc = nc; in mlx_complete()
2392 * This may return NULL in low-memory cases.
2399 * XXX Note that command buffers are not cleaned out - it is the caller's
2406 struct mlx_command *mc; in mlx_alloccmd() local
2412 if ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) in mlx_alloccmd()
2413 TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link); in mlx_alloccmd()
2416 if (mc == NULL) { in mlx_alloccmd()
2417 mc = (struct mlx_command *)malloc(sizeof(*mc), M_DEVBUF, M_NOWAIT | M_ZERO); in mlx_alloccmd()
2418 if (mc != NULL) { in mlx_alloccmd()
2419 mc->mc_sc = sc; in mlx_alloccmd()
2420 error = bus_dmamap_create(sc->mlx_buffer_dmat, 0, &mc->mc_dmamap); in mlx_alloccmd()
2422 free(mc, M_DEVBUF); in mlx_alloccmd()
2427 return(mc); in mlx_alloccmd()
2437 mlx_releasecmd(struct mlx_command *mc) in mlx_releasecmd() argument
2442 MLX_IO_ASSERT_LOCKED(mc->mc_sc); in mlx_releasecmd()
2443 TAILQ_INSERT_HEAD(&mc->mc_sc->mlx_freecmds, mc, mc_link); in mlx_releasecmd()
2450 mlx_freecmd(struct mlx_command *mc) in mlx_freecmd() argument
2452 struct mlx_softc *sc = mc->mc_sc; in mlx_freecmd()
2455 bus_dmamap_destroy(sc->mlx_buffer_dmat, mc->mc_dmamap); in mlx_freecmd()
2456 free(mc, M_DEVBUF); in mlx_freecmd()
2467 * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure
2471 mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) in mlx_v3_tryqueue() argument
2482 MLX_V3_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); in mlx_v3_tryqueue()
2527 sc->mlx_state &= ~MLX_STATE_INTEN; in mlx_v3_intaction()
2531 sc->mlx_state |= MLX_STATE_INTEN; in mlx_v3_intaction()
2582 * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure
2586 mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) in mlx_v4_tryqueue() argument
2597 MLX_V4_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); in mlx_v4_tryqueue()
2599 /* memory-mapped controller, so issue a write barrier to ensure the mailbox is filled */ in mlx_v4_tryqueue()
2600 bus_barrier(sc->mlx_mem, MLX_V4_MAILBOX, MLX_V4_MAILBOX_LENGTH, in mlx_v4_tryqueue()
2646 sc->mlx_state &= ~MLX_STATE_INTEN; in mlx_v4_intaction()
2650 sc->mlx_state |= MLX_STATE_INTEN; in mlx_v4_intaction()
2701 * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure
2705 mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) in mlx_v5_tryqueue() argument
2716 MLX_V5_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); in mlx_v5_tryqueue()
2761 sc->mlx_state &= ~MLX_STATE_INTEN; in mlx_v5_intaction()
2765 sc->mlx_state |= MLX_STATE_INTEN; in mlx_v5_intaction()
2816 * Return a status message describing (mc)
2830 "invalid or non-redundant drive", /* 11 */
2881 {-1, 0, 0}
2885 mlx_diagnose_command(struct mlx_command *mc) in mlx_diagnose_command() argument
2891 for (i = 0; mlx_messages[i].command != -1; i++) in mlx_diagnose_command()
2892 if (((mc->mc_mailbox[0] == mlx_messages[i].command) || (mlx_messages[i].command == 0)) && in mlx_diagnose_command()
2893 (mc->mc_status == mlx_messages[i].status)) in mlx_diagnose_command()
2896 …sprintf(unkmsg, "unknown response 0x%x for command 0x%x", (int)mc->mc_status, (int)mc->mc_mailbox[… in mlx_diagnose_command()
2918 {-1, NULL}
2929 if ((sc->mlx_enq2->me_hardware_id & 0xff) == mlx_controller_names[i].hwid) { in mlx_describe_controller()
2935 sprintf(buf, " model 0x%x", sc->mlx_enq2->me_hardware_id & 0xff); in mlx_describe_controller()
2938 device_printf(sc->mlx_dev, "DAC%s, %d channel%s, firmware %d.%02d-%c-%02d, %dMB RAM\n", in mlx_describe_controller()
2940 sc->mlx_enq2->me_actual_channels, in mlx_describe_controller()
2941 sc->mlx_enq2->me_actual_channels > 1 ? "s" : "", in mlx_describe_controller()
2942 sc->mlx_enq2->me_firmware_id & 0xff, in mlx_describe_controller()
2943 (sc->mlx_enq2->me_firmware_id >> 8) & 0xff, in mlx_describe_controller()
2944 (sc->mlx_enq2->me_firmware_id >> 24) & 0xff, in mlx_describe_controller()
2945 (sc->mlx_enq2->me_firmware_id >> 16) & 0xff, in mlx_describe_controller()
2946 sc->mlx_enq2->me_mem_size / (1024 * 1024)); in mlx_describe_controller()
2949 device_printf(sc->mlx_dev, " Hardware ID 0x%08x\n", sc->mlx_enq2->me_hardware_id); in mlx_describe_controller()
2950 device_printf(sc->mlx_dev, " Firmware ID 0x%08x\n", sc->mlx_enq2->me_firmware_id); in mlx_describe_controller()
2951 …device_printf(sc->mlx_dev, " Configured/Actual channels %d/%d\n", sc->mlx_enq2->me_configured_ch… in mlx_describe_controller()
2952 sc->mlx_enq2->me_actual_channels); in mlx_describe_controller()
2953 device_printf(sc->mlx_dev, " Max Targets %d\n", sc->mlx_enq2->me_max_targets); in mlx_describe_controller()
2954 device_printf(sc->mlx_dev, " Max Tags %d\n", sc->mlx_enq2->me_max_tags); in mlx_describe_controller()
2955 device_printf(sc->mlx_dev, " Max System Drives %d\n", sc->mlx_enq2->me_max_sys_drives); in mlx_describe_controller()
2956 device_printf(sc->mlx_dev, " Max Arms %d\n", sc->mlx_enq2->me_max_arms); in mlx_describe_controller()
2957 device_printf(sc->mlx_dev, " Max Spans %d\n", sc->mlx_enq2->me_max_spans); in mlx_describe_controller()
2958 …device_printf(sc->mlx_dev, " DRAM/cache/flash/NVRAM size %d/%d/%d/%d\n", sc->mlx_enq2->me_mem_siz… in mlx_describe_controller()
2959 sc->mlx_enq2->me_cache_size, sc->mlx_enq2->me_flash_size, sc->mlx_enq2->me_nvram_size); in mlx_describe_controller()
2960 device_printf(sc->mlx_dev, " DRAM type %d\n", sc->mlx_enq2->me_mem_type); in mlx_describe_controller()
2961 device_printf(sc->mlx_dev, " Clock Speed %dns\n", sc->mlx_enq2->me_clock_speed); in mlx_describe_controller()
2962 …device_printf(sc->mlx_dev, " Hardware Speed %dns\n", sc->mlx_enq2->me_hardware_speed… in mlx_describe_controller()
2963 device_printf(sc->mlx_dev, " Max Commands %d\n", sc->mlx_enq2->me_max_commands); in mlx_describe_controller()
2964 device_printf(sc->mlx_dev, " Max SG Entries %d\n", sc->mlx_enq2->me_max_sg); in mlx_describe_controller()
2965 device_printf(sc->mlx_dev, " Max DP %d\n", sc->mlx_enq2->me_max_dp); in mlx_describe_controller()
2966 device_printf(sc->mlx_dev, " Max IOD %d\n", sc->mlx_enq2->me_max_iod); in mlx_describe_controller()
2967 device_printf(sc->mlx_dev, " Max Comb %d\n", sc->mlx_enq2->me_max_comb); in mlx_describe_controller()
2968 device_printf(sc->mlx_dev, " Latency %ds\n", sc->mlx_enq2->me_latency); in mlx_describe_controller()
2969 device_printf(sc->mlx_dev, " SCSI Timeout %ds\n", sc->mlx_enq2->me_scsi_timeout); in mlx_describe_controller()
2970 device_printf(sc->mlx_dev, " Min Free Lines %d\n", sc->mlx_enq2->me_min_freelines); in mlx_describe_controller()
2971 device_printf(sc->mlx_dev, " Rate Constant %d\n", sc->mlx_enq2->me_rate_const); in mlx_describe_controller()
2972 device_printf(sc->mlx_dev, " MAXBLK %d\n", sc->mlx_enq2->me_maxblk); in mlx_describe_controller()
2973 …device_printf(sc->mlx_dev, " Blocking Factor %d sectors\n", sc->mlx_enq2->me_blocking… in mlx_describe_controller()
2974 …device_printf(sc->mlx_dev, " Cache Line Size %d blocks\n", sc->mlx_enq2->me_cacheline… in mlx_describe_controller()
2975 device_printf(sc->mlx_dev, " SCSI Capability %s%dMHz, %d bit\n", in mlx_describe_controller()
2976 sc->mlx_enq2->me_scsi_cap & (1<<4) ? "differential " : "", in mlx_describe_controller()
2977 (1 << ((sc->mlx_enq2->me_scsi_cap >> 2) & 3)) * 10, in mlx_describe_controller()
2978 8 << (sc->mlx_enq2->me_scsi_cap & 0x3)); in mlx_describe_controller()
2979 device_printf(sc->mlx_dev, " Firmware Build Number %d\n", sc->mlx_enq2->me_firmware_build); in mlx_describe_controller()
2980 device_printf(sc->mlx_dev, " Fault Management Type %d\n", sc->mlx_enq2->me_fault_mgmt_type); in mlx_describe_controller()
2981 …device_printf(sc->mlx_dev, " Features %b\n", sc->mlx_enq2->me_firmware_feature… in mlx_describe_controller()
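A sketch of the me_scsi_cap decode printed just above: bit 4 flags differential signalling, bits 3:2 select the clock (10/20/40/80 MHz), and bits 1:0 select the bus width. The sample value is invented.

#include <stdio.h>

int
main(void)
{
    unsigned int cap = 0x15;    /* hypothetical me_scsi_cap: differential, 20MHz, 16 bit */

    printf("SCSI Capability %s%dMHz, %d bit\n",
        (cap & (1 << 4)) ? "differential " : "",
        (1 << ((cap >> 2) & 3)) * 10,
        8 << (cap & 0x3));
    return (0);
}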
3000 device_printf(sc->mlx_dev, "physical drive %d:%d not responding\n", param2, param1); in mlx_fw_message()
3004 if (!(sc->mlx_flags & MLX_SPINUP_REPORTED)) { in mlx_fw_message()
3005 device_printf(sc->mlx_dev, "spinning up drives...\n"); in mlx_fw_message()
3006 sc->mlx_flags |= MLX_SPINUP_REPORTED; in mlx_fw_message()
3010 device_printf(sc->mlx_dev, "configuration checksum error\n"); in mlx_fw_message()
3013 device_printf(sc->mlx_dev, "mirror race recovery failed\n"); in mlx_fw_message()
3016 device_printf(sc->mlx_dev, "mirror race recovery in progress\n"); in mlx_fw_message()
3019 device_printf(sc->mlx_dev, "physical drive %d:%d COD mismatch\n", param2, param1); in mlx_fw_message()
3022 device_printf(sc->mlx_dev, "logical drive installation aborted\n"); in mlx_fw_message()
3025 device_printf(sc->mlx_dev, "mirror race on a critical system drive\n"); in mlx_fw_message()
3028 device_printf(sc->mlx_dev, "new controller configuration found\n"); in mlx_fw_message()
3031 device_printf(sc->mlx_dev, "FATAL MEMORY PARITY ERROR\n"); in mlx_fw_message()
3034 …device_printf(sc->mlx_dev, "unknown firmware initialisation error %02x:%02x:%02x\n", error, param1… in mlx_fw_message()
3058 if (sc->mlx_sysdrive[i].ms_disk != 0) { in mlx_findunit()
3060 if (unit == device_get_unit(sc->mlx_sysdrive[i].ms_disk)) in mlx_findunit()
3061 return(&sc->mlx_sysdrive[i]); in mlx_findunit()