xref: /freebsd/sys/dev/aic7xxx/aic7xxx.c (revision b601c69bdbe8755d26570261d7fd4c02ee4eff74)
1 /*
2  * Generic driver for the aic7xxx based adaptec SCSI controllers
3  * Product specific probe and attach routines can be found in:
4  * i386/eisa/ahc_eisa.c	27/284X and aic7770 motherboard controllers
5  * pci/ahc_pci.c	3985, 3980, 3940, 2940, aic7895, aic7890,
6  *			aic7880, aic7870, aic7860, and aic7850 controllers
7  *
8  * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions, and the following disclaimer,
16  *    without modification.
17  * 2. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU Public License ("GPL").
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
27  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * $FreeBSD$
36  */
37 /*
38  * A few notes on features of the driver.
39  *
40  * SCB paging takes advantage of the fact that devices stay disconnected
41  * from the bus a relatively long time and that while they're disconnected,
42  * having the SCBs for these transactions down on the host adapter is of
43  * little use.  Instead of leaving this idle SCB down on the card we copy
44  * it back up into kernel memory and reuse the SCB slot on the card to
45  * schedule another transaction.  This can be a real payoff when doing random
46  * I/O to tagged queueing devices since there are more transactions active at
47  * once for the device to sort for optimal seek reduction. The algorithm goes
48  * like this...
49  *
50  * The sequencer maintains two lists of its hardware SCBs.  The first is the
51  * singly linked free list which tracks all SCBs that are not currently in
52  * use.  The second is the doubly linked disconnected list which holds the
53  * SCBs of transactions that are in the disconnected state sorted most
54  * recently disconnected first.  When the kernel queues a transaction to
55  * the card, a hardware SCB to "house" this transaction is retrieved from
56  * either of these two lists.  If the SCB came from the disconnected list,
57  * a check is made to see if any data transfer or SCB linking (more on linking
58  * in a bit) information has been changed since it was copied from the host
59  * and if so, the SCB is DMAed back up before the slot can be reused.  Once a hardware
60  * SCB has been obtained, the SCB is DMAed from the host.  Before any work
61  * can begin on this SCB, the sequencer must ensure that either the SCB is
62  * for a tagged transaction or the target is not already working on another
63  * non-tagged transaction.  If a conflict arises in the non-tagged case, the
64  * sequencer finds the SCB for the active transactions and sets the SCB_LINKED
65  * field in that SCB to this next SCB to execute.  To facilitate finding
66  * active non-tagged SCBs, the last four bytes of up to the first four hardware
67  * SCBs serve as a storage area for the currently active SCB ID for each
68  * target.
69  *
70  * When a device reconnects, a search is made of the hardware SCBs to find
71  * the SCB for this transaction.  If the search fails, a hardware SCB is
72  * pulled from either the free or disconnected SCB list and the proper
73  * SCB is DMAed from the host.  If the MK_MESSAGE control bit is set
74  * in the control byte of the SCB while it was disconnected, the sequencer
75  * will assert ATN and attempt to issue a message to the target.
76  *
77  * When a command completes, a check for non-zero status and residuals is
78  * made.  If either of these conditions exists, the SCB is DMAed back up to
79  * the host so that it can interpret this information.  Additionally, in the
80  * case of bad status, the sequencer generates a special interrupt and pauses
81  * itself.  This allows the host, if it chooses, to set up a request sense
82  * command for this target synchronously with the error so that sense
83  * information isn't lost.
84  *
85  */
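/*
 * Purely illustrative sketch (the helper names below are invented and do
 * not exist in this driver or in the sequencer code) of how a hardware SCB
 * slot is chosen when the kernel queues a new transaction, per the notes
 * above:
 *
 *	slot = pop(free_list);
 *	if (slot == SCB_LIST_NULL) {
 *		slot = remove_head(disconnected_list);
 *		if (slot_state_changed_since_download(slot))
 *			dma_scb_to_host(slot);
 *	}
 *	dma_scb_from_host(slot, new_transaction);
 */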
86 
87 #include <opt_aic7xxx.h>
88 
89 #include <pci.h>
90 #include <stddef.h>	/* For offsetof */
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/malloc.h>
95 #include <sys/eventhandler.h>
96 #include <sys/proc.h>
97 
98 #include <cam/cam.h>
99 #include <cam/cam_ccb.h>
100 #include <cam/cam_sim.h>
101 #include <cam/cam_xpt_sim.h>
102 #include <cam/cam_debug.h>
103 
104 #include <cam/scsi/scsi_all.h>
105 #include <cam/scsi/scsi_message.h>
106 
107 #if NPCI > 0
108 #include <machine/bus_memio.h>
109 #endif
110 #include <machine/bus_pio.h>
111 #include <machine/bus.h>
112 #include <machine/clock.h>
113 #include <machine/endian.h>
114 #include <sys/rman.h>
115 
116 #include <vm/vm.h>
117 #include <vm/vm_param.h>
118 #include <vm/pmap.h>
119 
120 #include <dev/aic7xxx/aic7xxx.h>
121 #include <dev/aic7xxx/aicasm_insformat.h>
122 
123 #include <aic7xxx_reg.h>
124 #include <aic7xxx_seq.h>
125 
126 #include <sys/kernel.h>
127 
128 #ifndef AHC_TMODE_ENABLE
129 #define AHC_TMODE_ENABLE 0
130 #endif
131 
132 #define MAX(a,b) (((a) > (b)) ? (a) : (b))
133 #define MIN(a,b) (((a) < (b)) ? (a) : (b))
134 #define ALL_CHANNELS '\0'
135 #define ALL_TARGETS_MASK 0xFFFF
136 #define INITIATOR_WILDCARD	(~0)
137 
138 #define	SIM_IS_SCSIBUS_B(ahc, sim)	\
139 	((sim) == ahc->sim_b)
140 #define	SIM_CHANNEL(ahc, sim)	\
141 	(((sim) == ahc->sim_b) ? 'B' : 'A')
142 #define	SIM_SCSI_ID(ahc, sim)	\
143 	(((sim) == ahc->sim_b) ? ahc->our_id_b : ahc->our_id)
144 #define	SIM_PATH(ahc, sim)	\
145 	(((sim) == ahc->sim_b) ? ahc->path_b : ahc->path)
146 #define SCSIID_TARGET(ahc, scsiid) \
147 	(((scsiid) & ((((ahc)->features & AHC_TWIN) != 0) ? TWIN_TID : TID)) \
148 	>> TID_SHIFT)
149 #define SCSIID_OUR_ID(scsiid) \
150 	((scsiid) & OID)
151 #define SCSIID_CHANNEL(ahc, scsiid) \
152 	((((ahc)->features & AHC_TWIN) != 0) \
153         ? ((((scsiid) & TWIN_CHNLB) != 0) ? 'B' : 'A') \
154        : 'A')
155 #define	SCB_IS_SCSIBUS_B(ahc, scb) \
156 	(SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) == 'B')
157 #define	SCB_GET_OUR_ID(scb) \
158 	SCSIID_OUR_ID((scb)->hscb->scsiid)
159 #define	SCB_GET_TARGET(ahc, scb) \
160 	SCSIID_TARGET((ahc), (scb)->hscb->scsiid)
161 #define	SCB_GET_CHANNEL(ahc, scb) \
162 	SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid)
163 #define	SCB_GET_LUN(scb) \
164 	((scb)->hscb->lun)
165 #define SCB_GET_TARGET_OFFSET(ahc, scb)	\
166 	(SCB_GET_TARGET(ahc, scb) + (SCB_IS_SCSIBUS_B(ahc, scb) ? 8 : 0))
167 #define SCB_GET_TARGET_MASK(ahc, scb) \
168 	(0x01 << (SCB_GET_TARGET_OFFSET(ahc, scb)))
169 #define TCL_TARGET_OFFSET(tcl) \
170 	((((tcl) >> 4) & TID) >> 4)
171 #define BUILD_TCL(scsiid, lun) \
172 	((lun) | (((scsiid) & TID) >> 4))
173 #define BUILD_SCSIID(ahc, sim, target_id, our_id) \
174 	((((target_id) << TID_SHIFT) & TID) | (our_id) \
175 	| (SIM_IS_SCSIBUS_B(ahc, sim) ? TWIN_CHNLB : 0))
176 
177 #define ccb_scb_ptr spriv_ptr0
178 #define ccb_ahc_ptr spriv_ptr1
179 
180 char *ahc_chip_names[] =
181 {
182 	"NONE",
183 	"aic7770",
184 	"aic7850",
185 	"aic7855",
186 	"aic7859",
187 	"aic7860",
188 	"aic7870",
189 	"aic7880",
190 	"aic7895",
191 	"aic7890/91",
192 	"aic7896/97",
193 	"aic7892",
194 	"aic7899"
195 };
196 
197 typedef enum {
198 	ROLE_UNKNOWN,
199 	ROLE_INITIATOR,
200 	ROLE_TARGET
201 } role_t;
202 
203 struct ahc_devinfo {
204 	int	  our_scsiid;
205 	int	  target_offset;
206 	uint16_t target_mask;
207 	uint8_t  target;
208 	uint8_t  lun;
209 	char	  channel;
210 	role_t	  role;		/*
211 				 * Only guaranteed to be correct if not
212 				 * in the busfree state.
213 				 */
214 };
215 
216 typedef enum {
217 	SEARCH_COMPLETE,
218 	SEARCH_COUNT,
219 	SEARCH_REMOVE
220 } ahc_search_action;
221 
222 #ifdef AHC_DEBUG
223 static int     ahc_debug = AHC_DEBUG;
224 #endif
225 
226 #if NPCI > 0
227 void ahc_pci_intr(struct ahc_softc *ahc);
228 #endif
229 
230 static int	ahcinitscbdata(struct ahc_softc *ahc);
231 static void	ahcfiniscbdata(struct ahc_softc *ahc);
232 
233 static bus_dmamap_callback_t	ahcdmamapcb;
234 
235 #if UNUSED
236 static void	ahc_dump_targcmd(struct target_cmd *cmd);
237 #endif
238 static void	ahc_shutdown(void *arg, int howto);
239 static cam_status
240 		ahc_find_tmode_devs(struct ahc_softc *ahc,
241 				    struct cam_sim *sim, union ccb *ccb,
242 				    struct tmode_tstate **tstate,
243 				    struct tmode_lstate **lstate,
244 				    int notfound_failure);
245 static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
246 static void	ahc_async(void *callback_arg, uint32_t code,
247 			  struct cam_path *path, void *arg);
248 static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
249 				int nsegments, int error);
250 static void	ahc_poll(struct cam_sim *sim);
251 static void	ahc_setup_data(struct ahc_softc *ahc,
252 			       struct ccb_scsiio *csio, struct scb *scb);
253 static void	ahc_freeze_devq(struct ahc_softc *ahc, struct cam_path *path);
254 static void	ahcallocscbs(struct ahc_softc *ahc);
255 #if UNUSED
256 static void	ahc_scb_devinfo(struct ahc_softc *ahc,
257 				struct ahc_devinfo *devinfo,
258 				struct scb *scb);
259 #endif
260 static void	ahc_fetch_devinfo(struct ahc_softc *ahc,
261 				  struct ahc_devinfo *devinfo);
262 static void	ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id,
263 				    u_int target, u_int lun, char channel,
264 				    role_t role);
265 static u_int	ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev);
266 static void	ahc_done(struct ahc_softc *ahc, struct scb *scbp);
267 static struct tmode_tstate *
268 		ahc_alloc_tstate(struct ahc_softc *ahc,
269 				 u_int scsi_id, char channel);
270 static void	ahc_free_tstate(struct ahc_softc *ahc,
271 				u_int scsi_id, char channel, int force);
272 static void	ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim,
273 				  union ccb *ccb);
274 static void	ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask);
275 static int	ahc_handle_target_cmd(struct ahc_softc *ahc,
276 				      struct target_cmd *cmd);
277 static void 	ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
278 static void	ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat);
279 static void	ahc_build_transfer_msg(struct ahc_softc *ahc,
280 				       struct ahc_devinfo *devinfo);
281 static void	ahc_setup_initiator_msgout(struct ahc_softc *ahc,
282 					   struct ahc_devinfo *devinfo,
283 					   struct scb *scb);
284 static void	ahc_setup_target_msgin(struct ahc_softc *ahc,
285 				       struct ahc_devinfo *devinfo);
286 static int	ahc_handle_msg_reject(struct ahc_softc *ahc,
287 				      struct ahc_devinfo *devinfo);
288 static void	ahc_clear_msg_state(struct ahc_softc *ahc);
289 static void	ahc_handle_message_phase(struct ahc_softc *ahc,
290 					 struct cam_path *path);
291 static int	ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full);
292 typedef enum {
293 	MSGLOOP_IN_PROG,
294 	MSGLOOP_MSGCOMPLETE,
295 	MSGLOOP_TERMINATED
296 } msg_loop_stat;
297 static int ahc_parse_msg(struct ahc_softc *ahc, struct cam_path *path,
298 				   struct ahc_devinfo *devinfo);
299 static void	ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
300 					    struct ahc_devinfo *devinfo);
301 static void	ahc_handle_devreset(struct ahc_softc *ahc,
302 				    struct ahc_devinfo *devinfo,
303 				    cam_status status, ac_code acode,
304 				    char *message,
305 				    int verbose_level);
306 #ifdef AHC_DUMP_SEQ
307 static void	ahc_dumpseq(struct ahc_softc *ahc);
308 #endif
309 static void	ahc_loadseq(struct ahc_softc *ahc);
310 static int	ahc_check_patch(struct ahc_softc *ahc,
311 				struct patch **start_patch,
312 				u_int start_instr, u_int *skip_addr);
313 static void	ahc_download_instr(struct ahc_softc *ahc,
314 				   u_int instrptr, uint8_t *dconsts);
315 static int	ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
316 			      int target, char channel, int lun, u_int tag,
317 			      role_t role);
318 #ifdef AHC_DEBUG
319 static void	ahc_print_scb(struct scb *scb);
320 #endif
321 static int	ahc_search_qinfifo(struct ahc_softc *ahc, int target,
322 				   char channel, int lun, u_int tag,
323 				   role_t role, uint32_t status,
324 				   ahc_search_action action);
325 static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
326 			      union ccb *ccb);
327 static int	ahc_reset_channel(struct ahc_softc *ahc, char channel,
328 				  int initiate_reset);
329 static int	ahc_abort_scbs(struct ahc_softc *ahc, int target,
330 			       char channel, int lun, u_int tag, role_t role,
331 			       uint32_t status);
332 static int	ahc_search_disc_list(struct ahc_softc *ahc, int target,
333 				     char channel, int lun, u_int tag,
334 				     int stop_on_first, int remove,
335 				     int save_state);
336 static u_int	ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
337 					   u_int prev, u_int scbptr);
338 static void	ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
339 static void	ahc_clear_intstat(struct ahc_softc *ahc);
340 static void	ahc_reset_current_bus(struct ahc_softc *ahc);
341 static struct ahc_syncrate *
342 		ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period,
343 					u_int *ppr_options);
344 static struct ahc_syncrate *
345 		ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
346 				  u_int *ppr_options, u_int maxsync);
347 static u_int	ahc_find_period(struct ahc_softc *ahc, u_int scsirate,
348 				u_int maxsync);
349 static void	ahc_validate_offset(struct ahc_softc *ahc,
350 				    struct ahc_syncrate *syncrate,
351 				    u_int *offset, int wide);
352 static void	ahc_validate_width(struct ahc_softc *ahc, u_int *bus_width);
353 static void	ahc_update_target_msg_request(struct ahc_softc *ahc,
354 					      struct ahc_devinfo *devinfo,
355 					      struct ahc_initiator_tinfo *tinfo,
356 					      int force, int paused);
357 static int	ahc_create_path(struct ahc_softc *ahc,
358 				struct ahc_devinfo *devinfo,
359 				struct cam_path **path);
360 static void	ahc_set_syncrate(struct ahc_softc *ahc,
361 				 struct ahc_devinfo *devinfo,
362 				 struct cam_path *path,
363 				 struct ahc_syncrate *syncrate,
364 				 u_int period, u_int offset,
365 				 u_int ppr_options, u_int type,
366 				 int paused);
367 static void	ahc_set_width(struct ahc_softc *ahc,
368 			      struct ahc_devinfo *devinfo,
369 			      struct cam_path *path, u_int width, u_int type,
370 			      int paused);
371 static void	ahc_set_tags(struct ahc_softc *ahc,
372 			     struct ahc_devinfo *devinfo,
373 			     int enable);
374 static void	ahc_construct_sdtr(struct ahc_softc *ahc,
375 				   u_int period, u_int offset);
376 
377 static void	ahc_construct_wdtr(struct ahc_softc *ahc, u_int bus_width);
378 static void	ahc_construct_ppr(struct ahc_softc *ahc, u_int period,
379 				  u_int offset, u_int bus_width,
380 				  u_int ppr_options);
381 
382 static __inline int ahc_check_residual(struct scb *scb);
383 static void	ahc_calc_residual(struct scb *scb);
384 
385 static void	ahc_update_pending_syncrates(struct ahc_softc *ahc);
386 
387 static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
388 
389 static timeout_t
390 		ahc_timeout;
391 static void	ahc_queue_lstate_event(struct ahc_softc *ahc,
392 				       struct tmode_lstate *lstate,
393 				       u_int initiator_id, u_int event_type,
394 				       u_int event_arg);
395 static void	ahc_send_lstate_events(struct ahc_softc *ahc,
396 				       struct tmode_lstate *lstate);
397 static void	restart_sequencer(struct ahc_softc *ahc);
398 static __inline u_int ahc_index_busy_tcl(struct ahc_softc *ahc,
399 					 u_int tcl, int unbusy);
400 
401 static __inline void	   ahc_freeze_ccb(union ccb* ccb);
402 static __inline cam_status ahc_ccb_status(union ccb* ccb);
403 static __inline void	   ahcsetccbstatus(union ccb* ccb,
404 					   cam_status status);
405 static void		   ahc_run_untagged_queues(struct ahc_softc *);
406 static void		   ahc_run_untagged_queue(struct ahc_softc *,
407 						  struct scb_tailq *);
408 static void		   ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
409 static void		   ahc_run_qoutfifo(struct ahc_softc *ahc);
410 
411 static __inline struct ahc_initiator_tinfo *
412 			   ahc_fetch_transinfo(struct ahc_softc *ahc,
413 					       char channel,
414 					       u_int our_id, u_int target,
415 					       struct tmode_tstate **tstate);
416 static __inline struct ahc_dma_seg *
417 			    ahc_sg_bus_to_virt(struct scb *scb,
418 					       uint32_t sg_busaddr);
419 static __inline uint32_t
420 			    ahc_sg_virt_to_bus(struct scb *scb,
421 					       struct ahc_dma_seg *sg);
422 static __inline void	    ahc_queue_scb(struct ahc_softc *ahc,
423 					  struct scb *scb);
424 static void	   ahcfreescb(struct ahc_softc *ahc, struct scb *scb);
425 static __inline	struct scb *ahcgetscb(struct ahc_softc *ahc);
426 static __inline void	    ahc_freeze_untagged_queues(struct ahc_softc *ahc);
427 static __inline void	    ahc_release_untagged_queues(struct ahc_softc *ahc);
428 
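/*
 * Return the bus (DMA) address of the hardware SCB at the given index
 * within our physically contiguous array of hardware SCBs.
 */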
429 static __inline uint32_t
430 ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
431 {
432 	return (ahc->scb_data->hscb_busaddr
433 		+ (sizeof(struct hardware_scb) * index));
434 }
435 
436 #define AHC_BUSRESET_DELAY	250	/* Reset delay in us */
437 
438 /*
439  * Restart the sequencer program from address zero
440  */
441 static void
442 restart_sequencer(struct ahc_softc *ahc)
443 {
444 	u_int i;
445 
446 	pause_sequencer(ahc);
447 
448 	/*
449 	 * Every time we restart the sequencer, there
450 	 * is the possibility that we have restarted
451 	 * within a three-instruction window where an
452 	 * SCB has been marked free but has not made it
453 	 * onto the free list.  Since SCSI events (bus reset,
454 	 * unexpected bus free) will always freeze the
455 	 * sequencer, we cannot close this window.  To
456 	 * avoid losing an SCB, we reconstitute the free
457 	 * list every time we restart the sequencer.
458 	 */
459 	ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
460 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
461 
462 		ahc_outb(ahc, SCBPTR, i);
463 		if (ahc_inb(ahc, SCB_TAG) == SCB_LIST_NULL) {
464 			ahc_add_curscb_to_free_list(ahc);
465 		}
466 	}
467 	ahc_outb(ahc, SEQCTL, FASTMODE|SEQRESET);
468 	unpause_sequencer(ahc);
469 }
470 
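/*
 * Return the tag of the non-tagged SCB, if any, currently active on the
 * target addressed by tcl.  If 'unbusy' is set, also mark that target as
 * no longer busy in the sequencer's BUSY_TARGETS array.
 */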
471 static __inline u_int
472 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl, int unbusy)
473 {
474 	u_int scbid;
475 	u_int target_offset;
476 
477 	target_offset = TCL_TARGET_OFFSET(tcl);
478 	scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
479 	if (unbusy)
480 		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
481 
482 	return (scbid);
483 }
484 
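/*
 * Report whether the sequencer recorded a valid data residual for this
 * SCB (SG_RESID_VALID set in the SCB's sgptr field).
 */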
485 static __inline int
486 ahc_check_residual(struct scb *scb)
487 {
488 	struct status_pkt *sp;
489 
490 	sp = &scb->hscb->shared_data.status;
491 	if ((scb->hscb->sgptr & SG_RESID_VALID) != 0)
492 		return (1);
493 	return (0);
494 }
495 
496 static __inline void
497 ahc_freeze_ccb(union ccb* ccb)
498 {
499 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
500 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
501 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
502 	}
503 }
504 
505 static __inline cam_status
506 ahc_ccb_status(union ccb* ccb)
507 {
508 	return (ccb->ccb_h.status & CAM_STATUS_MASK);
509 }
510 
511 static __inline void
512 ahcsetccbstatus(union ccb* ccb, cam_status status)
513 {
514 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
515 	ccb->ccb_h.status |= status;
516 }
517 
518 static __inline struct ahc_initiator_tinfo *
519 ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
520 		    u_int remote_id, struct tmode_tstate **tstate)
521 {
522 	/*
523 	 * Transfer data structures are stored from the perspective
524 	 * of the target role.  Since the parameters for a connection
525 	 * in the initiator role to a given target are the same as
526 	 * when the roles are reversed, we pretend we are the target.
527 	 */
528 	if (channel == 'B')
529 		our_id += 8;
530 	*tstate = ahc->enabled_targets[our_id];
531 	return (&(*tstate)->transinfo[remote_id]);
532 }
533 
534 static __inline struct ahc_dma_seg *
535 ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
536 {
537 	int sg_index;
538 
539 	sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
540 	/* sg_list_phys points to entry 1, not 0 */
541 	sg_index++;
542 
543 	return (&scb->sg_list[sg_index]);
544 }
545 
546 static __inline uint32_t
547 ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
548 {
549 	int sg_index;
550 
551 	/* sg_list_phys points to entry 1, not 0 */
552 	sg_index = sg - &scb->sg_list[1];
553 
554 	return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
555 }
556 
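/*
 * Hand an SCB to the sequencer by placing its tag in the QINFIFO and
 * notifying the chip of our new queue position.
 */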
557 static __inline void
558 ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
559 {
560 	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
561 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
562 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
563 	} else {
564 		pause_sequencer(ahc);
565 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
566 		unpause_sequencer(ahc);
567 	}
568 }
569 
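/*
 * These two helpers nest and release the lock that holds off submission
 * from the driver-managed untagged transaction queues (only needed on
 * controllers without the AHC_SCB_BTT feature).
 */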
570 static __inline void
571 ahc_freeze_untagged_queues(struct ahc_softc *ahc)
572 {
573 	if ((ahc->features & AHC_SCB_BTT) == 0)
574 		ahc->untagged_queue_lock++;
575 }
576 
577 static __inline void
578 ahc_release_untagged_queues(struct ahc_softc *ahc)
579 {
580 	if ((ahc->features & AHC_SCB_BTT) == 0) {
581 		ahc->untagged_queue_lock--;
582 		if (ahc->untagged_queue_lock == 0)
583 			ahc_run_untagged_queues(ahc);
584 	}
585 }
586 
587 static void
588 ahc_run_untagged_queues(struct ahc_softc *ahc)
589 {
590 	int i;
591 
592 	for (i = 0; i < 16; i++)
593 		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
594 }
595 
596 static void
597 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
598 {
599 	struct scb *scb;
600 
601 	if (ahc->untagged_queue_lock != 0)
602 		return;
603 
604 	if ((scb = TAILQ_FIRST(queue)) != NULL
605 	 && (scb->flags & SCB_ACTIVE) == 0) {
606 		scb->flags |= SCB_ACTIVE;
607 		ahc_queue_scb(ahc, scb);
608 	}
609 }
610 
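/*
 * Service any target mode commands the sequencer has deposited in the
 * incoming target command array, stopping early if we lack the resources
 * to handle one.
 */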
611 static void
612 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
613 {
614 	struct target_cmd *cmd;
615 
616 	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
617 
618 		/*
619 		 * Only advance through the queue if we
620 		 * have the resources to process the command.
621 		 */
622 		if (ahc_handle_target_cmd(ahc, cmd) != 0)
623 			break;
624 
625 		ahc->tqinfifonext++;
626 		cmd->cmd_valid = 0;
627 
628 		/*
629 		 * Lazily update our position in the target mode incoming
630 		 * command queue as seen by the sequencer.
631 		 */
632 		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
633 			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
634 				u_int hs_mailbox;
635 
636 				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
637 				hs_mailbox &= ~HOST_TQINPOS;
638 				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
639 				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
640 			} else {
641 				if (!paused)
642 					pause_sequencer(ahc);
643 				ahc_outb(ahc, KERNEL_TQINPOS,
644 					 ahc->tqinfifonext & HOST_TQINPOS);
645 				if (!paused)
646 					unpause_sequencer(ahc);
647 			}
648 		}
649 	}
650 }
651 
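/*
 * Reap SCBs that the sequencer has posted to the QOUTFIFO as complete,
 * recording any residual before finishing them off with ahc_done().
 */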
652 static void
653 ahc_run_qoutfifo(struct ahc_softc *ahc)
654 {
655 	struct scb *scb;
656 	u_int  scb_index;
657 
658 	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
659 		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
660 		ahc->qoutfifo[ahc->qoutfifonext++] = SCB_LIST_NULL;
661 
662 		scb = &ahc->scb_data->scbarray[scb_index];
663 		if (scb_index >= ahc->scb_data->numscbs
664 		  || (scb->flags & SCB_ACTIVE) == 0) {
665 			printf("%s: WARNING no command for scb %d "
666 			       "(cmdcmplt)\nQOUTPOS = %d\n",
667 			       ahc_name(ahc), scb_index,
668 			       ahc->qoutfifonext - 1);
669 			continue;
670 		}
671 
672 		/*
673 		 * Save off the residual
674 		 * if there is one.
675 		 */
676 		if (ahc_check_residual(scb) != 0)
677 			ahc_calc_residual(scb);
678 		else
679 			scb->ccb->csio.resid = 0;
680 		ahc_done(ahc, scb);
681 	}
682 }
683 
684 
685 /*
686  * Return an SCB resource to the free list.
687  */
688 static void
689 ahcfreescb(struct ahc_softc *ahc, struct scb *scb)
690 {
691 	struct hardware_scb *hscb;
692 	int opri;
693 
694 	hscb = scb->hscb;
695 
696 	opri = splcam();
697 
698 	if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
699 	 && (scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
700 		scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
701 		ahc->flags &= ~AHC_RESOURCE_SHORTAGE;
702 	}
703 
704 	/* Clean up for the next user */
705 	scb->flags = SCB_FREE;
706 	hscb->control = 0;
707 
708 	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
709 	splx(opri);
710 }
711 
712 /*
713  * Get a free scb. If there are none, see if we can allocate a new SCB.
714  */
715 static __inline struct scb *
716 ahcgetscb(struct ahc_softc *ahc)
717 {
718 	struct scb *scbp;
719 	int opri;
720 
721 	opri = splcam();
722 	if ((scbp = SLIST_FIRST(&ahc->scb_data->free_scbs))) {
723 		SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
724 	} else {
725 		ahcallocscbs(ahc);
726 		scbp = SLIST_FIRST(&ahc->scb_data->free_scbs);
727 		if (scbp != NULL)
728 			SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
729 	}
730 
731 	splx(opri);
732 
733 	return (scbp);
734 }
735 
736 char *
737 ahc_name(struct ahc_softc *ahc)
738 {
739 	static char name[10];
740 
741 	snprintf(name, sizeof(name), "ahc%d", ahc->unit);
742 	return (name);
743 }
744 
745 #ifdef  AHC_DEBUG
746 static void
747 ahc_print_scb(struct scb *scb)
748 {
749 	int i;
750 
751 	struct hardware_scb *hscb = scb->hscb;
752 
753 	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
754 	       scb,
755 	       hscb->control,
756 	       hscb->scsiid,
757 	       hscb->lun,
758 	       hscb->cdb_len);
759 	i = 0;
760 	printf("Shared Data: %#02x %#02x %#02x %#02x\n",
761 	       hscb->shared_data.cdb[i++],
762 	       hscb->shared_data.cdb[i++],
763 	       hscb->shared_data.cdb[i++],
764 	       hscb->shared_data.cdb[i++]);
765 	printf("             %#02x %#02x %#02x %#02x\n",
766 	       hscb->shared_data.cdb[i++],
767 	       hscb->shared_data.cdb[i++],
768 	       hscb->shared_data.cdb[i++],
769 	       hscb->shared_data.cdb[i++]);
770 	printf("             %#02x %#02x %#02x %#02x\n",
771 	       hscb->shared_data.cdb[i++],
772 	       hscb->shared_data.cdb[i++],
773 	       hscb->shared_data.cdb[i++],
774 	       hscb->shared_data.cdb[i++]);
775 	printf("        dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
776 		hscb->dataptr,
777 		hscb->datacnt,
778 		hscb->sgptr,
779 		hscb->tag);
780 	if (scb->sg_count > 0) {
781 		for (i = 0; i < scb->sg_count; i++) {
782 			printf("sg[%d] - Addr 0x%x : Length %d\n",
783 			       i,
784 			       scb->sg_list[i].addr,
785 			       scb->sg_list[i].len);
786 		}
787 	}
788 }
789 #endif
790 
791 static struct {
792         uint8_t errno;
793 	char *errmesg;
794 } hard_error[] = {
795 	{ ILLHADDR,	"Illegal Host Access" },
796 	{ ILLSADDR,	"Illegal Sequencer Address referenced" },
797 	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
798 	{ SQPARERR,	"Sequencer Parity Error" },
799 	{ DPARERR,	"Data-path Parity Error" },
800 	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
801 	{ PCIERRSTAT,	"PCI Error detected" },
802 	{ CIOPARERR,	"CIOBUS Parity Error" },
803 };
804 static const int num_errors = sizeof(hard_error)/sizeof(hard_error[0]);
805 
806 static struct {
807         uint8_t phase;
808         uint8_t mesg_out; /* Message response to parity errors */
809 	char *phasemsg;
810 } phase_table[] = {
811 	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
812 	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
813 	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
814 	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
815 	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
816 	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
817 	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
818 	{ 0,		MSG_NOOP,		"in unknown phase"	}
819 };
820 static const u_int num_phases =
821     (sizeof(phase_table)/sizeof(phase_table[0])) - 1;
822 
823 /*
824  * Valid SCSIRATE values.  (p. 3-17)
825  * Provides a mapping of transfer periods in ns to the proper value to
826  * stick in the SCSIRATE register to use that transfer rate.
827  */
828 #define AHC_SYNCRATE_DT		0
829 #define AHC_SYNCRATE_ULTRA2	1
830 #define AHC_SYNCRATE_ULTRA	3
831 #define AHC_SYNCRATE_FAST	6
832 static struct ahc_syncrate ahc_syncrates[] = {
833       /* ultra2    fast/ultra  period     rate */
834 	{ 0x42,      0x000,      9,      "80.0" },
835 	{ 0x03,      0x000,     10,      "40.0" },
836 	{ 0x04,      0x000,     11,      "33.0" },
837 	{ 0x05,      0x100,     12,      "20.0" },
838 	{ 0x06,      0x110,     15,      "16.0" },
839 	{ 0x07,      0x120,     18,      "13.4" },
840 	{ 0x08,      0x000,     25,      "10.0" },
841 	{ 0x19,      0x010,     31,      "8.0"  },
842 	{ 0x1a,      0x020,     37,      "6.67" },
843 	{ 0x1b,      0x030,     43,      "5.7"  },
844 	{ 0x1c,      0x040,     50,      "5.0"  },
845 	{ 0x00,      0x050,     56,      "4.4"  },
846 	{ 0x00,      0x060,     62,      "4.0"  },
847 	{ 0x00,      0x070,     68,      "3.6"  },
848 	{ 0x00,      0x000,      0,      NULL   }
849 };
850 
851 void
852 ahc_init_probe_config(struct ahc_probe_config *probe_config)
853 {
854 	probe_config->description = NULL;
855 	probe_config->channel = 'A';
856 	probe_config->channel_b = 'B';
857 	probe_config->chip = AHC_NONE;
858 	probe_config->features = AHC_FENONE;
859 	probe_config->bugs = AHC_BUGNONE;
860 	probe_config->flags = AHC_FNONE;
861 }
862 
863 /*
864  * Allocate a controller structure for a new device and initialize it.
865  */
866 struct ahc_softc *
867 ahc_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id,
868 	  bus_dma_tag_t parent_dmat, struct ahc_probe_config *config,
869 	  struct scb_data *scb_data)
870 {
871 	/*
872 	 * find unit and check we have that many defined
873 	 */
874 	struct  ahc_softc *ahc;
875 	size_t	alloc_size;
876 	int	i;
877 
878 	/*
879 	 * Allocate a storage area for us.
880 	 */
881 	if (scb_data == NULL)
882 		/*
883 		 * We are not sharing SCB space with another controller
884 		 * so allocate our own SCB data space.
885 		 */
886 		alloc_size = sizeof(struct full_ahc_softc);
887 	else
888 		alloc_size = sizeof(struct ahc_softc);
889 	ahc = malloc(alloc_size, M_DEVBUF, M_NOWAIT);
890 	if (!ahc) {
891 		device_printf(dev, "cannot malloc softc!\n");
892 		return NULL;
893 	}
894 	bzero(ahc, alloc_size);
895 	LIST_INIT(&ahc->pending_ccbs);
896 	ahc->device = dev;
897 	ahc->unit = device_get_unit(dev);
898 	ahc->regs_res_type = regs_type;
899 	ahc->regs_res_id = regs_id;
900 	ahc->regs = regs;
901 	ahc->tag = rman_get_bustag(regs);
902 	ahc->bsh = rman_get_bushandle(regs);
903 	ahc->parent_dmat = parent_dmat;
904 	ahc->chip = config->chip;
905 	ahc->features = config->features;
906 	ahc->bugs = config->bugs;
907 	ahc->flags = config->flags;
908 	ahc->channel = config->channel;
909 	for (i = 0; i < 16; i++)
910 		TAILQ_INIT(&ahc->untagged_queues[i]);
911 
912 	if (scb_data == NULL) {
913 		struct full_ahc_softc* full_softc = (struct full_ahc_softc*)ahc;
914 		ahc->scb_data = &full_softc->scb_data_storage;
915 	} else
916 		ahc->scb_data = scb_data;
917 
918 	ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS) | INTEN;
919 	/* The IRQMS bit is only valid on VL and EISA chips */
920 	if ((ahc->chip & AHC_PCI) != 0)
921 		ahc->unpause &= ~IRQMS;
922 	ahc->pause = ahc->unpause | PAUSE;
923 	return (ahc);
924 }
925 
926 void
927 ahc_free(struct ahc_softc *ahc)
928 {
930 	ahcfiniscbdata(ahc);
931 	switch (ahc->init_level) {
932 	case 3:
933 		bus_dmamap_unload(ahc->shared_data_dmat,
934 				  ahc->shared_data_dmamap);
935 	case 2:
936 		bus_dmamem_free(ahc->shared_data_dmat, ahc->qoutfifo,
937 				ahc->shared_data_dmamap);
938 		bus_dmamap_destroy(ahc->shared_data_dmat,
939 				   ahc->shared_data_dmamap);
940 	case 1:
941 		bus_dma_tag_destroy(ahc->buffer_dmat);
942 		break;
943 	}
944 
945 	if (ahc->regs != NULL)
946 		bus_release_resource(ahc->device, ahc->regs_res_type,
947 				     ahc->regs_res_id, ahc->regs);
948 	if (ahc->irq != NULL)
949 		bus_release_resource(ahc->device, ahc->irq_res_type,
950 				     0, ahc->irq);
951 
952 	free(ahc, M_DEVBUF);
953 	return;
954 }
955 
956 static int
957 ahcinitscbdata(struct ahc_softc *ahc)
958 {
959 	struct scb_data *scb_data;
960 	int i;
961 
962 	scb_data = ahc->scb_data;
963 	SLIST_INIT(&scb_data->free_scbs);
964 	SLIST_INIT(&scb_data->sg_maps);
965 
966 	/* Allocate SCB resources */
967 	scb_data->scbarray =
968 	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX,
969 				 M_DEVBUF, M_NOWAIT);
970 	if (scb_data->scbarray == NULL)
971 		return (ENOMEM);
972 	bzero(scb_data->scbarray, sizeof(struct scb) * AHC_SCB_MAX);
973 
974 	/* Determine the number of hardware SCBs and initialize them */
975 
976 	scb_data->maxhscbs = ahc_probe_scbs(ahc);
977 	/* SCB 0 heads the free list */
978 	ahc_outb(ahc, FREE_SCBH, 0);
979 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
980 		ahc_outb(ahc, SCBPTR, i);
981 
982 		/* Clear the control byte. */
983 		ahc_outb(ahc, SCB_CONTROL, 0);
984 
985 		/* Set the next pointer */
986 		ahc_outb(ahc, SCB_NEXT, i+1);
987 
988 		/* Make the tag number invalid */
989 		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
990 	}
991 
992 	/* Make sure that the last SCB terminates the free list */
993 	ahc_outb(ahc, SCBPTR, i-1);
994 	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
995 
996 	/* Ensure we clear the 0 SCB's control byte. */
997 	ahc_outb(ahc, SCBPTR, 0);
998 	ahc_outb(ahc, SCB_CONTROL, 0);
999 
1000 	scb_data->maxhscbs = i;
1001 
1002 	if (ahc->scb_data->maxhscbs == 0)
1003 		panic("%s: No SCB space found", ahc_name(ahc));
1004 
1005 	/*
1006 	 * Create our DMA tags.  These tags define the kinds of device
1007 	 * accessible memory allocations and memory mappings we will
1008 	 * need to perform during normal operation.
1009 	 *
1010 	 * Unless we need to further restrict the allocation, we rely
1011 	 * on the restrictions of the parent dmat, hence the common
1012 	 * use of MAXADDR and MAXSIZE.
1013 	 */
1014 
1015 	/* DMA tag for our hardware scb structures */
1016 	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
1017 			       /*lowaddr*/BUS_SPACE_MAXADDR,
1018 			       /*highaddr*/BUS_SPACE_MAXADDR,
1019 			       /*filter*/NULL, /*filterarg*/NULL,
1020 			       AHC_SCB_MAX * sizeof(struct hardware_scb),
1021 			       /*nsegments*/1,
1022 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1023 			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
1024 		goto error_exit;
1025 	}
1026 
1027 	scb_data->init_level++;
1028 
1029 	/* Allocation for our ccbs */
1030 	if (bus_dmamem_alloc(scb_data->hscb_dmat, (void **)&scb_data->hscbs,
1031 			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
1032 		goto error_exit;
1033 	}
1034 
1035 	scb_data->init_level++;
1036 
1037 	/* And permanently map them */
1038 	bus_dmamap_load(scb_data->hscb_dmat, scb_data->hscb_dmamap,
1039 			scb_data->hscbs,
1040 			AHC_SCB_MAX * sizeof(struct hardware_scb),
1041 			ahcdmamapcb, &scb_data->hscb_busaddr, /*flags*/0);
1042 
1043 	scb_data->init_level++;
1044 
1045 	/* DMA tag for our sense buffers */
1046 	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
1047 			       /*lowaddr*/BUS_SPACE_MAXADDR,
1048 			       /*highaddr*/BUS_SPACE_MAXADDR,
1049 			       /*filter*/NULL, /*filterarg*/NULL,
1050 			       AHC_SCB_MAX * sizeof(struct scsi_sense_data),
1051 			       /*nsegments*/1,
1052 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1053 			       /*flags*/0, &scb_data->sense_dmat) != 0) {
1054 		goto error_exit;
1055 	}
1056 
1057 	scb_data->init_level++;
1058 
1059 	/* Allocate them */
1060 	if (bus_dmamem_alloc(scb_data->sense_dmat, (void **)&scb_data->sense,
1061 			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
1062 		goto error_exit;
1063 	}
1064 
1065 	scb_data->init_level++;
1066 
1067 	/* And permanently map them */
1068 	bus_dmamap_load(scb_data->sense_dmat, scb_data->sense_dmamap,
1069 			scb_data->sense,
1070 			AHC_SCB_MAX * sizeof(struct scsi_sense_data),
1071 			ahcdmamapcb, &scb_data->sense_busaddr, /*flags*/0);
1072 
1073 	scb_data->init_level++;
1074 
1075 	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
1076 	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
1077 			       /*lowaddr*/BUS_SPACE_MAXADDR,
1078 			       /*highaddr*/BUS_SPACE_MAXADDR,
1079 			       /*filter*/NULL, /*filterarg*/NULL,
1080 			       PAGE_SIZE, /*nsegments*/1,
1081 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1082 			       /*flags*/0, &scb_data->sg_dmat) != 0) {
1083 		goto error_exit;
1084 	}
1085 
1086         scb_data->init_level++;
1087 
1088 	/* Perform initial CCB allocation */
1089 	bzero(scb_data->hscbs, AHC_SCB_MAX * sizeof(struct hardware_scb));
1090 	ahcallocscbs(ahc);
1091 
1092 	if (scb_data->numscbs == 0) {
1093 		printf("%s: ahc_init_scb_data - "
1094 		       "Unable to allocate initial scbs\n",
1095 		       ahc_name(ahc));
1096 		goto error_exit;
1097 	}
1098 
1099 	/*
1100 	 * Note that we were successful.
1101 	 */
1102 	return (0);
1103 
1104 error_exit:
1105 
1106 	return ENOMEM;
1107 }
1108 
1109 static void
1110 ahcfiniscbdata(struct ahc_softc *ahc)
1111 {
1112 	struct scb_data *scb_data;
1113 
1114 	scb_data = ahc->scb_data;
1115 
1116 	switch (scb_data->init_level) {
1117 	default:
1118 	case 7:
1119 	{
1120 		struct sg_map_node *sg_map;
1121 
1122 		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
1123 			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
1124 			bus_dmamap_unload(scb_data->sg_dmat,
1125 					  sg_map->sg_dmamap);
1126 			bus_dmamem_free(scb_data->sg_dmat, sg_map->sg_vaddr,
1127 					sg_map->sg_dmamap);
1128 			free(sg_map, M_DEVBUF);
1129 		}
1130 		bus_dma_tag_destroy(scb_data->sg_dmat);
1131 	}
1132 	case 6:
1133 		bus_dmamap_unload(scb_data->sense_dmat,
1134 				  scb_data->sense_dmamap);
1135 	case 5:
1136 		bus_dmamem_free(scb_data->sense_dmat, scb_data->sense,
1137 				scb_data->sense_dmamap);
1138 		bus_dmamap_destroy(scb_data->sense_dmat,
1139 				   scb_data->sense_dmamap);
1140 	case 4:
1141 		bus_dma_tag_destroy(scb_data->sense_dmat);
1142 	case 3:
1143 		bus_dmamap_unload(scb_data->hscb_dmat, scb_data->hscb_dmamap);
1144 	case 2:
1145 		bus_dmamem_free(scb_data->hscb_dmat, scb_data->hscbs,
1146 				scb_data->hscb_dmamap);
1147 		bus_dmamap_destroy(scb_data->hscb_dmat, scb_data->hscb_dmamap);
1148 	case 1:
1149 		bus_dma_tag_destroy(scb_data->hscb_dmat);
1150 		break;
1151 	}
1152 	if (scb_data->scbarray != NULL)
1153 		free(scb_data->scbarray, M_DEVBUF);
1154 }
1155 
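/*
 * Callback for bus_dmamap_load(): record the bus address of the single
 * segment backing one of our static allocations.
 */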
1156 static void
1157 ahcdmamapcb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1158 {
1159 	bus_addr_t *baddr;
1160 
1161 	baddr = (bus_addr_t *)arg;
1162 	*baddr = segs->ds_addr;
1163 }
1164 
1165 int
1166 ahc_reset(struct ahc_softc *ahc)
1167 {
1168 	u_int	sblkctl;
1169 	u_int	sxfrctl1;
1170 	int	wait;
1171 
1172 #ifdef AHC_DUMP_SEQ
1173 	if (ahc->init_level == 0)
1174 		ahc_dumpseq(ahc);
1175 #endif
1176 
1177 	/* Cache STPWEN.  It is cleared by a chip reset */
1178 	pause_sequencer(ahc);
1179 	sxfrctl1 = ahc_inb(ahc, SXFRCTL1) & STPWEN;
1180 	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
1181 	/*
1182 	 * Ensure that the reset has finished
1183 	 */
1184 	wait = 1000;
1185 	do {
1186 		DELAY(1000);
1187 	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
1188 
1189 	if (wait == 0) {
1190 		printf("%s: WARNING - Failed chip reset!  "
1191 		       "Trying to initialize anyway.\n", ahc_name(ahc));
1192 	}
1193 	ahc_outb(ahc, HCNTRL, ahc->pause);
1194 	/*
1195 	 * Reload sxfrctl1 with the cached value of STPWEN
1196 	 * to minimize the amount of time our terminators
1197 	 * are disabled.  If a BIOS has initialized the chip,
1198 	 * then sxfrctl1 will have the correct value.  If
1199 	 * not, STPWEN will be false (the value after a POST)
1200 	 * and this action will be harmless.
1201 	 *
1202 	 * We must actually always initialize STPWEN to 1
1203 	 * before we restore the saved value.  STPWEN is
1204 	 * initialized to a tri-state condition which can
1205 	 * only be cleared by turning it on.
1206 	 */
1207 	ahc_outb(ahc, SXFRCTL1, sxfrctl1|STPWEN);
1208 	ahc_outb(ahc, SXFRCTL1, sxfrctl1);
1209 
1210 	/* Determine channel configuration */
1211 	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
1212 	/* No Twin Channel PCI cards */
1213 	if ((ahc->chip & AHC_PCI) != 0)
1214 		sblkctl &= ~SELBUSB;
1215 	switch (sblkctl) {
1216 	case 0:
1217 		/* Single Narrow Channel */
1218 		break;
1219 	case 2:
1220 		/* Wide Channel */
1221 		ahc->features |= AHC_WIDE;
1222 		break;
1223 	case 8:
1224 		/* Twin Channel */
1225 		ahc->features |= AHC_TWIN;
1226 		break;
1227 	default:
1228 		printf(" Unsupported adapter type.  Ignoring\n");
1229 		return(-1);
1230 	}
1231 
1232 	return (0);
1233 }
1234 
1235 /*
1236  * Called when we have an active connection to a target on the bus,
1237  * this function finds the nearest syncrate to the input period limited
1238  * by the capabilities of the bus connectivity of the target.
1239  */
1240 static struct ahc_syncrate *
1241 ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period,
1242 			u_int *ppr_options) {
1243 	u_int	maxsync;
1244 
1245 	if ((ahc->features & AHC_ULTRA2) != 0) {
1246 		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1247 		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1248 			maxsync = AHC_SYNCRATE_DT;
1249 		} else {
1250 			maxsync = AHC_SYNCRATE_ULTRA;
1251 			/* Can't do DT on an SE bus */
1252 			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1253 		}
1254 	} else if ((ahc->features & AHC_ULTRA) != 0) {
1255 		maxsync = AHC_SYNCRATE_ULTRA;
1256 	} else {
1257 		maxsync = AHC_SYNCRATE_FAST;
1258 	}
1259 	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1260 }
1261 
1262 /*
1263  * Look up the valid period to SCSIRATE conversion in our table.
1264  * Return the period and offset that should be sent to the target
1265  * if this was the beginning of an SDTR.
1266  */
1267 static struct ahc_syncrate *
1268 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1269 		  u_int *ppr_options, u_int maxsync)
1270 {
1271 	struct ahc_syncrate *syncrate;
1272 
1273 	if ((ahc->features & AHC_DT) == 0)
1274 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1275 
1276 	for (syncrate = &ahc_syncrates[maxsync];
1277 	     syncrate->rate != NULL;
1278 	     syncrate++) {
1279 
1280 		/*
1281 		 * The Ultra2 table doesn't go as low
1282 		 * as for the Fast/Ultra cards.
1283 		 */
1284 		if ((ahc->features & AHC_ULTRA2) != 0
1285 		 && (syncrate->sxfr_u2 == 0))
1286 			break;
1287 
1288 		/* Skip any DT entries if DT is not available */
1289 		if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1290 		 && (syncrate->sxfr_u2 & DT_SXFR) != 0)
1291 			continue;
1292 
1293 		if (*period <= syncrate->period) {
1294 			/*
1295 			 * When responding to a target that requests
1296 			 * sync, the requested rate may fall between
1297 			 * two rates that we can output, but still be
1298 			 * a rate that we can receive.  Because of this,
1299 			 * we want to respond to the target with
1300 			 * the same rate that it sent to us even
1301 			 * if the period we use to send data to it
1302 			 * is lower.  Only lower the response period
1303 			 * if we must.
1304 			 */
1305 			if (syncrate == &ahc_syncrates[maxsync])
1306 				*period = syncrate->period;
1307 
1308 			/*
1309 			 * At some speeds, we only support
1310 			 * ST transfers.
1311 			 */
1312 		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1313 				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1314 			break;
1315 		}
1316 	}
1317 
1318 	if ((*period == 0)
1319 	 || (syncrate->rate == NULL)
1320 	 || ((ahc->features & AHC_ULTRA2) != 0
1321 	  && (syncrate->sxfr_u2 == 0))) {
1322 		/* Use asynchronous transfers. */
1323 		*period = 0;
1324 		syncrate = NULL;
1325 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1326 	}
1327 	return (syncrate);
1328 }
1329 
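/*
 * Reverse lookup: convert a SCSIRATE register value back into the
 * corresponding transfer period from the syncrate table (0 == async).
 */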
1330 static u_int
1331 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1332 {
1333 	struct ahc_syncrate *syncrate;
1334 
1335 	if ((ahc->features & AHC_ULTRA2) != 0)
1336 		scsirate &= SXFR_ULTRA2;
1337 	else
1338 		scsirate &= SXFR;
1339 
1340 	syncrate = &ahc_syncrates[maxsync];
1341 	while (syncrate->rate != NULL) {
1342 
1343 		if ((ahc->features & AHC_ULTRA2) != 0) {
1344 			if (syncrate->sxfr_u2 == 0)
1345 				break;
1346 			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1347 				return (syncrate->period);
1348 		} else if (scsirate == (syncrate->sxfr & SXFR)) {
1349 				return (syncrate->period);
1350 		}
1351 		syncrate++;
1352 	}
1353 	return (0); /* async */
1354 }
1355 
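/*
 * Truncate the requested synchronous offset to the maximum the chip
 * supports for this syncrate and bus width (0 if async).
 */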
1356 static void
1357 ahc_validate_offset(struct ahc_softc *ahc, struct ahc_syncrate *syncrate,
1358 		    u_int *offset, int wide)
1359 {
1360 	u_int maxoffset;
1361 
1362 	/* Limit offset to what we can do */
1363 	if (syncrate == NULL) {
1364 		maxoffset = 0;
1365 	} else if ((ahc->features & AHC_ULTRA2) != 0) {
1366 		maxoffset = MAX_OFFSET_ULTRA2;
1367 	} else {
1368 		if (wide)
1369 			maxoffset = MAX_OFFSET_16BIT;
1370 		else
1371 			maxoffset = MAX_OFFSET_8BIT;
1372 	}
1373 	*offset = MIN(*offset, maxoffset);
1374 }
1375 
1376 
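/*
 * Clamp a requested bus width to one the controller supports.
 */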
1377 static void
1378 ahc_validate_width(struct ahc_softc *ahc, u_int *bus_width)
1379 {
1380 	switch (*bus_width) {
1381 	default:
1382 		if (ahc->features & AHC_WIDE) {
1383 			/* Respond Wide */
1384 			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1385 			break;
1386 		}
1387 		/* FALLTHROUGH */
1388 	case MSG_EXT_WDTR_BUS_8_BIT:
1389 		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1390 		break;
1391 	}
1392 }
1393 
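/*
 * Set or clear this target's bit in the sequencer's TARGET_MSG_REQUEST
 * mask based on whether the current and goal transfer settings differ,
 * pausing the sequencer around the update if it isn't already paused.
 */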
1394 static void
1395 ahc_update_target_msg_request(struct ahc_softc *ahc,
1396 			      struct ahc_devinfo *devinfo,
1397 			      struct ahc_initiator_tinfo *tinfo,
1398 			      int force, int paused)
1399 {
1400 	u_int targ_msg_req_orig;
1401 
1402 	targ_msg_req_orig = ahc->targ_msg_req;
1403 	if (tinfo->current.period != tinfo->goal.period
1404 	 || tinfo->current.width != tinfo->goal.width
1405 	 || tinfo->current.offset != tinfo->goal.offset
1406 	 || (force
1407 	  && (tinfo->goal.period != 0
1408 	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT)))
1409 		ahc->targ_msg_req |= devinfo->target_mask;
1410 	else
1411 		ahc->targ_msg_req &= ~devinfo->target_mask;
1412 
1413 	if (ahc->targ_msg_req != targ_msg_req_orig) {
1414 		/* Update the message request bit for this target */
1415 		if (!paused)
1416 			pause_sequencer(ahc);
1417 
1418 		ahc_outb(ahc, TARGET_MSG_REQUEST,
1419 			 ahc->targ_msg_req & 0xFF);
1420 		ahc_outb(ahc, TARGET_MSG_REQUEST + 1,
1421 			 (ahc->targ_msg_req >> 8) & 0xFF);
1422 
1423 		if (!paused)
1424 			unpause_sequencer(ahc);
1425 	}
1426 }
1427 
1428 static int
1429 ahc_create_path(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1430 		     struct cam_path **path)
1431 {
1432 	path_id_t path_id;
1433 
1434 	if (devinfo->channel == 'B')
1435 		path_id = cam_sim_path(ahc->sim_b);
1436 	else
1437 		path_id = cam_sim_path(ahc->sim);
1438 
1439 	return (xpt_create_path(path, /*periph*/NULL,
1440 				path_id, devinfo->target,
1441 				devinfo->lun));
1442 }
1443 
1444 static void
1445 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1446 		 struct cam_path *path, struct ahc_syncrate *syncrate,
1447 		 u_int period, u_int offset, u_int ppr_options,
1448 		 u_int type, int paused)
1449 {
1450 	struct	ahc_initiator_tinfo *tinfo;
1451 	struct	tmode_tstate *tstate;
1452 	u_int	old_period;
1453 	u_int	old_offset;
1454 	int	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1455 
1456 	if (syncrate == NULL) {
1457 		period = 0;
1458 		offset = 0;
1459 	}
1460 
1461 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1462 				    devinfo->target, &tstate);
1463 	old_period = tinfo->current.period;
1464 	old_offset = tinfo->current.offset;
1465 
1466 	if ((type & AHC_TRANS_CUR) != 0
1467 	 && (old_period != period || old_offset != offset)) {
1468 		struct	cam_path *path2;
1469 		u_int	scsirate;
1470 
1471 		scsirate = tinfo->scsirate;
1472 		if ((ahc->features & AHC_ULTRA2) != 0) {
1473 
1474 			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
1475 			if (syncrate != NULL) {
1476 				scsirate |= syncrate->sxfr_u2;
1477 				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
1478 					scsirate |= ENABLE_CRC;
1479 				else
1480 					scsirate |= SINGLE_EDGE;
1481 			}
1482 			if (active)
1483 				ahc_outb(ahc, SCSIOFFSET, offset);
1484 		} else {
1485 
1486 			scsirate &= ~(SXFR|SOFS);
1487 			/*
1488 			 * Ensure Ultra mode is set properly for
1489 			 * this target.
1490 			 */
1491 			tstate->ultraenb &= ~devinfo->target_mask;
1492 			if (syncrate != NULL) {
1493 				if (syncrate->sxfr & ULTRA_SXFR) {
1494 					tstate->ultraenb |=
1495 						devinfo->target_mask;
1496 				}
1497 				scsirate |= syncrate->sxfr & SXFR;
1498 				scsirate |= offset & SOFS;
1499 			}
1500 			if (active) {
1501 				u_int sxfrctl0;
1502 
1503 				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
1504 				sxfrctl0 &= ~FAST20;
1505 				if (tstate->ultraenb & devinfo->target_mask)
1506 					sxfrctl0 |= FAST20;
1507 				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
1508 			}
1509 		}
1510 		if (active)
1511 			ahc_outb(ahc, SCSIRATE, scsirate);
1512 
1513 		tinfo->scsirate = scsirate;
1514 		tinfo->current.period = period;
1515 		tinfo->current.offset = offset;
1516 		tinfo->current.ppr_options = ppr_options;
1517 
1518 		/* Update the syncrates in any pending scbs */
1519 		ahc_update_pending_syncrates(ahc);
1520 
1521 		/*
1522 		 * If possible, tell the SCSI layer about the new
1523 		 * transfer parameters by updating the XPT's notion
1524 		 * of our transfer rate.
1525 		 */
1526 		path2 = NULL;
1527 		if (path == NULL) {
1528 			int error;
1529 
1530 			error = ahc_create_path(ahc, devinfo, &path2);
1531 			if (error == CAM_REQ_CMP)
1532 				path = path2;
1533 			else
1534 				path2 = NULL;
1535 		}
1536 
1537 		if (path != NULL) {
1538 			struct	ccb_trans_settings neg;
1539 
1540 			neg.flags = CCB_TRANS_CURRENT_SETTINGS;
1541 			neg.sync_period = period;
1542 			neg.sync_offset = offset;
1543 			neg.valid = CCB_TRANS_SYNC_RATE_VALID
1544 				  | CCB_TRANS_SYNC_OFFSET_VALID;
1545 			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
1546 			xpt_async(AC_TRANSFER_NEG, path, &neg);
1547 		}
1548 
1549 		if (path2 != NULL)
1550 			xpt_free_path(path2);
1551 
1552 		if (bootverbose) {
1553 			if (offset != 0) {
1554 				printf("%s: target %d synchronous at %sMHz%s, "
1555 				       "offset = 0x%x\n", ahc_name(ahc),
1556 				       devinfo->target, syncrate->rate,
1557 				       (ppr_options & MSG_EXT_PPR_DT_REQ)
1558 				       ? " DT" : "", offset);
1559 			} else {
1560 				printf("%s: target %d using "
1561 				       "asynchronous transfers\n",
1562 				       ahc_name(ahc), devinfo->target);
1563 			}
1564 		}
1565 	}
1566 
1567 	if ((type & AHC_TRANS_GOAL) != 0) {
1568 		tinfo->goal.period = period;
1569 		tinfo->goal.offset = offset;
1570 		tinfo->goal.ppr_options = ppr_options;
1571 	}
1572 
1573 	if ((type & AHC_TRANS_USER) != 0) {
1574 		tinfo->user.period = period;
1575 		tinfo->user.offset = offset;
1576 		tinfo->user.ppr_options = ppr_options;
1577 	}
1578 
1579 	ahc_update_target_msg_request(ahc, devinfo, tinfo,
1580 				      /*force*/FALSE,
1581 				      paused);
1582 }
1583 
1584 static void
1585 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1586 	      struct cam_path *path, u_int width, u_int type, int paused)
1587 {
1588 	struct ahc_initiator_tinfo *tinfo;
1589 	struct tmode_tstate *tstate;
1590 	u_int  oldwidth;
1591 	int    active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1592 
1593 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1594 				    devinfo->target, &tstate);
1595 	oldwidth = tinfo->current.width;
1596 
1597 	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
1598 		struct  cam_path *path2;
1599 		u_int	scsirate;
1600 
1601 		scsirate =  tinfo->scsirate;
1602 		scsirate &= ~WIDEXFER;
1603 		if (width == MSG_EXT_WDTR_BUS_16_BIT)
1604 			scsirate |= WIDEXFER;
1605 
1606 		tinfo->scsirate = scsirate;
1607 
1608 		if (active)
1609 			ahc_outb(ahc, SCSIRATE, scsirate);
1610 
1611 		tinfo->current.width = width;
1612 
1613 		/* If possible, update the XPT's notion of our transfer rate */
1614 		path2 = NULL;
1615 		if (path == NULL) {
1616 			int error;
1617 
1618 			error = ahc_create_path(ahc, devinfo, &path2);
1619 			if (error == CAM_REQ_CMP)
1620 				path = path2;
1621 			else
1622 				path2 = NULL;
1623 		}
1624 
1625 		if (path != NULL) {
1626 			struct	ccb_trans_settings neg;
1627 
1628 			neg.flags = CCB_TRANS_CURRENT_SETTINGS;
1629 			neg.bus_width = width;
1630 			neg.valid = CCB_TRANS_BUS_WIDTH_VALID;
1631 			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
1632 			xpt_async(AC_TRANSFER_NEG, path, &neg);
1633 		}
1634 
1635 		if (path2 != NULL)
1636 			xpt_free_path(path2);
1637 
1638 		if (bootverbose) {
1639 			printf("%s: target %d using %dbit transfers\n",
1640 			       ahc_name(ahc), devinfo->target,
1641 			       8 * (0x01 << width));
1642 		}
1643 	}
1644 	if ((type & AHC_TRANS_GOAL) != 0)
1645 		tinfo->goal.width = width;
1646 	if ((type & AHC_TRANS_USER) != 0)
1647 		tinfo->user.width = width;
1648 
1649 	ahc_update_target_msg_request(ahc, devinfo, tinfo,
1650 				      /*force*/FALSE, paused);
1651 }
1652 
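/*
 * Record in the target's tstate whether tagged queuing is enabled for
 * the device described by devinfo.
 */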
1653 static void
1654 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable)
1655 {
1656 	struct ahc_initiator_tinfo *tinfo;
1657 	struct tmode_tstate *tstate;
1658 
1659 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1660 				    devinfo->target, &tstate);
1661 
1662 	if (enable)
1663 		tstate->tagenable |= devinfo->target_mask;
1664 	else
1665 		tstate->tagenable &= ~devinfo->target_mask;
1666 }
1667 
1668 /*
1669  * Attach all the sub-devices we can find
1670  */
1671 int
1672 ahc_attach(struct ahc_softc *ahc)
1673 {
1674 	struct ccb_setasync csa;
1675 	struct cam_devq *devq;
1676 	int bus_id;
1677 	int bus_id2;
1678 	struct cam_sim *sim;
1679 	struct cam_sim *sim2;
1680 	struct cam_path *path;
1681 	struct cam_path *path2;
1682 	int count;
1683 	int s;
1684 	int error;
1685 
1686 	count = 0;
1687 	sim = NULL;
1688 	sim2 = NULL;
1689 
1690 	s = splcam();
1691 	/* Hook up our interrupt handler */
1692 	if ((error = bus_setup_intr(ahc->device, ahc->irq, INTR_TYPE_CAM,
1693 				    ahc_intr, ahc, &ahc->ih)) != 0) {
1694 		device_printf(ahc->device, "bus_setup_intr() failed: %d\n",
1695 			      error);
1696 		goto fail;
1697 	}
1698 
1699 	/*
1700 	 * Attach secondary channel first if the user has
1701 	 * declared it the primary channel.
1702 	 */
1703 	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
1704 		bus_id = 1;
1705 		bus_id2 = 0;
1706 	} else {
1707 		bus_id = 0;
1708 		bus_id2 = 1;
1709 	}
1710 
1711 	/*
1712 	 * Create the device queue for our SIM(s).
1713 	 */
1714 	devq = cam_simq_alloc(AHC_SCB_MAX);
1715 	if (devq == NULL)
1716 		goto fail;
1717 
1718 	/*
1719 	 * Construct our first channel SIM entry
1720 	 */
1721 	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc, ahc->unit,
1722 			    1, AHC_SCB_MAX, devq);
1723 	if (sim == NULL) {
1724 		cam_simq_free(devq);
1725 		goto fail;
1726 	}
1727 
1728 	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
1729 		cam_sim_free(sim, /*free_devq*/TRUE);
1730 		sim = NULL;
1731 		goto fail;
1732 	}
1733 
1734 	if (xpt_create_path(&path, /*periph*/NULL,
1735 			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
1736 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1737 		xpt_bus_deregister(cam_sim_path(sim));
1738 		cam_sim_free(sim, /*free_devq*/TRUE);
1739 		sim = NULL;
1740 		goto fail;
1741 	}
1742 
1743 	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
1744 	csa.ccb_h.func_code = XPT_SASYNC_CB;
1745 	csa.event_enable = AC_LOST_DEVICE;
1746 	csa.callback = ahc_async;
1747 	csa.callback_arg = sim;
1748 	xpt_action((union ccb *)&csa);
1749 	count++;
1750 
1751 	if (ahc->features & AHC_TWIN) {
1752 		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
1753 				    ahc, ahc->unit, 1,
1754 				    AHC_SCB_MAX, devq);
1755 
1756 		if (sim2 == NULL) {
1757 			printf("ahc_attach: Unable to attach second "
1758 			       "bus due to resource shortage\n");
1759 			goto fail;
1760 		}
1761 
1762 		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
1763 			printf("ahc_attach: Unable to attach second "
1764 			       "bus due to resource shortage\n");
1765 			/*
1766 			 * We do not want to destroy the device queue
1767 			 * because the first bus is using it.
1768 			 */
1769 			cam_sim_free(sim2, /*free_devq*/FALSE);
1770 			goto fail;
1771 		}
1772 
1773 		if (xpt_create_path(&path2, /*periph*/NULL,
1774 				    cam_sim_path(sim2),
1775 				    CAM_TARGET_WILDCARD,
1776 				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1777 			xpt_bus_deregister(cam_sim_path(sim2));
1778 			cam_sim_free(sim2, /*free_devq*/FALSE);
1779 			sim2 = NULL;
1780 			goto fail;
1781 		}
1782 		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
1783 		csa.ccb_h.func_code = XPT_SASYNC_CB;
1784 		csa.event_enable = AC_LOST_DEVICE;
1785 		csa.callback = ahc_async;
1786 		csa.callback_arg = sim2;
1787 		xpt_action((union ccb *)&csa);
1788 		count++;
1789 	}
1790 
1791 fail:
1792 	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
1793 		ahc->sim_b = sim;
1794 		ahc->path_b = path;
1795 		ahc->sim = sim2;
1796 		ahc->path = path2;
1797 	} else {
1798 		ahc->sim = sim;
1799 		ahc->path = path;
1800 		ahc->sim_b = sim2;
1801 		ahc->path_b = path2;
1802 	}
1803 	splx(s);
1804 	return (count);
1805 }
1806 
1807 #if UNUSED
1808 static void
1809 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1810 		struct scb *scb)
1811 {
1812 	role_t	role;
1813 	int	our_id;
1814 
1815 	if (scb->ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1816 		our_id = scb->ccb->ccb_h.target_id;
1817 		role = ROLE_TARGET;
1818 	} else {
1819 		our_id = SCB_GET_CHANNEL(scb) == 'B' ? ahc->our_id_b : ahc->our_id;
1820 		role = ROLE_INITIATOR;
1821 	}
1822 	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
1823 			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(scb), role);
1824 }
1825 #endif
1826 
1827 static void
1828 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1829 {
1830 	u_int	saved_scsiid;
1831 	role_t	role;
1832 	int	our_id;
1833 
1834 	if (ahc_inb(ahc, SSTAT0) & TARGET)
1835 		role = ROLE_TARGET;
1836 	else
1837 		role = ROLE_INITIATOR;
1838 
1839 	if (role == ROLE_TARGET
1840 	 && (ahc->features & AHC_MULTI_TID) != 0
1841 	 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
1842 		/* We were selected, so pull our id from TARGIDIN */
1843 		our_id = ahc_inb(ahc, TARGIDIN) & OID;
1844 	} else if ((ahc->features & AHC_ULTRA2) != 0)
1845 		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
1846 	else
1847 		our_id = ahc_inb(ahc, SCSIID) & OID;
1848 
1849 	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1850 	ahc_compile_devinfo(devinfo,
1851 			    our_id,
1852 			    SCSIID_TARGET(ahc, saved_scsiid),
1853 			    ahc_inb(ahc, SAVED_LUN),
1854 			    SCSIID_CHANNEL(ahc, saved_scsiid),
1855 			    role);
1856 }
1857 
1858 static void
1859 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
1860 		    u_int lun, char channel, role_t role)
1861 {
1862 	devinfo->our_scsiid = our_id;
1863 	devinfo->target = target;
1864 	devinfo->lun = lun;
1865 	devinfo->target_offset = target;
1866 	devinfo->channel = channel;
1867 	devinfo->role = role;
1868 	if (channel == 'B')
1869 		devinfo->target_offset += 8;
1870 	devinfo->target_mask = (0x01 << devinfo->target_offset);
1871 }
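
/*
 * Illustrative sketch (not part of the driver): the routine above folds
 * a (channel, target) pair into the single 16-entry offset/mask space
 * used throughout this file.  Channel 'A' targets occupy offsets 0-7,
 * channel 'B' targets occupy 8-15, and the mask is one bit per offset.
 * The helper name below is hypothetical and exists only for this example.
 */
#if 0
static u_int
example_target_mask(char channel, u_int target)
{
	u_int offset;

	offset = target;
	if (channel == 'B')
		offset += 8;
	/* e.g. channel 'B', target 3 -> offset 11 -> mask 0x0800 */
	return (0x01 << offset);
}
#endif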
1872 
1873 /*
1874  * Catch an interrupt from the adapter
1875  */
1876 void
1877 ahc_intr(void *arg)
1878 {
1879 	struct	ahc_softc *ahc;
1880 	u_int	intstat;
1881 
1882 	ahc = (struct ahc_softc *)arg;
1883 
1884 	intstat = ahc_inb(ahc, INTSTAT);
1885 
1886 	/*
1887 	 * Any interrupts to process?
1888 	 */
1889 #if NPCI > 0
1890 	if ((intstat & INT_PEND) == 0) {
1891 		if ((ahc->chip & AHC_PCI) != 0
1892 		 && (ahc->unsolicited_ints > 500)) {
1893 			if ((ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
1894 				ahc_pci_intr(ahc);
1895 			ahc->unsolicited_ints = 0;
1896 		} else {
1897 			ahc->unsolicited_ints++;
1898 		}
1899 		return;
1900 	} else {
1901 		ahc->unsolicited_ints = 0;
1902 	}
1903 #else
1904 	if ((intstat & INT_PEND) == 0)
1905 		return;
1906 #endif
1907 
1908 	if (intstat & CMDCMPLT) {
1909 		ahc_outb(ahc, CLRINT, CLRCMDINT);
1910 		ahc_run_qoutfifo(ahc);
1911 		if ((ahc->flags & AHC_TARGETMODE) != 0)
1912 			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
1913 	}
1914 	if (intstat & BRKADRINT) {
1915 		/*
1916 		 * We upset the sequencer :-(
1917 		 * Lookup the error message
1918 		 * Look up the error message
1919 		int i, error, num_errors;
1920 
1921 		error = ahc_inb(ahc, ERROR);
1922 		num_errors = sizeof(hard_error)/sizeof(hard_error[0]);
1923 		for (i = 0; error != 1 && i < num_errors; i++)
1924 			error >>= 1;
1925 		panic("%s: brkadrint, %s at seqaddr = 0x%x\n",
1926 		      ahc_name(ahc), hard_error[i].errmesg,
1927 		      ahc_inb(ahc, SEQADDR0) |
1928 		      (ahc_inb(ahc, SEQADDR1) << 8));
1929 
1930 		/* Tell everyone that this HBA is no longer available */
1931 		ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
1932 			       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
1933 			       CAM_NO_HBA);
1934 	}
1935 
1936 	if ((intstat & (SEQINT|SCSIINT)) != 0)
1937 		ahc_pause_bug_fix(ahc);
1938 
1939 	if ((intstat & SEQINT) != 0)
1940 		ahc_handle_seqint(ahc, intstat);
1941 
1942 	if ((intstat & SCSIINT) != 0)
1943 		ahc_handle_scsiint(ahc, intstat);
1944 }
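
/*
 * Illustrative sketch (not part of the driver): the BRKADRINT handler
 * above maps the single error bit latched in the ERROR register to an
 * index into the hard_error[] message table by shifting until only the
 * low bit remains.  The table contents and names below are stand-ins
 * for this example; the real hard_error[] is defined elsewhere in this
 * driver.
 */
#if 0
static const char *example_errmesg[] = {
	"Illegal Host Access",		/* bit 0 */
	"Illegal Sequencer Address",	/* bit 1 */
	"Illegal Opcode",		/* bit 2 */
	"Sequencer Parity Error"	/* bit 3 */
};

static const char *
example_decode_error(u_int error)
{
	u_int i;
	u_int num = sizeof(example_errmesg)/sizeof(example_errmesg[0]);

	/* Shift until only the low bit remains; i is then the bit index. */
	for (i = 0; error != 1 && i < num; i++)
		error >>= 1;
	if (i == num)
		i = num - 1;		/* clamp, for this sketch only */
	return (example_errmesg[i]);	/* error = 0x04 -> "Illegal Opcode" */
}
#endif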
1945 
1946 static struct tmode_tstate *
1947 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1948 {
1949 	struct tmode_tstate *master_tstate;
1950 	struct tmode_tstate *tstate;
1951 	int i, s;
1952 
1953 	master_tstate = ahc->enabled_targets[ahc->our_id];
1954 	if (channel == 'B') {
1955 		scsi_id += 8;
1956 		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
1957 	}
1958 	if (ahc->enabled_targets[scsi_id] != NULL
1959 	 && ahc->enabled_targets[scsi_id] != master_tstate)
1960 		panic("%s: ahc_alloc_tstate - Target already allocated",
1961 		      ahc_name(ahc));
1962 	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
1963 	if (tstate == NULL)
1964 		return (NULL);
1965 
1966 	/*
1967 	 * If we have allocated a master tstate, copy user settings from
1968 	 * the master tstate (taken from SRAM or the EEPROM) for this
1969 	 * channel, but reset our current and goal settings to async/narrow
1970 	 * until an initiator talks to us.
1971 	 */
1972 	if (master_tstate != NULL) {
1973 		bcopy(master_tstate, tstate, sizeof(*tstate));
1974 		bzero(tstate->enabled_luns, sizeof(tstate->enabled_luns));
1975 		tstate->ultraenb = 0;
1976 		for (i = 0; i < 16; i++) {
1977 			bzero(&tstate->transinfo[i].current,
1978 			      sizeof(tstate->transinfo[i].current));
1979 			bzero(&tstate->transinfo[i].goal,
1980 			      sizeof(tstate->transinfo[i].goal));
1981 		}
1982 	} else
1983 		bzero(tstate, sizeof(*tstate));
1984 	s = splcam();
1985 	ahc->enabled_targets[scsi_id] = tstate;
1986 	splx(s);
1987 	return (tstate);
1988 }
1989 
1990 static void
1991 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
1992 {
1993 	struct tmode_tstate *tstate;
1994 
1995 	/* Don't clean up the entry for our initiator role */
1996 	if ((ahc->flags & AHC_INITIATORMODE) != 0
1997 	 && ((channel == 'B' && scsi_id == ahc->our_id_b)
1998 	  || (channel == 'A' && scsi_id == ahc->our_id))
1999 	 && force == FALSE)
2000 		return;
2001 
2002 	if (channel == 'B')
2003 		scsi_id += 8;
2004 	tstate = ahc->enabled_targets[scsi_id];
2005 	if (tstate != NULL)
2006 		free(tstate, M_DEVBUF);
2007 	ahc->enabled_targets[scsi_id] = NULL;
2008 }
2009 
2010 static void
2011 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
2012 {
2013 	struct	   tmode_tstate *tstate;
2014 	struct	   tmode_lstate *lstate;
2015 	struct	   ccb_en_lun *cel;
2016 	cam_status status;
2017 	u_int	   target;
2018 	u_int	   lun;
2019 	u_int	   target_mask;
2020 	char	   channel;
2021 	int	   s;
2022 
2023 	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
2024 				     /*notfound_failure*/FALSE);
2025 
2026 	if (status != CAM_REQ_CMP) {
2027 		ccb->ccb_h.status = status;
2028 		return;
2029 	}
2030 
2031 	cel = &ccb->cel;
2032 	target = ccb->ccb_h.target_id;
2033 	lun = ccb->ccb_h.target_lun;
2034 	channel = SIM_CHANNEL(ahc, sim);
2035 	target_mask = 0x01 << target;
2036 	if (channel == 'B')
2037 		target_mask <<= 8;
2038 
2039 	if (cel->enable != 0) {
2040 		u_int scsiseq;
2041 
2042 		/* Are we already enabled?? */
2043 		if (lstate != NULL) {
2044 			xpt_print_path(ccb->ccb_h.path);
2045 			printf("Lun already enabled\n");
2046 			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
2047 			return;
2048 		}
2049 
2050 		if (cel->grp6_len != 0
2051 		 || cel->grp7_len != 0) {
2052 			/*
2053 			 * Don't (yet?) support vendor
2054 			 * specific commands.
2055 			 */
2056 			ccb->ccb_h.status = CAM_REQ_INVALID;
2057 			printf("Non-zero Group Codes\n");
2058 			return;
2059 		}
2060 
2061 		/*
2062 		 * Seems to be okay.
2063 		 * Setup our data structures.
2064 		 */
2065 		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
2066 			tstate = ahc_alloc_tstate(ahc, target, channel);
2067 			if (tstate == NULL) {
2068 				xpt_print_path(ccb->ccb_h.path);
2069 				printf("Couldn't allocate tstate\n");
2070 				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2071 				return;
2072 			}
2073 		}
2074 		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
2075 		if (lstate == NULL) {
2076 			xpt_print_path(ccb->ccb_h.path);
2077 			printf("Couldn't allocate lstate\n");
2078 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2079 			return;
2080 		}
2081 		bzero(lstate, sizeof(*lstate));
2082 		status = xpt_create_path(&lstate->path, /*periph*/NULL,
2083 					 xpt_path_path_id(ccb->ccb_h.path),
2084 					 xpt_path_target_id(ccb->ccb_h.path),
2085 					 xpt_path_lun_id(ccb->ccb_h.path));
2086 		if (status != CAM_REQ_CMP) {
2087 			free(lstate, M_DEVBUF);
2088 			xpt_print_path(ccb->ccb_h.path);
2089 			printf("Couldn't allocate path\n");
2090 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2091 			return;
2092 		}
2093 		SLIST_INIT(&lstate->accept_tios);
2094 		SLIST_INIT(&lstate->immed_notifies);
2095 		s = splcam();
2096 		pause_sequencer(ahc);
2097 		if (target != CAM_TARGET_WILDCARD) {
2098 			tstate->enabled_luns[lun] = lstate;
2099 			ahc->enabled_luns++;
2100 
2101 			if ((ahc->features & AHC_MULTI_TID) != 0) {
2102 				u_int targid_mask;
2103 
2104 				targid_mask = ahc_inb(ahc, TARGID)
2105 					    | (ahc_inb(ahc, TARGID + 1) << 8);
2106 
2107 				targid_mask |= target_mask;
2108 				ahc_outb(ahc, TARGID, targid_mask);
2109 				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
2110 
2111 				ahc_update_scsiid(ahc, targid_mask);
2112 			} else {
2113 				u_int our_id;
2114 				char  channel;
2115 
2116 				channel = SIM_CHANNEL(ahc, sim);
2117 				our_id = SIM_SCSI_ID(ahc, sim);
2118 
2119 				/*
2120 				 * This can only happen if selections
2121 				 * are not enabled
2122 				 */
2123 				if (target != our_id) {
2124 					u_int sblkctl;
2125 					char  cur_channel;
2126 					int   swap;
2127 
2128 					sblkctl = ahc_inb(ahc, SBLKCTL);
2129 					cur_channel = (sblkctl & SELBUSB)
2130 						    ? 'B' : 'A';
2131 					if ((ahc->features & AHC_TWIN) == 0)
2132 						cur_channel = 'A';
2133 					swap = cur_channel != channel;
2134 					if (channel == 'A')
2135 						ahc->our_id = target;
2136 					else
2137 						ahc->our_id_b = target;
2138 
2139 					if (swap)
2140 						ahc_outb(ahc, SBLKCTL,
2141 							 sblkctl ^ SELBUSB);
2142 
2143 					ahc_outb(ahc, SCSIID, target);
2144 
2145 					if (swap)
2146 						ahc_outb(ahc, SBLKCTL, sblkctl);
2147 				}
2148 			}
2149 		} else
2150 			ahc->black_hole = lstate;
2151 		/* Allow select-in operations */
2152 		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
2153 			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
2154 			scsiseq |= ENSELI;
2155 			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
2156 			scsiseq = ahc_inb(ahc, SCSISEQ);
2157 			scsiseq |= ENSELI;
2158 			ahc_outb(ahc, SCSISEQ, scsiseq);
2159 		}
2160 		unpause_sequencer(ahc);
2161 		splx(s);
2162 		ccb->ccb_h.status = CAM_REQ_CMP;
2163 		xpt_print_path(ccb->ccb_h.path);
2164 		printf("Lun now enabled for target mode\n");
2165 	} else {
2166 		struct ccb_hdr *elm;
2167 		int i, empty;
2168 
2169 		if (lstate == NULL) {
2170 			ccb->ccb_h.status = CAM_LUN_INVALID;
2171 			return;
2172 		}
2173 
2174 		s = splcam();
2175 		ccb->ccb_h.status = CAM_REQ_CMP;
2176 		LIST_FOREACH(elm, &ahc->pending_ccbs, sim_links.le) {
2177 			if (elm->func_code == XPT_CONT_TARGET_IO
2178 			 && !xpt_path_comp(elm->path, ccb->ccb_h.path)){
2179 				printf("CTIO pending\n");
2180 				ccb->ccb_h.status = CAM_REQ_INVALID;
2181 				splx(s);
2182 				return;
2183 			}
2184 		}
2185 
2186 		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
2187 			printf("ATIOs pending\n");
2188 			ccb->ccb_h.status = CAM_REQ_INVALID;
2189 		}
2190 
2191 		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
2192 			printf("INOTs pending\n");
2193 			ccb->ccb_h.status = CAM_REQ_INVALID;
2194 		}
2195 
2196 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
2197 			splx(s);
2198 			return;
2199 		}
2200 
2201 		xpt_print_path(ccb->ccb_h.path);
2202 		printf("Target mode disabled\n");
2203 		xpt_free_path(lstate->path);
2204 		free(lstate, M_DEVBUF);
2205 
2206 		pause_sequencer(ahc);
2207 		/* Can we clean up the target too? */
2208 		if (target != CAM_TARGET_WILDCARD) {
2209 			tstate->enabled_luns[lun] = NULL;
2210 			ahc->enabled_luns--;
2211 			for (empty = 1, i = 0; i < 8; i++)
2212 				if (tstate->enabled_luns[i] != NULL) {
2213 					empty = 0;
2214 					break;
2215 				}
2216 
2217 			if (empty) {
2218 				ahc_free_tstate(ahc, target, channel,
2219 						/*force*/FALSE);
2220 				if (ahc->features & AHC_MULTI_TID) {
2221 					u_int targid_mask;
2222 
2223 					targid_mask = ahc_inb(ahc, TARGID)
2224 						    | (ahc_inb(ahc, TARGID + 1)
2225 						       << 8);
2226 
2227 					targid_mask &= ~target_mask;
2228 					ahc_outb(ahc, TARGID, targid_mask);
2229 					ahc_outb(ahc, TARGID+1,
2230 					 	 (targid_mask >> 8));
2231 					ahc_update_scsiid(ahc, targid_mask);
2232 				}
2233 			}
2234 		} else {
2235 
2236 			ahc->black_hole = NULL;
2237 
2238 			/*
2239 			 * We can't allow selections without
2240 			 * our black hole device.
2241 			 */
2242 			empty = TRUE;
2243 		}
2244 		if (ahc->enabled_luns == 0) {
2245 			/* Disallow select-in */
2246 			u_int scsiseq;
2247 
2248 			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
2249 			scsiseq &= ~ENSELI;
2250 			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
2251 			scsiseq = ahc_inb(ahc, SCSISEQ);
2252 			scsiseq &= ~ENSELI;
2253 			ahc_outb(ahc, SCSISEQ, scsiseq);
2254 		}
2255 		unpause_sequencer(ahc);
2256 		splx(s);
2257 	}
2258 }
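
/*
 * Illustrative sketch (not part of the driver): TARGID is a 16-bit
 * selection-enable mask exposed through two 8-bit registers, so the
 * routine above always reads and writes it as a low byte at TARGID and
 * a high byte at TARGID+1.  The helper names are hypothetical; the
 * ahc_inb/ahc_outb accessors and TARGID symbol are the ones used
 * throughout this file.
 */
#if 0
static u_int
example_read_targid(struct ahc_softc *ahc)
{
	return (ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8));
}

static void
example_write_targid(struct ahc_softc *ahc, u_int targid_mask)
{
	ahc_outb(ahc, TARGID, targid_mask & 0xFF);
	ahc_outb(ahc, TARGID + 1, (targid_mask >> 8) & 0xFF);
}
#endif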
2259 
2260 static void
2261 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
2262 {
2263 	u_int scsiid_mask;
2264 	u_int scsiid;
2265 
2266 	if ((ahc->features & AHC_MULTI_TID) == 0)
2267 		panic("ahc_update_scsiid called on non-multitid unit\n");
2268 
2269 	/*
2270 	 * Since we will rely on the TARGID mask
2271 	 * for selection enables, ensure that OID
2272 	 * in SCSIID is not set to some other ID
2273 	 * that we don't want to allow selections on.
2274 	 */
2275 	if ((ahc->features & AHC_ULTRA2) != 0)
2276 		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
2277 	else
2278 		scsiid = ahc_inb(ahc, SCSIID);
2279 	scsiid_mask = 0x1 << (scsiid & OID);
2280 	if ((targid_mask & scsiid_mask) == 0) {
2281 		u_int our_id;
2282 
2283 		/* ffs counts from 1 */
2284 		our_id = ffs(targid_mask);
2285 		if (our_id == 0)
2286 			our_id = ahc->our_id;
2287 		else
2288 			our_id--;
2289 		scsiid &= TID;
2290 		scsiid |= our_id;
2291 	}
2292 	if ((ahc->features & AHC_ULTRA2) != 0)
2293 		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
2294 	else
2295 		ahc_outb(ahc, SCSIID, scsiid);
2296 }
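
/*
 * Illustrative sketch (not part of the driver): ffs() returns the
 * 1-based position of the lowest set bit, or 0 for an empty mask, so
 * the routine above subtracts one to turn the TARGID selection mask
 * into a SCSI ID.  The helper name is hypothetical.
 */
#if 0
static u_int
example_lowest_enabled_id(u_int targid_mask, u_int fallback_id)
{
	int bit;

	bit = ffs(targid_mask);		/* 0x0050 -> 5 (bit 4 is lowest) */
	if (bit == 0)
		return (fallback_id);	/* no targets enabled */
	return (bit - 1);		/* -> SCSI ID 4 */
}
#endif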
2297 
2298 static int
2299 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
2300 {
2301 	struct	  tmode_tstate *tstate;
2302 	struct	  tmode_lstate *lstate;
2303 	struct	  ccb_accept_tio *atio;
2304 	uint8_t *byte;
2305 	int	  initiator;
2306 	int	  target;
2307 	int	  lun;
2308 
2309 	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
2310 	target = SCSIID_OUR_ID(cmd->scsiid);
2311 	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
2312 
2313 	byte = cmd->bytes;
2314 	tstate = ahc->enabled_targets[target];
2315 	lstate = NULL;
2316 	if (tstate != NULL)
2317 		lstate = tstate->enabled_luns[lun];
2318 
2319 	/*
2320 	 * Commands for disabled luns go to the black hole driver.
2321 	 */
2322 	if (lstate == NULL)
2323 		lstate = ahc->black_hole;
2324 
2325 	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
2326 	if (atio == NULL) {
2327 		ahc->flags |= AHC_TQINFIFO_BLOCKED;
2328 		/*
2329 		 * Wait for more ATIOs from the peripheral driver for this lun.
2330 		 */
2331 		return (1);
2332 	} else
2333 		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
2334 #if 0
2335 	printf("Incoming command from %d for %d:%d%s\n",
2336 	       initiator, target, lun,
2337 	       lstate == ahc->black_hole ? "(Black Holed)" : "");
2338 #endif
2339 	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
2340 
2341 	if (lstate == ahc->black_hole) {
2342 		/* Fill in the wildcards */
2343 		atio->ccb_h.target_id = target;
2344 		atio->ccb_h.target_lun = lun;
2345 	}
2346 
2347 	/*
2348 	 * Package it up and send it off to
2349 	 * whoever has this lun enabled.
2350 	 */
2351 	atio->sense_len = 0;
2352 	atio->init_id = initiator;
2353 	if (byte[0] != 0xFF) {
2354 		/* Tag was included */
2355 		atio->tag_action = *byte++;
2356 		atio->tag_id = *byte++;
2357 		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
2358 	} else {
2359 		atio->ccb_h.flags = 0;
2360 	}
2361 	byte++;
2362 
2363 	/* Okay.  Now determine the cdb size based on the command code */
2364 	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
2365 	case 0:
2366 		atio->cdb_len = 6;
2367 		break;
2368 	case 1:
2369 	case 2:
2370 		atio->cdb_len = 10;
2371 		break;
2372 	case 4:
2373 		atio->cdb_len = 16;
2374 		break;
2375 	case 5:
2376 		atio->cdb_len = 12;
2377 		break;
2378 	case 3:
2379 	default:
2380 		/* Only copy the opcode. */
2381 		atio->cdb_len = 1;
2382 		printf("Reserved or VU command code type encountered\n");
2383 		break;
2384 	}
2385 	bcopy(byte, atio->cdb_io.cdb_bytes, atio->cdb_len);
2386 
2387 	atio->ccb_h.status |= CAM_CDB_RECVD;
2388 
2389 	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
2390 		/*
2391 		 * We weren't allowed to disconnect.
2392 		 * We're hanging on the bus until a
2393 		 * continue target I/O comes in response
2394 		 * to this accept tio.
2395 		 */
2396 #if 0
2397 		printf("Received Immediate Command %d:%d:%d - %p\n",
2398 		       initiator, target, lun, ahc->pending_device);
2399 #endif
2400 		ahc->pending_device = lstate;
2401 		ahc_freeze_ccb((union ccb *)atio);
2402 		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
2403 	}
2404 	xpt_done((union ccb*)atio);
2405 	return (0);
2406 }
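
/*
 * Illustrative sketch (not part of the driver): the CDB length chosen
 * above follows the SCSI group code in the top three bits of the
 * opcode.  Group 0 commands are 6 bytes, groups 1 and 2 are 10 bytes,
 * group 4 is 16 bytes, group 5 is 12 bytes, and group 3 plus the
 * vendor-unique groups fall back to copying just the opcode.  The
 * helper name is hypothetical.
 */
#if 0
static int
example_cdb_group_len(uint8_t opcode)
{
	switch (opcode >> CMD_GROUP_CODE_SHIFT) {	/* top three bits */
	case 0:
		return (6);
	case 1:
	case 2:
		return (10);
	case 4:
		return (16);
	case 5:
		return (12);
	default:
		return (1);		/* reserved or vendor unique */
	}
}
#endif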
2407 
2408 static void
2409 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
2410 {
2411 	struct scb *scb;
2412 	struct ahc_devinfo devinfo;
2413 
2414 	ahc_fetch_devinfo(ahc, &devinfo);
2415 
2416 	/*
2417 	 * Clear the upper byte that holds SEQINT status
2418 	 * codes and clear the SEQINT bit. We will unpause
2419 	 * the sequencer, if appropriate, after servicing
2420 	 * the request.
2421 	 */
2422 	ahc_outb(ahc, CLRINT, CLRSEQINT);
2423 	switch (intstat & SEQINT_MASK) {
2424 	case BAD_STATUS:
2425 	{
2426 		u_int  scb_index;
2427 		struct hardware_scb *hscb;
2428 		struct ccb_scsiio *csio;
2429 		/*
2430 		 * The sequencer will notify us when a command
2431 		 * has an error that would be of interest to
2432 		 * the kernel.  This allows us to leave the sequencer
2433 		 * running in the common case of commands completing
2434 		 * without error.  The sequencer will already have
2435 		 * dma'd the SCB back up to us, so we can reference
2436 		 * the in-kernel copy directly.
2437 		 */
2438 		scb_index = ahc_inb(ahc, SCB_TAG);
2439 		scb = &ahc->scb_data->scbarray[scb_index];
2440 
2441 		/*
2442 		 * Set the default return value to 0 (don't
2443 		 * send sense).  The sense code will change
2444 		 * this if needed.
2445 		 */
2446 		ahc_outb(ahc, RETURN_1, 0);
2447 		if (!(scb_index < ahc->scb_data->numscbs
2448 		   && (scb->flags & SCB_ACTIVE) != 0)) {
2449 			printf("%s:%c:%d: ahc_intr - referenced scb "
2450 			       "not valid during seqint 0x%x scb(%d)\n",
2451 			       ahc_name(ahc), devinfo.channel,
2452 			       devinfo.target, intstat, scb_index);
2453 			goto unpause;
2454 		}
2455 
2456 		hscb = scb->hscb;
2457 
2458 		/* Don't want to clobber the original sense code */
2459 		if ((scb->flags & SCB_SENSE) != 0) {
2460 			/*
2461 			 * Clear the SCB_SENSE Flag and have
2462 			 * the sequencer do a normal command
2463 			 * complete.
2464 			 */
2465 			scb->flags &= ~SCB_SENSE;
2466 			ahcsetccbstatus(scb->ccb, CAM_AUTOSENSE_FAIL);
2467 			break;
2468 		}
2469 		ahcsetccbstatus(scb->ccb, CAM_SCSI_STATUS_ERROR);
2470 		/* Freeze the queue until the client sees the error. */
2471 		ahc_freeze_devq(ahc, scb->ccb->ccb_h.path);
2472 		ahc_freeze_ccb(scb->ccb);
2473 		csio = &scb->ccb->csio;
2474 		csio->scsi_status = hscb->shared_data.status.scsi_status;
2475 		switch (csio->scsi_status) {
2476 		case SCSI_STATUS_OK:
2477 			printf("%s: Interrupted for status of 0???\n",
2478 			       ahc_name(ahc));
2479 			break;
2480 		case SCSI_STATUS_CMD_TERMINATED:
2481 		case SCSI_STATUS_CHECK_COND:
2482 #ifdef AHC_DEBUG
2483 			if (ahc_debug & AHC_SHOWSENSE) {
2484 				xpt_print_path(csio->ccb_h.path);
2485 				printf("SCB %d: requests Check Status\n",
2486 				       scb->hscb->tag);
2487 			}
2488 #endif
2489 			if ((csio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
2490 				struct ahc_dma_seg *sg;
2491 				struct scsi_sense *sc;
2492 				struct ahc_initiator_tinfo *targ_info;
2493 				struct tmode_tstate *tstate;
2494 				struct ahc_transinfo *tinfo;
2495 
2496 				targ_info =
2497 				    ahc_fetch_transinfo(ahc,
2498 							devinfo.channel,
2499 							devinfo.our_scsiid,
2500 							devinfo.target,
2501 							&tstate);
2502 				tinfo = &targ_info->current;
2503 				sg = scb->sg_list;
2504 				sc = (struct scsi_sense *)
2505 				     (&hscb->shared_data.cdb);
2506 				/*
2507 				 * Save off the residual if there is one.
2508 				 */
2509 				if (ahc_check_residual(scb))
2510 					ahc_calc_residual(scb);
2511 				else
2512 					scb->ccb->csio.resid = 0;
2513 
2514 #ifdef AHC_DEBUG
2515 				if (ahc_debug & AHC_SHOWSENSE) {
2516 					xpt_print_path(csio->ccb_h.path);
2517 					printf("Sending Sense\n");
2518 				}
2519 #endif
2520 				sg->addr = ahc->scb_data->sense_busaddr
2521 				   + (hscb->tag*sizeof(struct scsi_sense_data));
2522 				sg->len = MIN(sizeof(struct scsi_sense_data),
2523 					      csio->sense_len);
2524 				sg->len |= AHC_DMA_LAST_SEG;
2525 
2526 				sc->opcode = REQUEST_SENSE;
2527 				sc->byte2 = 0;
2528 				if (tinfo->protocol_version <= SCSI_REV_2
2529 				 && SCB_GET_LUN(scb) < 8)
2530 					sc->byte2 = SCB_GET_LUN(scb) << 5;
2531 				sc->unused[0] = 0;
2532 				sc->unused[1] = 0;
2533 				sc->length = sg->len;
2534 				sc->control = 0;
2535 
2536 				/*
2537 				 * Would be nice to preserve DISCENB here,
2538 				 * but due to the way we manage busy targets,
2539 				 * we can't.
2540 				 */
2541 				hscb->control = 0;
2542 
2543 				/*
2544 				 * This request sense could be because the
2545 				 * device lost power or in some other
2546 				 * way has lost our transfer negotiations.
2547 				 * Renegotiate if appropriate.  Unit attention
2548 				 * errors will be reported before any data
2549 				 * phases occur.
2550 				 */
2551 				if (scb->ccb->csio.resid
2552 				 == scb->ccb->csio.dxfer_len) {
2553 					ahc_update_target_msg_request(ahc,
2554 							      &devinfo,
2555 							      targ_info,
2556 							      /*force*/TRUE,
2557 							      /*paused*/TRUE);
2558 				}
2559 				hscb->cdb_len = sizeof(*sc);
2560 				hscb->dataptr = sg->addr;
2561 				hscb->datacnt = sg->len;
2562 				hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
2563 				scb->sg_count = 1;
2564 				scb->flags |= SCB_SENSE;
2565 				ahc_outb(ahc, RETURN_1, SEND_SENSE);
2566 
2567 				/*
2568 				 * Ensure we have enough time to actually
2569 				 * retrieve the sense.
2570 				 */
2571 				untimeout(ahc_timeout, (caddr_t)scb,
2572 					  scb->ccb->ccb_h.timeout_ch);
2573 				scb->ccb->ccb_h.timeout_ch =
2574 				    timeout(ahc_timeout, (caddr_t)scb, 5 * hz);
2575 			}
2576 			break;
2577 		default:
2578 			break;
2579 		}
2580 		break;
2581 	}
2582 	case NO_MATCH:
2583 	{
2584 		/* Ensure we don't leave the selection hardware on */
2585 		ahc_outb(ahc, SCSISEQ,
2586 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
2587 
2588 		printf("%s:%c:%d: no active SCB for reconnecting "
2589 		       "target - issuing BUS DEVICE RESET\n",
2590 		       ahc_name(ahc), devinfo.channel, devinfo.target);
2591 		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
2592 		       "ARG_1 == 0x%x ARG_2 = 0x%x, SEQ_FLAGS == 0x%x\n",
2593 		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
2594 		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ARG_2),
2595 		       ahc_inb(ahc, SEQ_FLAGS));
2596 		printf("SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
2597 		       "SCB_TAG == 0x%x\n",
2598 		       ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN),
2599 		       ahc_inb(ahc, SCB_TAG));
2600 		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
2601 		ahc->msgout_len = 1;
2602 		ahc->msgout_index = 0;
2603 		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2604 		ahc_outb(ahc, MSG_OUT, HOST_MSG);
2605 		ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, LASTPHASE) | ATNO);
2606 		break;
2607 	}
2608 	case SEND_REJECT:
2609 	{
2610 		u_int rejbyte = ahc_inb(ahc, ACCUM);
2611 		printf("%s:%c:%d: Warning - unknown message received from "
2612 		       "target (0x%x).  Rejecting\n",
2613 		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
2614 		break;
2615 	}
2616 	case NO_IDENT:
2617 	{
2618 		/*
2619 		 * The reconnecting target either did not send an identify
2620 		 * message, or did, but we didn't find an SCB to match it, and
2621 		 * it hit a data phase before it could respond to our ATN/abort.
2622 		 * The only safe thing to do is to blow it away with a bus
2623 		 * reset.
2624 		 */
2625 		int found;
2626 
2627 		printf("%s:%c:%d: Target did not send an IDENTIFY message. "
2628 		       "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n",
2629 		       ahc_name(ahc), devinfo.channel, devinfo.target,
2630 		       ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID));
2631 		found = ahc_reset_channel(ahc, devinfo.channel,
2632 					  /*initiate reset*/TRUE);
2633 		printf("%s: Issued Channel %c Bus Reset. "
2634 		       "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel,
2635 		       found);
2636 		return;
2637 	}
2638 	case IGN_WIDE_RES:
2639 		ahc_handle_ign_wide_residue(ahc, &devinfo);
2640 		break;
2641 	case BAD_PHASE:
2642 	{
2643 		u_int lastphase;
2644 
2645 		lastphase = ahc_inb(ahc, LASTPHASE);
2646 		if (lastphase == P_BUSFREE) {
2647 			printf("%s:%c:%d: Missed busfree.  Curphase = 0x%x\n",
2648 			       ahc_name(ahc), devinfo.channel, devinfo.target,
2649 			       ahc_inb(ahc, SCSISIGI));
2650 			restart_sequencer(ahc);
2651 			return;
2652 		} else {
2653 			printf("%s:%c:%d: unknown scsi bus phase %x.  "
2654 			       "Attempting to continue\n",
2655 			       ahc_name(ahc), devinfo.channel, devinfo.target,
2656 			       ahc_inb(ahc, SCSISIGI));
2657 		}
2658 		break;
2659 	}
2660 	case HOST_MSG_LOOP:
2661 	{
2662 		/*
2663 		 * The sequencer has encountered a message phase
2664 		 * that requires host assistance for completion.
2665 		 * While handling the message phase(s), we will be
2666 		 * notified by the sequencer after each byte is
2667 		 * transferred so we can track bus phase changes.
2668 		 *
2669 		 * If this is the first time we've seen a HOST_MSG_LOOP
2670 		 * interrupt, initialize the state of the host message
2671 		 * loop.
2672 		 */
2673 		if (ahc->msg_type == MSG_TYPE_NONE) {
2674 			u_int bus_phase;
2675 
2676 			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2677 			if (bus_phase != P_MESGIN
2678 			 && bus_phase != P_MESGOUT) {
2679 				printf("ahc_intr: HOST_MSG_LOOP bad "
2680 				       "phase 0x%x\n",
2681 				      bus_phase);
2682 				/*
2683 				 * Probably transitioned to bus free before
2684 				 * we got here.  Just punt the message.
2685 				 */
2686 				ahc_clear_intstat(ahc);
2687 				restart_sequencer(ahc);
2688 				return;
2689 			}
2690 
2691 			if (devinfo.role == ROLE_INITIATOR) {
2692 				struct scb *scb;
2693 				u_int scb_index;
2694 
2695 				scb_index = ahc_inb(ahc, SCB_TAG);
2696 				scb = &ahc->scb_data->scbarray[scb_index];
2697 
2698 				if (bus_phase == P_MESGOUT)
2699 					ahc_setup_initiator_msgout(ahc,
2700 								   &devinfo,
2701 								   scb);
2702 				else {
2703 					ahc->msg_type =
2704 					    MSG_TYPE_INITIATOR_MSGIN;
2705 					ahc->msgin_index = 0;
2706 				}
2707 			} else {
2708 				if (bus_phase == P_MESGOUT) {
2709 					ahc->msg_type =
2710 					    MSG_TYPE_TARGET_MSGOUT;
2711 					ahc->msgin_index = 0;
2712 				} else
2713 					/* XXX Ever executed??? */
2714 					ahc_setup_target_msgin(ahc, &devinfo);
2715 			}
2716 		}
2717 
2718 		/* Pass a NULL path so that handlers generate their own */
2719 		ahc_handle_message_phase(ahc, /*path*/NULL);
2720 		break;
2721 	}
2722 	case PERR_DETECTED:
2723 	{
2724 		/*
2725 		 * If we've cleared the parity error interrupt
2726 		 * but the sequencer still believes that SCSIPERR
2727 		 * is true, it must be that the parity error is
2728 		 * for the currently presented byte on the bus,
2729 		 * and we are not in a phase (data-in) where we will
2730 		 * eventually ack this byte.  Ack the byte and
2731 		 * throw it away in the hope that the target will
2732 		 * take us to message out to deliver the appropriate
2733 		 * error message.
2734 		 */
2735 		if ((intstat & SCSIINT) == 0
2736 		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
2737 			u_int curphase;
2738 
2739 			/*
2740 			 * The hardware will only let you ack bytes
2741 			 * if the expected phase in SCSISIGO matches
2742 			 * the current phase.  Make sure this is
2743 			 * currently the case.
2744 			 */
2745 			curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2746 			ahc_outb(ahc, LASTPHASE, curphase);
2747 			ahc_outb(ahc, SCSISIGO, curphase);
2748 			ahc_inb(ahc, SCSIDATL);
2749 		}
2750 		break;
2751 	}
2752 	case DATA_OVERRUN:
2753 	{
2754 		/*
2755 		 * When the sequencer detects an overrun, it
2756 		 * places the controller in "BITBUCKET" mode
2757 		 * and allows the target to complete its transfer.
2758 		 * Unfortunately, none of the counters get updated
2759 		 * when the controller is in this mode, so we have
2760 		 * no way of knowing how large the overrun was.
2761 		 */
2762 		u_int scbindex = ahc_inb(ahc, SCB_TAG);
2763 		u_int lastphase = ahc_inb(ahc, LASTPHASE);
2764 		u_int i;
2765 
2766 		scb = &ahc->scb_data->scbarray[scbindex];
2767 		for (i = 0; i < num_phases; i++) {
2768 			if (lastphase == phase_table[i].phase)
2769 				break;
2770 		}
2771 		xpt_print_path(scb->ccb->ccb_h.path);
2772 		printf("data overrun detected %s."
2773 		       "  Tag == 0x%x.\n",
2774 		       phase_table[i].phasemsg,
2775   		       scb->hscb->tag);
2776 		xpt_print_path(scb->ccb->ccb_h.path);
2777 		printf("%s seen Data Phase.  Length = %d.  NumSGs = %d.\n",
2778 		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
2779 		       scb->ccb->csio.dxfer_len, scb->sg_count);
2780 		if (scb->sg_count > 0) {
2781 			for (i = 0; i < scb->sg_count; i++) {
2782 				printf("sg[%d] - Addr 0x%x : Length %d\n",
2783 				       i,
2784 				       scb->sg_list[i].addr,
2785 				       scb->sg_list[i].len & AHC_SG_LEN_MASK);
2786 			}
2787 		}
2788 		/*
2789 		 * Set this and it will take effect when the
2790 		 * target does a command complete.
2791 		 */
2792 		ahc_freeze_devq(ahc, scb->ccb->ccb_h.path);
2793 		ahcsetccbstatus(scb->ccb, CAM_DATA_RUN_ERR);
2794 		ahc_freeze_ccb(scb->ccb);
2795 		break;
2796 	}
2797 	case TRACEPOINT:
2798 	{
2799 		printf("SAVED_SCSIID %x, SAVED_LUN %x, SCBPTR %x\n",
2800 		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
2801 		       ahc_inb(ahc, SCBPTR));
2802 #if 0
2803 		printf("%s: SCB_DATAPTR = %x, SCB_DATACNT = %x\n",
2804 		       ahc_name(ahc),
2805 		       ahc_inb(ahc, SCB_DATAPTR)
2806 		    | (ahc_inb(ahc, SCB_DATAPTR + 1) << 8)
2807 		    | (ahc_inb(ahc, SCB_DATAPTR + 2) << 16)
2808 		    | (ahc_inb(ahc, SCB_DATAPTR + 3) << 24),
2809 		       ahc_inb(ahc, SCB_DATACNT)
2810 		    | (ahc_inb(ahc, SCB_DATACNT + 1) << 8)
2811 		    | (ahc_inb(ahc, SCB_DATACNT + 2) << 16)
2812 		    | (ahc_inb(ahc, SCB_DATACNT + 3) << 24));
2813 		printf("SCSIRATE = %x\n", ahc_inb(ahc, SCSIRATE));
2814 		printf("SG_CACHEPTR = %x\n", ahc_inb(ahc, SINDEX));
2815 		printf("DFCNTRL = %x, DFSTATUS = %x\n",
2816 		       ahc_inb(ahc, DFCNTRL),
2817 		       ahc_inb(ahc, DFSTATUS));
2818 		if ((ahc->features & AHC_CMD_CHAN) != 0) {
2819 			printf("CCHADDR = 0x%x\n",
2820 			       ahc_inb(ahc, CCHADDR)
2821 			     | (ahc_inb(ahc, CCHADDR + 1) << 8)
2822 			     | (ahc_inb(ahc, CCHADDR + 2) << 16)
2823 			     | (ahc_inb(ahc, CCHADDR + 3) << 24));
2824 		} else {
2825 			printf("HADDR = 0x%x\n",
2826 			       ahc_inb(ahc, HADDR)
2827 			     | (ahc_inb(ahc, HADDR + 1) << 8)
2828 			     | (ahc_inb(ahc, HADDR + 2) << 16)
2829 			     | (ahc_inb(ahc, HADDR + 3) << 24));
2830 		}
2831 
2832 #endif
2833 		break;
2834 	}
2835 	case TRACEPOINT2:
2836 	{
2837 		printf("SINDEX = %x\n", ahc_inb(ahc, SINDEX));
2838 		printf("SCSIRATE = %x\n", ahc_inb(ahc, SCSIRATE));
2839 #if 0
2840 		printf("SCB_RESIDUAL_SGPTR = %x, SCB_RESIDUAL_DATACNT = %x\n",
2841 		       ahc_inb(ahc, SCB_RESIDUAL_SGPTR)
2842 		    | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
2843 		    | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
2844 		    | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24),
2845 		       ahc_inb(ahc, SCB_RESIDUAL_DATACNT)
2846 		    | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
2847 		    | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
2848 		    | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 3) << 24));
2849 		printf("DATA_COUNT_ODD = %x\n", ahc_inb(ahc, DATA_COUNT_ODD));
2850 		printf("SINDEX = %x\n", ahc_inb(ahc, SINDEX));
2851 		printf("SCB_SGPTR %x, SCB_RESIDUAL_SGPTR %x\n",
2852 		       ahc_inb(ahc, SCB_SGPTR),
2853 		       ahc_inb(ahc, SCB_RESIDUAL_SGPTR));
2854 		printf("SAVED_SCSIID %x, SAVED_LUN %d, "
2855 		       "DISCONNECTED_SCBH %d\n",
2856 		       ahc_inb(ahc, SAVED_SCSIID),
2857 		       ahc_inb(ahc, SAVED_LUN),
2858 		       ahc_inb(ahc, DISCONNECTED_SCBH));
2859 		int i;
2860 
2861 		if (ahc->unit != 1)
2862 			break;
2863 		for (i = 0; i < 32;) {
2864 			printf("0x%x 0x%x 0x%x 0x%x\n",
2865 			       ahc_inb(ahc, SCB_CONTROL + i),
2866 			       ahc_inb(ahc, SCB_CONTROL + i + 1),
2867 			       ahc_inb(ahc, SCB_CONTROL + i + 2),
2868 			       ahc_inb(ahc, SCB_CONTROL + i + 3));
2869 			i += 4;
2870 		}
2871 #endif
2872 #if 0
2873 		printf("SSTAT1 == 0x%x\n", ahc_inb(ahc, SSTAT1));
2874 		printf("SSTAT0 == 0x%x\n", ahc_inb(ahc, SSTAT0));
2875 		printf(", SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSISIGI));
2876 		printf("TRACEPOINT: CCHCNT = %d, SG_COUNT = %d\n",
2877 		       ahc_inb(ahc, CCHCNT), ahc_inb(ahc, SG_COUNT));
2878 		printf("TRACEPOINT: SCB_TAG = %d\n", ahc_inb(ahc, SCB_TAG));
2879 		printf("TRACEPOINT1: CCHADDR = %d, CCHCNT = %d, SCBPTR = %d\n",
2880 		       ahc_inb(ahc, CCHADDR)
2881 		    | (ahc_inb(ahc, CCHADDR+1) << 8)
2882 		    | (ahc_inb(ahc, CCHADDR+2) << 16)
2883 		    | (ahc_inb(ahc, CCHADDR+3) << 24),
2884 		       ahc_inb(ahc, CCHCNT)
2885 		    | (ahc_inb(ahc, CCHCNT+1) << 8)
2886 		    | (ahc_inb(ahc, CCHCNT+2) << 16),
2887 		       ahc_inb(ahc, SCBPTR));
2888 		printf("TRACEPOINT: WAITING_SCBH = %d\n", ahc_inb(ahc, WAITING_SCBH));
2889 		printf("TRACEPOINT: SCB_TAG = %d\n", ahc_inb(ahc, SCB_TAG));
2890 #endif
2891 		break;
2892 	}
2893 	default:
2894 		printf("ahc_intr: seqint, "
2895 		       "intstat == 0x%x, scsisigi = 0x%x\n",
2896 		       intstat, ahc_inb(ahc, SCSISIGI));
2897 		break;
2898 	}
2899 
2900 unpause:
2901 	/*
2902 	 *  The sequencer is paused immediately on
2903 	 *  a SEQINT, so we should unpause it when
2904 	 *  we're done.
2905 	 */
2906 	unpause_sequencer(ahc);
2907 }
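
/*
 * Illustrative sketch (not part of the driver): the automatic
 * REQUEST SENSE issued in the BAD_STATUS case above is a plain 6-byte
 * group 0 CDB.  For SCSI-2 and earlier targets the LUN rides in the
 * top three bits of byte 2, and byte 4 carries the allocation length.
 * The structure layout and helper name below are stand-ins for this
 * example only.
 */
#if 0
struct example_request_sense {
	uint8_t opcode;		/* 0x03, REQUEST SENSE */
	uint8_t byte2;		/* lun << 5 for SCSI-2 and earlier */
	uint8_t unused[2];
	uint8_t length;		/* allocation length */
	uint8_t control;
};

static void
example_build_request_sense(struct example_request_sense *sc,
			    u_int lun, u_int alloc_len, int scsi2_or_older)
{
	bzero(sc, sizeof(*sc));
	sc->opcode = 0x03;		/* REQUEST SENSE */
	if (scsi2_or_older && lun < 8)
		sc->byte2 = lun << 5;
	sc->length = alloc_len;
}
#endif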
2908 
2909 static void
2910 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
2911 {
2912 	u_int	scb_index;
2913 	u_int	status;
2914 	struct	scb *scb;
2915 	char	cur_channel;
2916 	char	intr_channel;
2917 
2918 	if ((ahc->features & AHC_TWIN) != 0
2919 	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
2920 		cur_channel = 'B';
2921 	else
2922 		cur_channel = 'A';
2923 	intr_channel = cur_channel;
2924 
2925 	status = ahc_inb(ahc, SSTAT1);
2926 	if (status == 0) {
2927 		if ((ahc->features & AHC_TWIN) != 0) {
2928 			/* Try the other channel */
2929 		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
2930 			status = ahc_inb(ahc, SSTAT1);
2931 		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
2932 			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
2933 		}
2934 		if (status == 0) {
2935 			printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
2936 			return;
2937 		}
2938 	}
2939 
2940 	scb_index = ahc_inb(ahc, SCB_TAG);
2941 	if (scb_index < ahc->scb_data->numscbs) {
2942 		scb = &ahc->scb_data->scbarray[scb_index];
2943 		if ((scb->flags & SCB_ACTIVE) == 0
2944 		 || (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0)
2945 			scb = NULL;
2946 	} else
2947 		scb = NULL;
2948 
2949 	if ((status & SCSIRSTI) != 0) {
2950 		printf("%s: Someone reset channel %c\n",
2951 			ahc_name(ahc), intr_channel);
2952 		ahc_reset_channel(ahc, intr_channel, /* Initiate Reset */FALSE);
2953 	} else if ((status & SCSIPERR) != 0) {
2954 		/*
2955 		 * Determine the bus phase and queue an appropriate message.
2956 		 * SCSIPERR is latched true as soon as a parity error
2957 		 * occurs.  If the sequencer acked the transfer that
2958 		 * caused the parity error and the currently presented
2959 		 * transfer on the bus has correct parity, SCSIPERR will
2960 		 * be cleared by CLRSCSIPERR.  Use this to determine if
2961 		 * we should look at the last phase the sequencer recorded,
2962 		 * or the current phase presented on the bus.
2963 		 */
2964 		u_int mesg_out;
2965 		u_int curphase;
2966 		u_int errorphase;
2967 		u_int lastphase;
2968 		u_int i;
2969 
2970 		lastphase = ahc_inb(ahc, LASTPHASE);
2971 		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2972 		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
2973 		/*
2974 		 * For all phases save DATA, the sequencer won't
2975 		 * automatically ack a byte that has a parity error
2976 		 * in it.  So the only way that the current phase
2977 		 * could be 'data-in' is if the parity error is for
2978 		 * an already acked byte in the data phase.  During
2979 		 * synchronous data-in transfers, we may actually
2980 		 * ack bytes before latching the current phase in
2981 		 * LASTPHASE, leading to the discrepancy between
2982 		 * curphase and lastphase.
2983 		 */
2984 		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
2985 		 || curphase == P_DATAIN)
2986 			errorphase = curphase;
2987 		else
2988 			errorphase = lastphase;
2989 
2990 		for (i = 0; i < num_phases; i++) {
2991 			if (errorphase == phase_table[i].phase)
2992 				break;
2993 		}
2994 		mesg_out = phase_table[i].mesg_out;
2995 		if (scb != NULL)
2996 			xpt_print_path(scb->ccb->ccb_h.path);
2997 		else
2998 			printf("%s:%c:%d: ", ahc_name(ahc),
2999 			       intr_channel,
3000 			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
3001 
3002 		printf("parity error detected %s. "
3003 		       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
3004 		       phase_table[i].phasemsg,
3005 		       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8),
3006 		       ahc_inb(ahc, SCSIRATE));
3007 
3008 		/*
3009 		 * We've set the hardware to assert ATN if we
3010 		 * get a parity error on "in" phases, so all we
3011 		 * need to do is stuff the message buffer with
3012 		 * the appropriate message.  "In" phases have set
3013 		 * mesg_out to something other than MSG_NOOP.
3014 		 */
3015 		if (mesg_out != MSG_NOOP) {
3016 			if (ahc->msg_type != MSG_TYPE_NONE)
3017 				ahc->send_msg_perror = TRUE;
3018 			else
3019 				ahc_outb(ahc, MSG_OUT, mesg_out);
3020 		}
3021 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
3022 		unpause_sequencer(ahc);
3023 	} else if ((status & BUSFREE) != 0
3024 		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
3025 		/*
3026 		 * First look at what phase we were last in.
3027 		 * If it's message out, chances are pretty good
3028 		 * that the busfree was in response to one of
3029 		 * our abort requests.
3030 		 */
3031 		u_int lastphase = ahc_inb(ahc, LASTPHASE);
3032 		u_int saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
3033 		u_int saved_lun = ahc_inb(ahc, SAVED_LUN);
3034 		u_int target = SCSIID_TARGET(ahc, saved_scsiid);
3035 		u_int initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
3036 		char channel = SCSIID_CHANNEL(ahc, saved_scsiid);
3037 		int printerror = 1;
3038 
3039 		ahc_outb(ahc, SCSISEQ,
3040 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
3041 		if (lastphase == P_MESGOUT) {
3042 			u_int message;
3043 			u_int tag;
3044 
3045 			message = ahc->msgout_buf[ahc->msgout_index - 1];
3046 			tag = SCB_LIST_NULL;
3047 			switch (message) {
3048 			case MSG_ABORT_TAG:
3049 				tag = scb->hscb->tag;
3050 				/* FALLTHROUGH */
3051 			case MSG_ABORT:
3052 				xpt_print_path(scb->ccb->ccb_h.path);
3053 				printf("SCB %d - Abort %s Completed.\n",
3054 				       scb->hscb->tag, tag == SCB_LIST_NULL ?
3055 				       "" : "Tag");
3056 				ahc_abort_scbs(ahc, target, channel,
3057 					       saved_lun, tag,
3058 					       ROLE_INITIATOR,
3059 					       CAM_REQ_ABORTED);
3060 				printerror = 0;
3061 				break;
3062 			case MSG_BUS_DEV_RESET:
3063 			{
3064 				struct ahc_devinfo devinfo;
3065 
3066 				/*
3067 				 * Don't mark the user's request for this BDR
3068 				 * as completing with CAM_BDR_SENT.  CAM3
3069 				 * specifies CAM_REQ_CMP.
3070 				 */
3071 				if (scb != NULL
3072 				 && scb->ccb->ccb_h.func_code == XPT_RESET_DEV
3073 				 && ahc_match_scb(ahc, scb, target, channel,
3074 						  saved_lun,
3075 						  SCB_LIST_NULL,
3076 						  ROLE_INITIATOR)) {
3077 					ahcsetccbstatus(scb->ccb, CAM_REQ_CMP);
3078 				}
3079 				ahc_compile_devinfo(&devinfo,
3080 						    initiator_role_id,
3081 						    target,
3082 						    saved_lun,
3083 						    channel,
3084 						    ROLE_INITIATOR);
3085 				ahc_handle_devreset(ahc, &devinfo,
3086 						    CAM_BDR_SENT, AC_SENT_BDR,
3087 						    "Bus Device Reset",
3088 						    /*verbose_level*/0);
3089 				printerror = 0;
3090 				break;
3091 			}
3092 			default:
3093 				break;
3094 			}
3095 		}
3096 		if (printerror != 0) {
3097 			u_int i;
3098 
3099 			if (scb != NULL) {
3100 				u_int tag;
3101 
3102 				if ((scb->hscb->control & TAG_ENB) != 0)
3103 					tag = scb->hscb->tag;
3104 				else
3105 					tag = SCB_LIST_NULL;
3106 				ahc_abort_scbs(ahc, target, channel,
3107 					       SCB_GET_LUN(scb), tag,
3108 					       ROLE_INITIATOR,
3109 					       CAM_UNEXP_BUSFREE);
3110 				xpt_print_path(scb->ccb->ccb_h.path);
3111 			} else {
3112 				/*
3113 				 * We had not fully identified this connection,
3114 				 * so we cannot abort anything.
3115 				 */
3116 				printf("%s: ", ahc_name(ahc));
3117 			}
3118 			for (i = 0; i < num_phases; i++) {
3119 				if (lastphase == phase_table[i].phase)
3120 					break;
3121 			}
3122 			printf("Unexpected busfree %s\n"
3123 			       "SEQADDR == 0x%x\n",
3124 			       phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0)
3125 				| (ahc_inb(ahc, SEQADDR1) << 8));
3126 		}
3127 		ahc_clear_msg_state(ahc);
3128 		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
3129 		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
3130 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
3131 		restart_sequencer(ahc);
3132 	} else if ((status & SELTO) != 0) {
3133 		u_int scbptr;
3134 
3135 		scbptr = ahc_inb(ahc, WAITING_SCBH);
3136 		ahc_outb(ahc, SCBPTR, scbptr);
3137 		scb_index = ahc_inb(ahc, SCB_TAG);
3138 
3139 		if (scb_index < ahc->scb_data->numscbs) {
3140 			scb = &ahc->scb_data->scbarray[scb_index];
3141 			if ((scb->flags & SCB_ACTIVE) == 0)
3142 				scb = NULL;
3143 		} else
3144 			scb = NULL;
3145 
3146 		if (scb == NULL) {
3147 			printf("%s: ahc_intr - referenced scb not "
3148 			       "valid during SELTO scb(%d, %d)\n",
3149 			       ahc_name(ahc), scbptr, scb_index);
3150 		} else {
3151 			ahcsetccbstatus(scb->ccb, CAM_SEL_TIMEOUT);
3152 			ahc_freeze_devq(ahc, scb->ccb->ccb_h.path);
3153 		}
3154 		/* Stop the selection */
3155 		ahc_outb(ahc, SCSISEQ, 0);
3156 
3157 		/* No more pending messages */
3158 		ahc_clear_msg_state(ahc);
3159 
3160 		/*
3161 		 * Although the driver does not care about the
3162 		 * 'Selection in Progress' status bit, the busy
3163 		 * LED does.  SELINGO is only cleared by a successful
3164 		 * selection, so we must manually clear it to ensure
3165 		 * the LED turns off just in case no future successful
3166 		 * selections occur (e.g. no devices on the bus).
3167 		 */
3168 		ahc_outb(ahc, CLRSINT0, CLRSELINGO);
3169 
3170 		/* Clear interrupt state */
3171 		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
3172 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
3173 		restart_sequencer(ahc);
3174 	} else {
3175 		xpt_print_path(scb->ccb->ccb_h.path);
3176 		printf("Unknown SCSIINT. Status = 0x%x\n", status);
3177 		ahc_outb(ahc, CLRSINT1, status);
3178 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
3179 		unpause_sequencer(ahc);
3180 	}
3181 }
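
/*
 * Illustrative sketch (not part of the driver): both the parity error
 * and unexpected-busfree paths above turn a latched phase value into a
 * printable table entry by linear search, relying on a catch-all final
 * entry when nothing matches.  The table below is a stand-in for this
 * example; the real phase_table[] (which also carries the message to
 * send on a parity error) is defined elsewhere in this driver.
 */
#if 0
struct example_phase_entry {
	u_int		phase;
	const char	*phasemsg;
};

static const struct example_phase_entry example_phases[] = {
	{ P_DATAOUT,	"in Data-out phase"	},
	{ P_DATAIN,	"in Data-in phase"	},
	{ P_COMMAND,	"in Command phase"	},
	{ P_MESGOUT,	"in Message-out phase"	},
	{ P_STATUS,	"in Status phase"	},
	{ P_MESGIN,	"in Message-in phase"	},
	{ P_BUSFREE,	"while idle"		},
	{ 0,		"in unknown phase"	}	/* catch-all */
};

static const char *
example_phase_name(u_int lastphase)
{
	u_int i;
	u_int last = sizeof(example_phases)/sizeof(example_phases[0]) - 1;

	for (i = 0; i < last; i++) {
		if (example_phases[i].phase == lastphase)
			break;
	}
	return (example_phases[i].phasemsg);
}
#endif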
3182 
3183 static void
3184 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3185 {
3186 	/*
3187 	 * We need to initiate transfer negotiations.
3188 	 * If our current and goal settings are identical,
3189 	 * we want to renegotiate due to a check condition.
3190 	 */
3191 	struct	ahc_initiator_tinfo *tinfo;
3192 	struct	tmode_tstate *tstate;
3193 	struct	ahc_syncrate *rate;
3194 	int	dowide;
3195 	int	dosync;
3196 	int	doppr;
3197 	int	use_ppr;
3198 	u_int	period;
3199 	u_int	ppr_options;
3200 	u_int	offset;
3201 
3202 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
3203 				    devinfo->target, &tstate);
3204 	dowide = tinfo->current.width != tinfo->goal.width;
3205 	dosync = tinfo->current.period != tinfo->goal.period;
3206 	doppr = tinfo->current.ppr_options != tinfo->goal.ppr_options;
3207 
3208 	if (!dowide && !dosync && !doppr) {
3209 		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
3210 		dosync = tinfo->goal.period != 0;
3211 		doppr = tinfo->goal.ppr_options != 0;
3212 	}
3213 
3214 	if (!dowide && !dosync && !doppr) {
3215 		panic("ahc_intr: AWAITING_MSG for negotiation, "
3216 		      "but no negotiation needed\n");
3217 	}
3218 
3219 	use_ppr = (tinfo->current.transport_version >= 3) || doppr;
3220 	if (use_ppr) {
3221 		ahc_construct_ppr(ahc, tinfo->goal.period, tinfo->goal.offset,
3222 				  tinfo->goal.width, tinfo->goal.ppr_options);
3223 	} else  if (dowide) {
3224 		ahc_construct_wdtr(ahc, tinfo->goal.width);
3225 	} else if (dosync) {
3226 
3227 		period = tinfo->goal.period;
3228 		ppr_options = 0;
3229 		rate = ahc_devlimited_syncrate(ahc, &period, &ppr_options);
3230 		offset = tinfo->goal.offset;
3231 		ahc_validate_offset(ahc, rate, &offset,
3232 				    tinfo->current.width);
3233 		ahc_construct_sdtr(ahc, period, offset);
3234 	}
3235 }
3236 
3237 static void
3238 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3239 			   struct scb *scb)
3240 {
3241 	/*
3242 	 * To facilitate adding multiple messages together,
3243 	 * each routine should increment the index and len
3244 	 * variables instead of setting them explicitly.
3245 	 */
3246 	ahc->msgout_index = 0;
3247 	ahc->msgout_len = 0;
3248 
3249 	if ((scb->flags & SCB_DEVICE_RESET) == 0
3250 	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
3251 		u_int identify_msg;
3252 
3253 		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
3254 		if ((scb->hscb->control & DISCENB) != 0)
3255 			identify_msg |= MSG_IDENTIFY_DISCFLAG;
3256 		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
3257 		ahc->msgout_len++;
3258 
3259 		if ((scb->hscb->control & TAG_ENB) != 0) {
3260 			ahc->msgout_buf[ahc->msgout_index++] =
3261 			    scb->ccb->csio.tag_action;
3262 			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
3263 			ahc->msgout_len += 2;
3264 		}
3265 	}
3266 
3267 	if (scb->flags & SCB_DEVICE_RESET) {
3268 		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
3269 		ahc->msgout_len++;
3270 		xpt_print_path(scb->ccb->ccb_h.path);
3271 		printf("Bus Device Reset Message Sent\n");
3272 	} else if ((scb->flags & SCB_ABORT) != 0) {
3273 		if ((scb->hscb->control & TAG_ENB) != 0)
3274 			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
3275 		else
3276 			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
3277 		ahc->msgout_len++;
3278 		xpt_print_path(scb->ccb->ccb_h.path);
3279 		printf("Abort Message Sent\n");
3280 	} else if ((ahc->targ_msg_req & devinfo->target_mask) != 0
3281 		|| (scb->flags & SCB_NEGOTIATE) != 0) {
3282 		ahc_build_transfer_msg(ahc, devinfo);
3283 	} else {
3284 		printf("ahc_intr: AWAITING_MSG for an SCB that "
3285 		       "does not have a waiting message\n");
3286 		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
3287 		       devinfo->target_mask);
3288 		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
3289 		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
3290 		      ahc_inb(ahc, MSG_OUT), scb->flags);
3291 	}
3292 
3293 	/*
3294 	 * Clear the MK_MESSAGE flag from the SCB so we aren't
3295 	 * asked to send this message again.
3296 	 */
3297 	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
3298 	ahc->msgout_index = 0;
3299 	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3300 }
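
/*
 * Illustrative sketch (not part of the driver): the IDENTIFY message
 * assembled above is a single byte with the identify flag (0x80) set,
 * the disconnect-allowed flag (0x40) optionally or'd in, and the LUN
 * in the low bits.  MSG_IDENTIFYFLAG and MSG_IDENTIFY_DISCFLAG are the
 * constants already used in this file; the helper name is hypothetical.
 */
#if 0
static uint8_t
example_identify_msg(u_int lun, int disconnect_ok)
{
	uint8_t msg;

	msg = MSG_IDENTIFYFLAG | lun;		/* 0x80 | lun */
	if (disconnect_ok)
		msg |= MSG_IDENTIFY_DISCFLAG;	/* 0x40 */
	return (msg);
}
#endif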
3301 
3302 static void
3303 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3304 {
3305 	/*
3306 	 * To facilitate adding multiple messages together,
3307 	 * each routine should increment the index and len
3308 	 * variables instead of setting them explicitly.
3309 	 */
3310 	ahc->msgout_index = 0;
3311 	ahc->msgout_len = 0;
3312 
3313 	if ((ahc->targ_msg_req & devinfo->target_mask) != 0)
3314 		ahc_build_transfer_msg(ahc, devinfo);
3315 	else
3316 		panic("ahc_intr: AWAITING target message with no message");
3317 
3318 	ahc->msgout_index = 0;
3319 	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3320 }
3321 
3322 static int
3323 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3324 {
3325 	/*
3326 	 * What we care about here is if we had an
3327 	 * outstanding SDTR or WDTR message for this
3328 	 * target.  If we did, this is a signal that
3329 	 * the target is refusing negotiation.
3330 	 */
3331 	struct scb *scb;
3332 	struct ahc_initiator_tinfo *tinfo;
3333 	struct tmode_tstate *tstate;
3334 	u_int scb_index;
3335 	u_int last_msg;
3336 	int   response = 0;
3337 
3338 	scb_index = ahc_inb(ahc, SCB_TAG);
3339 	scb = &ahc->scb_data->scbarray[scb_index];
3340 
3341 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
3342 				    devinfo->our_scsiid,
3343 				    devinfo->target, &tstate);
3344 	/* Might be necessary */
3345 	last_msg = ahc_inb(ahc, LAST_MSG);
3346 
3347 	if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/FALSE)) {
3348 
3349 		/* note 8bit xfers */
3350 		printf("%s:%c:%d: refuses WIDE negotiation.  Using "
3351 		       "8bit transfers\n", ahc_name(ahc),
3352 		       devinfo->channel, devinfo->target);
3353 		ahc_set_width(ahc, devinfo, scb->ccb->ccb_h.path,
3354 			      MSG_EXT_WDTR_BUS_8_BIT,
3355 			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3356 			      /*paused*/TRUE);
3357 		/*
3358 		 * No need to clear the sync rate.  If the target
3359 		 * did not accept the command, our syncrate is
3360 		 * unaffected.  If the target started the negotiation,
3361 		 * but rejected our response, we already cleared the
3362 		 * sync rate before sending our WDTR.
3363 		 */
3364 		if (tinfo->goal.period) {
3365 			u_int period;
3366 			u_int ppr_options;
3367 
3368 			/* Start the sync negotiation */
3369 			period = tinfo->goal.period;
3370 			ppr_options = 0;
3371 			ahc_devlimited_syncrate(ahc, &period, &ppr_options);
3372 			ahc->msgout_index = 0;
3373 			ahc->msgout_len = 0;
3374 			ahc_construct_sdtr(ahc, period, tinfo->goal.offset);
3375 			ahc->msgout_index = 0;
3376 			response = 1;
3377 		}
3378 	} else if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/FALSE)) {
3379 		/* note asynch xfers and clear flag */
3380 		ahc_set_syncrate(ahc, devinfo, scb->ccb->ccb_h.path,
3381 				 /*syncrate*/NULL, /*period*/0,
3382 				 /*offset*/0, /*ppr_options*/0,
3383 				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3384 				 /*paused*/TRUE);
3385 		printf("%s:%c:%d: refuses synchronous negotiation. "
3386 		       "Using asynchronous transfers\n",
3387 		       ahc_name(ahc),
3388 		       devinfo->channel, devinfo->target);
3389 	} else if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) {
3390 		struct	ccb_trans_settings neg;
3391 
3392 		printf("%s:%c:%d: refuses tagged commands.  Performing "
3393 		       "non-tagged I/O\n", ahc_name(ahc),
3394 		       devinfo->channel, devinfo->target);
3395 
3396 		ahc_set_tags(ahc, devinfo, FALSE);
3397 		neg.flags = CCB_TRANS_CURRENT_SETTINGS;
3398 		neg.valid = CCB_TRANS_TQ_VALID;
3399 		xpt_setup_ccb(&neg.ccb_h, scb->ccb->ccb_h.path, /*priority*/1);
3400 		xpt_async(AC_TRANSFER_NEG, scb->ccb->ccb_h.path, &neg);
3401 
3402 		/*
3403 		 * Resend the identify for this CCB as the target
3404 		 * may believe that the selection is invalid otherwise.
3405 		 */
3406 		ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL)
3407 					  & ~MSG_SIMPLE_Q_TAG);
3408 	 	scb->hscb->control &= ~MSG_SIMPLE_Q_TAG;
3409 		scb->ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3410 		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3411 		ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
3412 
3413 		/*
3414 		 * Requeue all tagged commands for this target
3415 		 * currently in our possession so they can be
3416 		 * converted to untagged commands.
3417 		 */
3418 		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3419 				   SCB_GET_CHANNEL(ahc, scb),
3420 				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3421 				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
3422 				   SEARCH_COMPLETE);
3423 	} else {
3424 		/*
3425 		 * Otherwise, we ignore it.
3426 		 */
3427 		printf("%s:%c:%d: Message reject for %x -- ignored\n",
3428 		       ahc_name(ahc), devinfo->channel, devinfo->target,
3429 		       last_msg);
3430 	}
3431 	return (response);
3432 }
3433 
3434 static void
3435 ahc_clear_msg_state(struct ahc_softc *ahc)
3436 {
3437 	ahc->msgout_len = 0;
3438 	ahc->msgin_index = 0;
3439 	ahc->msg_type = MSG_TYPE_NONE;
3440 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
3441 }
3442 
3443 static void
3444 ahc_handle_message_phase(struct ahc_softc *ahc, struct cam_path *path)
3445 {
3446 	struct	ahc_devinfo devinfo;
3447 	u_int	bus_phase;
3448 	int	end_session;
3449 
3450 	ahc_fetch_devinfo(ahc, &devinfo);
3451 	end_session = FALSE;
3452 	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
3453 
3454 reswitch:
3455 	switch (ahc->msg_type) {
3456 	case MSG_TYPE_INITIATOR_MSGOUT:
3457 	{
3458 		int lastbyte;
3459 		int phasemis;
3460 		int msgdone;
3461 
3462 		if (ahc->msgout_len == 0)
3463 			panic("REQINIT interrupt with no active message");
3464 
3465 		phasemis = bus_phase != P_MESGOUT;
3466 		if (phasemis) {
3467 			if (bus_phase == P_MESGIN) {
3468 				/*
3469 				 * Change gears and see if
3470 				 * this message is of interest to
3471 				 * us or should be passed back to
3472 				 * the sequencer.
3473 				 */
3474 				ahc_outb(ahc, CLRSINT1, CLRATNO);
3475 				ahc->send_msg_perror = FALSE;
3476 				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
3477 				ahc->msgin_index = 0;
3478 				goto reswitch;
3479 			}
3480 			end_session = TRUE;
3481 			break;
3482 		}
3483 
3484 		if (ahc->send_msg_perror) {
3485 			ahc_outb(ahc, CLRSINT1, CLRATNO);
3486 			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
3487 			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
3488 			break;
3489 		}
3490 
3491 		msgdone	= ahc->msgout_index == ahc->msgout_len;
3492 		if (msgdone) {
3493 			/*
3494 			 * The target has requested a retry.
3495 			 * Re-assert ATN, reset our message index to
3496 			 * 0, and try again.
3497 			 */
3498 			ahc->msgout_index = 0;
3499 			ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
3500 		}
3501 
3502 		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
3503 		if (lastbyte) {
3504 			/* Last byte is signified by dropping ATN */
3505 			ahc_outb(ahc, CLRSINT1, CLRATNO);
3506 		}
3507 
3508 		/*
3509 		 * Clear our interrupt status and present
3510 		 * the next byte on the bus.
3511 		 */
3512 		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
3513 		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
3514 		break;
3515 	}
3516 	case MSG_TYPE_INITIATOR_MSGIN:
3517 	{
3518 		int phasemis;
3519 		int message_done;
3520 
3521 		phasemis = bus_phase != P_MESGIN;
3522 
3523 		if (phasemis) {
3524 			ahc->msgin_index = 0;
3525 			if (bus_phase == P_MESGOUT
3526 			 && (ahc->send_msg_perror == TRUE
3527 			  || (ahc->msgout_len != 0
3528 			   && ahc->msgout_index == 0))) {
3529 				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3530 				goto reswitch;
3531 			}
3532 			end_session = TRUE;
3533 			break;
3534 		}
3535 
3536 		/* Pull the byte in without acking it */
3537 		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
3538 
3539 		message_done = ahc_parse_msg(ahc, path, &devinfo);
3540 
3541 		if (message_done) {
3542 			/*
3543 			 * Clear our incoming message buffer in case there
3544 			 * is another message following this one.
3545 			 */
3546 			ahc->msgin_index = 0;
3547 
3548 			/*
3549 			 * If this message elicited a response,
3550 			 * assert ATN so the target takes us to the
3551 			 * message out phase.
3552 			 */
3553 			if (ahc->msgout_len != 0)
3554 				ahc_outb(ahc, SCSISIGO,
3555 					 ahc_inb(ahc, SCSISIGO) | ATNO);
3556 		} else
3557 			ahc->msgin_index++;
3558 
3559 		/* Ack the byte */
3560 		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
3561 		ahc_inb(ahc, SCSIDATL);
3562 		break;
3563 	}
3564 	case MSG_TYPE_TARGET_MSGIN:
3565 	{
3566 		int msgdone;
3567 		int msgout_request;
3568 
3569 		if (ahc->msgout_len == 0)
3570 			panic("Target MSGIN with no active message");
3571 
3572 		/*
3573 		 * If we interrupted a mesgout session, the initiator
3574 		 * will not know this until our first REQ.  So, we
3575 		 * only honor mesgout requests after we've sent our
3576 		 * first byte.
3577 		 */
3578 		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
3579 		 && ahc->msgout_index > 0)
3580 			msgout_request = TRUE;
3581 		else
3582 			msgout_request = FALSE;
3583 
3584 		if (msgout_request) {
3585 
3586 			/*
3587 			 * Change gears and see if
3588 			 * this message is of interest to
3589 			 * us or should be passed back to
3590 			 * the sequencer.
3591 			 */
3592 			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
3593 			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
3594 			ahc->msgin_index = 0;
3595 			/* Dummy read to REQ for first byte */
3596 			ahc_inb(ahc, SCSIDATL);
3597 			ahc_outb(ahc, SXFRCTL0,
3598 				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
3599 			break;
3600 		}
3601 
3602 		msgdone = ahc->msgout_index == ahc->msgout_len;
3603 		if (msgdone) {
3604 			ahc_outb(ahc, SXFRCTL0,
3605 				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
3606 			end_session = TRUE;
3607 			break;
3608 		}
3609 
3610 		/*
3611 		 * Present the next byte on the bus.
3612 		 */
3613 		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
3614 		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
3615 		break;
3616 	}
3617 	case MSG_TYPE_TARGET_MSGOUT:
3618 	{
3619 		int lastbyte;
3620 		int msgdone;
3621 
3622 		/*
3623 		 * The initiator signals that this is
3624 		 * the last byte by dropping ATN.
3625 		 */
3626 		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
3627 
3628 		/*
3629 		 * Read the latched byte, but turn off SPIOEN first
3630 		 * so that we don't inadvertently cause a REQ for the
3631 		 * next byte.
3632 		 */
3633 		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
3634 		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
3635 		msgdone = ahc_parse_msg(ahc, path, &devinfo);
3636 		if (msgdone == MSGLOOP_TERMINATED) {
3637 			/*
3638 			 * The message is *really* done in that it caused
3639 			 * us to go to bus free.  The sequencer has already
3640 			 * been reset at this point, so pull the ejection
3641 			 * handle.
3642 			 */
3643 			return;
3644 		}
3645 
3646 		ahc->msgin_index++;
3647 
3648 		/*
3649 		 * XXX Read spec about initiator dropping ATN too soon
3650 		 *     and use msgdone to detect it.
3651 		 */
3652 		if (msgdone == MSGLOOP_MSGCOMPLETE) {
3653 			ahc->msgin_index = 0;
3654 
3655 			/*
3656 			 * If this message elicited a response, transition
3657 			 * to the Message in phase and send it.
3658 			 */
3659 			if (ahc->msgout_len != 0) {
3660 				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
3661 				ahc_outb(ahc, SXFRCTL0,
3662 					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
3663 				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3664 				ahc->msgin_index = 0;
3665 				break;
3666 			}
3667 		}
3668 
3669 		if (lastbyte)
3670 			end_session = TRUE;
3671 		else {
3672 			/* Ask for the next byte. */
3673 			ahc_outb(ahc, SXFRCTL0,
3674 				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
3675 		}
3676 
3677 		break;
3678 	}
3679 	default:
3680 		panic("Unknown REQINIT message type");
3681 	}
3682 
3683 	if (end_session) {
3684 		ahc_clear_msg_state(ahc);
3685 		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
3686 	} else
3687 		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
3688 }
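
/*
 * Summary of the hand off above (descriptive note, not in the original
 * sources): the kernel reports the outcome of each REQINIT back to the
 * sequencer through RETURN_1.  EXIT_MSG_LOOP tells it the message session
 * is over, while CONT_MSG_LOOP asks to be re-entered on the next REQ so
 * that any remaining message bytes can be handled.
 */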
3689 
3690 /*
3691  * See if we sent a particular extended message to the target.
3692  * If "full" is true, the target saw the full message.
3693  * If "full" is false, the target saw at least the first
3694  * byte of the message.
3695  */
3696 static int
3697 ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full)
3698 {
3699 	int found;
3700 	u_int index;
3701 
3702 	found = FALSE;
3703 	index = 0;
3704 
3705 	while (index < ahc->msgout_len) {
3706 		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
3707 
3708 			/* Found a candidate */
3709 			if (ahc->msgout_buf[index+2] == msgtype) {
3710 				u_int end_index;
3711 
3712 				end_index = index + 1
3713 					  + ahc->msgout_buf[index + 1];
3714 				if (full) {
3715 					if (ahc->msgout_index > end_index)
3716 						found = TRUE;
3717 				} else if (ahc->msgout_index > index)
3718 					found = TRUE;
3719 			}
3720 			break;
3721 		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
3722 			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
3723 
3724 			/* Skip tag type and tag id or residue param*/
3725 			index += 2;
3726 		} else {
3727 			/* Single byte message */
3728 			index++;
3729 		}
3730 	}
3731 	return (found);
3732 }
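
/*
 * For reference (descriptive note, not in the original sources), the scan
 * above relies on the extended message layout from the SCSI spec.  An SDTR
 * built by ahc_construct_sdtr looks like this in msgout_buf (WDTR and PPR
 * differ only in length and payload):
 *
 *	msgout_buf[index + 0]	MSG_EXTENDED
 *	msgout_buf[index + 1]	message length (MSG_EXT_SDTR_LEN == 3)
 *	msgout_buf[index + 2]	MSG_EXT_SDTR
 *	msgout_buf[index + 3]	transfer period factor
 *	msgout_buf[index + 4]	REQ/ACK offset
 *
 * so "index + 1 + msgout_buf[index + 1]" is the index of the last message
 * byte, and the "full" test requires msgout_index to have advanced past it.
 */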
3733 
3734 static int
3735 ahc_parse_msg(struct ahc_softc *ahc, struct cam_path *path,
3736 	      struct ahc_devinfo *devinfo)
3737 {
3738 	struct	ahc_initiator_tinfo *tinfo;
3739 	struct	tmode_tstate *tstate;
3740 	int	reject;
3741 	int	done;
3742 	int	response;
3743 	u_int	targ_scsirate;
3744 
3745 	done = MSGLOOP_IN_PROG;
3746 	response = FALSE;
3747 	reject = FALSE;
3748 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
3749 				    devinfo->target, &tstate);
3750 	targ_scsirate = tinfo->scsirate;
3751 
3752 	/*
3753 	 * Parse as much of the message as is available,
3754 	 * rejecting it if we don't support it.  When
3755 	 * the entire message is available and has been
3756 	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
3757 	 * that we have parsed an entire message.
3758 	 *
3759 	 * In the case of extended messages, we accept the length
3760 	 * byte outright and perform more checking once we know the
3761 	 * extended message type.
3762 	 */
3763 	switch (ahc->msgin_buf[0]) {
3764 	case MSG_MESSAGE_REJECT:
3765 		response = ahc_handle_msg_reject(ahc, devinfo);
3766 		/* FALLTHROUGH */
3767 	case MSG_NOOP:
3768 		done = MSGLOOP_MSGCOMPLETE;
3769 		break;
3770 	case MSG_EXTENDED:
3771 	{
3772 		/* Wait for enough of the message to begin validation */
3773 		if (ahc->msgin_index < 2)
3774 			break;
3775 		switch (ahc->msgin_buf[2]) {
3776 		case MSG_EXT_SDTR:
3777 		{
3778 			struct	 ahc_syncrate *syncrate;
3779 			u_int	 period;
3780 			u_int	 ppr_options;
3781 			u_int	 offset;
3782 			u_int	 saved_offset;
3783 
3784 			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
3785 				reject = TRUE;
3786 				break;
3787 			}
3788 
3789 			/*
3790 			 * Wait until we have both args before validating
3791 			 * and acting on this message.
3792 			 *
3793 			 * Add one to MSG_EXT_SDTR_LEN to account for
3794 			 * the extended message preamble.
3795 			 */
3796 			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
3797 				break;
3798 
3799 			period = ahc->msgin_buf[3];
3800 			ppr_options = 0;
3801 			saved_offset = offset = ahc->msgin_buf[4];
3802 			syncrate = ahc_devlimited_syncrate(ahc, &period,
3803 							   &ppr_options);
3804 			ahc_validate_offset(ahc, syncrate, &offset,
3805 					    targ_scsirate & WIDEXFER);
3806 			ahc_set_syncrate(ahc, devinfo, path,
3807 					 syncrate, period,
3808 					 offset, ppr_options,
3809 					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3810 					 /*paused*/TRUE);
3811 
3812 			/*
3813 			 * See if we initiated Sync Negotiation
3814 			 * and didn't have to fall down to async
3815 			 * transfers.
3816 			 */
3817 			if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/TRUE)) {
3818 				/* We started it */
3819 				if (saved_offset != offset) {
3820 					/* Went too low - force async */
3821 					reject = TRUE;
3822 				}
3823 			} else {
3824 				/*
3825 				 * Send our own SDTR in reply
3826 				 */
3827 				if (bootverbose)
3828 					printf("Sending SDTR!\n");
3829 				ahc->msgout_index = 0;
3830 				ahc->msgout_len = 0;
3831 				ahc_construct_sdtr(ahc, period, offset);
3832 				ahc->msgout_index = 0;
3833 				response = TRUE;
3834 			}
3835 			done = MSGLOOP_MSGCOMPLETE;
3836 			break;
3837 		}
3838 		case MSG_EXT_WDTR:
3839 		{
3840 			u_int bus_width;
3841 			u_int saved_width;
3842 			u_int sending_reply;
3843 
3844 			sending_reply = FALSE;
3845 			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
3846 				reject = TRUE;
3847 				break;
3848 			}
3849 
3850 			/*
3851 			 * Wait until we have our arg before validating
3852 			 * and acting on this message.
3853 			 *
3854 			 * Add one to MSG_EXT_WDTR_LEN to account for
3855 			 * the extended message preamble.
3856 			 */
3857 			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
3858 				break;
3859 
3860 			bus_width = ahc->msgin_buf[3];
3861 			saved_width = bus_width;
3862 			ahc_validate_width(ahc, &bus_width);
3863 
3864 			if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/TRUE)) {
3865 				/*
3866 				 * Don't send a WDTR back to the
3867 				 * target, since we asked first.
3868 				 * If the width went higher than our
3869 				 * request, reject it.
3870 				 */
3871 				if (saved_width > bus_width) {
3872 					reject = TRUE;
3873 					printf("%s: target %d requested %dBit "
3874 					       "transfers.  Rejecting...\n",
3875 					       ahc_name(ahc), devinfo->target,
3876 					       8 * (0x01 << bus_width));
3877 					bus_width = 0;
3878 				}
3879 			} else {
3880 				/*
3881 				 * Send our own WDTR in reply
3882 				 */
3883 				if (bootverbose)
3884 					printf("Sending WDTR!\n");
3885 				ahc->msgout_index = 0;
3886 				ahc->msgout_len = 0;
3887 				ahc_construct_wdtr(ahc, bus_width);
3888 				ahc->msgout_index = 0;
3889 				response = TRUE;
3890 				sending_reply = TRUE;
3891 			}
3892 			ahc_set_width(ahc, devinfo, path, bus_width,
3893 				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3894 				      /*paused*/TRUE);
3895 
3896 			/* After a wide message, we are async */
3897 			ahc_set_syncrate(ahc, devinfo, path,
3898 					 /*syncrate*/NULL, /*period*/0,
3899 					 /*offset*/0, /*ppr_options*/0,
3900 					 AHC_TRANS_ACTIVE, /*paused*/TRUE);
3901 			if (sending_reply == FALSE && reject == FALSE) {
3902 
3903 				/* XXX functionalize */
3904 				if (tinfo->goal.period) {
3905 					struct	ahc_syncrate *rate;
3906 					u_int	period;
3907 					u_int	ppr;
3908 					u_int	offset;
3909 
3910 					/* Start the sync negotiation */
3911 					period = tinfo->goal.period;
3912 					ppr = 0;
3913 					rate = ahc_devlimited_syncrate(ahc,
3914 								       &period,
3915 								       &ppr);
3916 					offset = tinfo->goal.offset;
3917 					ahc_validate_offset(ahc, rate, &offset,
3918 							  tinfo->current.width);
3919 					ahc->msgout_index = 0;
3920 					ahc->msgout_len = 0;
3921 					ahc_construct_sdtr(ahc, period, offset);
3922 					ahc->msgout_index = 0;
3923 					response = TRUE;
3924 				}
3925 			}
3926 			done = MSGLOOP_MSGCOMPLETE;
3927 			break;
3928 		}
3929 		case MSG_EXT_PPR:
3930 		{
3931 			struct	ahc_syncrate *syncrate;
3932 			u_int	period;
3933 			u_int	offset;
3934 			u_int	bus_width;
3935 			u_int	ppr_options;
3936 			u_int	saved_width;
3937 			u_int	saved_offset;
3938 			u_int	saved_ppr_options;
3939 
3940 			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
3941 				reject = TRUE;
3942 				break;
3943 			}
3944 
3945 			/*
3946 			 * Wait until we have all args before validating
3947 			 * and acting on this message.
3948 			 *
3949 			 * Add one to MSG_EXT_PPR_LEN to account for
3950 			 * the extended message preamble.
3951 			 */
3952 			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
3953 				break;
3954 
3955 			period = ahc->msgin_buf[3];
3956 			offset = ahc->msgin_buf[5];
3957 			bus_width = ahc->msgin_buf[6];
3958 			saved_width = bus_width;
3959 			ppr_options = ahc->msgin_buf[7];
3960 			/*
3961 			 * According to the spec, a DT only
3962 			 * period factor with no DT option
3963 			 * set implies async.
3964 			 */
3965 			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
3966 			 && period == 9)
3967 				offset = 0;
3968 			saved_ppr_options = ppr_options;
3969 			saved_offset = offset;
3970 
3971 			/*
3972 			 * Mask out any options we don't support
3973 			 * on any controller.  Transfer options are
3974 			 * only available if we are negotiating wide.
3975 			 */
3976 			ppr_options &= MSG_EXT_PPR_DT_REQ;
3977 			if (bus_width == 0)
3978 				ppr_options = 0;
3979 
3980 			ahc_validate_width(ahc, &bus_width);
3981 			syncrate = ahc_devlimited_syncrate(ahc, &period,
3982 							   &ppr_options);
3983 			ahc_validate_offset(ahc, syncrate, &offset, bus_width);
3984 
3985 			if (ahc_sent_msg(ahc, MSG_EXT_PPR, /*full*/TRUE)) {
3986 				/*
3987 				 * If we are unable to do any of the
3988 				 * requested options (we went too low),
3989 				 * then we'll have to reject the message.
3990 				 */
3991 				if (saved_width > bus_width
3992 				 || saved_offset != offset
3993 				 || saved_ppr_options != ppr_options)
3994 					reject = TRUE;
3995 			} else {
3996 				printf("Target Initated PPR detected!\n");
3997 				response = TRUE;
3998 			}
3999 			ahc_set_syncrate(ahc, devinfo, path,
4000 					 syncrate, period,
4001 					 offset, ppr_options,
4002 					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
4003 					 /*paused*/TRUE);
4004 			ahc_set_width(ahc, devinfo, path, bus_width,
4005 				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
4006 				      /*paused*/TRUE);
4007 			break;
4008 		}
4009 		default:
4010 			/* Unknown extended message.  Reject it. */
4011 			reject = TRUE;
4012 			break;
4013 		}
4014 		break;
4015 	}
4016 	case MSG_BUS_DEV_RESET:
4017 		ahc_handle_devreset(ahc, devinfo,
4018 				    CAM_BDR_SENT, AC_SENT_BDR,
4019 				    "Bus Device Reset Received",
4020 				    /*verbose_level*/0);
4021 		restart_sequencer(ahc);
4022 		done = MSGLOOP_TERMINATED;
4023 		break;
4024 	case MSG_ABORT_TAG:
4025 	case MSG_ABORT:
4026 	case MSG_CLEAR_QUEUE:
4027 		/* Target mode messages */
4028 		if (devinfo->role != ROLE_TARGET) {
4029 			reject = TRUE;
4030 			break;
4031 		}
4032 		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
4033 			       devinfo->lun,
4034 			       ahc->msgin_buf[0] == MSG_ABORT_TAG
4035 						  ? SCB_LIST_NULL
4036 						  : ahc_inb(ahc, INITIATOR_TAG),
4037 			       ROLE_TARGET, CAM_REQ_ABORTED);
4038 
4039 		tstate = ahc->enabled_targets[devinfo->our_scsiid];
4040 		if (tstate != NULL) {
4041 			struct tmode_lstate* lstate;
4042 
4043 			lstate = tstate->enabled_luns[devinfo->lun];
4044 			if (lstate != NULL) {
4045 				ahc_queue_lstate_event(ahc, lstate,
4046 						       devinfo->our_scsiid,
4047 						       ahc->msgin_buf[0],
4048 						       /*arg*/0);
4049 				ahc_send_lstate_events(ahc, lstate);
4050 			}
4051 		}
4052 		done = MSGLOOP_MSGCOMPLETE;
4053 		break;
4054 	case MSG_TERM_IO_PROC:
4055 	default:
4056 		reject = TRUE;
4057 		break;
4058 	}
4059 
4060 	if (reject) {
4061 		/*
4062 		 * Setup to reject the message.
4063 		 */
4064 		ahc->msgout_index = 0;
4065 		ahc->msgout_len = 1;
4066 		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
4067 		done = MSGLOOP_MSGCOMPLETE;
4068 		response = TRUE;
4069 	}
4070 
4071 	if (done != MSGLOOP_IN_PROG && !response)
4072 		/* Clear the outgoing message buffer */
4073 		ahc->msgout_len = 0;
4074 
4075 	return (done);
4076 }
4077 
4078 static void
4079 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
4080 {
4081 	u_int scb_index;
4082 	struct scb *scb;
4083 
4084 	scb_index = ahc_inb(ahc, SCB_TAG);
4085 	scb = &ahc->scb_data->scbarray[scb_index];
4086 	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
4087 	 || (scb->ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN) {
4088 		/*
4089 		 * Ignore the message if we haven't
4090 		 * seen an appropriate data phase yet.
4091 		 */
4092 	} else {
4093 		/*
4094 		 * If the residual occurred on the last
4095 		 * transfer and the transfer request was
4096 		 * expected to end on an odd count, do
4097 		 * nothing.  Otherwise, subtract a byte
4098 		 * and update the residual count accordingly.
4099 		 */
4100 		uint32_t sgptr;
4101 
4102 		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
4103 		if ((sgptr & SG_LIST_NULL) != 0
4104 		 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
4105 			/*
4106 			 * If the residual occurred on the last
4107 			 * transfer and the transfer request was
4108 			 * expected to end on an odd count, do
4109 			 * nothing.
4110 			 */
4111 		} else {
4112 			struct ahc_dma_seg *sg;
4113 			uint32_t data_cnt;
4114 			uint32_t data_addr;
4115 
4116 			/* Pull in the rest of the sgptr */
4117 			sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
4118 			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
4119 			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
4120 			sgptr &= SG_PTR_MASK;
4121 			data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
4122 				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
4123 				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
4124 
4125 			data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
4126 				  | (ahc_inb(ahc, SHADDR + 2) << 16)
4127 				  | (ahc_inb(ahc, SHADDR + 1) << 8)
4128 				  | (ahc_inb(ahc, SHADDR));
4129 
4130 			data_cnt += 1;
4131 			data_addr -= 1;
4132 
4133 			sg = ahc_sg_bus_to_virt(scb, sgptr);
4134 			/*
4135 			 * The residual sg ptr points to the next S/G
4136 			 * to load so we must go back one.
4137 			 */
4138 			sg--;
4139 			if (sg != scb->sg_list
4140 			 && (sg->len & AHC_SG_LEN_MASK) < data_cnt) {
4141 
4142 				sg--;
4143 				data_cnt = 1 | (sg->len & AHC_DMA_LAST_SEG);
4144 				data_addr = sg->addr
4145 					  + (sg->len & AHC_SG_LEN_MASK) - 1;
4146 
4147 				/*
4148 				 * Increment sg so it points to the
4149 				 * "next" sg.
4150 				 */
4151 				sg++;
4152 				sgptr = ahc_sg_virt_to_bus(scb, sg);
4153 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
4154 					 sgptr >> 24);
4155 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
4156 					 sgptr >> 16);
4157 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
4158 					 sgptr >> 8);
4159 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
4160 			}
4161 
4162 /* XXX What about high address byte??? */
4163 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
4164 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
4165 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
4166 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
4167 
4168 /* XXX Perhaps better to just keep the saved address in sram */
4169 			if ((ahc->features & AHC_ULTRA2) != 0) {
4170 				ahc_outb(ahc, HADDR + 3, data_addr >> 24);
4171 				ahc_outb(ahc, HADDR + 2, data_addr >> 16);
4172 				ahc_outb(ahc, HADDR + 1, data_addr >> 8);
4173 				ahc_outb(ahc, HADDR, data_addr);
4174 				ahc_outb(ahc, DFCNTRL, PRELOADEN);
4175 				ahc_outb(ahc, SXFRCTL0,
4176 					 ahc_inb(ahc, SXFRCTL0) | CLRCHN);
4177 			} else {
4178 				ahc_outb(ahc, SHADDR + 3, data_addr >> 24);
4179 				ahc_outb(ahc, SHADDR + 2, data_addr >> 16);
4180 				ahc_outb(ahc, SHADDR + 1, data_addr >> 8);
4181 				ahc_outb(ahc, SHADDR, data_addr);
4182 			}
4183 		}
4184 	}
4185 }
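
/*
 * Worked example of the fixup above (illustrative numbers, not from the
 * original sources): say the residual count reads back as 10 and the shadow
 * address as 0x1000.  The last byte latched on the wide bus was never
 * wanted, so the residual becomes 11 and the address backs up to 0xFFF.  If
 * 11 now exceeds the length of the S/G segment being worked on, the extra
 * byte really belonged to the end of the previous segment; in that case we
 * back up one segment, leave a one byte residual pointing at its final
 * byte, and rewrite SCB_RESIDUAL_SGPTR so the sequencer reloads the segment
 * we just left as the "next" one.
 */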
4186 
4187 static void
4188 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
4189 		    cam_status status, ac_code acode, char *message,
4190 		    int verbose_level)
4191 {
4192 	struct cam_path *path;
4193 	int found;
4194 	int error;
4195 	struct tmode_tstate* tstate;
4196 	u_int lun;
4197 
4198 	error = ahc_create_path(ahc, devinfo, &path);
4199 
4200 	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
4201 			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
4202 			       status);
4203 
4204 	/*
4205 	 * Send an immediate notify ccb to all target mode peripheral
4206 	 * drivers affected by this action.
4207 	 */
4208 	tstate = ahc->enabled_targets[devinfo->our_scsiid];
4209 	if (tstate != NULL) {
4210 		for (lun = 0; lun <= 7; lun++) {
4211 			struct tmode_lstate* lstate;
4212 
4213 			lstate = tstate->enabled_luns[lun];
4214 			if (lstate == NULL)
4215 				continue;
4216 
4217 			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
4218 					       MSG_BUS_DEV_RESET, /*arg*/0);
4219 			ahc_send_lstate_events(ahc, lstate);
4220 		}
4221 	}
4222 
4223 	/*
4224 	 * Go back to async/narrow transfers and renegotiate.
4225 	 * ahc_set_width and ahc_set_syncrate can cope with NULL
4226 	 * paths.
4227 	 */
4228 	ahc_set_width(ahc, devinfo, path, MSG_EXT_WDTR_BUS_8_BIT,
4229 		      AHC_TRANS_CUR, /*paused*/TRUE);
4230 	ahc_set_syncrate(ahc, devinfo, path, /*syncrate*/NULL,
4231 			 /*period*/0, /*offset*/0, /*ppr_options*/0,
4232 			 AHC_TRANS_CUR, /*paused*/TRUE);
4233 
4234 	if (error == CAM_REQ_CMP && acode != 0)
4235 		xpt_async(AC_SENT_BDR, path, NULL);
4236 
4237 	if (error == CAM_REQ_CMP)
4238 		xpt_free_path(path);
4239 
4240 	if (message != NULL
4241 	 && (verbose_level <= bootverbose))
4242 		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
4243 		       message, devinfo->channel, devinfo->target, found);
4244 }
4245 
4246 /*
4247  * We have an SCB which has been processed by the
4248  * adapter.  Now we look to see how the operation
4249  * went.
4250  */
4251 static void
4252 ahc_done(struct ahc_softc *ahc, struct scb *scb)
4253 {
4254 	union ccb *ccb;
4255 
4256 	CAM_DEBUG(scb->ccb->ccb_h.path, CAM_DEBUG_TRACE,
4257 		  ("ahc_done - scb %d\n", scb->hscb->tag));
4258 
4259 	ccb = scb->ccb;
4260 	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
4261 	if (ccb->ccb_h.func_code == XPT_SCSI_IO
4262 	  && ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
4263 	   || ccb->csio.tag_action == CAM_TAG_ACTION_NONE)
4264 	  && (ahc->features & AHC_SCB_BTT) == 0) {
4265 		struct scb_tailq *untagged_q;
4266 
4267 		untagged_q = &ahc->untagged_queues[ccb->ccb_h.target_id];
4268 		TAILQ_REMOVE(untagged_q, scb, links.tqe);
4269 		ahc_run_untagged_queue(ahc, untagged_q);
4270 	}
4271 
4272 	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);
4273 
4274 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
4275 		bus_dmasync_op_t op;
4276 
4277 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
4278 			op = BUS_DMASYNC_POSTREAD;
4279 		else
4280 			op = BUS_DMASYNC_POSTWRITE;
4281 		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
4282 		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
4283 	}
4284 
4285 	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
4286 		if (ahc_ccb_status(ccb) == CAM_REQ_INPROG)
4287 			ccb->ccb_h.status |= CAM_REQ_CMP;
4288 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4289 		ahcfreescb(ahc, scb);
4290 		xpt_done(ccb);
4291 		return;
4292 	}
4293 
4294 	/*
4295 	 * If the recovery SCB completes, we have to be
4296 	 * out of our timeout.
4297 	 */
4298 	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
4299 
4300 		struct	ccb_hdr *ccbh;
4301 
4302 		/*
4303 		 * We were able to complete the command successfully,
4304 		 * so reinstate the timeouts for all other pending
4305 		 * commands.
4306 		 */
4307 		ccbh = ahc->pending_ccbs.lh_first;
4308 		while (ccbh != NULL) {
4309 			struct scb *pending_scb;
4310 
4311 			pending_scb = (struct scb *)ccbh->ccb_scb_ptr;
4312 			ccbh->timeout_ch =
4313 			    timeout(ahc_timeout, pending_scb,
4314 				    (ccbh->timeout * hz)/1000);
4315 			ccbh = LIST_NEXT(ccbh, sim_links.le);
4316 		}
4317 
4318 		/*
4319 		 * Ensure that we didn't put a second instance of this
4320 		 * SCB into the QINFIFO.
4321 		 */
4322 		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
4323 				   SCB_GET_CHANNEL(ahc, scb),
4324 				   SCB_GET_LUN(scb), scb->hscb->tag,
4325 				   ROLE_INITIATOR, /*status*/0,
4326 				   SEARCH_REMOVE);
4327 		if (ahc_ccb_status(ccb) == CAM_BDR_SENT
4328 		 || ahc_ccb_status(ccb) == CAM_REQ_ABORTED)
4329 			ahcsetccbstatus(ccb, CAM_CMD_TIMEOUT);
4330 		xpt_print_path(ccb->ccb_h.path);
4331 		printf("no longer in timeout, status = %x\n",
4332 		       ccb->ccb_h.status);
4333 	}
4334 
4335 	/* Don't clobber any existing error state */
4336 	if (ahc_ccb_status(ccb) == CAM_REQ_INPROG) {
4337 		ccb->ccb_h.status |= CAM_REQ_CMP;
4338 	} else if ((scb->flags & SCB_SENSE) != 0) {
4339 		/*
4340 		 * We performed autosense retrieval.
4341 		 *
4342 		 * bzero the sense data before having
4343 		 * the drive fill it.  The SCSI spec mandates
4344 		 * that any untransferred data should be
4345 		 * assumed to be zero.  Complete the 'bounce'
4346 		 * of sense information through buffers accessible
4347 		 * via bus-space by copying it into the client's
4348 		 * csio.
4349 		 */
4350 		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
4351 		bcopy(&ahc->scb_data->sense[scb->hscb->tag],
4352 		      &ccb->csio.sense_data,
4353 		      scb->sg_list->len & AHC_SG_LEN_MASK);
4354 		scb->ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
4355 	}
4356 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4357 	ahcfreescb(ahc, scb);
4358 	xpt_done(ccb);
4359 }
4360 
4361 /*
4362  * Determine the number of SCBs available on the controller
4363  */
4364 int
4365 ahc_probe_scbs(struct ahc_softc *ahc) {
4366 	int i;
4367 
4368 	for (i = 0; i < AHC_SCB_MAX; i++) {
4369 		ahc_outb(ahc, SCBPTR, i);
4370 		ahc_outb(ahc, SCB_CONTROL, i);
4371 		if (ahc_inb(ahc, SCB_CONTROL) != i)
4372 			break;
4373 		ahc_outb(ahc, SCBPTR, 0);
4374 		if (ahc_inb(ahc, SCB_CONTROL) != 0)
4375 			break;
4376 	}
4377 	return (i);
4378 }
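
/*
 * Descriptive note (not in the original sources): the probe above writes
 * each candidate index into SCB_CONTROL with SCBPTR selecting that slot and
 * then checks two things -- that the value reads back from the candidate
 * slot and that slot 0 still holds the 0 written on the first iteration, so
 * we are not simply seeing one slot aliased at every address.  The first
 * index that fails either test is the number of real SCBs on the card.
 */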
4379 
4380 /*
4381  * Start the board, ready for normal operation
4382  */
4383 int
4384 ahc_init(struct ahc_softc *ahc)
4385 {
4386 	int	  max_targ = 15;
4387 	int	  i;
4388 	int	  term;
4389 	u_int	  scsi_conf;
4390 	u_int	  scsiseq_template;
4391 	u_int	  ultraenb;
4392 	u_int	  discenable;
4393 	u_int	  tagenable;
4394 	size_t	  driver_data_size;
4395 	uint32_t physaddr;
4396 
4397 #ifdef AHC_PRINT_SRAM
4398 	printf("Scratch Ram:");
4399 	for (i = 0x20; i < 0x5f; i++) {
4400 		if (((i % 8) == 0) && (i != 0)) {
4401 			printf ("\n              ");
4402 		}
4403 		printf (" 0x%x", ahc_inb(ahc, i));
4404 	}
4405 	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4406 		for (i = 0x70; i < 0x7f; i++) {
4407 			if (((i % 8) == 0) && (i != 0)) {
4408 				printf ("\n              ");
4409 			}
4410 			printf (" 0x%x", ahc_inb(ahc, i));
4411 		}
4412 	}
4413 	printf ("\n");
4414 #endif
4415 
4416 	/*
4417 	 * Assume we have a board at this stage and it has been reset.
4418 	 */
4419 	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
4420 		ahc->our_id = ahc->our_id_b = 7;
4421 
4422 	/*
4423 	 * Default to allowing initiator operations.
4424 	 */
4425 	ahc->flags |= AHC_INITIATORMODE;
4426 
4427 	/*
4428 	 * XXX Would be better to use a per device flag, but PCI and EISA
4429 	 *     devices don't have them yet.
4430 	 */
4431 	if ((AHC_TMODE_ENABLE & (0x01 << ahc->unit)) != 0) {
4432 		ahc->flags |= AHC_TARGETMODE;
4433 		/*
4434 		 * Although we have space for both the initiator and
4435 		 * target roles on ULTRA2 chips, we currently disable
4436 		 * the initiator role to allow multi-scsi-id target mode
4437 		 * configurations.  We can only respond on the same SCSI
4438 		 * ID as our initiator role if we allow initiator operation.
4439 		 * At some point, we should add a configuration knob to
4440 		 * allow both roles to be loaded.
4441 		 */
4442 		ahc->flags &= ~AHC_INITIATORMODE;
4443 	}
4444 
4445 	/* DMA tag for mapping buffers into device visible space. */
4446 	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
4447 			       /*lowaddr*/BUS_SPACE_MAXADDR,
4448 			       /*highaddr*/BUS_SPACE_MAXADDR,
4449 			       /*filter*/NULL, /*filterarg*/NULL,
4450 			       /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG,
4451 			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
4452 			       /*flags*/BUS_DMA_ALLOCNOW,
4453 			       &ahc->buffer_dmat) != 0) {
4454 		return (ENOMEM);
4455 	}
4456 
4457 	ahc->init_level++;
4458 
4459 	/*
4460 	 * DMA tag for our command fifos and other data in system memory
4461 	 * the card's sequencer must be able to access.  For initiator
4462 	 * roles, we need to allocate space for the qinfifo and qoutfifo.
4463 	 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
4464 	 * When providing for the target mode role, we additionally must
4465 	 * provide space for the incoming target command fifo and an extra
4466 	 * byte to deal with a dma bug in some chip versions.
4467 	 */
4468 	driver_data_size = 2 * 256 * sizeof(uint8_t);
4469 	if ((ahc->flags & AHC_TARGETMODE) != 0)
4470 		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
4471 				 + /*DMA WideOdd Bug Buffer*/1;
4472 	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
4473 			       /*lowaddr*/BUS_SPACE_MAXADDR,
4474 			       /*highaddr*/BUS_SPACE_MAXADDR,
4475 			       /*filter*/NULL, /*filterarg*/NULL,
4476 			       driver_data_size,
4477 			       /*nsegments*/1,
4478 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4479 			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
4480 		return (ENOMEM);
4481 	}
4482 
4483 	ahc->init_level++;
4484 
4485 	/* Allocation of driver data */
4486 	if (bus_dmamem_alloc(ahc->shared_data_dmat,
4487 			     (void **)&ahc->qoutfifo,
4488 			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
4489 		return (ENOMEM);
4490 	}
4491 
4492 	ahc->init_level++;
4493 
4494         /* And permanently map it in */
4495 	bus_dmamap_load(ahc->shared_data_dmat, ahc->shared_data_dmamap,
4496 			ahc->qoutfifo, driver_data_size, ahcdmamapcb,
4497 			&ahc->shared_data_busaddr, /*flags*/0);
4498 
4499 	if ((ahc->flags & AHC_TARGETMODE) != 0) {
4500 		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
4501 		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4502 		ahc->dma_bug_buf = ahc->shared_data_busaddr
4503 				 + driver_data_size - 1;
4504 		/* All target command blocks start out invalid. */
4505 		for (i = 0; i < AHC_TMODE_CMDS; i++)
4506 			ahc->targetcmds[i].cmd_valid = 0;
4507 		ahc->tqinfifonext = 1;
4508 		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
4509 		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
4510 		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4511 	}
4512 	ahc->qinfifo = &ahc->qoutfifo[256];
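
	/*
	 * Resulting shared data layout (descriptive note, not in the original
	 * sources): with target mode enabled the block begins with the
	 * target_cmd array, followed by the 256 byte qoutfifo, the 256 byte
	 * qinfifo, and finally the single DMA WideOdd bug byte at
	 * shared_data_busaddr + driver_data_size - 1.  Without target mode
	 * only the two fifos are present.
	 */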
4513 
4514 	ahc->init_level++;
4515 
4516 	/* Allocate SCB data now that buffer_dmat is initialized */
4517 	if (ahc->scb_data->maxhscbs == 0)
4518 		if (ahcinitscbdata(ahc) != 0)
4519 			return (ENOMEM);
4520 
4521 	/* There are no untagged SCBs active yet. */
4522 	/* XXX will need to change for SCB ram approach */
4523 	for (i = 0; i < 16; i++)
4524 		ahc_index_busy_tcl(ahc, BUILD_TCL(i << 4, 0), /*unbusy*/TRUE);
4525 
4526 	/* All of our queues are empty */
4527 	for (i = 0; i < 256; i++)
4528 		ahc->qoutfifo[i] = SCB_LIST_NULL;
4529 
4530 	for (i = 0; i < 256; i++)
4531 		ahc->qinfifo[i] = SCB_LIST_NULL;
4532 
4533 	if ((ahc->features & AHC_MULTI_TID) != 0) {
4534 		ahc_outb(ahc, TARGID, 0);
4535 		ahc_outb(ahc, TARGID + 1, 0);
4536 	}
4537 
4538 	/*
4539 	 * Allocate a tstate to house information for our
4540 	 * initiator presence on the bus as well as the user
4541 	 * data for any target mode initiator.
4542 	 */
4543 	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
4544 		printf("%s: unable to allocate tmode_tstate.  "
4545 		       "Failing attach\n", ahc_name(ahc));
4546 		return (-1);
4547 	}
4548 
4549 	if ((ahc->features & AHC_TWIN) != 0) {
4550 		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
4551 			printf("%s: unable to allocate tmode_tstate.  "
4552 			       "Failing attach\n", ahc_name(ahc));
4553 			return (-1);
4554 		}
4555  		printf("Twin Channel, A SCSI Id=%d, B SCSI Id=%d, primary %c, ",
4556 		       ahc->our_id, ahc->our_id_b,
4557 		       ahc->flags & AHC_CHANNEL_B_PRIMARY? 'B': 'A');
4558 	} else {
4559 		if ((ahc->features & AHC_WIDE) != 0) {
4560 			printf("Wide ");
4561 		} else {
4562 			printf("Single ");
4563 		}
4564 		printf("Channel %c, SCSI Id=%d, ", ahc->channel, ahc->our_id);
4565 	}
4566 
4567 	ahc_outb(ahc, SEQ_FLAGS, 0);
4568 
4569 	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) {
4570 		ahc->flags |= AHC_PAGESCBS;
4571 		printf("%d/%d SCBs\n", ahc->scb_data->maxhscbs, AHC_SCB_MAX);
4572 	} else {
4573 		ahc->flags &= ~AHC_PAGESCBS;
4574 		printf("%d SCBs\n", ahc->scb_data->maxhscbs);
4575 	}
4576 
4577 #ifdef AHC_DEBUG
4578 	if (ahc_debug & AHC_SHOWMISC) {
4579 		printf("%s: hardware scb %d bytes; kernel scb %d bytes; "
4580 		       "ahc_dma %d bytes\n",
4581 			ahc_name(ahc),
4582 		        sizeof(struct hardware_scb),
4583 			sizeof(struct scb),
4584 			sizeof(struct ahc_dma_seg));
4585 	}
4586 #endif /* AHC_DEBUG */
4587 
4588 	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4589 	if (ahc->features & AHC_TWIN) {
4590 
4591 		/*
4592 		 * The device is gated to channel B after a chip reset,
4593 		 * so set those values first
4594 		 */
4595 		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4596 		if ((ahc->features & AHC_ULTRA2) != 0)
4597 			ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id_b);
4598 		else
4599 			ahc_outb(ahc, SCSIID, ahc->our_id_b);
4600 		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4601 		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4602 					|term|ENSTIMER|ACTNEGEN);
4603 		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4604 		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4605 
4606 		if ((scsi_conf & RESET_SCSI) != 0
4607 		 && (ahc->flags & AHC_INITIATORMODE) != 0)
4608 			ahc->flags |= AHC_RESET_BUS_B;
4609 
4610 		/* Select Channel A */
4611 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4612 	}
4613 	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4614 	if ((ahc->features & AHC_ULTRA2) != 0)
4615 		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4616 	else
4617 		ahc_outb(ahc, SCSIID, ahc->our_id);
4618 	scsi_conf = ahc_inb(ahc, SCSICONF);
4619 	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4620 				|term
4621 				|ENSTIMER|ACTNEGEN);
4622 	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4623 	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4624 
4625 	if ((scsi_conf & RESET_SCSI) != 0
4626 	 && (ahc->flags & AHC_INITIATORMODE) != 0)
4627 		ahc->flags |= AHC_RESET_BUS_A;
4628 
4629 	/*
4630 	 * Look at the information that board initialization or
4631 	 * the board bios has left us.
4632 	 */
4633 	ultraenb = 0;
4634 	tagenable = ALL_TARGETS_MASK;
4635 
4636 	/* Grab the disconnection disable table and invert it for our needs */
4637 	if (ahc->flags & AHC_USEDEFAULTS) {
4638 		printf("%s: Host Adapter Bios disabled.  Using default SCSI "
4639 			"device parameters\n", ahc_name(ahc));
4640 		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
4641 			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
4642 		discenable = ALL_TARGETS_MASK;
4643 		if ((ahc->features & AHC_ULTRA) != 0)
4644 			ultraenb = ALL_TARGETS_MASK;
4645 	} else {
4646 		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
4647 			   | ahc_inb(ahc, DISC_DSB));
4648 		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
4649 			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
4650 				      | ahc_inb(ahc, ULTRA_ENB);
4651 	}
4652 
4653 	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
4654 		max_targ = 7;
4655 
4656 	for (i = 0; i <= max_targ; i++) {
4657 		struct ahc_initiator_tinfo *tinfo;
4658 		struct tmode_tstate *tstate;
4659 		u_int our_id;
4660 		u_int target_id;
4661 		char channel;
4662 
4663 		channel = 'A';
4664 		our_id = ahc->our_id;
4665 		target_id = i;
4666 		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
4667 			channel = 'B';
4668 			our_id = ahc->our_id_b;
4669 			target_id = i % 8;
4670 		}
4671 		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
4672 					    target_id, &tstate);
4673 		/* Default to async narrow across the board */
4674 		bzero(tinfo, sizeof(*tinfo));
4675 		if (ahc->flags & AHC_USEDEFAULTS) {
4676 			if ((ahc->features & AHC_WIDE) != 0)
4677 				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4678 
4679 			/*
4680 			 * These will be truncated when we determine the
4681 			 * connection type we have with the target.
4682 			 */
4683 			tinfo->user.period = ahc_syncrates->period;
4684 			tinfo->user.offset = ~0;
4685 		} else {
4686 			u_int scsirate;
4687 			uint16_t mask;
4688 
4689 			/* Take the settings leftover in scratch RAM. */
4690 			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
4691 			mask = (0x01 << i);
4692 			if ((ahc->features & AHC_ULTRA2) != 0) {
4693 				u_int offset;
4694 				u_int maxsync;
4695 
4696 				if ((scsirate & SOFS) == 0x0F) {
4697 					/*
4698 					 * Haven't negotiated yet,
4699 					 * so the format is different.
4700 					 */
4701 					scsirate = (scsirate & SXFR) >> 4
4702 						 | (ultraenb & mask)
4703 						  ? 0x08 : 0x0
4704 						 | (scsirate & WIDEXFER);
4705 					offset = MAX_OFFSET_ULTRA2;
4706 				} else
4707 					offset = ahc_inb(ahc, TARG_OFFSET + i);
4708 				maxsync = AHC_SYNCRATE_ULTRA2;
4709 				if ((ahc->features & AHC_DT) != 0)
4710 					maxsync = AHC_SYNCRATE_DT;
4711 				tinfo->user.period =
4712 				    ahc_find_period(ahc, scsirate, maxsync);
4713 				if (offset == 0)
4714 					tinfo->user.period = 0;
4715 				else
4716 					tinfo->user.offset = ~0;
4717 				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
4718 				 && (ahc->features & AHC_DT) != 0)
4719 					tinfo->user.ppr_options =
4720 					    MSG_EXT_PPR_DT_REQ;
4721 			} else if ((scsirate & SOFS) != 0) {
4722 				tinfo->user.period =
4723 				    ahc_find_period(ahc, scsirate,
4724 						    (ultraenb & mask)
4725 						   ? AHC_SYNCRATE_ULTRA
4726 						   : AHC_SYNCRATE_FAST);
4727 				if (tinfo->user.period != 0)
4728 					tinfo->user.offset = ~0;
4729 			}
4730 			if ((scsirate & WIDEXFER) != 0
4731 			 && (ahc->features & AHC_WIDE) != 0)
4732 				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4733 			tinfo->user.protocol_version = 4;
4734 			if ((ahc->features & AHC_DT) != 0)
4735 				tinfo->user.transport_version = 3;
4736 			else
4737 				tinfo->user.transport_version = 2;
4738 			tinfo->goal.protocol_version = 2;
4739 			tinfo->goal.transport_version = 2;
4740 			tinfo->current.protocol_version = 2;
4741 			tinfo->current.transport_version = 2;
4742 		}
4743 		tstate->ultraenb = ultraenb;
4744 		tstate->discenable = discenable;
4745 		tstate->tagenable = 0; /* Wait until the XPT says it's okay */
4746 	}
4747 	ahc->user_discenable = discenable;
4748 	ahc->user_tagenable = tagenable;
4749 
4750 	/*
4751 	 * Tell the sequencer where it can find our arrays in memory.
4752 	 */
4753 	physaddr = ahc->scb_data->hscb_busaddr;
4754 	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4755 	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4756 	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4757 	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4758 
4759 	physaddr = ahc->shared_data_busaddr;
4760 	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4761 	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4762 	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4763 	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4764 
4765 	/*
4766 	 * Initialize the group code to command length table.
4767 	 * This overrides the values in TARG_SCSIRATE, so only
4768 	 * setup the table after we have processed that information.
4769 	 */
4770 	ahc_outb(ahc, CMDSIZE_TABLE, 5);
4771 	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4772 	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4773 	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4774 	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4775 	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4776 	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4777 	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
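
	/*
	 * For reference (descriptive note, not in the original sources): the
	 * table above maps SCSI CDB group codes (the top three bits of the
	 * opcode) to command lengths, apparently stored as length - 1:
	 * group 0 -> 6 byte CDBs, groups 1 and 2 -> 10 bytes, group 4 -> 16
	 * bytes, group 5 -> 12 bytes, and 0 for the reserved and vendor
	 * specific groups (3, 6, and 7).
	 */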
4778 
4779 	/* Tell the sequencer of our initial queue positions */
4780 	ahc_outb(ahc, KERNEL_QINPOS, 0);
4781 	ahc_outb(ahc, QINPOS, 0);
4782 	ahc_outb(ahc, QOUTPOS, 0);
4783 
4784 	/* Don't have any special messages to send to targets */
4785 	ahc_outb(ahc, TARGET_MSG_REQUEST, 0);
4786 	ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 0);
4787 
4788 	/*
4789 	 * Use the built in queue management registers
4790 	 * if they are available.
4791 	 */
4792 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4793 		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4794 		ahc_outb(ahc, SDSCB_QOFF, 0);
4795 		ahc_outb(ahc, SNSCB_QOFF, 0);
4796 		ahc_outb(ahc, HNSCB_QOFF, 0);
4797 	}
4798 
4799 
4800 	/* We don't have any waiting selections */
4801 	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4802 
4803 	/* Our disconnection list is empty too */
4804 	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4805 
4806 	/* Message out buffer starts empty */
4807 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4808 
4809 	/*
4810 	 * Setup the allowed SCSI Sequences based on operational mode.
4811 	 * If we are a target, we'll enable select-in operations once
4812 	 * we've had a lun enabled.
4813 	 */
4814 	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4815 	if ((ahc->flags & AHC_INITIATORMODE) != 0)
4816 		scsiseq_template |= ENRSELI;
4817 	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4818 
4819 	/*
4820 	 * Load the Sequencer program and Enable the adapter
4821 	 * in "fast" mode.
4822          */
4823 	if (bootverbose)
4824 		printf("%s: Downloading Sequencer Program...",
4825 		       ahc_name(ahc));
4826 
4827 	ahc_loadseq(ahc);
4828 
4829 	/* We have to wait until after any system dumps... */
4830 	EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
4831 			      ahc, SHUTDOWN_PRI_DEFAULT);
4832 
4833 	if ((ahc->features & AHC_ULTRA2) != 0) {
4834 		int wait;
4835 
4836 		/*
4837 		 * Wait for up to 500ms for our transceivers
4838 		 * to settle.  If the adapter does not have
4839 		 * a cable attached, the transceivers may
4840 		 * never settle, so don't complain if we
4841 		 * fail here.
4842 		 */
4843 		pause_sequencer(ahc);
4844 		for (wait = 5000;
4845 		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
4846 		     wait--)
4847 			DELAY(100);
4848 		unpause_sequencer(ahc);
4849 	}
4850 
4851 	return (0);
4852 }
4853 
4854 static cam_status
4855 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
4856 		    struct tmode_tstate **tstate, struct tmode_lstate **lstate,
4857 		    int notfound_failure)
4858 {
4859 	u_int our_id;
4860 
4861 	/*
4862 	 * If we are not configured for target mode, someone
4863 	 * is really confused to be sending this to us.
4864 	 */
4865 	if ((ahc->flags & AHC_TARGETMODE) == 0)
4866 		return (CAM_REQ_INVALID);
4867 
4868 	/* Range check target and lun */
4869 
4870 	/*
4871 	 * Handle the 'black hole' device that sucks up
4872 	 * requests to unattached luns on enabled targets.
4873 	 */
4874 	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
4875 	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4876 		*tstate = NULL;
4877 		*lstate = ahc->black_hole;
4878 	} else {
4879 		u_int max_id;
4880 
4881 		if (cam_sim_bus(sim) == 0)
4882 			our_id = ahc->our_id;
4883 		else
4884 			our_id = ahc->our_id_b;
4885 
4886 		max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
4887 		if (ccb->ccb_h.target_id > max_id)
4888 			return (CAM_TID_INVALID);
4889 
4890 		if (ccb->ccb_h.target_lun > 7)
4891 			return (CAM_LUN_INVALID);
4892 
4893 		if (ccb->ccb_h.target_id != our_id) {
4894 			if ((ahc->features & AHC_MULTI_TID) != 0) {
4895 				/*
4896 				 * Only allow additional targets if
4897 				 * the initiator role is disabled.
4898 				 * The hardware cannot handle a re-select-in
4899 				 * on the initiator id during a re-select-out
4900 				 * on a different target id.
4901 				 */
4902 			   	if ((ahc->flags & AHC_INITIATORMODE) != 0)
4903 					return (CAM_TID_INVALID);
4904 			} else {
4905 				/*
4906 				 * Only allow our target id to change
4907 				 * if the initiator role is not configured
4908 				 * and there are no enabled luns which
4909 				 * are attached to the currently registered
4910 				 * scsi id.
4911 				 */
4912 			   	if ((ahc->flags & AHC_INITIATORMODE) != 0
4913 				 || ahc->enabled_luns > 0)
4914 					return (CAM_TID_INVALID);
4915 			}
4916 		}
4917 
4918 		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
4919 		*lstate = NULL;
4920 		if (*tstate != NULL)
4921 			*lstate =
4922 			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
4923 	}
4924 
4925 	if (notfound_failure != 0 && *lstate == NULL)
4926 		return (CAM_PATH_INVALID);
4927 
4928 	return (CAM_REQ_CMP);
4929 }
4930 
4931 static void
4932 ahc_action(struct cam_sim *sim, union ccb *ccb)
4933 {
4934 	struct	ahc_softc *ahc;
4935 	struct	tmode_lstate *lstate;
4936 	u_int	target_id;
4937 	u_int	our_id;
4938 	int	s;
4939 
4940 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));
4941 
4942 	ahc = (struct ahc_softc *)cam_sim_softc(sim);
4943 
4944 	target_id = ccb->ccb_h.target_id;
4945 	our_id = SIM_SCSI_ID(ahc, sim);
4946 
4947 	switch (ccb->ccb_h.func_code) {
4948 	/* Common cases first */
4949 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
4950 	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
4951 	{
4952 		struct	   tmode_tstate *tstate;
4953 		cam_status status;
4954 
4955 		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
4956 					     &lstate, TRUE);
4957 
4958 		if (status != CAM_REQ_CMP) {
4959 			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
4960 				/* Response from the black hole device */
4961 				tstate = NULL;
4962 				lstate = ahc->black_hole;
4963 			} else {
4964 				ccb->ccb_h.status = status;
4965 				xpt_done(ccb);
4966 				break;
4967 			}
4968 		}
4969 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4970 			int s;
4971 
4972 			s = splcam();
4973 			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
4974 					  sim_links.sle);
4975 			ccb->ccb_h.status = CAM_REQ_INPROG;
4976 			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
4977 				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
4978 			splx(s);
4979 			break;
4980 		}
4981 
4982 		/*
4983 		 * The target_id represents the target we attempt to
4984 		 * select.  In target mode, this is the initiator of
4985 		 * the original command.
4986 		 */
4987 		our_id = target_id;
4988 		target_id = ccb->csio.init_id;
4989 		/* FALLTHROUGH */
4990 	}
4991 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
4992 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
4993 	{
4994 		struct	   scb *scb;
4995 		struct	   hardware_scb *hscb;
4996 		struct	   ahc_initiator_tinfo *tinfo;
4997 		struct	   tmode_tstate *tstate;
4998 		uint16_t  mask;
4999 
5000 		/*
5001 		 * get an scb to use.
5002 		 */
5003 		if ((scb = ahcgetscb(ahc)) == NULL) {
5004 			int s;
5005 
5006 			s = splcam();
5007 			ahc->flags |= AHC_RESOURCE_SHORTAGE;
5008 			splx(s);
5009 			xpt_freeze_simq(ahc->sim, /*count*/1);
5010 			ahcsetccbstatus(ccb, CAM_REQUEUE_REQ);
5011 			xpt_done(ccb);
5012 			return;
5013 		}
5014 
5015 		hscb = scb->hscb;
5016 
5017 		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
5018 			  ("start scb(%p)\n", scb));
5019 		scb->ccb = ccb;
5020 		/*
5021 		 * So we can find the SCB when an abort is requested
5022 		 */
5023 		ccb->ccb_h.ccb_scb_ptr = scb;
5024 		ccb->ccb_h.ccb_ahc_ptr = ahc;
5025 
5026 		/*
5027 		 * Put all the arguments for the xfer in the scb
5028 		 */
5029 		hscb->control = 0;
5030 		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
5031 		hscb->lun = ccb->ccb_h.target_lun;
5032 		mask = SCB_GET_TARGET_MASK(ahc, scb);
5033 		tinfo = ahc_fetch_transinfo(ahc, SIM_CHANNEL(ahc, sim), our_id,
5034 					    target_id, &tstate);
5035 
5036 		hscb->scsirate = tinfo->scsirate;
5037 		hscb->scsioffset = tinfo->current.offset;
5038 		if ((tstate->ultraenb & mask) != 0)
5039 			hscb->control |= ULTRAENB;
5040 
5041 		if ((tstate->discenable & mask) != 0
5042 		 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
5043 			hscb->control |= DISCENB;
5044 
5045 		if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
5046 		 && (tinfo->current.width != 0 || tinfo->current.period != 0)) {
5047 			scb->flags |= SCB_NEGOTIATE;
5048 			hscb->control |= MK_MESSAGE;
5049 		}
5050 
5051 		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
5052 			hscb->cdb_len = 0;
5053 			scb->flags |= SCB_DEVICE_RESET;
5054 			hscb->control |= MK_MESSAGE;
5055 			ahc_execute_scb(scb, NULL, 0, 0);
5056 		} else {
5057 			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
5058 				struct target_data *tdata;
5059 
5060 				tdata = &hscb->shared_data.tdata;
5061 				if (ahc->pending_device == lstate) {
5062 					scb->flags |= SCB_TARGET_IMMEDIATE;
5063 					ahc->pending_device = NULL;
5064 				}
5065 				hscb->control |= TARGET_SCB;
5066 				tdata->target_phases = IDENTIFY_SEEN;
5067 				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
5068 					tdata->target_phases |= SPHASE_PENDING;
5069 					tdata->scsi_status =
5070 					    ccb->csio.scsi_status;
5071 				}
5072 				tdata->initiator_tag = ccb->csio.tag_id;
5073 			}
5074 			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
5075 				hscb->control |= ccb->csio.tag_action;
5076 
5077 			ahc_setup_data(ahc, &ccb->csio, scb);
5078 		}
5079 		break;
5080 	}
5081 	case XPT_NOTIFY_ACK:
5082 	case XPT_IMMED_NOTIFY:
5083 	{
5084 		struct	   tmode_tstate *tstate;
5085 		struct	   tmode_lstate *lstate;
5086 		cam_status status;
5087 
5088 		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
5089 					     &lstate, TRUE);
5090 
5091 		if (status != CAM_REQ_CMP) {
5092 			ccb->ccb_h.status = status;
5093 			xpt_done(ccb);
5094 			break;
5095 		}
5096 		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
5097 				  sim_links.sle);
5098 		ccb->ccb_h.status = CAM_REQ_INPROG;
5099 		ahc_send_lstate_events(ahc, lstate);
5100 		break;
5101 	}
5102 	case XPT_EN_LUN:		/* Enable LUN as a target */
5103 		ahc_handle_en_lun(ahc, sim, ccb);
5104 		xpt_done(ccb);
5105 		break;
5106 	case XPT_ABORT:			/* Abort the specified CCB */
5107 	{
5108 		ahc_abort_ccb(ahc, sim, ccb);
5109 		break;
5110 	}
5111 	case XPT_SET_TRAN_SETTINGS:
5112 	{
5113 		struct	  ahc_devinfo devinfo;
5114 		struct	  ccb_trans_settings *cts;
5115 		struct	  ahc_initiator_tinfo *tinfo;
5116 		struct	  tmode_tstate *tstate;
5117 		uint16_t *discenable;
5118 		uint16_t *tagenable;
5119 		u_int	  update_type;
5120 		int	  s;
5121 
5122 		cts = &ccb->cts;
5123 		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
5124 				    cts->ccb_h.target_id,
5125 				    cts->ccb_h.target_lun,
5126 				    SIM_CHANNEL(ahc, sim),
5127 				    ROLE_UNKNOWN);
5128 		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
5129 					    devinfo.our_scsiid,
5130 					    devinfo.target, &tstate);
5131 		update_type = 0;
5132 		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
5133 			update_type |= AHC_TRANS_GOAL;
5134 			discenable = &tstate->discenable;
5135 			tagenable = &tstate->tagenable;
5136 		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
5137 			update_type |= AHC_TRANS_USER;
5138 			discenable = &ahc->user_discenable;
5139 			tagenable = &ahc->user_tagenable;
5140 		} else {
5141 			ccb->ccb_h.status = CAM_REQ_INVALID;
5142 			xpt_done(ccb);
5143 			break;
5144 		}
5145 
5146 		s = splcam();
5147 
5148 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
5149 			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
5150 				*discenable |= devinfo.target_mask;
5151 			else
5152 				*discenable &= ~devinfo.target_mask;
5153 		}
5154 
5155 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
5156 			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
5157 				*tagenable |= devinfo.target_mask;
5158 			else
5159 				*tagenable &= ~devinfo.target_mask;
5160 		}
5161 
5162 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
5163 			ahc_validate_width(ahc, &cts->bus_width);
5164 			ahc_set_width(ahc, &devinfo, cts->ccb_h.path,
5165 				      cts->bus_width, update_type,
5166 				      /*paused*/FALSE);
5167 		}
5168 
5169 		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
5170 			if (update_type == AHC_TRANS_USER)
5171 				cts->sync_offset = tinfo->user.offset;
5172 			else
5173 				cts->sync_offset = tinfo->goal.offset;
5174 		}
5175 
5176 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
5177 			if (update_type == AHC_TRANS_USER)
5178 				cts->sync_period = tinfo->user.period;
5179 			else
5180 				cts->sync_period = tinfo->goal.period;
5181 		}
5182 
5183 		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
5184 		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
5185 			struct ahc_syncrate *syncrate;
5186 			u_int ppr_options;
5187 			u_int maxsync;
5188 
5189 			if ((ahc->features & AHC_ULTRA2) != 0)
5190 				maxsync = AHC_SYNCRATE_DT;
5191 			else if ((ahc->features & AHC_ULTRA) != 0)
5192 				maxsync = AHC_SYNCRATE_ULTRA;
5193 			else
5194 				maxsync = AHC_SYNCRATE_FAST;
5195 
5196 			ppr_options = 0;
5197 			if (cts->sync_period <= 9)
5198 				ppr_options = MSG_EXT_PPR_DT_REQ;
5199 
5200 			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
5201 						     &ppr_options,
5202 						     maxsync);
5203 			ahc_validate_offset(ahc, syncrate, &cts->sync_offset,
5204 					    MSG_EXT_WDTR_BUS_8_BIT);
5205 
5206 			/* We use a period of 0 to represent async */
5207 			if (cts->sync_offset == 0) {
5208 				cts->sync_period = 0;
5209 				ppr_options = 0;
5210 			}
5211 
5212 			if (ppr_options == MSG_EXT_PPR_DT_REQ
5213 			 && tinfo->user.transport_version >= 3) {
5214 				tinfo->goal.transport_version =
5215 				    tinfo->user.transport_version;
5216 				tinfo->current.transport_version =
5217 				    tinfo->user.transport_version;
5218 			}
5219 
5220 			ahc_set_syncrate(ahc, &devinfo, cts->ccb_h.path,
5221 					 syncrate, cts->sync_period,
5222 					 cts->sync_offset, ppr_options,
5223 					 update_type, /*paused*/FALSE);
5224 		}
5225 
5226 		splx(s);
5227 		ccb->ccb_h.status = CAM_REQ_CMP;
5228 		xpt_done(ccb);
5229 		break;
5230 	}
5231 	case XPT_GET_TRAN_SETTINGS:
5232 	/* Get default/user set transfer settings for the target */
5233 	{
5234 		struct	ahc_devinfo devinfo;
5235 		struct	ccb_trans_settings *cts;
5236 		struct	ahc_initiator_tinfo *targ_info;
5237 		struct	tmode_tstate *tstate;
5238 		struct	ahc_transinfo *tinfo;
5239 		int	s;
5240 
5241 		cts = &ccb->cts;
5242 		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
5243 				    cts->ccb_h.target_id,
5244 				    cts->ccb_h.target_lun,
5245 				    SIM_CHANNEL(ahc, sim),
5246 				    ROLE_UNKNOWN);
5247 		targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
5248 						devinfo.our_scsiid,
5249 						devinfo.target, &tstate);
5250 
5251 		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
5252 			tinfo = &targ_info->current;
5253 		else
5254 			tinfo = &targ_info->user;
5255 
5256 		s = splcam();
5257 
5258 		cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
5259 		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
5260 			if ((ahc->user_discenable & devinfo.target_mask) != 0)
5261 				cts->flags |= CCB_TRANS_DISC_ENB;
5262 
5263 			if ((ahc->user_tagenable & devinfo.target_mask) != 0)
5264 				cts->flags |= CCB_TRANS_TAG_ENB;
5265 		} else {
5266 			if ((tstate->discenable & devinfo.target_mask) != 0)
5267 				cts->flags |= CCB_TRANS_DISC_ENB;
5268 
5269 			if ((tstate->tagenable & devinfo.target_mask) != 0)
5270 				cts->flags |= CCB_TRANS_TAG_ENB;
5271 		}
5272 		cts->sync_period = tinfo->period;
5273 		cts->sync_offset = tinfo->offset;
5274 		cts->bus_width = tinfo->width;
5275 
5276 		splx(s);
5277 
5278 		cts->valid = CCB_TRANS_SYNC_RATE_VALID
5279 			   | CCB_TRANS_SYNC_OFFSET_VALID
5280 			   | CCB_TRANS_BUS_WIDTH_VALID
5281 			   | CCB_TRANS_DISC_VALID
5282 			   | CCB_TRANS_TQ_VALID;
5283 
5284 		ccb->ccb_h.status = CAM_REQ_CMP;
5285 		xpt_done(ccb);
5286 		break;
5287 	}
5288 	case XPT_CALC_GEOMETRY:
5289 	{
5290 		struct	  ccb_calc_geometry *ccg;
5291 		uint32_t size_mb;
5292 		uint32_t secs_per_cylinder;
5293 		int	  extended;
5294 
5295 		ccg = &ccb->ccg;
5296 		size_mb = ccg->volume_size
5297 			/ ((1024L * 1024L) / ccg->block_size);
5298 		extended = SIM_IS_SCSIBUS_B(ahc, sim)
5299 			? ahc->flags & AHC_EXTENDED_TRANS_B
5300 			: ahc->flags & AHC_EXTENDED_TRANS_A;
5301 
5302 		if (size_mb > 1024 && extended) {
5303 			ccg->heads = 255;
5304 			ccg->secs_per_track = 63;
5305 		} else {
5306 			ccg->heads = 64;
5307 			ccg->secs_per_track = 32;
5308 		}
5309 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
5310 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
5311 		ccb->ccb_h.status = CAM_REQ_CMP;
5312 		xpt_done(ccb);
5313 		break;
5314 	}
5315 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
5316 	{
5317 		int  found;
5318 
5319 		s = splcam();
5320 		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
5321 					  /*initiate reset*/TRUE);
5322 		splx(s);
5323 		if (1 || bootverbose) {
5324 			xpt_print_path(SIM_PATH(ahc, sim));
5325 			printf("SCSI bus reset delivered. "
5326 			       "%d SCBs aborted.\n", found);
5327 		}
5328 		ccb->ccb_h.status = CAM_REQ_CMP;
5329 		xpt_done(ccb);
5330 		break;
5331 	}
5332 	case XPT_TERM_IO:		/* Terminate the I/O process */
5333 		/* XXX Implement */
5334 		ccb->ccb_h.status = CAM_REQ_INVALID;
5335 		xpt_done(ccb);
5336 		break;
5337 	case XPT_PATH_INQ:		/* Path routing inquiry */
5338 	{
5339 		struct ccb_pathinq *cpi = &ccb->cpi;
5340 
5341 		cpi->version_num = 1; /* XXX??? */
5342 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
5343 		if ((ahc->features & AHC_WIDE) != 0)
5344 			cpi->hba_inquiry |= PI_WIDE_16;
5345 		if ((ahc->flags & AHC_TARGETMODE) != 0) {
5346 			cpi->target_sprt = PIT_PROCESSOR
5347 					 | PIT_DISCONNECT
5348 					 | PIT_TERM_IO;
5349 		} else {
5350 			cpi->target_sprt = 0;
5351 		}
5352 		cpi->hba_misc = (ahc->flags & AHC_INITIATORMODE)
5353 			      ? 0 : PIM_NOINITIATOR;
5354 		cpi->hba_eng_cnt = 0;
5355 		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
5356 		cpi->max_lun = 64;
5357 		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
5358 			cpi->initiator_id = ahc->our_id_b;
5359 			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
5360 				cpi->hba_misc |= PIM_NOBUSRESET;
5361 		} else {
5362 			cpi->initiator_id = ahc->our_id;
5363 			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
5364 				cpi->hba_misc |= PIM_NOBUSRESET;
5365 		}
5366 		cpi->bus_id = cam_sim_bus(sim);
5367 		cpi->base_transfer_speed = 3300;
5368 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5369 		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
5370 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
5371 		cpi->unit_number = cam_sim_unit(sim);
5372 		cpi->ccb_h.status = CAM_REQ_CMP;
5373 		xpt_done(ccb);
5374 		break;
5375 	}
5376 	default:
5377 		ccb->ccb_h.status = CAM_REQ_INVALID;
5378 		xpt_done(ccb);
5379 		break;
5380 	}
5381 }
5382 
5383 static void
5384 ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
5385 {
5386 	struct ahc_softc *ahc;
5387 	struct cam_sim *sim;
5388 
5389 	sim = (struct cam_sim *)callback_arg;
5390 	ahc = (struct ahc_softc *)cam_sim_softc(sim);
5391 	switch (code) {
5392 	case AC_LOST_DEVICE:
5393 	{
5394 		struct	ahc_devinfo devinfo;
5395 		int	s;
5396 
5397 		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
5398 				    xpt_path_target_id(path),
5399 				    xpt_path_lun_id(path),
5400 				    SIM_CHANNEL(ahc, sim),
5401 				    ROLE_UNKNOWN);
5402 
5403 		/*
5404 		 * Revert to async/narrow transfers
5405 		 * for the next device.
5406 		 */
5407 		s = splcam();
5408 		ahc_set_width(ahc, &devinfo, path, MSG_EXT_WDTR_BUS_8_BIT,
5409 			      AHC_TRANS_GOAL|AHC_TRANS_CUR,
5410 			      /*paused*/FALSE);
5411 		ahc_set_syncrate(ahc, &devinfo, path, /*syncrate*/NULL,
5412 				 /*period*/0, /*offset*/0, /*ppr_options*/0,
5413 				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
5414 				 /*paused*/FALSE);
5415 		splx(s);
5416 		break;
5417 	}
5418 	default:
5419 		break;
5420 	}
5421 }
5422 
5423 static void
5424 ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
5425 		int error)
5426 {
5427 	struct	 scb *scb;
5428 	union	 ccb *ccb;
5429 	struct	 ahc_softc *ahc;
5430 	int	 s;
5431 
5432 	scb = (struct scb *)arg;
5433 	ccb = scb->ccb;
5434 	ahc = (struct ahc_softc *)ccb->ccb_h.ccb_ahc_ptr;
5435 
5436 	if (error != 0) {
5437 		if (error == EFBIG)
5438 			ahcsetccbstatus(scb->ccb, CAM_REQ_TOO_BIG);
5439 		else
5440 			ahcsetccbstatus(scb->ccb, CAM_REQ_CMP_ERR);
5441 		if (nsegments != 0)
5442 			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
5443 		ahcfreescb(ahc, scb);
5444 		xpt_done(ccb);
5445 		return;
5446 	}
5447 	if (nsegments != 0) {
5448 		struct	  ahc_dma_seg *sg;
5449 		bus_dma_segment_t *end_seg;
5450 		bus_dmasync_op_t op;
5451 
5452 		end_seg = dm_segs + nsegments;
5453 
5454 		/* Copy the segments into our SG list */
5455 		sg = scb->sg_list;
5456 		while (dm_segs < end_seg) {
5457 			sg->addr = dm_segs->ds_addr;
5458 /* XXX Add in the 5th byte of the address later. */
5459 			sg->len = dm_segs->ds_len;
5460 			sg++;
5461 			dm_segs++;
5462 		}
5463 
5464 		/*
5465 		 * Note where to find the SG entries in bus space.
5466 		 * We also set the full residual flag which the
5467 		 * sequencer will clear as soon as a data transfer
5468 		 * occurs.
5469 		 */
5470 		scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
5471 
5472 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
5473 			op = BUS_DMASYNC_PREREAD;
5474 		else
5475 			op = BUS_DMASYNC_PREWRITE;
5476 
5477 		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
5478 
5479 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
5480 			struct target_data *tdata;
5481 
5482 			tdata = &scb->hscb->shared_data.tdata;
5483 			tdata->target_phases |= DPHASE_PENDING;
5484 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
5485 				tdata->data_phase = P_DATAOUT;
5486 			else
5487 				tdata->data_phase = P_DATAIN;
5488 
5489 			/*
5490 			 * If the transfer is of an odd length and in the
5491 			 * "in" direction (scsi->HostBus), then it may
5492 			 * trigger a bug in the 'WideODD' feature of
5493 			 * non-Ultra2 chips.  Force the total data-length
5494 			 * to be even by adding an extra 1 byte SG
5495 			 * element.  We do this even if we are not currently
5496 			 * negotiated wide as negotiation could occur before
5497 			 * this command is executed.
5498 			 */
5499 			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
5500 			 && (ccb->csio.dxfer_len & 0x1) != 0
5501 			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
5502 
5503 				nsegments++;
5504 				if (nsegments > AHC_NSEG) {
5505 
5506 					ahcsetccbstatus(scb->ccb,
5507 							CAM_REQ_TOO_BIG);
5508 					bus_dmamap_unload(ahc->buffer_dmat,
5509 							  scb->dmamap);
5510 					ahcfreescb(ahc, scb);
5511 					xpt_done(ccb);
5512 					return;
5513 				}
5514 				sg->addr = ahc->dma_bug_buf;
5515 				sg->len = 1;
5516 				sg++;
5517 			}
5518 		}
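		/*
		 * Mark the last segment so the sequencer knows where
		 * the SG list ends.
		 */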
5519 		sg--;
5520 		sg->len |= AHC_DMA_LAST_SEG;
5521 
5522 		/* Copy the first SG into the "current" data pointer area */
5523 		scb->hscb->dataptr = scb->sg_list->addr;
5524 		scb->hscb->datacnt = scb->sg_list->len;
5525 	} else {
5526 		scb->hscb->sgptr = SG_LIST_NULL;
5527 		scb->hscb->dataptr = 0;
5528 		scb->hscb->datacnt = 0;
5529 	}
5530 
5531 	scb->sg_count = nsegments;
5532 
5533 	s = splcam();
5534 
5535 	/*
5536 	 * Last time we need to check if this SCB needs to
5537 	 * be aborted.
5538 	 */
5539 	if (ahc_ccb_status(ccb) != CAM_REQ_INPROG) {
5540 		if (nsegments != 0)
5541 			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
5542 		ahcfreescb(ahc, scb);
5543 		xpt_done(ccb);
5544 		splx(s);
5545 		return;
5546 	}
5547 
5548 	LIST_INSERT_HEAD(&ahc->pending_ccbs, &ccb->ccb_h,
5549 			 sim_links.le);
5550 
5551 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
5552 
5553 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
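	/*
	 * Arm the command timeout.  CAM_TIME_DEFAULT requests are
	 * given a 5 second timeout.
	 */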
5554 		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
5555 			ccb->ccb_h.timeout = 5 * 1000;
5556 		ccb->ccb_h.timeout_ch =
5557 		    timeout(ahc_timeout, (caddr_t)scb,
5558 			    (ccb->ccb_h.timeout * hz) / 1000);
5559 	}
5560 
5561 	/*
5562 	 * We only allow one untagged transaction
5563 	 * per target in the initiator role unless
5564 	 * we are storing a full busy target *lun*
5565 	 * table in SCB space.
5566 	 */
5567 	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
5568 	 && (ahc->features & AHC_SCB_BTT) == 0) {
5569 		struct scb_tailq *untagged_q;
5570 
5571 		untagged_q = &(ahc->untagged_queues[ccb->ccb_h.target_id]);
5572 		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
5573 		if (TAILQ_FIRST(untagged_q) != scb) {
5574 			splx(s);
5575 			return;
5576 		}
5577 	}
5578 	scb->flags |= SCB_ACTIVE;
5579 
5580 	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
5581 #if 0
5582 		printf("Continuing Immediate Command %d:%d\n",
5583 		       ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
5584 #endif
5585 		pause_sequencer(ahc);
5586 		if ((ahc->flags & AHC_PAGESCBS) == 0)
5587 			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
5588 		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
5589 		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
5590 		unpause_sequencer(ahc);
5591 	} else {
5592 		ahc_queue_scb(ahc, scb);
5593 	}
5594 
5595 	splx(s);
5596 }
5597 
5598 static void
5599 ahc_poll(struct cam_sim *sim)
5600 {
5601 	ahc_intr(cam_sim_softc(sim));
5602 }
5603 
5604 static void
5605 ahc_setup_data(struct ahc_softc *ahc, struct ccb_scsiio *csio,
5606 	       struct scb *scb)
5607 {
5608 	struct hardware_scb *hscb;
5609 	struct ccb_hdr *ccb_h;
5610 
5611 	hscb = scb->hscb;
5612 	ccb_h = &csio->ccb_h;
5613 
5614 	if (ccb_h->func_code == XPT_SCSI_IO) {
5615 		hscb->cdb_len = csio->cdb_len;
5616 		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
5617 
5618 			if (hscb->cdb_len > sizeof(hscb->cdb32)
5619 			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
5620 				ahcsetccbstatus(scb->ccb, CAM_REQ_INVALID);
5621 				xpt_done(scb->ccb);
5622 				ahcfreescb(ahc, scb);
5623 				return;
5624 			}
5625 			if (hscb->cdb_len > 12) {
5626 				memcpy(hscb->cdb32,
5627 				       csio->cdb_io.cdb_ptr,
5628 				       hscb->cdb_len);
5629 				if ((ahc->flags & AHC_CMD_CHAN) == 0) {
5630 					hscb->shared_data.cdb_ptr =
5631 					    scb->cdb32_busaddr;
5632 				}
5633 			} else {
5634 				memcpy(hscb->shared_data.cdb,
5635 				       csio->cdb_io.cdb_ptr,
5636 				       hscb->cdb_len);
5637 			}
5638 		} else {
5639 			if (hscb->cdb_len > 12) {
5640 				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
5641 				       hscb->cdb_len);
5642 				if ((ahc->flags & AHC_CMD_CHAN) == 0) {
5643 					hscb->shared_data.cdb_ptr =
5644 					    scb->cdb32_busaddr;
5645 				}
5646 			} else {
5647 				memcpy(hscb->shared_data.cdb,
5648 				       csio->cdb_io.cdb_bytes,
5649 				       hscb->cdb_len);
5650 			}
5651 		}
5652 	}
5653 
5654 	/* Only use S/G if there is a transfer */
5655 	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
5656 		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
5657 			/* We've been given a pointer to a single buffer */
5658 			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
5659 				int s;
5660 				int error;
5661 
5662 				s = splsoftvm();
5663 				error = bus_dmamap_load(ahc->buffer_dmat,
5664 							scb->dmamap,
5665 							csio->data_ptr,
5666 							csio->dxfer_len,
5667 							ahc_execute_scb,
5668 							scb, /*flags*/0);
5669 				if (error == EINPROGRESS) {
5670 					/*
5671 					 * So as to maintain ordering,
5672 					 * freeze the controller queue
5673 					 * until our mapping is
5674 					 * returned.
5675 					 */
5676 					xpt_freeze_simq(ahc->sim,
5677 							/*count*/1);
5678 					scb->ccb->ccb_h.status |=
5679 					    CAM_RELEASE_SIMQ;
5680 				}
5681 				splx(s);
5682 			} else {
5683 				struct bus_dma_segment seg;
5684 
5685 				/* Pointer to physical buffer */
5686 				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
5687 					panic("ahc_setup_data - Transfer size "
5688 					      "larger than device max");
5689 
5690 				seg.ds_addr = (bus_addr_t)csio->data_ptr;
5691 				seg.ds_len = csio->dxfer_len;
5692 				ahc_execute_scb(scb, &seg, 1, 0);
5693 			}
5694 		} else {
5695 			struct bus_dma_segment *segs;
5696 
5697 			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
5698 				panic("ahc_setup_data - Physical segment "
5699 				      "pointers unsupported");
5700 
5701 			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
5702 				panic("ahc_setup_data - Virtual segment "
5703 				      "addresses unsupported");
5704 
5705 			/* Just use the segments provided */
5706 			segs = (struct bus_dma_segment *)csio->data_ptr;
5707 			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
5708 		}
5709 	} else {
5710 		ahc_execute_scb(scb, NULL, 0, 0);
5711 	}
5712 }
5713 
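/*
 * Requeue any transactions still sitting in the QINFIFO that are
 * destined for the device referenced by the given path.
 */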
5714 static void
5715 ahc_freeze_devq(struct ahc_softc *ahc, struct cam_path *path)
5716 {
5717 	int	target;
5718 	char	channel;
5719 	int	lun;
5720 
5721 	target = xpt_path_target_id(path);
5722 	lun = xpt_path_lun_id(path);
5723 	channel = xpt_path_sim(path)->bus_id == 0 ? 'A' : 'B';
5724 
5725 	ahc_search_qinfifo(ahc, target, channel, lun,
5726 			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
5727 			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5728 }
5729 
5730 static void
5731 ahcallocscbs(struct ahc_softc *ahc)
5732 {
5733 	struct scb_data *scb_data;
5734 	struct scb *next_scb;
5735 	struct sg_map_node *sg_map;
5736 	bus_addr_t physaddr;
5737 	struct ahc_dma_seg *segs;
5738 	int newcount;
5739 	int i;
5740 
5741 	scb_data = ahc->scb_data;
5742 	if (scb_data->numscbs >= AHC_SCB_MAX)
5743 		/* Can't allocate any more */
5744 		return;
5745 
5746 	next_scb = &scb_data->scbarray[scb_data->numscbs];
5747 
5748 	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
5749 
5750 	if (sg_map == NULL)
5751 		return;
5752 
5753 	/* Allocate S/G space for the next batch of SCBs */
5754 	if (bus_dmamem_alloc(scb_data->sg_dmat, (void **)&sg_map->sg_vaddr,
5755 			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
5756 		free(sg_map, M_DEVBUF);
5757 		return;
5758 	}
5759 
5760 	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
5761 
5762 	bus_dmamap_load(scb_data->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
5763 			PAGE_SIZE, ahcdmamapcb, &sg_map->sg_physaddr,
5764 			/*flags*/0);
5765 
5766 	segs = sg_map->sg_vaddr;
5767 	physaddr = sg_map->sg_physaddr;
5768 
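	/* Each page of S/G space holds the SG lists for this many SCBs. */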
5769 	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
5770 	for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) {
5771 		int error;
5772 
5773 		next_scb->sg_list = segs;
5774 		/*
5775 		 * The sequencer always starts with the second entry.
5776 		 * The first entry is embedded in the scb.
5777 		 */
5778 		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
5779 		next_scb->flags = SCB_FREE;
5780 		error = bus_dmamap_create(ahc->buffer_dmat, /*flags*/0,
5781 					  &next_scb->dmamap);
5782 		if (error != 0)
5783 			break;
5784 		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
5785 		next_scb->hscb->tag = ahc->scb_data->numscbs;
5786 		next_scb->cdb32_busaddr =
5787 		    ahc_hscb_busaddr(ahc, next_scb->hscb->tag)
5788 		  + offsetof(struct hardware_scb, cdb32);
5789 		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
5790 				  next_scb, links.sle);
5791 		segs += AHC_NSEG;
5792 		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
5793 		next_scb++;
5794 		ahc->scb_data->numscbs++;
5795 	}
5796 }
5797 
5798 #ifdef AHC_DUMP_SEQ
5799 static void
5800 ahc_dumpseq(struct ahc_softc* ahc)
5801 {
5802 	int i;
5803 	int max_prog;
5804 
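	/* The amount of sequencer program RAM varies by chip family. */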
5805 	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
5806 		max_prog = 448;
5807 	else if ((ahc->features & AHC_ULTRA2) != 0)
5808 		max_prog = 768;
5809 	else
5810 		max_prog = 512;
5811 
5812 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5813 	ahc_outb(ahc, SEQADDR0, 0);
5814 	ahc_outb(ahc, SEQADDR1, 0);
5815 	for (i = 0; i < max_prog; i++) {
5816 		uint8_t ins_bytes[4];
5817 
5818 		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
5819 		printf("0x%08x\n", ins_bytes[0] << 24
5820 				 | ins_bytes[1] << 16
5821 				 | ins_bytes[2] << 8
5822 				 | ins_bytes[3]);
5823 	}
5824 }
5825 #endif
5826 
5827 static void
5828 ahc_loadseq(struct ahc_softc *ahc)
5829 {
5830 	struct patch *cur_patch;
5831 	u_int i;
5832 	int downloaded;
5833 	u_int skip_addr;
5834 	uint8_t download_consts[2];
5835 
5836 	/* Setup downloadable constant table */
5837 	download_consts[QOUTFIFO_OFFSET] = 0;
5838 	if (ahc->targetcmds != NULL)
5839 		download_consts[QOUTFIFO_OFFSET] += 32;
5840 	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
5841 
5842 	cur_patch = patches;
5843 	downloaded = 0;
5844 	skip_addr = 0;
5845 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5846 	ahc_outb(ahc, SEQADDR0, 0);
5847 	ahc_outb(ahc, SEQADDR1, 0);
5848 
5849 	for (i = 0; i < sizeof(seqprog)/4; i++) {
5850 		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
5851 			/*
5852 			 * Don't download this instruction as it
5853 			 * is in a patch that was removed.
5854 			 */
5855 			continue;
5856 		}
5857 		ahc_download_instr(ahc, i, download_consts);
5858 		downloaded++;
5859 	}
5860 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
5861 	restart_sequencer(ahc);
5862 
5863 	if (bootverbose)
5864 		printf(" %d instructions downloaded\n", downloaded);
5865 }
5866 
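/*
 * Determine whether the instruction at start_instr should be downloaded,
 * advancing *start_patch past any patches whose predicate functions
 * reject them.  Returns 0 while instructions are being skipped.
 */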
5867 static int
5868 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
5869 		u_int start_instr, u_int *skip_addr)
5870 {
5871 	struct	patch *cur_patch;
5872 	struct	patch *last_patch;
5873 	u_int	num_patches;
5874 
5875 	num_patches = sizeof(patches)/sizeof(struct patch);
5876 	last_patch = &patches[num_patches];
5877 	cur_patch = *start_patch;
5878 
5879 	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
5880 
5881 		if (cur_patch->patch_func(ahc) == 0) {
5882 
5883 			/* Start rejecting code */
5884 			*skip_addr = start_instr + cur_patch->skip_instr;
5885 			cur_patch += cur_patch->skip_patch;
5886 		} else {
5887 			/* Accepted this patch.  Advance to the next
5888 			 * one and wait for our instruction pointer to
5889 			 * hit this point.
5890 			 */
5891 			cur_patch++;
5892 		}
5893 	}
5894 
5895 	*start_patch = cur_patch;
5896 	if (start_instr < *skip_addr)
5897 		/* Still skipping */
5898 		return (0);
5899 
5900 	return (1);
5901 }
5902 
5903 static void
5904 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
5905 {
5906 	union	ins_formats instr;
5907 	struct	ins_format1 *fmt1_ins;
5908 	struct	ins_format3 *fmt3_ins;
5909 	u_int	opcode;
5910 
5911 	/* Structure copy */
5912 	instr = *(union ins_formats*)&seqprog[instrptr * 4];
5913 
5914 #if BYTE_ORDER == BIG_ENDIAN
5915 	opcode = instr.format.bytes[0];
5916 	instr.format.bytes[0] = instr.format.bytes[3];
5917 	instr.format.bytes[3] = opcode;
5918 	opcode = instr.format.bytes[1];
5919 	instr.format.bytes[1] = instr.format.bytes[2];
5920 	instr.format.bytes[2] = opcode;
5921 #endif
5922 
5923 	fmt1_ins = &instr.format1;
5924 	fmt3_ins = NULL;
5925 
5926 	/* Pull the opcode */
5927 	opcode = instr.format1.opcode;
5928 	switch (opcode) {
5929 	case AIC_OP_JMP:
5930 	case AIC_OP_JC:
5931 	case AIC_OP_JNC:
5932 	case AIC_OP_CALL:
5933 	case AIC_OP_JNE:
5934 	case AIC_OP_JNZ:
5935 	case AIC_OP_JE:
5936 	case AIC_OP_JZ:
5937 	{
5938 		struct patch *cur_patch;
5939 		int address_offset;
5940 		u_int address;
5941 		u_int skip_addr;
5942 		u_int i;
5943 
5944 		fmt3_ins = &instr.format3;
5945 		address_offset = 0;
5946 		address = fmt3_ins->address;
5947 		cur_patch = patches;
5948 		skip_addr = 0;
5949 
5950 		for (i = 0; i < address;) {
5951 
5952 			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
5953 
5954 			if (skip_addr > i) {
5955 				int end_addr;
5956 
5957 				end_addr = MIN(address, skip_addr);
5958 				address_offset += end_addr - i;
5959 				i = skip_addr;
5960 			} else {
5961 				i++;
5962 			}
5963 		}
5964 		address -= address_offset;
5965 		fmt3_ins->address = address;
5966 		/* FALLTHROUGH */
5967 	}
5968 	case AIC_OP_OR:
5969 	case AIC_OP_AND:
5970 	case AIC_OP_XOR:
5971 	case AIC_OP_ADD:
5972 	case AIC_OP_ADC:
5973 	case AIC_OP_BMOV:
5974 		if (fmt1_ins->parity != 0) {
5975 			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
5976 		}
5977 		fmt1_ins->parity = 0;
5978 		/* FALLTHROUGH */
5979 	case AIC_OP_ROL:
5980 		if ((ahc->features & AHC_ULTRA2) != 0) {
5981 			int i, count;
5982 
5983 			/* Calculate odd parity for the instruction */
5984 			for (i = 0, count = 0; i < 31; i++) {
5985 				uint32_t mask;
5986 
5987 				mask = 0x01 << i;
5988 				if ((instr.integer & mask) != 0)
5989 					count++;
5990 			}
5991 			if ((count & 0x01) == 0)
5992 				instr.format1.parity = 1;
5993 		} else {
5994 			/* Compress the instruction for older sequencers */
5995 			if (fmt3_ins != NULL) {
5996 				instr.integer =
5997 					fmt3_ins->immediate
5998 				      | (fmt3_ins->source << 8)
5999 				      | (fmt3_ins->address << 16)
6000 				      |	(fmt3_ins->opcode << 25);
6001 			} else {
6002 				instr.integer =
6003 					fmt1_ins->immediate
6004 				      | (fmt1_ins->source << 8)
6005 				      | (fmt1_ins->destination << 16)
6006 				      |	(fmt1_ins->ret << 24)
6007 				      |	(fmt1_ins->opcode << 25);
6008 			}
6009 		}
6010 #if BYTE_ORDER == BIG_ENDIAN
6011 		opcode = instr.format.bytes[0];
6012 		instr.format.bytes[0] = instr.format.bytes[3];
6013 		instr.format.bytes[3] = opcode;
6014 		opcode = instr.format.bytes[1];
6015 		instr.format.bytes[1] = instr.format.bytes[2];
6016 		instr.format.bytes[2] = opcode;
6017 #endif
6018 		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
6019 		break;
6020 	default:
6021 		panic("Unknown opcode encountered in seq program");
6022 		break;
6023 	}
6024 }
6025 
6026 static void
6027 ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb)
6028 {
6029 	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
6030 		struct ccb_hdr *ccbh;
6031 
6032 		scb->flags |= SCB_RECOVERY_SCB;
6033 
6034 		/*
6035 		 * Take all queued, but not sent, SCBs out of the equation.
6036 		 * Also ensure that no new CCBs are queued to us while we
6037 		 * try to fix this problem.
6038 		 */
6039 		if ((scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
6040 			xpt_freeze_simq(ahc->sim, /*count*/1);
6041 			scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
6042 		}
6043 
6044 		/*
6045 		 * Go through all of our pending SCBs and remove
6046 		 * any scheduled timeouts for them.  We will reschedule
6047 		 * them after we've successfully fixed this problem.
6048 		 */
6049 		ccbh = ahc->pending_ccbs.lh_first;
6050 		while (ccbh != NULL) {
6051 			struct scb *pending_scb;
6052 
6053 			pending_scb = (struct scb *)ccbh->ccb_scb_ptr;
6054 			untimeout(ahc_timeout, pending_scb, ccbh->timeout_ch);
6055 			ccbh = ccbh->sim_links.le.le_next;
6056 		}
6057 	}
6058 }
6059 
6060 static void
6061 ahc_timeout(void *arg)
6062 {
6063 	struct	scb *scb;
6064 	struct	ahc_softc *ahc;
6065 	int	s, found;
6066 	u_int	last_phase;
6067 	int	target;
6068 	int	lun;
6069 	int	i;
6070 	char	channel;
6071 
6072 	scb = (struct scb *)arg;
6073 	ahc = (struct ahc_softc *)scb->ccb->ccb_h.ccb_ahc_ptr;
6074 
6075 	s = splcam();
6076 
6077 	/*
6078 	 * Ensure that the card doesn't do anything
6079 	 * behind our back.  Also make sure that we
6080 	 * didn't "just" miss an interrupt that would
6081 	 * affect this timeout.
6082 	 */
6083 	do {
6084 		ahc_intr(ahc);
6085 		pause_sequencer(ahc);
6086 	} while (ahc_inb(ahc, INTSTAT) & INT_PEND);
6087 
6088 	xpt_print_path(scb->ccb->ccb_h.path);
6089 	if ((scb->flags & SCB_ACTIVE) == 0) {
6090 		/* Previous timeout took care of me already */
6091 		printf("Timedout SCB %d handled by another timeout\n",
6092 		       scb->hscb->tag);
6093 		unpause_sequencer(ahc);
6094 		splx(s);
6095 		return;
6096 	}
6097 
6098 	target = SCB_GET_TARGET(ahc, scb);
6099 	channel = SCB_GET_CHANNEL(ahc, scb);
6100 	lun = SCB_GET_LUN(scb);
6101 
6102 	printf("SCB 0x%x - timed out ", scb->hscb->tag);
6103 	/*
6104 	 * Take a snapshot of the bus state and print out
6105 	 * some information so we can track down driver bugs.
6106 	 */
6107 	last_phase = ahc_inb(ahc, LASTPHASE);
6108 
6109 	for (i = 0; i < num_phases; i++) {
6110 		if (last_phase == phase_table[i].phase)
6111 			break;
6112 	}
6113 	printf("%s", phase_table[i].phasemsg);
6114 
6115 	printf(", SEQADDR == 0x%x\n",
6116 	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
6117 
6118 #if 0
6119 	printf("SSTAT1 == 0x%x\n", ahc_inb(ahc, SSTAT1));
6120 	printf("SSTAT3 == 0x%x\n", ahc_inb(ahc, SSTAT3));
6121 	printf("SCSIPHASE == 0x%x\n", ahc_inb(ahc, SCSIPHASE));
6122 	printf("SCSIRATE == 0x%x\n", ahc_inb(ahc, SCSIRATE));
6123 	printf("SCSIOFFSET == 0x%x\n", ahc_inb(ahc, SCSIOFFSET));
6124 	printf("SEQ_FLAGS == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS));
6125 	printf("SCB_DATAPTR == 0x%x\n", ahc_inb(ahc, SCB_DATAPTR)
6126 				      | ahc_inb(ahc, SCB_DATAPTR + 1) << 8
6127 				      | ahc_inb(ahc, SCB_DATAPTR + 2) << 16
6128 				      | ahc_inb(ahc, SCB_DATAPTR + 3) << 24);
6129 	printf("SCB_DATACNT == 0x%x\n", ahc_inb(ahc, SCB_DATACNT)
6130 				      | ahc_inb(ahc, SCB_DATACNT + 1) << 8
6131 				      | ahc_inb(ahc, SCB_DATACNT + 2) << 16);
6132 	printf("SCB_SGCOUNT == 0x%x\n", ahc_inb(ahc, SCB_SGCOUNT));
6133 	printf("CCSCBCTL == 0x%x\n", ahc_inb(ahc, CCSCBCTL));
6134 	printf("CCSCBCNT == 0x%x\n", ahc_inb(ahc, CCSCBCNT));
6135 	printf("DFCNTRL == 0x%x\n", ahc_inb(ahc, DFCNTRL));
6136 	printf("DFSTATUS == 0x%x\n", ahc_inb(ahc, DFSTATUS));
6137 	printf("CCHCNT == 0x%x\n", ahc_inb(ahc, CCHCNT));
6138 	if (scb->sg_count > 0) {
6139 		for (i = 0; i < scb->sg_count; i++) {
6140 			printf("sg[%d] - Addr 0x%x : Length %d\n",
6141 			       i,
6142 			       scb->sg_list[i].addr,
6143 			       scb->sg_list[i].len);
6144 		}
6145 	}
6146 #endif
6147 	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
6148 		/*
6149 		 * Been down this road before.
6150 		 * Do a full bus reset.
6151 		 */
6152 bus_reset:
6153 		ahcsetccbstatus(scb->ccb, CAM_CMD_TIMEOUT);
6154 		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
6155 		printf("%s: Issued Channel %c Bus Reset. "
6156 		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
6157 	} else {
6158 		/*
6159 		 * If we are a target, transition to bus free and report
6160 		 * the timeout.
6161 		 *
6162 		 * The target/initiator that is holding up the bus may not
6163 		 * be the same as the one that triggered this timeout
6164 		 * (different commands have different timeout lengths).
6165 		 * If the bus is idle and we are acting as the initiator
6166 		 * for this request, queue a BDR message to the timed out
6167 		 * target.  Otherwise, if the timed out transaction is
6168 		 * active:
6169 		 *   Initiator transaction:
6170 		 *	Stuff the message buffer with a BDR message and assert
6171 		 *	ATN in the hopes that the target will let go of the bus
6172 		 *	and go to the mesgout phase.  If this fails, we'll
6173 		 *	get another timeout 2 seconds later which will attempt
6174 		 *	a bus reset.
6175 		 *
6176 		 *   Target transaction:
6177 		 *	Transition to BUS FREE and report the error.
6178 		 *	It's good to be the target!
6179 		 */
6180 		u_int active_scb_index;
6181 
6182 		active_scb_index = ahc_inb(ahc, SCB_TAG);
6183 
6184 		if (last_phase != P_BUSFREE
6185 		  && (active_scb_index < ahc->scb_data->numscbs)) {
6186 			struct scb *active_scb;
6187 
6188 			/*
6189 			 * If the active SCB is not from our device,
6190 			 * assume that another device is hogging the bus
6191 			 * and wait for its timeout to expire before
6192 			 * taking additional action.
6193 			 */
6194 			active_scb = &ahc->scb_data->scbarray[active_scb_index];
6195 			if (active_scb->hscb->scsiid != scb->hscb->scsiid
6196 			 || active_scb->hscb->lun != scb->hscb->lun) {
6197 				struct	ccb_hdr *ccbh;
6198 				u_int	newtimeout;
6199 
6200 				xpt_print_path(scb->ccb->ccb_h.path);
6201 				printf("Other SCB Timeout%s",
6202 			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
6203 				       ? " again\n" : "\n");
6204 				scb->flags |= SCB_OTHERTCL_TIMEOUT;
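				/*
				 * Wait for the longer of the two commands'
				 * timeouts before checking again.
				 */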
6205 				newtimeout = MAX(active_scb->ccb->ccb_h.timeout,
6206 						 scb->ccb->ccb_h.timeout);
6207 				ccbh = &scb->ccb->ccb_h;
6208 				scb->ccb->ccb_h.timeout_ch =
6209 				    timeout(ahc_timeout, scb,
6210 					    (newtimeout * hz) / 1000);
6211 				splx(s);
6212 				return;
6213 			}
6214 
6215 			/* It's us */
6216 			if ((scb->hscb->control & TARGET_SCB) != 0) {
6217 
6218 				/*
6219 				 * Send back any queued up transactions
6220 				 * and properly record the error condition.
6221 				 */
6222 				ahc_freeze_devq(ahc, scb->ccb->ccb_h.path);
6223 				ahcsetccbstatus(scb->ccb, CAM_CMD_TIMEOUT);
6224 				ahc_freeze_ccb(scb->ccb);
6225 				ahc_done(ahc, scb);
6226 
6227 				/* Will clear us from the bus */
6228 				restart_sequencer(ahc);
6229 				return;
6230 			}
6231 
6232 			ahc_set_recoveryscb(ahc, active_scb);
6233 			ahc_outb(ahc, MSG_OUT, MSG_BUS_DEV_RESET);
6234 			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
6235 			xpt_print_path(active_scb->ccb->ccb_h.path);
6236 			printf("BDR message in message buffer\n");
6237 			active_scb->flags |= SCB_DEVICE_RESET;
6238 			active_scb->ccb->ccb_h.timeout_ch =
6239 			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
6240 			unpause_sequencer(ahc);
6241 		} else {
6242 			int	 disconnected;
6243 
6244 			/* XXX Shouldn't panic.  Just punt instead */
6245 			if ((scb->hscb->control & TARGET_SCB) != 0)
6246 				panic("Timed-out target SCB but bus idle");
6247 
6248 			if (last_phase != P_BUSFREE
6249 			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
6250 				/* XXX What happened to the SCB? */
6251 				/* Hung target selection.  Goto busfree */
6252 				printf("%s: Hung target selection\n",
6253 				       ahc_name(ahc));
6254 				restart_sequencer(ahc);
6255 				return;
6256 			}
6257 
6258 			if (ahc_search_qinfifo(ahc, target, channel, lun,
6259 					       scb->hscb->tag, ROLE_INITIATOR,
6260 					       /*status*/0, SEARCH_COUNT) > 0) {
6261 				disconnected = FALSE;
6262 			} else {
6263 				disconnected = TRUE;
6264 			}
6265 
6266 			if (disconnected) {
6267 				u_int active_scb;
6268 
6269 				ahc_set_recoveryscb(ahc, scb);
6270 				/*
6271 				 * Simply set the MK_MESSAGE control bit.
6272 				 */
6273 				scb->hscb->control |= MK_MESSAGE;
6274 				scb->flags |= SCB_QUEUED_MSG
6275 					   |  SCB_DEVICE_RESET;
6276 
6277 				/*
6278 				 * Mark the cached copy of this SCB in the
6279 				 * disconnected list too, so that a reconnect
6280 				 * at this point causes a BDR or abort.
6281 				 */
6282 				active_scb = ahc_inb(ahc, SCBPTR);
6283 				if (ahc_search_disc_list(ahc, target,
6284 							 channel, lun,
6285 							 scb->hscb->tag,
6286 							 /*stop_on_first*/TRUE,
6287 							 /*remove*/FALSE,
6288 							 /*save_state*/FALSE)) {
6289 					u_int scb_control;
6290 
6291 					scb_control = ahc_inb(ahc, SCB_CONTROL);
6292 					scb_control |= MK_MESSAGE;
6293 					ahc_outb(ahc, SCB_CONTROL, scb_control);
6294 				}
6295 				ahc_outb(ahc, SCBPTR, active_scb);
6296 
6297 				/*
6298 				 * Actually re-queue this SCB in case we can
6299 				 * select the device before it reconnects.
6300 				 * Clear out any entries in the QINFIFO first
6301 				 * so we are the next SCB for this target
6302 				 * to run.
6303 				 */
6304 				ahc_search_qinfifo(ahc,
6305 						   SCB_GET_TARGET(ahc, scb),
6306 						   channel, SCB_GET_LUN(scb),
6307 						   SCB_LIST_NULL,
6308 						   ROLE_INITIATOR,
6309 						   CAM_REQUEUE_REQ,
6310 						   SEARCH_COMPLETE);
6311 				xpt_print_path(scb->ccb->ccb_h.path);
6312 				printf("Queuing a BDR SCB\n");
6313 				ahc->qinfifo[ahc->qinfifonext++] =
6314 				    scb->hscb->tag;
6315 				if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6316 					ahc_outb(ahc, HNSCB_QOFF,
6317 						 ahc->qinfifonext);
6318 				} else {
6319 					ahc_outb(ahc, KERNEL_QINPOS,
6320 						 ahc->qinfifonext);
6321 				}
6322 				scb->ccb->ccb_h.timeout_ch =
6323 				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
6324 				unpause_sequencer(ahc);
6325 			} else {
6326 				/* Go "immediately" to the bus reset */
6327 				/* This shouldn't happen */
6328 				ahc_set_recoveryscb(ahc, scb);
6329 				xpt_print_path(scb->ccb->ccb_h.path);
6330 				printf("SCB %d: Immediate reset.  "
6331 					"Flags = 0x%x\n", scb->hscb->tag,
6332 					scb->flags);
6333 				goto bus_reset;
6334 			}
6335 		}
6336 	}
6337 	splx(s);
6338 }
6339 
6340 static int
6341 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
6342 		   int lun, u_int tag, role_t role, uint32_t status,
6343 		   ahc_search_action action)
6344 {
6345 	struct	 scb *scbp;
6346 	uint8_t qinpos;
6347 	uint8_t qintail;
6348 	uint8_t next, prev;
6349 	uint8_t curscbptr;
6350 	int	 found;
6351 
6352 	qinpos = ahc_inb(ahc, QINPOS);
6353 	qintail = ahc->qinfifonext;
6354 	found = 0;
6355 
6356 	if (action == SEARCH_COMPLETE) {
6357 		/*
6358 		 * Don't attempt to run any queued untagged transactions
6359 		 * until we are done with the abort process.
6360 		 */
6361 		ahc_freeze_untagged_queues(ahc);
6362 	}
6363 
6364 	/*
6365 	 * Start with an empty queue.  Entries that are not chosen
6366 	 * for removal will be re-added to the queue as we go.
6367 	 */
6368 	ahc->qinfifonext = qinpos;
6369 
6370 	while (qinpos != qintail) {
6371 		scbp = &ahc->scb_data->scbarray[ahc->qinfifo[qinpos]];
6372 		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
6373 			/*
6374 			 * We found an scb that needs to be acted on.
6375 			 */
6376 			found++;
6377 			switch (action) {
6378 			case SEARCH_COMPLETE:
6379 				if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG)
6380 					ahcsetccbstatus(scbp->ccb, status);
6381 				ahc_freeze_ccb(scbp->ccb);
6382 				ahc_done(ahc, scbp);
6383 				break;
6384 			case SEARCH_COUNT:
6385 				ahc->qinfifo[ahc->qinfifonext++] =
6386 				    scbp->hscb->tag;
6387 				break;
6388 			case SEARCH_REMOVE:
6389 				break;
6390 			}
6391 		} else {
6392 			ahc->qinfifo[ahc->qinfifonext++] = scbp->hscb->tag;
6393 		}
6394 		qinpos++;
6395 	}
6396 
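	/* Inform the sequencer of the rebuilt queue's input position. */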
6397 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6398 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
6399 	} else {
6400 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
6401 	}
6402 
6403 	/*
6404 	 * Search waiting for selection list.
6405 	 */
6406 	curscbptr = ahc_inb(ahc, SCBPTR);
6407 	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
6408 	prev = SCB_LIST_NULL;
6409 
6410 	while (next != SCB_LIST_NULL) {
6411 		uint8_t scb_index;
6412 
6413 		ahc_outb(ahc, SCBPTR, next);
6414 		scb_index = ahc_inb(ahc, SCB_TAG);
6415 		if (scb_index >= ahc->scb_data->numscbs) {
6416 			panic("Waiting List inconsistency. "
6417 			      "SCB index == %d, yet numscbs == %d.",
6418 			      scb_index, ahc->scb_data->numscbs);
6419 		}
6420 		scbp = &ahc->scb_data->scbarray[scb_index];
6421 		if (ahc_match_scb(ahc, scbp, target, channel,
6422 				  lun, SCB_LIST_NULL, role)) {
6423 			/*
6424 			 * We found an scb that needs to be acted on.
6425 			 */
6426 			found++;
6427 			switch (action) {
6428 			case SEARCH_REMOVE:
6429 				next = ahc_rem_wscb(ahc, next, prev);
6430 				break;
6431 			case SEARCH_COMPLETE:
6432 				next = ahc_rem_wscb(ahc, next, prev);
6433 				if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG)
6434 					ahcsetccbstatus(scbp->ccb, status);
6435 				ahc_freeze_ccb(scbp->ccb);
6436 				ahc_done(ahc, scbp);
6437 				break;
6438 			case SEARCH_COUNT:
6439 				prev = next;
6440 				next = ahc_inb(ahc, SCB_NEXT);
6441 				break;
6442 			}
6443 		} else {
6444 
6445 			prev = next;
6446 			next = ahc_inb(ahc, SCB_NEXT);
6447 		}
6448 	}
6449 	ahc_outb(ahc, SCBPTR, curscbptr);
6450 
6451 	if (action == SEARCH_COMPLETE)
6452 		ahc_release_untagged_queues(ahc);
6453 	return (found);
6454 }
6455 
6456 
6457 static void
6458 ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
6459 {
6460 	union ccb *abort_ccb;
6461 
6462 	abort_ccb = ccb->cab.abort_ccb;
6463 	switch (abort_ccb->ccb_h.func_code) {
6464 	case XPT_ACCEPT_TARGET_IO:
6465 	case XPT_IMMED_NOTIFY:
6466 	case XPT_CONT_TARGET_IO:
6467 	{
6468 		struct tmode_tstate *tstate;
6469 		struct tmode_lstate *lstate;
6470 		struct ccb_hdr_slist *list;
6471 		cam_status status;
6472 
6473 		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
6474 					     &lstate, TRUE);
6475 
6476 		if (status != CAM_REQ_CMP) {
6477 			ccb->ccb_h.status = status;
6478 			break;
6479 		}
6480 
6481 		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
6482 			list = &lstate->accept_tios;
6483 		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
6484 			list = &lstate->immed_notifies;
6485 		else
6486 			list = NULL;
6487 
6488 		if (list != NULL) {
6489 			struct ccb_hdr *curelm;
6490 			int found;
6491 
6492 			curelm = SLIST_FIRST(list);
6493 			found = 0;
6494 			if (curelm == &abort_ccb->ccb_h) {
6495 				found = 1;
6496 				SLIST_REMOVE_HEAD(list, sim_links.sle);
6497 			} else {
6498 				while(curelm != NULL) {
6499 					struct ccb_hdr *nextelm;
6500 
6501 					nextelm =
6502 					    SLIST_NEXT(curelm, sim_links.sle);
6503 
6504 					if (nextelm == &abort_ccb->ccb_h) {
6505 						found = 1;
6506 						SLIST_NEXT(curelm,
6507 							   sim_links.sle) =
6508 						    SLIST_NEXT(nextelm,
6509 							       sim_links.sle);
6510 						break;
6511 					}
6512 					curelm = nextelm;
6513 				}
6514 			}
6515 
6516 			if (found) {
6517 				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
6518 				xpt_done(abort_ccb);
6519 				ccb->ccb_h.status = CAM_REQ_CMP;
6520 			} else {
6521 				printf("Not found\n");
6522 				ccb->ccb_h.status = CAM_PATH_INVALID;
6523 			}
6524 			break;
6525 		}
6526 		/* FALLTHROUGH */
6527 	}
6528 	case XPT_SCSI_IO:
6529 		/* XXX Fully implement the hard ones */
6530 		ccb->ccb_h.status = CAM_UA_ABORT;
6531 		break;
6532 	default:
6533 		ccb->ccb_h.status = CAM_REQ_INVALID;
6534 		break;
6535 	}
6536 	xpt_done(ccb);
6537 }
6538 
6539 /*
6540  * Abort all SCBs that match the given description (target/channel/lun/tag),
6541  * setting their status to the passed in status if the status has not already
6542  * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
6543  * is paused before it is called.
6544  */
6545 static int
6546 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
6547 	       int lun, u_int tag, role_t role, uint32_t status)
6548 {
6549 	struct	scb *scbp;
6550 	u_int	active_scb;
6551 	int	i;
6552 	int	maxtarget;
6553 	int	found;
6554 
6555 	/*
6556 	 * Don't attempt to run any queued untagged transactions
6557 	 * until we are done with the abort process.
6558 	 */
6559 	ahc_freeze_untagged_queues(ahc);
6560 
6561 	/* restore this when we're done */
6562 	active_scb = ahc_inb(ahc, SCBPTR);
6563 
6564 	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
6565 				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
6566 
6567 	/*
6568 	 * Clean out the busy target table for any untagged commands.
6569 	 */
6570 	i = 0;
6571 	maxtarget = 16;
6572 	if (target != CAM_TARGET_WILDCARD) {
6573 		i = target;
6574 		maxtarget = target + 1;
6575 	}
6576 
6577 	for (; i < maxtarget; i++) {
6578 		u_int scbid;
6579 
6580 		/* XXX Will need lun loop for SCB ram version */
6581 		scbid = ahc_index_busy_tcl(ahc, BUILD_TCL(i << 4, 0),
6582 					   /*unbusy*/FALSE);
6583 		scbp = &ahc->scb_data->scbarray[scbid];
6584 		if (scbid < ahc->scb_data->numscbs
6585 		 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))
6586 			ahc_index_busy_tcl(ahc, BUILD_TCL(i << 4, 0),
6587 					   /*unbusy*/TRUE);
6588 	}
6589 
6590 	/*
6591 	 * Go through the disconnected list and remove any entries we
6592 	 * have queued for completion, 0'ing their control byte too.
6593 	 * We save the active SCB and restore it ourselves, so there
6594 	 * is no reason for this search to restore it too.
6595 	 */
6596 	ahc_search_disc_list(ahc, target, channel, lun, tag,
6597 			     /*stop_on_first*/FALSE, /*remove*/TRUE,
6598 			     /*save_state*/FALSE);
6599 
6600 	/*
6601 	 * Go through the hardware SCB array looking for commands that
6602 	 * were active but not on any list.
6603 	 */
6604 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
6605 		u_int scbid;
6606 
6607 		ahc_outb(ahc, SCBPTR, i);
6608 		scbid = ahc_inb(ahc, SCB_TAG);
6609 		scbp = &ahc->scb_data->scbarray[scbid];
6610 		if (scbid < ahc->scb_data->numscbs
6611 		 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))
6612 			ahc_add_curscb_to_free_list(ahc);
6613 	}
6614 
6615 	/*
6616 	 * Go through the pending CCB list and look for
6617 	 * commands for this target that are still active.
6618 	 * These are other tagged commands that were
6619 	 * disconnected when the reset occurred.
6620 	 */
6621 	{
6622 		struct ccb_hdr *ccb_h;
6623 
6624 		ccb_h = ahc->pending_ccbs.lh_first;
6625 		while (ccb_h != NULL) {
6626 			scbp = (struct scb *)ccb_h->ccb_scb_ptr;
6627 			ccb_h = ccb_h->sim_links.le.le_next;
6628 			if (ahc_match_scb(ahc, scbp, target, channel,
6629 					  lun, tag, role)) {
6630 				if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG)
6631 					ahcsetccbstatus(scbp->ccb, status);
6632 				ahc_freeze_ccb(scbp->ccb);
6633 				ahc_done(ahc, scbp);
6634 				found++;
6635 			}
6636 		}
6637 	}
6638 	ahc_outb(ahc, SCBPTR, active_scb);
6639 	ahc_release_untagged_queues(ahc);
6640 	return found;
6641 }
6642 
6643 static int
6644 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
6645 		     int lun, u_int tag, int stop_on_first, int remove,
6646 		     int save_state)
6647 {
6648 	struct	scb *scbp;
6649 	u_int	next;
6650 	u_int	prev;
6651 	u_int	count;
6652 	u_int	active_scb;
6653 
6654 	count = 0;
6655 	next = ahc_inb(ahc, DISCONNECTED_SCBH);
6656 	prev = SCB_LIST_NULL;
6657 
6658 	if (save_state) {
6659 		/* restore this when we're done */
6660 		active_scb = ahc_inb(ahc, SCBPTR);
6661 	} else
6662 		/* Silence compiler */
6663 		active_scb = SCB_LIST_NULL;
6664 
6665 	while (next != SCB_LIST_NULL) {
6666 		u_int scb_index;
6667 
6668 		ahc_outb(ahc, SCBPTR, next);
6669 		scb_index = ahc_inb(ahc, SCB_TAG);
6670 		if (scb_index >= ahc->scb_data->numscbs) {
6671 			panic("Disconnected List inconsistency. "
6672 			      "SCB index == %d, yet numscbs == %d.",
6673 			      scb_index, ahc->scb_data->numscbs);
6674 		}
6675 
6676 		if (next == prev) {
6677 			panic("Disconnected List Loop. "
6678 			      "cur SCBPTR == %x, prev SCBPTR == %x.",
6679 			      next, prev);
6680 		}
6681 		scbp = &ahc->scb_data->scbarray[scb_index];
6682 		if (ahc_match_scb(ahc, scbp, target, channel, lun,
6683 				  tag, ROLE_INITIATOR)) {
6684 			count++;
6685 			if (remove) {
6686 				next =
6687 				    ahc_rem_scb_from_disc_list(ahc, prev, next);
6688 			} else {
6689 				prev = next;
6690 				next = ahc_inb(ahc, SCB_NEXT);
6691 			}
6692 			if (stop_on_first)
6693 				break;
6694 		} else {
6695 			prev = next;
6696 			next = ahc_inb(ahc, SCB_NEXT);
6697 		}
6698 	}
6699 	if (save_state)
6700 		ahc_outb(ahc, SCBPTR, active_scb);
6701 	return (count);
6702 }
6703 
6704 static u_int
6705 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
6706 {
6707 	u_int next;
6708 
6709 	ahc_outb(ahc, SCBPTR, scbptr);
6710 	next = ahc_inb(ahc, SCB_NEXT);
6711 
6712 	ahc_outb(ahc, SCB_CONTROL, 0);
6713 
6714 	ahc_add_curscb_to_free_list(ahc);
6715 
6716 	if (prev != SCB_LIST_NULL) {
6717 		ahc_outb(ahc, SCBPTR, prev);
6718 		ahc_outb(ahc, SCB_NEXT, next);
6719 	} else
6720 		ahc_outb(ahc, DISCONNECTED_SCBH, next);
6721 
6722 	return (next);
6723 }
6724 
6725 static void
6726 ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
6727 {
6728 	/* Invalidate the tag so that ahc_find_scb doesn't think it's active */
6729 	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
6730 
6731 	ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
6732 	ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
6733 }
6734 
6735 /*
6736  * Manipulate the waiting for selection list and return the
6737  * scb that follows the one that we remove.
6738  */
6739 static u_int
6740 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
6741 {
6742 	u_int curscb, next;
6743 
6744 	/*
6745 	 * Select the SCB we want to abort and
6746 	 * pull the next pointer out of it.
6747 	 */
6748 	curscb = ahc_inb(ahc, SCBPTR);
6749 	ahc_outb(ahc, SCBPTR, scbpos);
6750 	next = ahc_inb(ahc, SCB_NEXT);
6751 
6752 	/* Clear the necessary fields */
6753 	ahc_outb(ahc, SCB_CONTROL, 0);
6754 
6755 	ahc_add_curscb_to_free_list(ahc);
6756 
6757 	/* update the waiting list */
6758 	if (prev == SCB_LIST_NULL) {
6759 		/* First in the list */
6760 		ahc_outb(ahc, WAITING_SCBH, next);
6761 
6762 		/*
6763 		 * Ensure we aren't attempting to perform
6764 		 * selection for this entry.
6765 		 */
6766 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
6767 	} else {
6768 		/*
6769 		 * Select the scb that pointed to us
6770 		 * and update its next pointer.
6771 		 */
6772 		ahc_outb(ahc, SCBPTR, prev);
6773 		ahc_outb(ahc, SCB_NEXT, next);
6774 	}
6775 
6776 	/*
6777 	 * Point us back at the original scb position.
6778 	 */
6779 	ahc_outb(ahc, SCBPTR, curscb);
6780 	return next;
6781 }
6782 
6783 static void
6784 ahc_clear_intstat(struct ahc_softc *ahc)
6785 {
6786 	/* Clear any interrupt conditions this may have caused */
6787 	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
6788 	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
6789 				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
6790 				CLRREQINIT);
6791 	ahc_outb(ahc, CLRINT, CLRSCSIINT);
6792 }
6793 
6794 static void
6795 ahc_reset_current_bus(struct ahc_softc *ahc)
6796 {
6797 	uint8_t scsiseq;
6798 
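	/* Mask reset interrupts while we generate the reset ourselves. */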
6799 	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
6800 	scsiseq = ahc_inb(ahc, SCSISEQ);
6801 	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
6802 	DELAY(AHC_BUSRESET_DELAY);
6803 	/* Turn off the bus reset */
6804 	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
6805 
6806 	ahc_clear_intstat(ahc);
6807 
6808 	/* Re-enable reset interrupts */
6809 	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
6810 }
6811 
6812 static int
6813 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
6814 {
6815 	struct	cam_path *path;
6816 	u_int	initiator, target, max_scsiid;
6817 	u_int	sblkctl;
6818 	u_int	our_id;
6819 	int	found;
6820 	int	restart_needed;
6821 	char	cur_channel;
6822 
6823 	ahc->pending_device = NULL;
6824 
6825 	pause_sequencer(ahc);
6826 
6827 	/*
6828 	 * Run our command complete fifos to ensure that we perform
6829 	 * completion processing on any commands that 'completed'
6830 	 * before the reset occurred.
6831 	 */
6832 	ahc_run_qoutfifo(ahc);
6833 	if ((ahc->flags & AHC_TARGETMODE) != 0) {
6834 		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
6835 	}
6836 
6837 	/*
6838 	 * Reset the bus if we are initiating this reset
6839 	 */
6840 	sblkctl = ahc_inb(ahc, SBLKCTL);
6841 	cur_channel = 'A';
6842 	if ((ahc->features & AHC_TWIN) != 0
6843 	 && ((sblkctl & SELBUSB) != 0))
6844 	    cur_channel = 'B';
6845 	if (cur_channel != channel) {
6846 		/* Case 1: Command for another bus is active.
6847 		 * Stealthily reset the other bus without
6848 		 * upsetting the current bus.
6849 		 */
6850 		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
6851 		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
6852 		ahc_outb(ahc, SCSISEQ,
6853 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
6854 		if (initiate_reset)
6855 			ahc_reset_current_bus(ahc);
6856 		ahc_clear_intstat(ahc);
6857 		ahc_outb(ahc, SBLKCTL, sblkctl);
6858 		restart_needed = FALSE;
6859 	} else {
6860 		/* Case 2: A command from this bus is active or we're idle */
6861 		ahc_clear_msg_state(ahc);
6862 		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
6863 		ahc_outb(ahc, SCSISEQ,
6864 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
6865 		if (initiate_reset)
6866 			ahc_reset_current_bus(ahc);
6867 		ahc_clear_intstat(ahc);
6868 
6869 		/*
6870 		 * Since we are going to restart the sequencer, avoid
6871 		 * a race in the sequencer that could cause corruption
6872 		 * of our Q pointers by starting over from index 1.
6873 		 */
6874 		ahc->qoutfifonext = 0;
6875 		if ((ahc->features & AHC_QUEUE_REGS) != 0)
6876 			ahc_outb(ahc, SDSCB_QOFF, 0);
6877 		else
6878 			ahc_outb(ahc, QOUTPOS, 0);
6879 		if ((ahc->flags & AHC_TARGETMODE) != 0) {
6880 			ahc->tqinfifonext = 1;
6881 			ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
6882 			ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
6883 			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
6884 				u_int hs_mailbox;
6885 
6886 				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
6887 				hs_mailbox &= ~HOST_TQINPOS;
6888 				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
6889 			}
6890 		}
6891 		restart_needed = TRUE;
6892 	}
6893 
6894 	/*
6895 	 * Clean up all the state information for the
6896 	 * pending transactions on this bus.
6897 	 */
6898 	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
6899 			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
6900 			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
6901 	if (channel == 'B') {
6902 		path = ahc->path_b;
6903 		our_id = ahc->our_id_b;
6904 	} else {
6905 		path = ahc->path;
6906 		our_id = ahc->our_id;
6907 	}
6908 
6909 	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
6910 
6911 	/*
6912 	 * Send an immediate notify ccb to all target mode peripheral
6913 	 * drivers affected by this action.
6914 	 */
6915 	for (target = 0; target <= max_scsiid; target++) {
6916 		struct tmode_tstate* tstate;
6917 		u_int lun;
6918 
6919 		tstate = ahc->enabled_targets[target];
6920 		if (tstate == NULL)
6921 			continue;
6922 		for (lun = 0; lun <= 7; lun++) {
6923 			struct tmode_lstate* lstate;
6924 
6925 			lstate = tstate->enabled_luns[lun];
6926 			if (lstate == NULL)
6927 				continue;
6928 
6929 			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
6930 					       EVENT_TYPE_BUS_RESET, /*arg*/0);
6931 			ahc_send_lstate_events(ahc, lstate);
6932 		}
6933 	}
6934 
6935 	/* Notify the XPT that a bus reset occurred */
6936 	xpt_async(AC_BUS_RESET, path, NULL);
6937 
6938 	/*
6939 	 * Revert to async/narrow transfers until we renegotiate.
6940 	 */
6941 	for (target = 0; target <= max_scsiid; target++) {
6942 
6943 		if (ahc->enabled_targets[target] == NULL)
6944 			continue;
6945 		for (initiator = 0; initiator <= max_scsiid; initiator++) {
6946 			struct ahc_devinfo devinfo;
6947 
6948 			ahc_compile_devinfo(&devinfo, target, initiator,
6949 					    CAM_LUN_WILDCARD,
6950 					    channel, ROLE_UNKNOWN);
6951 			ahc_set_width(ahc, &devinfo, path,
6952 				      MSG_EXT_WDTR_BUS_8_BIT,
6953 				      AHC_TRANS_CUR, /*paused*/TRUE);
6954 			ahc_set_syncrate(ahc, &devinfo, path,
6955 					 /*syncrate*/NULL, /*period*/0,
6956 					 /*offset*/0, /*ppr_options*/0,
6957 					 AHC_TRANS_CUR, /*paused*/TRUE);
6958 		}
6959 	}
6960 
6961 	if (restart_needed)
6962 		restart_sequencer(ahc);
6963 	else
6964 		unpause_sequencer(ahc);
6965 	return found;
6966 }
6967 
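/*
 * Return non-zero if the given SCB matches the target/channel/lun/tag
 * criteria for the specified role.  Wildcard arguments match anything.
 */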
6968 static int
6969 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
6970 	      char channel, int lun, u_int tag, role_t role)
6971 {
6972 	int targ = SCB_GET_TARGET(ahc, scb);
6973 	char chan = SCB_GET_CHANNEL(ahc, scb);
6974 	int slun = SCB_GET_LUN(scb);
6975 	int match;
6976 
6977 	match = ((chan == channel) || (channel == ALL_CHANNELS));
6978 	if (match != 0)
6979 		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
6980 	if (match != 0)
6981 		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
6982 	if (match != 0) {
6983 		int group;
6984 
6985 		group = XPT_FC_GROUP(scb->ccb->ccb_h.func_code);
6986 		if (role == ROLE_INITIATOR) {
6987 			match = (group == XPT_FC_GROUP_COMMON)
6988 			      && ((tag == scb->hscb->tag)
6989 			       || (tag == SCB_LIST_NULL));
6990 		} else if (role == ROLE_TARGET) {
6991 			match = (group == XPT_FC_GROUP_TMODE)
6992 			      && ((tag == scb->ccb->csio.tag_id)
6993 			       || (tag == SCB_LIST_NULL));
6994 		}
6995 	}
6996 
6997 	return match;
6998 }
6999 
7000 static void
7001 ahc_construct_sdtr(struct ahc_softc *ahc, u_int period, u_int offset)
7002 {
7003 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
7004 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
7005 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
7006 	ahc->msgout_buf[ahc->msgout_index++] = period;
7007 	ahc->msgout_buf[ahc->msgout_index++] = offset;
7008 	ahc->msgout_len += 5;
7009 }
7010 
7011 static void
7012 ahc_construct_wdtr(struct ahc_softc *ahc, u_int bus_width)
7013 {
7014 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
7015 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
7016 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
7017 	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
7018 	ahc->msgout_len += 4;
7019 }
7020 
7021 static void
7022 ahc_construct_ppr(struct ahc_softc *ahc, u_int period, u_int offset,
7023 		  u_int bus_width, u_int ppr_options)
7024 {
7025 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
7026 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
7027 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
7028 	ahc->msgout_buf[ahc->msgout_index++] = period;
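	/* The byte after the period is reserved in the PPR message. */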
7029 	ahc->msgout_buf[ahc->msgout_index++] = 0;
7030 	ahc->msgout_buf[ahc->msgout_index++] = offset;
7031 	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
7032 	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
7033 	ahc->msgout_len += 8;
7034 }
7035 
7036 static void
7037 ahc_calc_residual(struct scb *scb)
7038 {
7039 	struct hardware_scb *hscb;
7040 	struct status_pkt *spkt;
7041 	uint32_t resid;
7042 
7043 	/*
7044 	 * 4 cases.
7045 	 * 1) No residual.
7046 	 *    SG_RESID_VALID clear in sgptr.
7047 	 * 2) Transferless command
7048 	 * 3) Never performed any transfers.
7049 	 *    sgptr has SG_FULL_RESID set.
7050 	 * 4) We have a partial residual.
7051 	 *    Use residual_sgptr to determine
7052 	 *    where we are.
7053 	 */
7054 
7055 	/* Cases 1, 2 & 3 are easy.  Check them first. */
7056 	hscb = scb->hscb;
7057 	if ((hscb->sgptr & SG_RESID_VALID) == 0)
7058 		return;
7059 	hscb->sgptr &= ~SG_RESID_VALID;
7060 
7061 	if ((hscb->sgptr & SG_LIST_NULL) != 0)
7062 		return;
7063 
7064 	spkt = &hscb->shared_data.status;
7065 	if ((hscb->sgptr & SG_FULL_RESID) != 0)
7066 		resid = scb->ccb->csio.dxfer_len;
7067 	else if ((hscb->sgptr & ~SG_PTR_MASK) != 0)
7068 		panic("Bogus sgptr value 0x%x\n", hscb->sgptr);
7069 	else if ((spkt->residual_sg_ptr & ~SG_PTR_MASK) != 0)
7070 		panic("Bogus resid sgptr value 0x%x\n", spkt->residual_sg_ptr);
7071 	else {
7072 		struct ahc_dma_seg *sg;
7073 
7074 		/*
7075 		 * Remainder of the SG where the transfer
7076 		 * stopped.
7077 		 */
7078 		resid = spkt->residual_datacnt & AHC_SG_LEN_MASK;
7079 		sg = ahc_sg_bus_to_virt(scb,
7080 					spkt->residual_sg_ptr & SG_PTR_MASK);
7081 
7082 		/* The residual sg_ptr always points to the next sg */
7083 		sg--;
7084 
7085 		/*
7086 		 * Add up the contents of all residual
7087 		 * SG segments that are after the SG where
7088 		 * the transfer stopped.
7089 		 */
7090 		while ((sg->len & AHC_DMA_LAST_SEG) == 0) {
7091 			sg++;
7092 			resid += sg->len & AHC_SG_LEN_MASK;
7093 		}
7094 	}
7095 	if ((scb->flags & SCB_SENSE) == 0) {
7096 
7097 		scb->ccb->csio.resid = resid;
7098 	} else {
7099 
7100 		scb->ccb->csio.sense_resid = resid;
7101 	}
7102 
7103 #ifdef AHC_DEBUG
7104 	if (ahc_debug & AHC_SHOWMISC) {
7105 		xpt_print_path(scb->ccb->ccb_h.path);
7106 		printf("Handled Residual of %d bytes\n", resid);
7107 	}
7108 #endif
7109 }
7110 
7111 static void
7112 ahc_update_pending_syncrates(struct ahc_softc *ahc)
7113 {
7114 	struct	ccb_hdr *ccbh;
7115 	int	pending_ccb_count;
7116 	int	i;
7117 	u_int	saved_scbptr;
7118 
7119 	/*
7120 	 * Traverse the pending SCB list and ensure that all of the
7121 	 * SCBs there have the proper settings.
7122 	 */
7123 	ccbh = LIST_FIRST(&ahc->pending_ccbs);
7124 	pending_ccb_count = 0;
7125 	while (ccbh != NULL) {
7126 		struct ahc_devinfo devinfo;
7127 		union  ccb *ccb;
7128 		struct scb *pending_scb;
7129 		struct hardware_scb *pending_hscb;
7130 		struct ahc_initiator_tinfo *tinfo;
7131 		struct tmode_tstate *tstate;
7132 		u_int  our_id, remote_id;
7133 
7134 		ccb = (union ccb*)ccbh;
7135 		pending_scb = (struct scb *)ccbh->ccb_scb_ptr;
7136 		pending_hscb = pending_scb->hscb;
7137 		if (ccbh->func_code == XPT_CONT_TARGET_IO) {
7138 			our_id = ccb->ccb_h.target_id;
7139 			remote_id = ccb->ctio.init_id;
7140 		} else {
7141 			our_id = SCB_IS_SCSIBUS_B(ahc, pending_scb)
7142 			       ? ahc->our_id_b : ahc->our_id;
7143 			remote_id = ccb->ccb_h.target_id;
7144 		}
7145 		ahc_compile_devinfo(&devinfo, our_id, remote_id,
7146 				    SCB_GET_LUN(pending_scb),
7147 				    SCB_GET_CHANNEL(ahc, pending_scb),
7148 				    ROLE_UNKNOWN);
7149 		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
7150 					    our_id, remote_id, &tstate);
7151 		pending_hscb->control &= ~ULTRAENB;
7152 		if ((tstate->ultraenb & devinfo.target_mask) != 0)
7153 			pending_hscb->control |= ULTRAENB;
7154 		pending_hscb->scsirate = tinfo->scsirate;
7155 		pending_hscb->scsioffset = tinfo->current.offset;
7156 		pending_ccb_count++;
7157 		ccbh = LIST_NEXT(ccbh, sim_links.le);
7158 	}
7159 
7160 	if (pending_ccb_count == 0)
7161 		return;
7162 
7163 	saved_scbptr = ahc_inb(ahc, SCBPTR);
7164 	/* Ensure that the hscbs down on the card match the new information */
7165 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
7166 		u_int scb_tag;
7167 
7168 		ahc_outb(ahc, SCBPTR, i);
7169 		scb_tag = ahc_inb(ahc, SCB_TAG);
7170 		if (scb_tag != SCB_LIST_NULL) {
7171 			struct	ahc_devinfo devinfo;
7172 			union  ccb *ccb;
7173 			struct	scb *pending_scb;
7174 			struct	hardware_scb *pending_hscb;
7175 			struct	ahc_initiator_tinfo *tinfo;
7176 			struct	tmode_tstate *tstate;
7177 			u_int	our_id, remote_id;
7178 			u_int	control;
7179 
7180 			pending_scb = &ahc->scb_data->scbarray[scb_tag];
7181 			if (pending_scb->flags == SCB_FREE)
7182 				continue;
7183 			pending_hscb = pending_scb->hscb;
7184 			ccb = pending_scb->ccb;
7185 			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
7186 				our_id = ccb->ccb_h.target_id;
7187 				remote_id = ccb->ctio.init_id;
7188 			} else {
7189 				our_id = SCB_IS_SCSIBUS_B(ahc, pending_scb)
7190 				       ? ahc->our_id_b : ahc->our_id;
7191 				remote_id = ccb->ccb_h.target_id;
7192 			}
7193 			ahc_compile_devinfo(&devinfo, our_id, remote_id,
7194 					    SCB_GET_LUN(pending_scb),
7195 					    SCB_GET_CHANNEL(ahc, pending_scb),
7196 					    ROLE_UNKNOWN);
7197 			tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
7198 						    our_id, remote_id, &tstate);
7199 			control = ahc_inb(ahc, SCB_CONTROL);
7200 			control &= ~ULTRAENB;
7201 			if ((tstate->ultraenb & devinfo.target_mask) != 0)
7202 				control |= ULTRAENB;
7203 			ahc_outb(ahc, SCB_CONTROL, control);
7204 			ahc_outb(ahc, SCB_SCSIRATE, tinfo->scsirate);
7205 			ahc_outb(ahc, SCB_SCSIOFFSET, tinfo->current.offset);
7206 		}
7207 	}
7208 	ahc_outb(ahc, SCBPTR, saved_scbptr);
7209 }
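/*
 * Illustrative sketch (not part of the driver): once a negotiation changes
 * the transfer parameters for a target, every cached copy of those
 * parameters (in the host-resident hscbs and in the SCBs down on the card)
 * must be rewritten from the per-target transfer info, which is what
 * ahc_update_pending_syncrates() does above.  The example below shows the
 * same "refresh every cached copy from the source of truth" idea; all
 * ex_ and EX_ prefixed names are hypothetical stand-ins.
 */
#if 0	/* illustrative sketch only; never compiled */
#include <stdint.h>
#include <stddef.h>

#define EX_ULTRAENB	0x08	/* stand-in for the ULTRAENB control bit */

struct ex_tinfo {		/* negotiated settings for one target */
	uint8_t	scsirate;
	uint8_t	offset;
	int	ultra;
};

struct ex_cached_scb {		/* one cached copy of those settings */
	int	target;
	uint8_t	control;
	uint8_t	scsirate;
	uint8_t	scsioffset;
};

static void
example_refresh_cached_scbs(struct ex_cached_scb *scbs, size_t nscbs,
			    const struct ex_tinfo *tinfo_by_target)
{
	size_t i;

	for (i = 0; i < nscbs; i++) {
		const struct ex_tinfo *ti = &tinfo_by_target[scbs[i].target];

		scbs[i].control &= ~EX_ULTRAENB;
		if (ti->ultra)
			scbs[i].control |= EX_ULTRAENB;
		scbs[i].scsirate = ti->scsirate;
		scbs[i].scsioffset = ti->offset;
	}
}
#endif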
7210 
7211 #if UNUSED
7212 static void
7213 ahc_dump_targcmd(struct target_cmd *cmd)
7214 {
7215 	uint8_t *byte;
7216 	uint8_t *last_byte;
7217 	int i;
7218 
7219 	/* Debugging info for received commands */
7220 	byte = &cmd->initiator_channel;
7221 	last_byte = &cmd[1].initiator_channel;
7222 
7223 	i = 0;
7224 	while (byte < last_byte) {
7225 		if (i == 0)
7226 			printf("\t");
7227 		printf("%#x", *byte++);
7228 		i++;
7229 		if (i == 8) {
7230 			printf("\n");
7231 			i = 0;
7232 		} else {
7233 			printf(", ");
7234 		}
7235 	}
7236 }
7237 #endif
7238 
7239 static void
7240 ahc_shutdown(void *arg, int howto)
7241 {
7242 	struct	ahc_softc *ahc;
7243 	int	i;
7244 	u_int	sxfrctl1_a, sxfrctl1_b;
7245 
7246 	ahc = (struct ahc_softc *)arg;
7247 
7248 	pause_sequencer(ahc);
7249 
7250 	/*
7251 	 * Preserve the value of the SXFRCTL1 register for all channels.
7252 	 * It contains settings that affect termination and we don't want
7253 	 * to disturb the integrity of the bus during shutdown in case
7254 	 * we are in a multi-initiator setup.
7255 	 */
7256 	sxfrctl1_b = 0;
7257 	if ((ahc->features & AHC_TWIN) != 0) {
7258 		u_int sblkctl;
7259 
7260 		sblkctl = ahc_inb(ahc, SBLKCTL);
7261 		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
7262 		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
7263 		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
7264 	}
7265 
7266 	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
7267 
7268 	/* This will reset most registers to 0, but not all */
7269 	ahc_reset(ahc);
7270 
7271 	if ((ahc->features & AHC_TWIN) != 0) {
7272 		u_int sblkctl;
7273 
7274 		sblkctl = ahc_inb(ahc, SBLKCTL);
7275 		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
7276 		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
7277 		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
7278 	}
7279 	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
7280 
7281 	ahc_outb(ahc, SCSISEQ, 0);
7282 	ahc_outb(ahc, SXFRCTL0, 0);
7283 	ahc_outb(ahc, DSPCISTATUS, 0);
7284 
7285 	for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++)
7286 		ahc_outb(ahc, i, 0);
7287 }
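/*
 * Illustrative sketch (not part of the driver): on twin-channel adapters
 * the channel B register bank is reached by setting SELBUSB in SBLKCTL,
 * which is why ahc_shutdown() above brackets its channel B accesses with
 * SBLKCTL writes while saving and restoring SXFRCTL1.  The example below
 * shows that select, access, restore pattern against a fake register
 * file; the ex_ and EX_ names and the register offsets are hypothetical
 * stand-ins.
 */
#if 0	/* illustrative sketch only; never compiled */
#include <stdint.h>

static uint8_t ex_regs[256];	/* fake register file */

#define EX_SXFRCTL1	0x02	/* hypothetical register offsets */
#define EX_SBLKCTL	0x1f
#define EX_SELBUSB	0x08	/* hypothetical bank-select bit */

static uint8_t
ex_inb(unsigned reg)
{
	return (ex_regs[reg]);
}

static void
ex_outb(unsigned reg, uint8_t value)
{
	ex_regs[reg] = value;
}

/* Read SXFRCTL1 for channel B, then switch back to channel A. */
static uint8_t
example_read_sxfrctl1_b(void)
{
	uint8_t sblkctl;
	uint8_t value;

	sblkctl = ex_inb(EX_SBLKCTL);
	ex_outb(EX_SBLKCTL, sblkctl | EX_SELBUSB);	/* select channel B */
	value = ex_inb(EX_SXFRCTL1);
	ex_outb(EX_SBLKCTL, sblkctl & ~EX_SELBUSB);	/* back to channel A */

	return (value);
}
#endif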
7288 
7289 /*
7290  * Add a target mode event to this lun's queue
7291  * Add a target mode event to this LUN's queue.
7292 static void
7293 ahc_queue_lstate_event(struct ahc_softc *ahc, struct tmode_lstate *lstate,
7294 		       u_int initiator_id, u_int event_type, u_int event_arg)
7295 {
7296 	struct ahc_tmode_event *event;
7297 	int pending;
7298 
7299 	xpt_freeze_devq(lstate->path, /*count*/1);
7300 	if (lstate->event_w_idx >= lstate->event_r_idx)
7301 		pending = lstate->event_w_idx - lstate->event_r_idx;
7302 	else
7303 		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
7304 			- (lstate->event_r_idx - lstate->event_w_idx);
7305 
7306 	if (event_type == EVENT_TYPE_BUS_RESET
7307 	 || event_type == MSG_BUS_DEV_RESET) {
7308 		/*
7309 		 * Any earlier events are irrelevant, so reset our buffer.
7310 		 * This has the effect of allowing us to deal with reset
7311 		 * floods (an external device holding down the reset line)
7312 		 * without losing the event that is really interesting.
7313 		 */
7314 		lstate->event_r_idx = 0;
7315 		lstate->event_w_idx = 0;
7316 		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
7317 	}
7318 
7319 	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
7320 		xpt_print_path(lstate->path);
7321 		printf("immediate event %x:%x lost\n",
7322 		       lstate->event_buffer[lstate->event_r_idx].event_type,
7323 		       lstate->event_buffer[lstate->event_r_idx].event_arg);
7324 		lstate->event_r_idx++;
7325 		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
7326 			lstate->event_r_idx = 0;
7327 		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
7328 	}
7329 
7330 	event = &lstate->event_buffer[lstate->event_w_idx];
7331 	event->initiator_id = initiator_id;
7332 	event->event_type = event_type;
7333 	event->event_arg = event_arg;
7334 	lstate->event_w_idx++;
7335 	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
7336 		lstate->event_w_idx = 0;
7337 }
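/*
 * Illustrative sketch (not part of the driver): the event buffer above is
 * a small ring indexed by event_r_idx and event_w_idx, and when it fills
 * up the oldest event is discarded so the newest one is not lost.  The
 * standalone example below implements the same "drop the oldest when
 * full" policy, simplified to the classic one-slot-free ring rather than
 * the driver's exact index arithmetic; the ex_ and EX_ names are
 * hypothetical stand-ins.
 */
#if 0	/* illustrative sketch only; never compiled */
#include <stdio.h>

#define EX_EVT_BUFSIZE	8	/* ring capacity (one slot kept free) */

struct ex_event {
	int type;
	int arg;
};

struct ex_evtq {
	struct ex_event	buf[EX_EVT_BUFSIZE];
	int		r_idx;
	int		w_idx;
};

static void
example_queue_event(struct ex_evtq *q, int type, int arg)
{
	int pending;

	if (q->w_idx >= q->r_idx)
		pending = q->w_idx - q->r_idx;
	else
		pending = EX_EVT_BUFSIZE - (q->r_idx - q->w_idx);

	if (pending == EX_EVT_BUFSIZE - 1) {
		/* Full: discard the oldest event to make room. */
		printf("event %d:%d lost\n",
		       q->buf[q->r_idx].type, q->buf[q->r_idx].arg);
		q->r_idx = (q->r_idx + 1) % EX_EVT_BUFSIZE;
	}

	q->buf[q->w_idx].type = type;
	q->buf[q->w_idx].arg = arg;
	q->w_idx = (q->w_idx + 1) % EX_EVT_BUFSIZE;
}
#endif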
7338 
7339 /*
7340  * Send any target mode events queued up waiting
7341  * for immediate notify resources.
7342  */
7343 static void
7344 ahc_send_lstate_events(struct ahc_softc *ahc, struct tmode_lstate *lstate)
7345 {
7346 	struct ccb_hdr *ccbh;
7347 	struct ccb_immed_notify *inot;
7348 
7349 	while (lstate->event_r_idx != lstate->event_w_idx
7350 	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
7351 		struct ahc_tmode_event *event;
7352 
7353 		event = &lstate->event_buffer[lstate->event_r_idx];
7354 		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
7355 		inot = (struct ccb_immed_notify *)ccbh;
7356 		switch (event->event_type) {
7357 		case EVENT_TYPE_BUS_RESET:
7358 			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
7359 			break;
7360 		default:
7361 			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
7362 			inot->message_args[0] = event->event_type;
7363 			inot->message_args[1] = event->event_arg;
7364 			break;
7365 		}
7366 		inot->initiator_id = event->initiator_id;
7367 		inot->sense_len = 0;
7368 		xpt_done((union ccb *)inot);
7369 		lstate->event_r_idx++;
7370 		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
7371 			lstate->event_r_idx = 0;
7372 	}
7373 }
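/*
 * Illustrative sketch (not part of the driver): the drain loop above only
 * advances the read index while both a queued event and an immediate
 * notify CCB are available.  The example below shows that pairing of
 * conditions with a trivial ring and stubbed-out resource and delivery
 * routines; all ex_ names are hypothetical stand-ins.
 */
#if 0	/* illustrative sketch only; never compiled */
#include <stdbool.h>

#define EX_EVT_BUFSIZE	8

struct ex_ring {
	int events[EX_EVT_BUFSIZE];
	int r_idx;
	int w_idx;
};

static bool
ex_resource_available(void)
{
	return (true);		/* stub: pretend a CCB is always queued */
}

static void
ex_deliver(int event)
{
	(void)event;		/* stub: would complete the CCB here */
}

static void
example_drain_events(struct ex_ring *q)
{
	while (q->r_idx != q->w_idx && ex_resource_available()) {
		ex_deliver(q->events[q->r_idx]);
		q->r_idx = (q->r_idx + 1) % EX_EVT_BUFSIZE;
	}
}
#endif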
7374