1 /*
2  * Core routines and tables shareable across OS platforms.
3  *
4  * Copyright (c) 1994-2001 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * Alternatively, this software may be distributed under the terms of the
17  * GNU Public License ("GPL").
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $Id: //depot/src/aic7xxx/aic7xxx.c#43 $
32  *
33  * $FreeBSD$
34  */
35 
36 #include <dev/aic7xxx/aic7xxx_freebsd.h>
37 #include <dev/aic7xxx/aic7xxx_inline.h>
38 #include <dev/aic7xxx/aicasm/aicasm_insformat.h>
39 
40 /****************************** Softc Data ************************************/
41 struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);
42 
43 /***************************** Lookup Tables **********************************/
44 char *ahc_chip_names[] =
45 {
46 	"NONE",
47 	"aic7770",
48 	"aic7850",
49 	"aic7855",
50 	"aic7859",
51 	"aic7860",
52 	"aic7870",
53 	"aic7880",
54 	"aic7895",
55 	"aic7895C",
56 	"aic7890/91",
57 	"aic7896/97",
58 	"aic7892",
59 	"aic7899"
60 };
61 static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);
62 
63 /*
64  * Hardware error codes.
65  */
66 struct ahc_hard_error_entry {
67         uint8_t errno;
68 	char *errmesg;
69 };
70 
71 static struct ahc_hard_error_entry ahc_hard_errors[] = {
72 	{ ILLHADDR,	"Illegal Host Access" },
73 	{ ILLSADDR,	"Illegal Sequencer Address referenced" },
74 	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
75 	{ SQPARERR,	"Sequencer Parity Error" },
76 	{ DPARERR,	"Data-path Parity Error" },
77 	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
78 	{ PCIERRSTAT,	"PCI Error detected" },
79 	{ CIOPARERR,	"CIOBUS Parity Error" },
80 };
81 static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);
82 
83 static struct ahc_phase_table_entry ahc_phase_table[] =
84 {
85 	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
86 	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
87 	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
88 	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
89 	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
90 	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
91 	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
92 	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
93 	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
94 	{ 0,		MSG_NOOP,		"in unknown phase"	}
95 };
96 
97 /*
98  * In most cases we only wish to iterate over real phases, so
99  * exclude the last element from the count.
100  */
101 static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;
102 
103 /*
104  * Valid SCSIRATE values.  (p. 3-17)
105  * Provides a mapping of transfer periods in ns to the proper value to
106  * stick in the SCSIRATE register.
107  */
108 static struct ahc_syncrate ahc_syncrates[] =
109 {
110       /* ultra2    fast/ultra  period     rate */
111 	{ 0x42,      0x000,      9,      "80.0" },
112 	{ 0x03,      0x000,     10,      "40.0" },
113 	{ 0x04,      0x000,     11,      "33.0" },
114 	{ 0x05,      0x100,     12,      "20.0" },
115 	{ 0x06,      0x110,     15,      "16.0" },
116 	{ 0x07,      0x120,     18,      "13.4" },
117 	{ 0x08,      0x000,     25,      "10.0" },
118 	{ 0x19,      0x010,     31,      "8.0"  },
119 	{ 0x1a,      0x020,     37,      "6.67" },
120 	{ 0x1b,      0x030,     43,      "5.7"  },
121 	{ 0x1c,      0x040,     50,      "5.0"  },
122 	{ 0x00,      0x050,     56,      "4.4"  },
123 	{ 0x00,      0x060,     62,      "4.0"  },
124 	{ 0x00,      0x070,     68,      "3.6"  },
125 	{ 0x00,      0x000,      0,      NULL   }
126 };
127 
128 /* Our Sequencer Program */
129 #include "aic7xxx_seq.h"
130 
131 /**************************** Function Declarations ***************************/
132 static struct ahc_tmode_tstate*
133 			ahc_alloc_tstate(struct ahc_softc *ahc,
134 					 u_int scsi_id, char channel);
135 #ifdef AHC_TARGET_MODE
136 static void		ahc_free_tstate(struct ahc_softc *ahc,
137 					u_int scsi_id, char channel, int force);
138 #endif
139 static struct ahc_syncrate*
140 			ahc_devlimited_syncrate(struct ahc_softc *ahc,
141 					        struct ahc_initiator_tinfo *,
142 						u_int *period,
143 						u_int *ppr_options,
144 						role_t role);
145 static void		ahc_update_pending_scbs(struct ahc_softc *ahc);
146 static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
147 					  struct ahc_devinfo *devinfo);
148 static void		ahc_scb_devinfo(struct ahc_softc *ahc,
149 					struct ahc_devinfo *devinfo,
150 					struct scb *scb);
151 static void		ahc_assert_atn(struct ahc_softc *ahc);
152 static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
153 						   struct ahc_devinfo *devinfo,
154 						   struct scb *scb);
155 static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
156 					       struct ahc_devinfo *devinfo);
157 static void		ahc_construct_sdtr(struct ahc_softc *ahc,
158 					   struct ahc_devinfo *devinfo,
159 					   u_int period, u_int offset);
160 static void		ahc_construct_wdtr(struct ahc_softc *ahc,
161 					   struct ahc_devinfo *devinfo,
162 					   u_int bus_width);
163 static void		ahc_construct_ppr(struct ahc_softc *ahc,
164 					  struct ahc_devinfo *devinfo,
165 					  u_int period, u_int offset,
166 					  u_int bus_width, u_int ppr_options);
167 static void		ahc_clear_msg_state(struct ahc_softc *ahc);
168 static void		ahc_handle_message_phase(struct ahc_softc *ahc);
169 typedef enum {
170 	AHCMSG_1B,
171 	AHCMSG_2B,
172 	AHCMSG_EXT
173 } ahc_msgtype;
174 static int		ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
175 				     u_int msgval, int full);
176 static int		ahc_parse_msg(struct ahc_softc *ahc,
177 				      struct ahc_devinfo *devinfo);
178 static int		ahc_handle_msg_reject(struct ahc_softc *ahc,
179 					      struct ahc_devinfo *devinfo);
180 static void		ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
181 						struct ahc_devinfo *devinfo);
182 static void		ahc_reinitialize_dataptrs(struct ahc_softc *ahc);
183 static void		ahc_handle_devreset(struct ahc_softc *ahc,
184 					    struct ahc_devinfo *devinfo,
185 					    cam_status status, char *message,
186 					    int verbose_level);
187 #ifdef AHC_TARGET_MODE
188 static void		ahc_setup_target_msgin(struct ahc_softc *ahc,
189 					       struct ahc_devinfo *devinfo,
190 					       struct scb *scb);
191 #endif
192 
193 static bus_dmamap_callback_t	ahc_dmamap_cb;
194 static void			ahc_build_free_scb_list(struct ahc_softc *ahc);
195 static int			ahc_init_scbdata(struct ahc_softc *ahc);
196 static void			ahc_fini_scbdata(struct ahc_softc *ahc);
197 static void		ahc_qinfifo_requeue(struct ahc_softc *ahc,
198 					    struct scb *prev_scb,
199 					    struct scb *scb);
200 static int		ahc_qinfifo_count(struct ahc_softc *ahc);
201 static u_int		ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
202 						   u_int prev, u_int scbptr);
203 static void		ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
204 static u_int		ahc_rem_wscb(struct ahc_softc *ahc,
205 				     u_int scbpos, u_int prev);
206 static int		ahc_abort_scbs(struct ahc_softc *ahc, int target,
207 				       char channel, int lun, u_int tag,
208 				       role_t role, uint32_t status);
209 static void		ahc_reset_current_bus(struct ahc_softc *ahc);
210 #ifdef AHC_DUMP_SEQ
211 static void		ahc_dumpseq(struct ahc_softc *ahc);
212 #endif
213 static void		ahc_loadseq(struct ahc_softc *ahc);
214 static int		ahc_check_patch(struct ahc_softc *ahc,
215 					struct patch **start_patch,
216 					u_int start_instr, u_int *skip_addr);
217 static void		ahc_download_instr(struct ahc_softc *ahc,
218 					   u_int instrptr, uint8_t *dconsts);
219 #ifdef AHC_TARGET_MODE
220 static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
221 					       struct ahc_tmode_lstate *lstate,
222 					       u_int initiator_id,
223 					       u_int event_type,
224 					       u_int event_arg);
225 static void		ahc_update_scsiid(struct ahc_softc *ahc,
226 					  u_int targid_mask);
227 static int		ahc_handle_target_cmd(struct ahc_softc *ahc,
228 					      struct target_cmd *cmd);
229 #endif
230 /************************* Sequencer Execution Control ************************/
231 /*
232  * Restart the sequencer program from address zero
233  */
234 void
235 ahc_restart(struct ahc_softc *ahc)
236 {
237 
238 	ahc_pause(ahc);
239 
240 	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
241 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
242 	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
243 
244 	/*
245 	 * Ensure that the sequencer's idea of TQINPOS
246 	 * matches our own.  The sequencer only increments TQINPOS
247 	 * after it sees a DMA complete, so a reset could occur
248 	 * before the increment, leaving the kernel believing the
249 	 * command arrived but the sequencer unaware of it.
250 	 */
251 	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
252 
253 	/* Always allow reselection */
254 	ahc_outb(ahc, SCSISEQ,
255 		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
256 	if ((ahc->features & AHC_CMD_CHAN) != 0) {
257 		/* Ensure that no DMA operations are in progress */
258 		ahc_outb(ahc, CCSCBCNT, 0);
259 		ahc_outb(ahc, CCSGCTL, 0);
260 		ahc_outb(ahc, CCSCBCTL, 0);
261 	}
262 	/*
263 	 * If we were in the process of DMA'ing SCB data into
264 	 * an SCB, replace that SCB on the free list.  This prevents
265 	 * an SCB leak.
266 	 */
267 	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
268 		ahc_add_curscb_to_free_list(ahc);
269 		ahc_outb(ahc, SEQ_FLAGS2,
270 			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
271 	}
272 	ahc_outb(ahc, MWI_RESIDUAL, 0);
273 	ahc_outb(ahc, SEQCTL, FASTMODE);
274 	ahc_outb(ahc, SEQADDR0, 0);
275 	ahc_outb(ahc, SEQADDR1, 0);
276 	ahc_unpause(ahc);
277 }
278 
279 /************************* Input/Output Queues ********************************/
280 void
281 ahc_run_qoutfifo(struct ahc_softc *ahc)
282 {
283 	struct scb *scb;
284 	u_int  scb_index;
285 
286 	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
287 	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
288 
289 		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
290 		if ((ahc->qoutfifonext & 0x03) == 0x03) {
291 			u_int modnext;
292 
293 			/*
294 			 * Clear 32bits of QOUTFIFO at a time
295 			 * so that we don't clobber an incoming
296 			 * byte DMA to the array on architectures
297 			 * that only support 32bit load and store
298 			 * operations.
299 			 */
300 			modnext = ahc->qoutfifonext & ~0x3;
301 			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
302 			ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
303 					ahc->shared_data_dmamap,
304 					/*offset*/modnext, /*len*/4,
305 					BUS_DMASYNC_PREREAD);
306 		}
307 		ahc->qoutfifonext++;
308 
309 		scb = ahc_lookup_scb(ahc, scb_index);
310 		if (scb == NULL) {
311 			printf("%s: WARNING no command for scb %d "
312 			       "(cmdcmplt)\nQOUTPOS = %d\n",
313 			       ahc_name(ahc), scb_index,
314 			       ahc->qoutfifonext - 1);
315 			continue;
316 		}
317 
318 		/*
319 		 * Save off the residual
320 		 * if there is one.
321 		 */
322 		ahc_update_residual(scb);
323 		ahc_done(ahc, scb);
324 	}
325 }
326 
327 void
328 ahc_run_untagged_queues(struct ahc_softc *ahc)
329 {
330 	int i;
331 
332 	for (i = 0; i < 16; i++)
333 		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
334 }
335 
336 void
337 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
338 {
339 	struct scb *scb;
340 
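	/*
	 * A non-zero lock count means the untagged queues are
	 * currently frozen; hold off issuing new untagged commands.
	 */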
341 	if (ahc->untagged_queue_lock != 0)
342 		return;
343 
344 	if ((scb = TAILQ_FIRST(queue)) != NULL
345 	 && (scb->flags & SCB_ACTIVE) == 0) {
346 		scb->flags |= SCB_ACTIVE;
347 		ahc_queue_scb(ahc, scb);
348 	}
349 }
350 
351 /************************* Interrupt Handling *********************************/
352 void
353 ahc_handle_brkadrint(struct ahc_softc *ahc)
354 {
355 	/*
356 	 * We upset the sequencer :-(
357 	 * Lookup the error message
358 	 */
359 	int i;
360 	int error;
361 
362 	error = ahc_inb(ahc, ERROR);
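	/*
	 * ERROR is a bitmap; shift until the set bit reaches bit 0
	 * to index the matching entry in ahc_hard_errors[].
	 */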
363 	for (i = 0; error != 1 && i < num_errors; i++)
364 		error >>= 1;
365 	printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
366 	       ahc_name(ahc), ahc_hard_errors[i].errmesg,
367 	       ahc_inb(ahc, SEQADDR0) |
368 	       (ahc_inb(ahc, SEQADDR1) << 8));
369 
370 	ahc_dump_card_state(ahc);
371 
372 	/* Tell everyone that this HBA is no longer available */
373 	ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
374 		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
375 		       CAM_NO_HBA);
376 
377 	/* Disable all interrupt sources by resetting the controller */
378 	ahc_shutdown(ahc);
379 }
380 
381 void
382 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
383 {
384 	struct scb *scb;
385 	struct ahc_devinfo devinfo;
386 
387 	ahc_fetch_devinfo(ahc, &devinfo);
388 
389 	/*
390 	 * Clear the upper byte that holds SEQINT status
391 	 * codes and clear the SEQINT bit. We will unpause
392 	 * the sequencer, if appropriate, after servicing
393 	 * the request.
394 	 */
395 	ahc_outb(ahc, CLRINT, CLRSEQINT);
396 	switch (intstat & SEQINT_MASK) {
397 	case BAD_STATUS:
398 	{
399 		u_int  scb_index;
400 		struct hardware_scb *hscb;
401 
402 		/*
403 		 * Set the default return value to 0 (don't
404 		 * send sense).  The sense code will change
405 		 * this if needed.
406 		 */
407 		ahc_outb(ahc, RETURN_1, 0);
408 
409 		/*
410 		 * The sequencer will notify us when a command
411 		 * has an error that would be of interest to
412 		 * the kernel.  This allows us to leave the sequencer
413 		 * running in the common case of command completes
414 		 * without error.  The sequencer will already have
415 		 * dma'd the SCB back up to us, so we can reference
416 		 * the in kernel copy directly.
417 		 */
418 		scb_index = ahc_inb(ahc, SCB_TAG);
419 		scb = ahc_lookup_scb(ahc, scb_index);
420 		if (scb == NULL) {
421 			printf("%s:%c:%d: ahc_intr - referenced scb "
422 			       "not valid during seqint 0x%x scb(%d)\n",
423 			       ahc_name(ahc), devinfo.channel,
424 			       devinfo.target, intstat, scb_index);
425 			ahc_dump_card_state(ahc);
426 			panic("for safety");
427 			goto unpause;
428 		}
429 
430 		hscb = scb->hscb;
431 
432 		/* Don't want to clobber the original sense code */
433 		if ((scb->flags & SCB_SENSE) != 0) {
434 			/*
435 			 * Clear the SCB_SENSE Flag and have
436 			 * the sequencer do a normal command
437 			 * complete.
438 			 */
439 			scb->flags &= ~SCB_SENSE;
440 			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
441 			break;
442 		}
443 		ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
444 		/* Freeze the queue until the client sees the error. */
445 		ahc_freeze_devq(ahc, scb);
446 		ahc_freeze_scb(scb);
447 		ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
448 		switch (hscb->shared_data.status.scsi_status) {
449 		case SCSI_STATUS_OK:
450 			printf("%s: Interrupted for status of 0???\n",
451 			       ahc_name(ahc));
452 			break;
453 		case SCSI_STATUS_CMD_TERMINATED:
454 		case SCSI_STATUS_CHECK_COND:
455 		{
456 			struct ahc_dma_seg *sg;
457 			struct scsi_sense *sc;
458 			struct ahc_initiator_tinfo *targ_info;
459 			struct ahc_tmode_tstate *tstate;
460 			struct ahc_transinfo *tinfo;
461 #ifdef AHC_DEBUG
462 			if (ahc_debug & AHC_SHOWSENSE) {
463 				ahc_print_path(ahc, scb);
464 				printf("SCB %d: requests Check Status\n",
465 				       scb->hscb->tag);
466 			}
467 #endif
468 
469 			if (ahc_perform_autosense(scb) == 0)
470 				break;
471 
472 			targ_info = ahc_fetch_transinfo(ahc,
473 							devinfo.channel,
474 							devinfo.our_scsiid,
475 							devinfo.target,
476 							&tstate);
477 			tinfo = &targ_info->curr;
478 			sg = scb->sg_list;
479 			sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
480 			/*
481 			 * Save off the residual if there is one.
482 			 */
483 			ahc_update_residual(scb);
484 #ifdef AHC_DEBUG
485 			if (ahc_debug & AHC_SHOWSENSE) {
486 				ahc_print_path(ahc, scb);
487 				printf("Sending Sense\n");
488 			}
489 #endif
490 			sg->addr = ahc_get_sense_bufaddr(ahc, scb);
491 			sg->len = ahc_get_sense_bufsize(ahc, scb);
492 			sg->len |= AHC_DMA_LAST_SEG;
493 
494 			/* Fixup byte order */
495 			sg->addr = ahc_htole32(sg->addr);
496 			sg->len = ahc_htole32(sg->len);
497 
498 			sc->opcode = REQUEST_SENSE;
499 			sc->byte2 = 0;
500 			if (tinfo->protocol_version <= SCSI_REV_2
501 			 && SCB_GET_LUN(scb) < 8)
502 				sc->byte2 = SCB_GET_LUN(scb) << 5;
503 			sc->unused[0] = 0;
504 			sc->unused[1] = 0;
505 			sc->length = sg->len;
506 			sc->control = 0;
507 
508 			/*
509 			 * We can't allow the target to disconnect.
510 			 * This will be an untagged transaction and
511 			 * having the target disconnect will make this
512 			 * transaction indistinguishable from outstanding
513 			 * tagged transactions.
514 			 */
515 			hscb->control = 0;
516 
517 			/*
518 			 * This request sense could be because the
519 			 * device lost power or in some other
520 			 * way has lost our transfer negotiations.
521 			 * Renegotiate if appropriate.  Unit attention
522 			 * errors will be reported before any data
523 			 * phases occur.
524 			 */
525 			if (ahc_get_residual(scb)
526 			 == ahc_get_transfer_length(scb)) {
527 				ahc_update_neg_request(ahc, &devinfo,
528 						       tstate, targ_info,
529 						       /*force*/TRUE);
530 			}
531 			if (tstate->auto_negotiate & devinfo.target_mask) {
532 				hscb->control |= MK_MESSAGE;
533 				scb->flags &= ~SCB_NEGOTIATE;
534 				scb->flags |= SCB_AUTO_NEGOTIATE;
535 			}
536 			hscb->cdb_len = sizeof(*sc);
537 			hscb->dataptr = sg->addr;
538 			hscb->datacnt = sg->len;
539 			hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
540 			hscb->sgptr = ahc_htole32(hscb->sgptr);
541 			scb->sg_count = 1;
542 			scb->flags |= SCB_SENSE;
543 			ahc_qinfifo_requeue_tail(ahc, scb);
544 			ahc_outb(ahc, RETURN_1, SEND_SENSE);
545 #ifdef __FreeBSD__
546 			/*
547 			 * Ensure we have enough time to actually
548 			 * retrieve the sense.
549 			 */
550 			untimeout(ahc_timeout, (caddr_t)scb,
551 				  scb->io_ctx->ccb_h.timeout_ch);
552 			scb->io_ctx->ccb_h.timeout_ch =
553 			    timeout(ahc_timeout, (caddr_t)scb, 5 * hz);
554 #endif
555 			break;
556 		}
557 		default:
558 			break;
559 		}
560 		break;
561 	}
562 	case NO_MATCH:
563 	{
564 		/* Ensure we don't leave the selection hardware on */
565 		ahc_outb(ahc, SCSISEQ,
566 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
567 
568 		printf("%s:%c:%d: no active SCB for reconnecting "
569 		       "target - issuing BUS DEVICE RESET\n",
570 		       ahc_name(ahc), devinfo.channel, devinfo.target);
571 		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
572 		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
573 		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
574 		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
575 		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
576 		       "SINDEX == 0x%x\n",
577 		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
578 		       ahc_index_busy_tcl(ahc,
579 			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
580 				      ahc_inb(ahc, SAVED_LUN))),
581 		       ahc_inb(ahc, SINDEX));
582 		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
583 		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
584 		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
585 		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
586 		       ahc_inb(ahc, SCB_CONTROL));
587 		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
588 		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
589 		printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
590 		printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
591 		ahc_dump_card_state(ahc);
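		/*
		 * Queue a BUS DEVICE RESET message for the
		 * reconnecting target and assert ATN to deliver it.
		 */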
592 		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
593 		ahc->msgout_len = 1;
594 		ahc->msgout_index = 0;
595 		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
596 		ahc_outb(ahc, MSG_OUT, HOST_MSG);
597 		ahc_assert_atn(ahc);
598 		break;
599 	}
600 	case SEND_REJECT:
601 	{
602 		u_int rejbyte = ahc_inb(ahc, ACCUM);
603 		printf("%s:%c:%d: Warning - unknown message received from "
604 		       "target (0x%x).  Rejecting\n",
605 		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
606 		break;
607 	}
608 	case NO_IDENT:
609 	{
610 		/*
611 		 * The reconnecting target either did not send an identify
612 		 * message, or did, but we didn't find an SCB to match and
613 		 * before it could respond to our ATN/abort, it hit a dataphase.
614 		 * The only safe thing to do is to blow it away with a bus
615 		 * reset.
616 		 */
617 		int found;
618 
619 		printf("%s:%c:%d: Target did not send an IDENTIFY message. "
620 		       "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n",
621 		       ahc_name(ahc), devinfo.channel, devinfo.target,
622 		       ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID));
623 		found = ahc_reset_channel(ahc, devinfo.channel,
624 					  /*initiate reset*/TRUE);
625 		printf("%s: Issued Channel %c Bus Reset. "
626 		       "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel,
627 		       found);
628 		return;
629 	}
630 	case IGN_WIDE_RES:
631 		ahc_handle_ign_wide_residue(ahc, &devinfo);
632 		break;
633 	case PDATA_REINIT:
634 		ahc_reinitialize_dataptrs(ahc);
635 		break;
636 	case BAD_PHASE:
637 	{
638 		u_int lastphase;
639 
640 		lastphase = ahc_inb(ahc, LASTPHASE);
641 		printf("%s:%c:%d: unknown scsi bus phase %x, "
642 		       "lastphase = 0x%x.  Attempting to continue\n",
643 		       ahc_name(ahc), devinfo.channel, devinfo.target,
644 		       lastphase, ahc_inb(ahc, SCSISIGI));
645 		break;
646 	}
647 	case MISSED_BUSFREE:
648 	{
649 		u_int lastphase;
650 
651 		lastphase = ahc_inb(ahc, LASTPHASE);
652 		printf("%s:%c:%d: Missed busfree. "
653 		       "Lastphase = 0x%x, Curphase = 0x%x\n",
654 		       ahc_name(ahc), devinfo.channel, devinfo.target,
655 		       lastphase, ahc_inb(ahc, SCSISIGI));
656 		ahc_restart(ahc);
657 		return;
658 	}
659 	case HOST_MSG_LOOP:
660 	{
661 		/*
662 		 * The sequencer has encountered a message phase
663 		 * that requires host assistance for completion.
664 		 * While handling the message phase(s), we will be
665 		 * notified by the sequencer after each byte is
666 		 * transfered so we can track bus phase changes.
667 		 *
668 		 * If this is the first time we've seen a HOST_MSG_LOOP
669 		 * interrupt, initialize the state of the host message
670 		 * loop.
671 		 */
672 		if (ahc->msg_type == MSG_TYPE_NONE) {
673 			struct scb *scb;
674 			u_int scb_index;
675 			u_int bus_phase;
676 
677 			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
678 			if (bus_phase != P_MESGIN
679 			 && bus_phase != P_MESGOUT) {
680 				printf("ahc_intr: HOST_MSG_LOOP bad "
681 				       "phase 0x%x\n",
682 				      bus_phase);
683 				/*
684 				 * Probably transitioned to bus free before
685 				 * we got here.  Just punt the message.
686 				 */
687 				ahc_clear_intstat(ahc);
688 				ahc_restart(ahc);
689 				return;
690 			}
691 
692 			scb_index = ahc_inb(ahc, SCB_TAG);
693 			scb = ahc_lookup_scb(ahc, scb_index);
694 			if (devinfo.role == ROLE_INITIATOR) {
695 				if (scb == NULL)
696 					panic("HOST_MSG_LOOP with "
697 					      "invalid SCB %x\n", scb_index);
698 
699 				if (bus_phase == P_MESGOUT)
700 					ahc_setup_initiator_msgout(ahc,
701 								   &devinfo,
702 								   scb);
703 				else {
704 					ahc->msg_type =
705 					    MSG_TYPE_INITIATOR_MSGIN;
706 					ahc->msgin_index = 0;
707 				}
708 			} else {
709 				if (bus_phase == P_MESGOUT) {
710 					ahc->msg_type =
711 					    MSG_TYPE_TARGET_MSGOUT;
712 					ahc->msgin_index = 0;
713 				}
714 #ifdef AHC_TARGET_MODE
715 				else
716 					ahc_setup_target_msgin(ahc,
717 							       &devinfo,
718 							       scb);
719 #endif
720 			}
721 		}
722 
723 		ahc_handle_message_phase(ahc);
724 		break;
725 	}
726 	case PERR_DETECTED:
727 	{
728 		/*
729 		 * If we've cleared the parity error interrupt
730 		 * but the sequencer still believes that SCSIPERR
731 		 * is true, it must be that the parity error is
732 		 * for the currently presented byte on the bus,
733 		 * and we are not in a phase (data-in) where we will
734 		 * eventually ack this byte.  Ack the byte and
735 		 * throw it away in the hope that the target will
736 		 * take us to message out to deliver the appropriate
737 		 * error message.
738 		 */
739 		if ((intstat & SCSIINT) == 0
740 		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
741 
742 			if ((ahc->features & AHC_DT) == 0) {
743 				u_int curphase;
744 
745 				/*
746 				 * The hardware will only let you ack bytes
747 				 * if the expected phase in SCSISIGO matches
748 				 * the current phase.  Make sure this is
749 				 * currently the case.
750 				 */
751 				curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
752 				ahc_outb(ahc, LASTPHASE, curphase);
753 				ahc_outb(ahc, SCSISIGO, curphase);
754 			}
755 			ahc_inb(ahc, SCSIDATL);
756 		}
757 		break;
758 	}
759 	case DATA_OVERRUN:
760 	{
761 		/*
762 		 * When the sequencer detects an overrun, it
763 		 * places the controller in "BITBUCKET" mode
764 		 * and allows the target to complete its transfer.
765 		 * Unfortunately, none of the counters get updated
766 		 * when the controller is in this mode, so we have
767 		 * no way of knowing how large the overrun was.
768 		 */
769 		u_int scbindex = ahc_inb(ahc, SCB_TAG);
770 		u_int lastphase = ahc_inb(ahc, LASTPHASE);
771 		u_int i;
772 
773 		scb = ahc_lookup_scb(ahc, scbindex);
774 		for (i = 0; i < num_phases; i++) {
775 			if (lastphase == ahc_phase_table[i].phase)
776 				break;
777 		}
778 		ahc_print_path(ahc, scb);
779 		printf("data overrun detected %s."
780 		       "  Tag == 0x%x.\n",
781 		       ahc_phase_table[i].phasemsg,
782   		       scb->hscb->tag);
783 		ahc_print_path(ahc, scb);
784 		printf("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
785 		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
786 		       ahc_get_transfer_length(scb), scb->sg_count);
787 		if (scb->sg_count > 0) {
788 			for (i = 0; i < scb->sg_count; i++) {
789 
790 				printf("sg[%d] - Addr 0x%x%x : Length %d\n",
791 				       i,
792 				       (ahc_le32toh(scb->sg_list[i].len) >> 24
793 				        & SG_HIGH_ADDR_BITS),
794 				       ahc_le32toh(scb->sg_list[i].addr),
795 				       ahc_le32toh(scb->sg_list[i].len)
796 				       & AHC_SG_LEN_MASK);
797 			}
798 		}
799 		/*
800 		 * Set this and it will take effect when the
801 		 * target does a command complete.
802 		 */
803 		ahc_freeze_devq(ahc, scb);
804 		ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
805 		ahc_freeze_scb(scb);
806 
807 		if ((ahc->features & AHC_ULTRA2) != 0) {
808 			/*
809 			 * Clear the channel in case we return
810 			 * to data phase later.
811 			 */
812 			ahc_outb(ahc, SXFRCTL0,
813 				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
814 			ahc_outb(ahc, SXFRCTL0,
815 				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
816 		}
817 		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
818 			u_int dscommand1;
819 
820 			/* Ensure HHADDR is 0 for future DMA operations. */
821 			dscommand1 = ahc_inb(ahc, DSCOMMAND1);
822 			ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
823 			ahc_outb(ahc, HADDR, 0);
824 			ahc_outb(ahc, DSCOMMAND1, dscommand1);
825 		}
826 		break;
827 	}
828 	case MKMSG_FAILED:
829 	{
830 		u_int scbindex;
831 
832 		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
833 		       ahc_name(ahc), devinfo.channel, devinfo.target,
834 		       devinfo.lun);
835 		scbindex = ahc_inb(ahc, SCB_TAG);
836 		scb = ahc_lookup_scb(ahc, scbindex);
837 		if (scb != NULL
838 		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
839 			/*
840 			 * Ensure that we didn't put a second instance of this
841 			 * SCB into the QINFIFO.
842 			 */
843 			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
844 					   SCB_GET_CHANNEL(ahc, scb),
845 					   SCB_GET_LUN(scb), scb->hscb->tag,
846 					   ROLE_INITIATOR, /*status*/0,
847 					   SEARCH_REMOVE);
848 		break;
849 	}
850 	case NO_FREE_SCB:
851 	{
852 		printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
853 		ahc_dump_card_state(ahc);
854 		panic("for safety");
855 		break;
856 	}
857 	case SCB_MISMATCH:
858 	{
859 		u_int scbptr;
860 
861 		scbptr = ahc_inb(ahc, SCBPTR);
862 		printf("Bogus TAG after DMA.  SCBPTR %d, tag %d, our tag %d\n",
863 		       scbptr, ahc_inb(ahc, ARG_1),
864 		       ahc->scb_data->hscbs[scbptr].tag);
865 		ahc_dump_card_state(ahc);
866 		panic("for safety");
867 		break;
868 	}
869 	case OUT_OF_RANGE:
870 	{
871 		printf("%s: BTT calculation out of range\n", ahc_name(ahc));
872 		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
873 		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
874 		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
875 		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
876 		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
877 		       "SINDEX == 0x%x, A == 0x%x\n",
878 		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
879 		       ahc_index_busy_tcl(ahc,
880 			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
881 				      ahc_inb(ahc, SAVED_LUN))),
882 		       ahc_inb(ahc, SINDEX),
883 		       ahc_inb(ahc, ACCUM));
884 		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
885 		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
886 		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
887 		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
888 		       ahc_inb(ahc, SCB_CONTROL));
889 		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
890 		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
891 		ahc_dump_card_state(ahc);
892 		panic("for safety");
893 		break;
894 	}
895 	default:
896 		printf("ahc_intr: seqint, "
897 		       "intstat == 0x%x, scsisigi = 0x%x\n",
898 		       intstat, ahc_inb(ahc, SCSISIGI));
899 		break;
900 	}
901 unpause:
902 	/*
903 	 *  The sequencer is paused immediately on
904 	 *  a SEQINT, so we should restart it when
905 	 *  we're done.
906 	 */
907 	ahc_unpause(ahc);
908 }
909 
910 void
911 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
912 {
913 	u_int	scb_index;
914 	u_int	status0;
915 	u_int	status;
916 	struct	scb *scb;
917 	char	cur_channel;
918 	char	intr_channel;
919 
920 	/* Make sure the sequencer is in a safe location. */
921 	ahc_clear_critical_section(ahc);
922 
923 	if ((ahc->features & AHC_TWIN) != 0
924 	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
925 		cur_channel = 'B';
926 	else
927 		cur_channel = 'A';
928 	intr_channel = cur_channel;
929 
930 	if ((ahc->features & AHC_ULTRA2) != 0)
931 		status0 = ahc_inb(ahc, SSTAT0) & IOERR;
932 	else
933 		status0 = 0;
934 	status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
935 	if (status == 0 && status0 == 0) {
936 		if ((ahc->features & AHC_TWIN) != 0) {
937 			/* Try the other channel */
938 		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
939 			status = ahc_inb(ahc, SSTAT1)
940 			       & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
941 			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
942 		}
943 		if (status == 0) {
944 			printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
945 			ahc_outb(ahc, CLRINT, CLRSCSIINT);
946 			ahc_unpause(ahc);
947 			return;
948 		}
949 	}
950 
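	/*
	 * Only trust the SCB referenced by SCB_TAG once the connection
	 * has been fully identified (IDENTIFY_SEEN); otherwise ignore it.
	 */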
951 	scb_index = ahc_inb(ahc, SCB_TAG);
952 	scb = ahc_lookup_scb(ahc, scb_index);
953 	if (scb != NULL
954 	 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0)
955 		scb = NULL;
956 
957 	if ((ahc->features & AHC_ULTRA2) != 0
958 	 && (status0 & IOERR) != 0) {
959 		int now_lvd;
960 
961 		now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
962 		printf("%s: Transceiver State Has Changed to %s mode\n",
963 		       ahc_name(ahc), now_lvd ? "LVD" : "SE");
964 		ahc_outb(ahc, CLRSINT0, CLRIOERR);
965 		/*
966 		 * When transitioning to SE mode, the reset line
967 		 * glitches, triggering an arbitration bug in some
968 		 * Ultra2 controllers.  This bug is cleared when we
969 		 * assert the reset line.  Since a reset glitch has
970 		 * already occurred with this transition and a
971 		 * transceiver state change is handled just like
972 		 * a bus reset anyway, asserting the reset line
973 		 * ourselves is safe.
974 		 */
975 		ahc_reset_channel(ahc, intr_channel,
976 				 /*Initiate Reset*/now_lvd == 0);
977 	} else if ((status & SCSIRSTI) != 0) {
978 		printf("%s: Someone reset channel %c\n",
979 			ahc_name(ahc), intr_channel);
980 		if (intr_channel != cur_channel)
981 		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
982 		ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
983 	} else if ((status & SCSIPERR) != 0) {
984 		/*
985 		 * Determine the bus phase and queue an appropriate message.
986 		 * SCSIPERR is latched true as soon as a parity error
987 		 * occurs.  If the sequencer acked the transfer that
988 		 * caused the parity error and the currently presented
989 		 * transfer on the bus has correct parity, SCSIPERR will
990 		 * be cleared by CLRSCSIPERR.  Use this to determine if
991 		 * we should look at the last phase the sequencer recorded,
992 		 * or the current phase presented on the bus.
993 		 */
994 		u_int mesg_out;
995 		u_int curphase;
996 		u_int errorphase;
997 		u_int lastphase;
998 		u_int scsirate;
999 		u_int i;
1000 		u_int sstat2;
1001 
1002 		lastphase = ahc_inb(ahc, LASTPHASE);
1003 		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
1004 		sstat2 = ahc_inb(ahc, SSTAT2);
1005 		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
1006 		/*
1007 		 * For all phases save DATA, the sequencer won't
1008 		 * automatically ack a byte that has a parity error
1009 		 * in it.  So the only way that the current phase
1010 		 * could be 'data-in' is if the parity error is for
1011 		 * an already acked byte in the data phase.  During
1012 		 * synchronous data-in transfers, we may actually
1013 		 * ack bytes before latching the current phase in
1014 		 * LASTPHASE, leading to the discrepancy between
1015 		 * curphase and lastphase.
1016 		 */
1017 		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
1018 		 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
1019 			errorphase = curphase;
1020 		else
1021 			errorphase = lastphase;
1022 
1023 		for (i = 0; i < num_phases; i++) {
1024 			if (errorphase == ahc_phase_table[i].phase)
1025 				break;
1026 		}
1027 		mesg_out = ahc_phase_table[i].mesg_out;
1028 		if (scb != NULL)
1029 			ahc_print_path(ahc, scb);
1030 		else
1031 			printf("%s:%c:%d: ", ahc_name(ahc), intr_channel,
1032 			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
1033 		scsirate = ahc_inb(ahc, SCSIRATE);
1034 		printf("parity error detected %s. "
1035 		       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
1036 		       ahc_phase_table[i].phasemsg,
1037 		       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8),
1038 		       scsirate);
1039 
1040 		if ((ahc->features & AHC_DT) != 0) {
1041 
1042 			if ((sstat2 & CRCVALERR) != 0)
1043 				printf("\tCRC Value Mismatch\n");
1044 			if ((sstat2 & CRCENDERR) != 0)
1045 				printf("\tNo terminal CRC packet received\n");
1046 			if ((sstat2 & CRCREQERR) != 0)
1047 				printf("\tIllegal CRC packet request\n");
1048 			if ((sstat2 & DUAL_EDGE_ERR) != 0)
1049 				printf("\tUnexpected %sDT Data Phase\n",
1050 				       (scsirate & SINGLE_EDGE) ? "" : "non-");
1051 		}
1052 
1053 		/*
1054 		 * We've set the hardware to assert ATN if we
1055 		 * get a parity error on "in" phases, so all we
1056 		 * need to do is stuff the message buffer with
1057 		 * the appropriate message.  "In" phases have set
1058 		 * mesg_out to something other than MSG_NOOP.
1059 		 */
1060 		if (mesg_out != MSG_NOOP) {
1061 			if (ahc->msg_type != MSG_TYPE_NONE)
1062 				ahc->send_msg_perror = TRUE;
1063 			else
1064 				ahc_outb(ahc, MSG_OUT, mesg_out);
1065 		}
1066 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1067 		ahc_unpause(ahc);
1068 	} else if ((status & SELTO) != 0) {
1069 		u_int scbptr;
1070 
1071 		/* Stop the selection */
1072 		ahc_outb(ahc, SCSISEQ, 0);
1073 
1074 		/* No more pending messages */
1075 		ahc_clear_msg_state(ahc);
1076 
1077 		/* Clear interrupt state */
1078 		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1079 		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1080 
1081 		/*
1082 		 * Although the driver does not care about the
1083 		 * 'Selection in Progress' status bit, the busy
1084 		 * LED does.  SELINGO is only cleared by a successful
1085 		 * selection, so we must manually clear it to ensure
1086 		 * the LED turns off just in case no future successful
1087 		 * selections occur (e.g. no devices on the bus).
1088 		 */
1089 		ahc_outb(ahc, CLRSINT0, CLRSELINGO);
1090 
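		/*
		 * The SCB at the head of the waiting list is the one
		 * whose selection timed out.
		 */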
1091 		scbptr = ahc_inb(ahc, WAITING_SCBH);
1092 		ahc_outb(ahc, SCBPTR, scbptr);
1093 		scb_index = ahc_inb(ahc, SCB_TAG);
1094 
1095 		scb = ahc_lookup_scb(ahc, scb_index);
1096 		if (scb == NULL) {
1097 			printf("%s: ahc_intr - referenced scb not "
1098 			       "valid during SELTO scb(%d, %d)\n",
1099 			       ahc_name(ahc), scbptr, scb_index);
1100 		} else {
1101 			ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1102 			ahc_freeze_devq(ahc, scb);
1103 		}
1104 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1105 		ahc_restart(ahc);
1106 	} else if ((status & BUSFREE) != 0
1107 		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
1108 		u_int lastphase;
1109 		u_int saved_scsiid;
1110 		u_int saved_lun;
1111 		u_int target;
1112 		u_int initiator_role_id;
1113 		char channel;
1114 		int printerror;
1115 
1116 		/*
1117 		 * Clear our selection hardware as soon as possible.
1118 		 * We may have an entry in the waiting Q for this target
1119 		 * that is affected by this busfree, and we don't want to
1120 		 * go about selecting the target while we handle the event.
1121 		 */
1122 		ahc_outb(ahc, SCSISEQ,
1123 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
1124 
1125 		/*
1126 		 * Disable busfree interrupts and clear the busfree
1127 		 * interrupt status.  We do this here so that several
1128 		 * bus transactions occur prior to clearing the SCSIINT
1129 		 * latch.  It can take a bit for the clearing to take effect.
1130 		 */
1131 		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1132 		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
1133 
1134 		/*
1135 		 * Look at what phase we were last in.
1136 		 * If it's message-out, chances are pretty good
1137 		 * that the busfree was in response to one of
1138 		 * our abort requests.
1139 		 */
1140 		lastphase = ahc_inb(ahc, LASTPHASE);
1141 		saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1142 		saved_lun = ahc_inb(ahc, SAVED_LUN);
1143 		target = SCSIID_TARGET(ahc, saved_scsiid);
1144 		initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
1145 		channel = SCSIID_CHANNEL(ahc, saved_scsiid);
1146 		printerror = 1;
1147 
1148 		if (lastphase == P_MESGOUT) {
1149 			struct ahc_devinfo devinfo;
1150 			u_int tag;
1151 
1152 			ahc_fetch_devinfo(ahc, &devinfo);
1153 			tag = SCB_LIST_NULL;
1154 			if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
1155 			 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
1156 				if (ahc->msgout_buf[ahc->msgout_index - 1]
1157 				 == MSG_ABORT_TAG)
1158 					tag = scb->hscb->tag;
1159 				ahc_print_path(ahc, scb);
1160 				printf("SCB %d - Abort%s Completed.\n",
1161 				       scb->hscb->tag, tag == SCB_LIST_NULL ?
1162 				       "" : " Tag");
1163 				ahc_abort_scbs(ahc, target, channel,
1164 					       saved_lun, tag,
1165 					       ROLE_INITIATOR,
1166 					       CAM_REQ_ABORTED);
1167 				printerror = 0;
1168 			} else if (ahc_sent_msg(ahc, AHCMSG_1B,
1169 						MSG_BUS_DEV_RESET, TRUE)) {
1170 				struct ahc_devinfo devinfo;
1171 #ifdef __FreeBSD__
1172 				/*
1173 				 * Don't mark the user's request for this BDR
1174 				 * as completing with CAM_BDR_SENT.  CAM3
1175 				 * specifies CAM_REQ_CMP.
1176 				 */
1177 				if (scb != NULL
1178 				 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
1179 				 && ahc_match_scb(ahc, scb, target, channel,
1180 						  CAM_LUN_WILDCARD,
1181 						  SCB_LIST_NULL,
1182 						  ROLE_INITIATOR)) {
1183 					ahc_set_transaction_status(scb, CAM_REQ_CMP);
1184 				}
1185 #endif
1186 				ahc_compile_devinfo(&devinfo,
1187 						    initiator_role_id,
1188 						    target,
1189 						    CAM_LUN_WILDCARD,
1190 						    channel,
1191 						    ROLE_INITIATOR);
1192 				ahc_handle_devreset(ahc, &devinfo,
1193 						    CAM_BDR_SENT,
1194 						    "Bus Device Reset",
1195 						    /*verbose_level*/0);
1196 				printerror = 0;
1197 			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1198 						MSG_EXT_PPR, FALSE)) {
1199 				struct ahc_initiator_tinfo *tinfo;
1200 				struct ahc_tmode_tstate *tstate;
1201 
1202 				/*
1203 				 * PPR Rejected.  Try non-ppr negotiation
1204 				 * and retry command.
1205 				 */
1206 				tinfo = ahc_fetch_transinfo(ahc,
1207 							    devinfo.channel,
1208 							    devinfo.our_scsiid,
1209 							    devinfo.target,
1210 							    &tstate);
1211 				tinfo->curr.transport_version = 2;
1212 				tinfo->goal.transport_version = 2;
1213 				tinfo->goal.ppr_options = 0;
1214 				ahc_qinfifo_requeue_tail(ahc, scb);
1215 				printerror = 0;
1216 			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1217 						MSG_EXT_WDTR, FALSE)
1218 				|| ahc_sent_msg(ahc, AHCMSG_EXT,
1219 						MSG_EXT_SDTR, FALSE)) {
1220 				/*
1221 				 * Negotiation Rejected.  Go-async and
1222 				 * retry command.
1223 				 */
1224 				ahc_set_width(ahc, &devinfo,
1225 					      MSG_EXT_WDTR_BUS_8_BIT,
1226 					      AHC_TRANS_CUR|AHC_TRANS_GOAL,
1227 					      /*paused*/TRUE);
1228 				ahc_set_syncrate(ahc, &devinfo,
1229 						/*syncrate*/NULL,
1230 						/*period*/0, /*offset*/0,
1231 						/*ppr_options*/0,
1232 						AHC_TRANS_CUR|AHC_TRANS_GOAL,
1233 						/*paused*/TRUE);
1234 				ahc_qinfifo_requeue_tail(ahc, scb);
1235 				printerror = 0;
1236 			}
1237 		}
1238 		if (printerror != 0) {
1239 			u_int i;
1240 
1241 			if (scb != NULL) {
1242 				u_int tag;
1243 
1244 				if ((scb->hscb->control & TAG_ENB) != 0)
1245 					tag = scb->hscb->tag;
1246 				else
1247 					tag = SCB_LIST_NULL;
1248 				ahc_print_path(ahc, scb);
1249 				ahc_abort_scbs(ahc, target, channel,
1250 					       SCB_GET_LUN(scb), tag,
1251 					       ROLE_INITIATOR,
1252 					       CAM_UNEXP_BUSFREE);
1253 			} else {
1254 				/*
1255 				 * We had not fully identified this connection,
1256 				 * so we cannot abort anything.
1257 				 */
1258 				printf("%s: ", ahc_name(ahc));
1259 			}
1260 			for (i = 0; i < num_phases; i++) {
1261 				if (lastphase == ahc_phase_table[i].phase)
1262 					break;
1263 			}
1264 			printf("Unexpected busfree %s\n"
1265 			       "SEQADDR == 0x%x\n",
1266 			       ahc_phase_table[i].phasemsg,
1267 			       ahc_inb(ahc, SEQADDR0)
1268 				| (ahc_inb(ahc, SEQADDR1) << 8));
1269 		}
1270 		ahc_clear_msg_state(ahc);
1271 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1272 		ahc_restart(ahc);
1273 	} else {
1274 		printf("%s: Missing case in ahc_handle_scsiint. status = %x\n",
1275 		       ahc_name(ahc), status);
1276 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1277 	}
1278 }
1279 
1280 #define AHC_MAX_STEPS 2000
1281 void
1282 ahc_clear_critical_section(struct ahc_softc *ahc)
1283 {
1284 	int	stepping;
1285 	int	steps;
1286 	u_int	simode0;
1287 	u_int	simode1;
1288 
1289 	if (ahc->num_critical_sections == 0)
1290 		return;
1291 
1292 	stepping = FALSE;
1293 	steps = 0;
1294 	simode0 = 0;
1295 	simode1 = 0;
1296 	for (;;) {
1297 		struct	cs *cs;
1298 		u_int	seqaddr;
1299 		u_int	i;
1300 
1301 		seqaddr = ahc_inb(ahc, SEQADDR0)
1302 			| (ahc_inb(ahc, SEQADDR1) << 8);
1303 
1304 		/*
1305 		 * Seqaddr represents the next instruction to execute,
1306 		 * so we are really executing the instruction just
1307 		 * before it.
1308 		 */
1309 		if (seqaddr != 0)
1310 			seqaddr -= 1;
1311 		cs = ahc->critical_sections;
1312 		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
1313 
1314 			if (cs->begin < seqaddr && cs->end >= seqaddr)
1315 				break;
1316 		}
1317 
1318 		if (i == ahc->num_critical_sections)
1319 			break;
1320 
1321 		if (steps > AHC_MAX_STEPS) {
1322 			printf("%s: Infinite loop in critical section\n",
1323 			       ahc_name(ahc));
1324 			ahc_dump_card_state(ahc);
1325 			panic("critical section loop");
1326 		}
1327 
1328 		steps++;
1329 		if (stepping == FALSE) {
1330 
1331 			/*
1332 			 * Disable all interrupt sources so that the
1333 			 * sequencer will not be stuck by a pausing
1334 			 * interrupt condition while we attempt to
1335 			 * leave a critical section.
1336 			 */
1337 			simode0 = ahc_inb(ahc, SIMODE0);
1338 			ahc_outb(ahc, SIMODE0, 0);
1339 			simode1 = ahc_inb(ahc, SIMODE1);
1340 			ahc_outb(ahc, SIMODE1, 0);
1341 			ahc_outb(ahc, CLRINT, CLRSCSIINT);
1342 			ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP);
1343 			stepping = TRUE;
1344 		}
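		/*
		 * Single-step one sequencer instruction, then wait for the
		 * sequencer to pause again before rechecking SEQADDR.
		 */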
1345 		ahc_outb(ahc, HCNTRL, ahc->unpause);
1346 		do {
1347 			ahc_delay(200);
1348 		} while (!ahc_is_paused(ahc));
1349 	}
1350 	if (stepping) {
1351 		ahc_outb(ahc, SIMODE0, simode0);
1352 		ahc_outb(ahc, SIMODE1, simode1);
1353 		ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP);
1354 	}
1355 }
1356 
1357 /*
1358  * Clear any pending interrupt status.
1359  */
1360 void
1361 ahc_clear_intstat(struct ahc_softc *ahc)
1362 {
1363 	/* Clear any interrupt conditions this may have caused */
1364 	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
1365 				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
1366 				CLRREQINIT);
1367 	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
1368 	ahc_outb(ahc, CLRINT, CLRSCSIINT);
1369 }
1370 
1371 /**************************** Debugging Routines ******************************/
1372 void
1373 ahc_print_scb(struct scb *scb)
1374 {
1375 	int i;
1376 
1377 	struct hardware_scb *hscb = scb->hscb;
1378 
1379 	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
1380 	       (void *)scb,
1381 	       hscb->control,
1382 	       hscb->scsiid,
1383 	       hscb->lun,
1384 	       hscb->cdb_len);
1385 	printf("Shared Data: ");
1386 	for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
1387 		printf("%#02x", hscb->shared_data.cdb[i]);
1388 	printf("        dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
1389 		ahc_le32toh(hscb->dataptr),
1390 		ahc_le32toh(hscb->datacnt),
1391 		ahc_le32toh(hscb->sgptr),
1392 		hscb->tag);
1393 	if (scb->sg_count > 0) {
1394 		for (i = 0; i < scb->sg_count; i++) {
1395 			printf("sg[%d] - Addr 0x%x%x : Length %d\n",
1396 			       i,
1397 			       (ahc_le32toh(scb->sg_list[i].len) >> 24
1398 			        & SG_HIGH_ADDR_BITS),
1399 			       ahc_le32toh(scb->sg_list[i].addr),
1400 			       ahc_le32toh(scb->sg_list[i].len));
1401 		}
1402 	}
1403 }
1404 
1405 /************************* Transfer Negotiation *******************************/
1406 /*
1407  * Allocate per target mode instance (ID we respond to as a target)
1408  * transfer negotiation data structures.
1409  */
1410 static struct ahc_tmode_tstate *
1411 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1412 {
1413 	struct ahc_tmode_tstate *master_tstate;
1414 	struct ahc_tmode_tstate *tstate;
1415 	int i;
1416 
1417 	master_tstate = ahc->enabled_targets[ahc->our_id];
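	/*
	 * Channel B targets occupy the upper half (IDs 8-15) of the
	 * enabled_targets[] array.
	 */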
1418 	if (channel == 'B') {
1419 		scsi_id += 8;
1420 		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
1421 	}
1422 	if (ahc->enabled_targets[scsi_id] != NULL
1423 	 && ahc->enabled_targets[scsi_id] != master_tstate)
1424 		panic("%s: ahc_alloc_tstate - Target already allocated",
1425 		      ahc_name(ahc));
1426 	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
1427 	if (tstate == NULL)
1428 		return (NULL);
1429 
1430 	/*
1431 	 * If we have allocated a master tstate, copy user settings from
1432 	 * the master tstate (taken from SRAM or the EEPROM) for this
1433 	 * channel, but reset our current and goal settings to async/narrow
1434 	 * until an initiator talks to us.
1435 	 */
1436 	if (master_tstate != NULL) {
1437 		memcpy(tstate, master_tstate, sizeof(*tstate));
1438 		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
1439 		tstate->ultraenb = 0;
1440 		for (i = 0; i < 16; i++) {
1441 			memset(&tstate->transinfo[i].curr, 0,
1442 			      sizeof(tstate->transinfo[i].curr));
1443 			memset(&tstate->transinfo[i].goal, 0,
1444 			      sizeof(tstate->transinfo[i].goal));
1445 		}
1446 	} else
1447 		memset(tstate, 0, sizeof(*tstate));
1448 	ahc->enabled_targets[scsi_id] = tstate;
1449 	return (tstate);
1450 }
1451 
1452 #ifdef AHC_TARGET_MODE
1453 /*
1454  * Free per target mode instance (ID we respond to as a target)
1455  * transfer negotiation data structures.
1456  */
1457 static void
1458 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
1459 {
1460 	struct ahc_tmode_tstate *tstate;
1461 
1462 	/*
1463 	 * Don't clean up our "master" tstate.
1464 	 * It has our default user settings.
1465 	 */
1466 	if (((channel == 'B' && scsi_id == ahc->our_id_b)
1467 	  || (channel == 'A' && scsi_id == ahc->our_id))
1468 	 && force == FALSE)
1469 		return;
1470 
1471 	if (channel == 'B')
1472 		scsi_id += 8;
1473 	tstate = ahc->enabled_targets[scsi_id];
1474 	if (tstate != NULL)
1475 		free(tstate, M_DEVBUF);
1476 	ahc->enabled_targets[scsi_id] = NULL;
1477 }
1478 #endif
1479 
1480 /*
1481  * Called when we have an active connection to a target on the bus,
1482  * this function finds the nearest syncrate to the input period limited
1483  * by the capabilities of the bus connectivity and the sync settings for
1484  * the target.
1485  */
1486 struct ahc_syncrate *
1487 ahc_devlimited_syncrate(struct ahc_softc *ahc,
1488 			struct ahc_initiator_tinfo *tinfo,
1489 			u_int *period, u_int *ppr_options, role_t role) {
1490 	struct	ahc_transinfo *transinfo;
1491 	u_int	maxsync;
1492 
1493 	if ((ahc->features & AHC_ULTRA2) != 0) {
1494 		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1495 		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1496 			maxsync = AHC_SYNCRATE_DT;
1497 		} else {
1498 			maxsync = AHC_SYNCRATE_ULTRA;
1499 			/* Can't do DT on an SE bus */
1500 			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1501 		}
1502 	} else if ((ahc->features & AHC_ULTRA) != 0) {
1503 		maxsync = AHC_SYNCRATE_ULTRA;
1504 	} else {
1505 		maxsync = AHC_SYNCRATE_FAST;
1506 	}
1507 	/*
1508 	 * Never allow a value higher than our current goal
1509 	 * period; otherwise we may allow a target-initiated
1510 	 * negotiation to go above the limit as set by the
1511 	 * user.  In the case of an initiator initiated
1512 	 * sync negotiation, we limit based on the user
1513 	 * setting.  This allows the system to still accept
1514 	 * incoming negotiations even if target initiated
1515 	 * negotiation is not performed.
1516 	 */
1517 	if (role == ROLE_TARGET)
1518 		transinfo = &tinfo->user;
1519 	else
1520 		transinfo = &tinfo->goal;
1521 	*ppr_options &= transinfo->ppr_options;
1522 	if (transinfo->period == 0) {
1523 		*period = 0;
1524 		*ppr_options = 0;
1525 		return (NULL);
1526 	}
1527 	*period = MAX(*period, transinfo->period);
1528 	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1529 }
1530 
1531 /*
1532  * Look up the valid period to SCSIRATE conversion in our table.
1533  * Return the period and offset that should be sent to the target
1534  * if this was the beginning of an SDTR.
1535  */
1536 struct ahc_syncrate *
1537 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1538 		  u_int *ppr_options, u_int maxsync)
1539 {
1540 	struct ahc_syncrate *syncrate;
1541 
1542 	if ((ahc->features & AHC_DT) == 0)
1543 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1544 
1545 	/* Skip all DT only entries if DT is not available */
1546 	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1547 	 && maxsync < AHC_SYNCRATE_ULTRA2)
1548 		maxsync = AHC_SYNCRATE_ULTRA2;
1549 
1550 	for (syncrate = &ahc_syncrates[maxsync];
1551 	     syncrate->rate != NULL;
1552 	     syncrate++) {
1553 
1554 		/*
1555 		 * The Ultra2 table doesn't go as low
1556 		 * as the Fast/Ultra table does.
1557 		 */
1558 		if ((ahc->features & AHC_ULTRA2) != 0
1559 		 && (syncrate->sxfr_u2 == 0))
1560 			break;
1561 
1562 		if (*period <= syncrate->period) {
1563 			/*
1564 			 * When responding to a target that requests
1565 			 * sync, the requested rate may fall between
1566 			 * two rates that we can output, but still be
1567 			 * a rate that we can receive.  Because of this,
1568 			 * we want to respond to the target with
1569 			 * the same rate that it sent to us even
1570 			 * if the period we use to send data to it
1571 			 * is lower.  Only lower the response period
1572 			 * if we must.
1573 			 */
1574 			if (syncrate == &ahc_syncrates[maxsync])
1575 				*period = syncrate->period;
1576 
1577 			/*
1578 			 * At some speeds, we only support
1579 			 * ST transfers.
1580 			 */
1581 		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1582 				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1583 			break;
1584 		}
1585 	}
1586 
1587 	if ((*period == 0)
1588 	 || (syncrate->rate == NULL)
1589 	 || ((ahc->features & AHC_ULTRA2) != 0
1590 	  && (syncrate->sxfr_u2 == 0))) {
1591 		/* Use asynchronous transfers. */
1592 		*period = 0;
1593 		syncrate = NULL;
1594 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1595 	}
1596 	return (syncrate);
1597 }
1598 
1599 /*
1600  * Convert from an entry in our syncrate table to the SCSI equivalent
1601  * sync "period" factor.
1602  */
1603 u_int
1604 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1605 {
1606 	struct ahc_syncrate *syncrate;
1607 
1608 	if ((ahc->features & AHC_ULTRA2) != 0)
1609 		scsirate &= SXFR_ULTRA2;
1610 	else
1611 		scsirate &= SXFR;
1612 
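	/*
	 * Walk the table starting at maxsync and return the period
	 * factor whose SCSIRATE encoding matches the masked value.
	 */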
1613 	syncrate = &ahc_syncrates[maxsync];
1614 	while (syncrate->rate != NULL) {
1615 
1616 		if ((ahc->features & AHC_ULTRA2) != 0) {
1617 			if (syncrate->sxfr_u2 == 0)
1618 				break;
1619 			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1620 				return (syncrate->period);
1621 		} else if (scsirate == (syncrate->sxfr & SXFR)) {
1622 				return (syncrate->period);
1623 		}
1624 		syncrate++;
1625 	}
1626 	return (0); /* async */
1627 }
1628 
1629 /*
1630  * Truncate the given synchronous offset to a value the
1631  * current adapter type and syncrate are capable of.
1632  */
1633 void
1634 ahc_validate_offset(struct ahc_softc *ahc,
1635 		    struct ahc_initiator_tinfo *tinfo,
1636 		    struct ahc_syncrate *syncrate,
1637 		    u_int *offset, int wide, role_t role)
1638 {
1639 	u_int maxoffset;
1640 
1641 	/* Limit offset to what we can do */
1642 	if (syncrate == NULL) {
1643 		maxoffset = 0;
1644 	} else if ((ahc->features & AHC_ULTRA2) != 0) {
1645 		maxoffset = MAX_OFFSET_ULTRA2;
1646 	} else {
1647 		if (wide)
1648 			maxoffset = MAX_OFFSET_16BIT;
1649 		else
1650 			maxoffset = MAX_OFFSET_8BIT;
1651 	}
1652 	*offset = MIN(*offset, maxoffset);
1653 	if (tinfo != NULL) {
1654 		if (role == ROLE_TARGET)
1655 			*offset = MIN(*offset, tinfo->user.offset);
1656 		else
1657 			*offset = MIN(*offset, tinfo->goal.offset);
1658 	}
1659 }
1660 
1661 /*
1662  * Truncate the given transfer width parameter to a value the
1663  * current adapter type is capable of.
1664  */
1665 void
1666 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1667 		   u_int *bus_width, role_t role)
1668 {
1669 	switch (*bus_width) {
1670 	default:
1671 		if (ahc->features & AHC_WIDE) {
1672 			/* Respond Wide */
1673 			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1674 			break;
1675 		}
1676 		/* FALLTHROUGH */
1677 	case MSG_EXT_WDTR_BUS_8_BIT:
1678 		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1679 		break;
1680 	}
1681 	if (tinfo != NULL) {
1682 		if (role == ROLE_TARGET)
1683 			*bus_width = MIN(tinfo->user.width, *bus_width);
1684 		else
1685 			*bus_width = MIN(tinfo->goal.width, *bus_width);
1686 	}
1687 }
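
/*
 * Sketch of how the negotiation code later in this file combines the
 * helpers above (illustrative only, parameters abbreviated):
 *
 *	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
 *				       &ppr_options, role);
 *	ahc_validate_offset(ahc, tinfo, rate, &offset, wide, role);
 *	ahc_validate_width(ahc, tinfo, &width, role);
 *
 * Only the clamped values are then committed via ahc_set_syncrate()
 * and ahc_set_width().
 */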
1688 
1689 /*
1690  * Update the bitmask of targets with which the controller should
1691  * negotiate at the next convenient opportunity.  This currently
1692  * means the next time we send the initial identify messages for
1693  * a new transaction.
1694  */
1695 int
1696 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1697 		       struct ahc_tmode_tstate *tstate,
1698 		       struct ahc_initiator_tinfo *tinfo, int force)
1699 {
1700 	u_int auto_negotiate_orig;
1701 
1702 	auto_negotiate_orig = tstate->auto_negotiate;
1703 	if (tinfo->curr.period != tinfo->goal.period
1704 	 || tinfo->curr.width != tinfo->goal.width
1705 	 || tinfo->curr.offset != tinfo->goal.offset
1706 	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
1707 	 || (force
1708 	  && (tinfo->goal.period != 0
1709 	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
1710 	   || tinfo->goal.ppr_options != 0)))
1711 		tstate->auto_negotiate |= devinfo->target_mask;
1712 	else
1713 		tstate->auto_negotiate &= ~devinfo->target_mask;
1714 
1715 	return (auto_negotiate_orig != tstate->auto_negotiate);
1716 }
1717 
1718 /*
1719  * Update the user/goal/curr tables of synchronous negotiation
1720  * parameters as well as, in the case of a current or active update,
1721  * any data structures on the host controller.  In the case of an
1722  * active update, the specified target is currently talking to us on
1723  * the bus, so the transfer parameter update must take effect
1724  * immediately.
1725  */
1726 void
1727 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1728 		 struct ahc_syncrate *syncrate, u_int period,
1729 		 u_int offset, u_int ppr_options, u_int type, int paused)
1730 {
1731 	struct	ahc_initiator_tinfo *tinfo;
1732 	struct	ahc_tmode_tstate *tstate;
1733 	u_int	old_period;
1734 	u_int	old_offset;
1735 	u_int	old_ppr;
1736 	int	active;
1737 	int	update_needed;
1738 
1739 	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1740 	update_needed = 0;
1741 
1742 	if (syncrate == NULL) {
1743 		period = 0;
1744 		offset = 0;
1745 	}
1746 
1747 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1748 				    devinfo->target, &tstate);
1749 
1750 	if ((type & AHC_TRANS_USER) != 0) {
1751 		tinfo->user.period = period;
1752 		tinfo->user.offset = offset;
1753 		tinfo->user.ppr_options = ppr_options;
1754 	}
1755 
1756 	if ((type & AHC_TRANS_GOAL) != 0) {
1757 		tinfo->goal.period = period;
1758 		tinfo->goal.offset = offset;
1759 		tinfo->goal.ppr_options = ppr_options;
1760 	}
1761 
1762 	old_period = tinfo->curr.period;
1763 	old_offset = tinfo->curr.offset;
1764 	old_ppr	   = tinfo->curr.ppr_options;
1765 
1766 	if ((type & AHC_TRANS_CUR) != 0
1767 	 && (old_period != period
1768 	  || old_offset != offset
1769 	  || old_ppr != ppr_options)) {
1770 		u_int	scsirate;
1771 
1772 		update_needed++;
1773 		scsirate = tinfo->scsirate;
1774 		if ((ahc->features & AHC_ULTRA2) != 0) {
1775 
1776 			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
1777 			if (syncrate != NULL) {
1778 				scsirate |= syncrate->sxfr_u2;
1779 				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
1780 					scsirate |= ENABLE_CRC;
1781 				else
1782 					scsirate |= SINGLE_EDGE;
1783 			}
1784 		} else {
1785 
1786 			scsirate &= ~(SXFR|SOFS);
1787 			/*
1788 			 * Ensure Ultra mode is set properly for
1789 			 * this target.
1790 			 */
1791 			tstate->ultraenb &= ~devinfo->target_mask;
1792 			if (syncrate != NULL) {
1793 				if (syncrate->sxfr & ULTRA_SXFR) {
1794 					tstate->ultraenb |=
1795 						devinfo->target_mask;
1796 				}
1797 				scsirate |= syncrate->sxfr & SXFR;
1798 				scsirate |= offset & SOFS;
1799 			}
1800 			if (active) {
1801 				u_int sxfrctl0;
1802 
1803 				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
1804 				sxfrctl0 &= ~FAST20;
1805 				if (tstate->ultraenb & devinfo->target_mask)
1806 					sxfrctl0 |= FAST20;
1807 				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
1808 			}
1809 		}
1810 		if (active) {
1811 			ahc_outb(ahc, SCSIRATE, scsirate);
1812 			if ((ahc->features & AHC_ULTRA2) != 0)
1813 				ahc_outb(ahc, SCSIOFFSET, offset);
1814 		}
1815 
1816 		tinfo->scsirate = scsirate;
1817 		tinfo->curr.period = period;
1818 		tinfo->curr.offset = offset;
1819 		tinfo->curr.ppr_options = ppr_options;
1820 
1821 		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1822 			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
1823 		if (bootverbose) {
1824 			if (offset != 0) {
1825 				printf("%s: target %d synchronous at %sMHz%s, "
1826 				       "offset = 0x%x\n", ahc_name(ahc),
1827 				       devinfo->target, syncrate->rate,
1828 				       (ppr_options & MSG_EXT_PPR_DT_REQ)
1829 				       ? " DT" : "", offset);
1830 			} else {
1831 				printf("%s: target %d using "
1832 				       "asynchronous transfers\n",
1833 				       ahc_name(ahc), devinfo->target);
1834 			}
1835 		}
1836 	}
1837 
1838 	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
1839 						tinfo, /*force*/FALSE);
1840 
1841 	if (update_needed)
1842 		ahc_update_pending_scbs(ahc);
1843 }
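
/*
 * Example usage (see the bus device reset handling later in this
 * file): a device is forced back to async by passing a NULL syncrate
 * with zeroed parameters:
 *
 *	ahc_set_syncrate(ahc, devinfo, NULL, 0, 0, 0,
 *			 AHC_TRANS_CUR, TRUE);
 */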
1844 
1845 /*
1846  * Update the user/goal/curr tables of wide negotiation
1847  * parameters as well as, in the case of a current or active update,
1848  * any data structures on the host controller.  In the case of an
1849  * active update, the specified target is currently talking to us on
1850  * the bus, so the transfer parameter update must take effect
1851  * immediately.
1852  */
1853 void
1854 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1855 	      u_int width, u_int type, int paused)
1856 {
1857 	struct	ahc_initiator_tinfo *tinfo;
1858 	struct	ahc_tmode_tstate *tstate;
1859 	u_int	oldwidth;
1860 	int	active;
1861 	int	update_needed;
1862 
1863 	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1864 	update_needed = 0;
1865 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1866 				    devinfo->target, &tstate);
1867 
1868 	if ((type & AHC_TRANS_USER) != 0)
1869 		tinfo->user.width = width;
1870 
1871 	if ((type & AHC_TRANS_GOAL) != 0)
1872 		tinfo->goal.width = width;
1873 
1874 	oldwidth = tinfo->curr.width;
1875 	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
1876 		u_int	scsirate;
1877 
1878 		update_needed++;
1879 		scsirate =  tinfo->scsirate;
1880 		scsirate &= ~WIDEXFER;
1881 		if (width == MSG_EXT_WDTR_BUS_16_BIT)
1882 			scsirate |= WIDEXFER;
1883 
1884 		tinfo->scsirate = scsirate;
1885 
1886 		if (active)
1887 			ahc_outb(ahc, SCSIRATE, scsirate);
1888 
1889 		tinfo->curr.width = width;
1890 
1891 		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1892 			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
1893 		if (bootverbose) {
1894 			printf("%s: target %d using %dbit transfers\n",
1895 			       ahc_name(ahc), devinfo->target,
1896 			       8 * (0x01 << width));
1897 		}
1898 	}
1899 
1900 	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
1901 						tinfo, /*force*/FALSE);
1902 	if (update_needed)
1903 		ahc_update_pending_scbs(ahc);
1904 }
1905 
1906 /*
1907  * Update the current state of tagged queuing for a given target.
1908  */
1909 void
1910 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1911 	     ahc_queue_alg alg)
1912 {
1913  	ahc_platform_set_tags(ahc, devinfo, alg);
1914  	ahc_send_async(ahc, devinfo->channel, devinfo->target,
1915  		       devinfo->lun, AC_TRANSFER_NEG, &alg);
1916 }
1917 
1918 /*
1919  * When the transfer settings for a connection change, update any
1920  * in-transit SCBs to contain the new data so the hardware will
1921  * be set correctly during future (re)selections.
1922  */
1923 static void
1924 ahc_update_pending_scbs(struct ahc_softc *ahc)
1925 {
1926 	struct	scb *pending_scb;
1927 	int	pending_scb_count;
1928 	int	i;
1929 	int	paused;
1930 	u_int	saved_scbptr;
1931 
1932 	/*
1933 	 * Traverse the pending SCB list and ensure that all of the
1934 	 * SCBs there have the proper settings.
1935 	 */
1936 	pending_scb_count = 0;
1937 	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
1938 		struct ahc_devinfo devinfo;
1939 		struct hardware_scb *pending_hscb;
1940 		struct ahc_initiator_tinfo *tinfo;
1941 		struct ahc_tmode_tstate *tstate;
1942 
1943 		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
1944 		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
1945 					    devinfo.our_scsiid,
1946 					    devinfo.target, &tstate);
1947 		pending_hscb = pending_scb->hscb;
1948 		pending_hscb->control &= ~ULTRAENB;
1949 		if ((tstate->ultraenb & devinfo.target_mask) != 0)
1950 			pending_hscb->control |= ULTRAENB;
1951 		pending_hscb->scsirate = tinfo->scsirate;
1952 		pending_hscb->scsioffset = tinfo->curr.offset;
1953 		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
1954 		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
1955 			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
1956 			pending_hscb->control &= ~MK_MESSAGE;
1957 		}
1958 		ahc_sync_scb(ahc, pending_scb,
1959 			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1960 		pending_scb_count++;
1961 	}
1962 
1963 	if (pending_scb_count == 0)
1964 		return;
1965 
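	/*
	 * The sequencer must be paused while we directly modify SCB
	 * registers below.  Remember whether it was already paused so
	 * that we only unpause on exit if we paused it here.
	 */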
1966 	if (ahc_is_paused(ahc)) {
1967 		paused = 1;
1968 	} else {
1969 		paused = 0;
1970 		ahc_pause(ahc);
1971 	}
1972 
1973 	saved_scbptr = ahc_inb(ahc, SCBPTR);
1974 	/* Ensure that the hscbs down on the card match the new information */
1975 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
1976 		struct	hardware_scb *pending_hscb;
1977 		u_int	control;
1978 		u_int	scb_tag;
1979 
1980 		ahc_outb(ahc, SCBPTR, i);
1981 		scb_tag = ahc_inb(ahc, SCB_TAG);
1982 		pending_scb = ahc_lookup_scb(ahc, scb_tag);
1983 		if (pending_scb == NULL)
1984 			continue;
1985 
1986 		pending_hscb = pending_scb->hscb;
1987 		control = ahc_inb(ahc, SCB_CONTROL);
1988 		control &= ~(ULTRAENB|MK_MESSAGE);
1989 		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
1990 		ahc_outb(ahc, SCB_CONTROL, control);
1991 		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
1992 		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
1993 	}
1994 	ahc_outb(ahc, SCBPTR, saved_scbptr);
1995 
1996 	if (paused == 0)
1997 		ahc_unpause(ahc);
1998 }
1999 
2000 /**************************** Pathing Information *****************************/
2001 static void
2002 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2003 {
2004 	u_int	saved_scsiid;
2005 	role_t	role;
2006 	int	our_id;
2007 
2008 	if (ahc_inb(ahc, SSTAT0) & TARGET)
2009 		role = ROLE_TARGET;
2010 	else
2011 		role = ROLE_INITIATOR;
2012 
2013 	if (role == ROLE_TARGET
2014 	 && (ahc->features & AHC_MULTI_TID) != 0
2015 	 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
2016 		/* We were selected, so pull our id from TARGIDIN */
2017 		our_id = ahc_inb(ahc, TARGIDIN) & OID;
2018 	} else if ((ahc->features & AHC_ULTRA2) != 0)
2019 		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
2020 	else
2021 		our_id = ahc_inb(ahc, SCSIID) & OID;
2022 
2023 	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
2024 	ahc_compile_devinfo(devinfo,
2025 			    our_id,
2026 			    SCSIID_TARGET(ahc, saved_scsiid),
2027 			    ahc_inb(ahc, SAVED_LUN),
2028 			    SCSIID_CHANNEL(ahc, saved_scsiid),
2029 			    role);
2030 }
2031 
2032 struct ahc_phase_table_entry*
2033 ahc_lookup_phase_entry(int phase)
2034 {
2035 	struct ahc_phase_table_entry *entry;
2036 	struct ahc_phase_table_entry *last_entry;
2037 
2038 	/*
2039 	 * num_phases doesn't include the default entry which
2040 	 * will be returned if the phase doesn't match.
2041 	 */
2042 	last_entry = &ahc_phase_table[num_phases];
2043 	for (entry = ahc_phase_table; entry < last_entry; entry++) {
2044 		if (phase == entry->phase)
2045 			break;
2046 	}
2047 	return (entry);
2048 }
2049 
2050 void
2051 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
2052 		    u_int lun, char channel, role_t role)
2053 {
2054 	devinfo->our_scsiid = our_id;
2055 	devinfo->target = target;
2056 	devinfo->lun = lun;
2057 	devinfo->target_offset = target;
2058 	devinfo->channel = channel;
2059 	devinfo->role = role;
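	/*
	 * Targets on the 'B' channel of twin-channel adapters occupy
	 * slots 8-15 of the per-target bitmasks, so bias the offset
	 * before target_mask is computed below.
	 */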
2060 	if (channel == 'B')
2061 		devinfo->target_offset += 8;
2062 	devinfo->target_mask = (0x01 << devinfo->target_offset);
2063 }
2064 
2065 static void
2066 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2067 		struct scb *scb)
2068 {
2069 	role_t	role;
2070 	int	our_id;
2071 
2072 	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
2073 	role = ROLE_INITIATOR;
2074 	if ((scb->hscb->control & TARGET_SCB) != 0)
2075 		role = ROLE_TARGET;
2076 	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
2077 			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
2078 }
2079 
2080 
2081 /************************ Message Phase Processing ****************************/
2082 static void
2083 ahc_assert_atn(struct ahc_softc *ahc)
2084 {
2085 	u_int scsisigo;
2086 
2087 	scsisigo = ATNO;
2088 	if ((ahc->features & AHC_DT) == 0)
2089 		scsisigo |= ahc_inb(ahc, SCSISIGI);
2090 	ahc_outb(ahc, SCSISIGO, scsisigo);
2091 }
2092 
2093 /*
2094  * When an initiator transaction with the MK_MESSAGE flag either reconnects
2095  * or enters the initial message out phase, we are interrupted.  Fill our
2096  * outgoing message buffer with the appropriate message and begin handling
2097  * the message phase(s) manually.
2098  */
2099 static void
2100 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2101 			   struct scb *scb)
2102 {
2103 	/*
2104 	 * To facilitate adding multiple messages together,
2105 	 * each routine should increment the index and len
2106 	 * variables instead of setting them explicitly.
2107 	 */
2108 	ahc->msgout_index = 0;
2109 	ahc->msgout_len = 0;
2110 
2111 	if ((scb->flags & SCB_DEVICE_RESET) == 0
2112 	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
2113 		u_int identify_msg;
2114 
2115 		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
2116 		if ((scb->hscb->control & DISCENB) != 0)
2117 			identify_msg |= MSG_IDENTIFY_DISCFLAG;
2118 		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
2119 		ahc->msgout_len++;
2120 
2121 		if ((scb->hscb->control & TAG_ENB) != 0) {
2122 			ahc->msgout_buf[ahc->msgout_index++] =
2123 			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
2124 			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
2125 			ahc->msgout_len += 2;
2126 		}
2127 	}
2128 
2129 	if (scb->flags & SCB_DEVICE_RESET) {
2130 		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
2131 		ahc->msgout_len++;
2132 		ahc_print_path(ahc, scb);
2133 		printf("Bus Device Reset Message Sent\n");
2134 		/*
2135 		 * Clear our selection hardware in advance of
2136 		 * the busfree.  We may have an entry in the waiting
2137 		 * Q for this target, and we don't want to go about
2138 		 * selecting while we handle the busfree and blow it
2139 		 * away.
2140 		 */
2141 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2142 	} else if ((scb->flags & SCB_ABORT) != 0) {
2143 		if ((scb->hscb->control & TAG_ENB) != 0)
2144 			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
2145 		else
2146 			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
2147 		ahc->msgout_len++;
2148 		ahc_print_path(ahc, scb);
2149 		printf("Abort%s Message Sent\n",
2150 		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
2151 		/*
2152 		 * Clear our selection hardware in advance of
2153 		 * the busfree.  We may have an entry in the waiting
2154 		 * Q for this target, and we don't want to go about
2155 		 * selecting while we handle the busfree and blow it
2156 		 * away.
2157 		 */
2158 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2159 	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
2160 		ahc_build_transfer_msg(ahc, devinfo);
2161 	} else {
2162 		printf("ahc_intr: AWAITING_MSG for an SCB that "
2163 		       "does not have a waiting message\n");
2164 		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
2165 		       devinfo->target_mask);
2166 		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
2167 		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
2168 		      ahc_inb(ahc, MSG_OUT), scb->flags);
2169 	}
2170 
2171 	/*
2172 	 * Clear the MK_MESSAGE flag from the SCB so we aren't
2173 	 * asked to send this message again.
2174 	 */
2175 	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
2176 	scb->hscb->control &= ~MK_MESSAGE;
2177 	ahc->msgout_index = 0;
2178 	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2179 }
2180 
2181 /*
2182  * Build an appropriate transfer negotiation message for the
2183  * currently active target.
2184  */
2185 static void
2186 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2187 {
2188 	/*
2189 	 * We need to initiate transfer negotiations.
2190 	 * If our current and goal settings are identical, we must have
2191 	 * been asked to renegotiate (e.g. after a check condition).
2192 	 */
2193 	struct	ahc_initiator_tinfo *tinfo;
2194 	struct	ahc_tmode_tstate *tstate;
2195 	struct	ahc_syncrate *rate;
2196 	int	dowide;
2197 	int	dosync;
2198 	int	doppr;
2199 	int	use_ppr;
2200 	u_int	period;
2201 	u_int	ppr_options;
2202 	u_int	offset;
2203 
2204 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2205 				    devinfo->target, &tstate);
2206 	/*
2207 	 * Filter our period based on the current connection.
2208 	 * If we can't perform DT transfers on this segment (not in LVD
2209 	 * mode for instance), then our decision to issue a PPR message
2210 	 * may change.
2211 	 */
2212 	period = tinfo->goal.period;
2213 	ppr_options = tinfo->goal.ppr_options;
2214 	/* Target initiated PPR is not allowed in the SCSI spec */
2215 	if (devinfo->role == ROLE_TARGET)
2216 		ppr_options = 0;
2217 	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2218 				       &ppr_options, devinfo->role);
2219 	dowide = tinfo->curr.width != tinfo->goal.width;
2220 	dosync = tinfo->curr.period != period;
2221 	doppr = tinfo->curr.ppr_options != ppr_options;
2222 
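	/*
	 * If nothing differs between the current and goal settings, we
	 * must have been asked to renegotiate our existing settings
	 * (e.g. after a check condition), so renegotiate anything that
	 * differs from the narrow/async defaults.
	 */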
2223 	if (!dowide && !dosync && !doppr) {
2224 		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
2225 		dosync = tinfo->goal.period != 0;
2226 		doppr = tinfo->goal.ppr_options != 0;
2227 	}
2228 
2229 	if (!dowide && !dosync && !doppr) {
2230 		panic("ahc_intr: AWAITING_MSG for negotiation, "
2231 		      "but no negotiation needed\n");
2232 	}
2233 
2234 	use_ppr = (tinfo->curr.transport_version >= 3) || doppr;
2235 	/* Target initiated PPR is not allowed in the SCSI spec */
2236 	if (devinfo->role == ROLE_TARGET)
2237 		use_ppr = 0;
2238 
2239 	/*
2240 	 * Both the PPR message and SDTR message require the
2241 	 * goal syncrate to be limited to what the target device
2242 	 * is capable of handling (based on whether an LVD->SE
2243 	 * expander is on the bus), so combine these two cases.
2244 	 * Regardless, guarantee that if we are using both WDTR and SDTR
2245 	 * messages, WDTR comes first.
2246 	 */
2247 	if (use_ppr || (dosync && !dowide)) {
2248 
2249 		offset = tinfo->goal.offset;
2250 		ahc_validate_offset(ahc, tinfo, rate, &offset,
2251 				    use_ppr ? tinfo->goal.width
2252 					    : tinfo->curr.width,
2253 				    devinfo->role);
2254 		if (use_ppr) {
2255 			ahc_construct_ppr(ahc, devinfo, period, offset,
2256 					  tinfo->goal.width, ppr_options);
2257 		} else {
2258 			ahc_construct_sdtr(ahc, devinfo, period, offset);
2259 		}
2260 	} else {
2261 		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
2262 	}
2263 }
2264 
2265 /*
2266  * Build a synchronous negotiation message in our message
2267  * buffer based on the input parameters.
2268  */
2269 static void
2270 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2271 		   u_int period, u_int offset)
2272 {
2273 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2274 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
2275 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
2276 	ahc->msgout_buf[ahc->msgout_index++] = period;
2277 	ahc->msgout_buf[ahc->msgout_index++] = offset;
2278 	ahc->msgout_len += 5;
2279 	if (bootverbose) {
2280 		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
2281 		       ahc_name(ahc), devinfo->channel, devinfo->target,
2282 		       devinfo->lun, period, offset);
2283 	}
2284 }
2285 
2286 /*
2287  * Build a wide negotiation message in our message
2288  * buffer based on the input parameters.
2289  */
2290 static void
2291 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2292 		   u_int bus_width)
2293 {
2294 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2295 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
2296 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
2297 	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2298 	ahc->msgout_len += 4;
2299 	if (bootverbose) {
2300 		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
2301 		       ahc_name(ahc), devinfo->channel, devinfo->target,
2302 		       devinfo->lun, bus_width);
2303 	}
2304 }
2305 
2306 /*
2307  * Build a parallel protocol request message in our message
2308  * buffer based on the input parameters.
2309  */
2310 static void
2311 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2312 		  u_int period, u_int offset, u_int bus_width,
2313 		  u_int ppr_options)
2314 {
2315 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2316 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
2317 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
2318 	ahc->msgout_buf[ahc->msgout_index++] = period;
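	/* The next byte is the reserved field of the PPR message. */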
2319 	ahc->msgout_buf[ahc->msgout_index++] = 0;
2320 	ahc->msgout_buf[ahc->msgout_index++] = offset;
2321 	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2322 	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
2323 	ahc->msgout_len += 8;
2324 	if (bootverbose) {
2325 		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
2326 		       "offset %x, ppr_options %x\n", ahc_name(ahc),
2327 		       devinfo->channel, devinfo->target, devinfo->lun,
2328 		       bus_width, period, offset, ppr_options);
2329 	}
2330 }
2331 
2332 /*
2333  * Clear any active message state.
2334  */
2335 static void
2336 ahc_clear_msg_state(struct ahc_softc *ahc)
2337 {
2338 	ahc->msgout_len = 0;
2339 	ahc->msgin_index = 0;
2340 	ahc->msg_type = MSG_TYPE_NONE;
2341 	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
2342 		/*
2343 		 * The target didn't care to respond to our
2344 		 * message request, so clear ATN.
2345 		 */
2346 		ahc_outb(ahc, CLRSINT1, CLRATNO);
2347 	}
2348 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
2349 }
2350 
2351 /*
2352  * Manual message loop handler.
2353  */
2354 static void
2355 ahc_handle_message_phase(struct ahc_softc *ahc)
2356 {
2357 	struct	ahc_devinfo devinfo;
2358 	u_int	bus_phase;
2359 	int	end_session;
2360 
2361 	ahc_fetch_devinfo(ahc, &devinfo);
2362 	end_session = FALSE;
2363 	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2364 
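	/*
	 * Dispatch on the type of message session in progress.  The
	 * phase mismatch handling below may change msg_type and jump
	 * back to reswitch to re-dispatch without leaving this handler.
	 */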
2365 reswitch:
2366 	switch (ahc->msg_type) {
2367 	case MSG_TYPE_INITIATOR_MSGOUT:
2368 	{
2369 		int lastbyte;
2370 		int phasemis;
2371 		int msgdone;
2372 
2373 		if (ahc->msgout_len == 0)
2374 			panic("HOST_MSG_LOOP interrupt with no active message");
2375 
2376 		phasemis = bus_phase != P_MESGOUT;
2377 		if (phasemis) {
2378 			if (bus_phase == P_MESGIN) {
2379 				/*
2380 				 * Change gears and see if
2381 				 * this message is of interest to
2382 				 * us or should be passed back to
2383 				 * the sequencer.
2384 				 */
2385 				ahc_outb(ahc, CLRSINT1, CLRATNO);
2386 				ahc->send_msg_perror = FALSE;
2387 				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
2388 				ahc->msgin_index = 0;
2389 				goto reswitch;
2390 			}
2391 			end_session = TRUE;
2392 			break;
2393 		}
2394 
2395 		if (ahc->send_msg_perror) {
2396 			ahc_outb(ahc, CLRSINT1, CLRATNO);
2397 			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2398 			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
2399 			break;
2400 		}
2401 
2402 		msgdone	= ahc->msgout_index == ahc->msgout_len;
2403 		if (msgdone) {
2404 			/*
2405 			 * The target has requested a retry.
2406 			 * Re-assert ATN, reset our message index to
2407 			 * 0, and try again.
2408 			 */
2409 			ahc->msgout_index = 0;
2410 			ahc_assert_atn(ahc);
2411 		}
2412 
2413 		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
2414 		if (lastbyte) {
2415 			/* Last byte is signified by dropping ATN */
2416 			ahc_outb(ahc, CLRSINT1, CLRATNO);
2417 		}
2418 
2419 		/*
2420 		 * Clear our interrupt status and present
2421 		 * the next byte on the bus.
2422 		 */
2423 		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2424 		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2425 		break;
2426 	}
2427 	case MSG_TYPE_INITIATOR_MSGIN:
2428 	{
2429 		int phasemis;
2430 		int message_done;
2431 
2432 		phasemis = bus_phase != P_MESGIN;
2433 
2434 		if (phasemis) {
2435 			ahc->msgin_index = 0;
2436 			if (bus_phase == P_MESGOUT
2437 			 && (ahc->send_msg_perror == TRUE
2438 			  || (ahc->msgout_len != 0
2439 			   && ahc->msgout_index == 0))) {
2440 				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2441 				goto reswitch;
2442 			}
2443 			end_session = TRUE;
2444 			break;
2445 		}
2446 
2447 		/* Pull the byte in without acking it */
2448 		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
2449 
2450 		message_done = ahc_parse_msg(ahc, &devinfo);
2451 
2452 		if (message_done) {
2453 			/*
2454 			 * Clear our incoming message buffer in case there
2455 			 * is another message following this one.
2456 			 */
2457 			ahc->msgin_index = 0;
2458 
2459 			/*
2460 			 * If this message elicited a response,
2461 			 * assert ATN so the target takes us to the
2462 			 * message out phase.
2463 			 */
2464 			if (ahc->msgout_len != 0)
2465 				ahc_assert_atn(ahc);
2466 		} else
2467 			ahc->msgin_index++;
2468 
2469 		/* Ack the byte */
2470 		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2471 		ahc_inb(ahc, SCSIDATL);
2472 		break;
2473 	}
2474 	case MSG_TYPE_TARGET_MSGIN:
2475 	{
2476 		int msgdone;
2477 		int msgout_request;
2478 
2479 		if (ahc->msgout_len == 0)
2480 			panic("Target MSGIN with no active message");
2481 
2482 		/*
2483 		 * If we interrupted a mesgout session, the initiator
2484 		 * will not know this until our first REQ.  So, we
2485 		 * only honor mesgout requests after we've sent our
2486 		 * first byte.
2487 		 */
2488 		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
2489 		 && ahc->msgout_index > 0)
2490 			msgout_request = TRUE;
2491 		else
2492 			msgout_request = FALSE;
2493 
2494 		if (msgout_request) {
2495 
2496 			/*
2497 			 * Change gears and see if
2498 			 * this message is of interest to
2499 			 * us or should be passed back to
2500 			 * the sequencer.
2501 			 */
2502 			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
2503 			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
2504 			ahc->msgin_index = 0;
2505 			/* Dummy read to REQ for first byte */
2506 			ahc_inb(ahc, SCSIDATL);
2507 			ahc_outb(ahc, SXFRCTL0,
2508 				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2509 			break;
2510 		}
2511 
2512 		msgdone = ahc->msgout_index == ahc->msgout_len;
2513 		if (msgdone) {
2514 			ahc_outb(ahc, SXFRCTL0,
2515 				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2516 			end_session = TRUE;
2517 			break;
2518 		}
2519 
2520 		/*
2521 		 * Present the next byte on the bus.
2522 		 */
2523 		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2524 		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2525 		break;
2526 	}
2527 	case MSG_TYPE_TARGET_MSGOUT:
2528 	{
2529 		int lastbyte;
2530 		int msgdone;
2531 
2532 		/*
2533 		 * The initiator signals that this is
2534 		 * the last byte by dropping ATN.
2535 		 */
2536 		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
2537 
2538 		/*
2539 		 * Read the latched byte, but turn off SPIOEN first
2540 		 * so that we don't inadvertently cause a REQ for the
2541 		 * next byte.
2542 		 */
2543 		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2544 		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
2545 		msgdone = ahc_parse_msg(ahc, &devinfo);
2546 		if (msgdone == MSGLOOP_TERMINATED) {
2547 			/*
2548 			 * The message is *really* done in that it caused
2549 			 * us to go to bus free.  The sequencer has already
2550 			 * been reset at this point, so pull the ejection
2551 			 * handle.
2552 			 */
2553 			return;
2554 		}
2555 
2556 		ahc->msgin_index++;
2557 
2558 		/*
2559 		 * XXX Read spec about initiator dropping ATN too soon
2560 		 *     and use msgdone to detect it.
2561 		 */
2562 		if (msgdone == MSGLOOP_MSGCOMPLETE) {
2563 			ahc->msgin_index = 0;
2564 
2565 			/*
2566 			 * If this message elicited a response, transition
2567 			 * to the Message in phase and send it.
2568 			 */
2569 			if (ahc->msgout_len != 0) {
2570 				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
2571 				ahc_outb(ahc, SXFRCTL0,
2572 					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2573 				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
2574 				ahc->msgin_index = 0;
2575 				break;
2576 			}
2577 		}
2578 
2579 		if (lastbyte)
2580 			end_session = TRUE;
2581 		else {
2582 			/* Ask for the next byte. */
2583 			ahc_outb(ahc, SXFRCTL0,
2584 				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2585 		}
2586 
2587 		break;
2588 	}
2589 	default:
2590 		panic("Unknown REQINIT message type");
2591 	}
2592 
2593 	if (end_session) {
2594 		ahc_clear_msg_state(ahc);
2595 		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
2596 	} else
2597 		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
2598 }
2599 
2600 /*
2601  * See if we sent a particular extended message to the target.
2602  * If "full" is true, return true only if the target saw the full
2603  * message.  If "full" is false, return true if the target saw at
2604  * least the first byte of the message.
2605  */
2606 static int
2607 ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
2608 {
2609 	int found;
2610 	u_int index;
2611 
2612 	found = FALSE;
2613 	index = 0;
2614 
2615 	while (index < ahc->msgout_len) {
2616 		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
2617 			u_int end_index;
2618 
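			/*
			 * Extended messages are laid out as
			 * MSG_EXTENDED, length, opcode, arguments, so
			 * the message ends at index + 1 + length.
			 */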
2619 			end_index = index + 1 + ahc->msgout_buf[index + 1];
2620 			if (ahc->msgout_buf[index+2] == msgval
2621 			 && type == AHCMSG_EXT) {
2622 
2623 				if (full) {
2624 					if (ahc->msgout_index > end_index)
2625 						found = TRUE;
2626 				} else if (ahc->msgout_index > index)
2627 					found = TRUE;
2628 			}
2629 			index = end_index;
2630 		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
2631 			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
2632 
2633 			/* Skip tag type and tag id or residue param */
2634 			index += 2;
2635 		} else {
2636 			/* Single byte message */
2637 			if (type == AHCMSG_1B
2638 			 && ahc->msgout_buf[index] == msgval
2639 			 && ahc->msgout_index > index)
2640 				found = TRUE;
2641 			index++;
2642 		}
2643 
2644 		if (found)
2645 			break;
2646 	}
2647 	return (found);
2648 }
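
/*
 * Example (mirrors the SDTR/WDTR handling below): callers distinguish
 * negotiations we initiated from target-initiated ones by checking for
 * the corresponding request in our outgoing message buffer:
 *
 *	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
 *		... we started it; reject the reply if it was degraded ...
 *	}
 */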
2649 
2650 /*
2651  * Wait for a complete incoming message, parse it, and respond accordingly.
2652  */
2653 static int
2654 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2655 {
2656 	struct	ahc_initiator_tinfo *tinfo;
2657 	struct	ahc_tmode_tstate *tstate;
2658 	int	reject;
2659 	int	done;
2660 	int	response;
2661 	u_int	targ_scsirate;
2662 
2663 	done = MSGLOOP_IN_PROG;
2664 	response = FALSE;
2665 	reject = FALSE;
2666 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2667 				    devinfo->target, &tstate);
2668 	targ_scsirate = tinfo->scsirate;
2669 
2670 	/*
2671 	 * Parse as much of the message as is available,
2672 	 * rejecting it if we don't support it.  When
2673 	 * the entire message is available and has been
2674 	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
2675 	 * that we have parsed an entire message.
2676 	 *
2677 	 * In the case of extended messages, we accept the length
2678 	 * byte outright and perform more checking once we know the
2679 	 * extended message type.
2680 	 */
2681 	switch (ahc->msgin_buf[0]) {
2682 	case MSG_MESSAGE_REJECT:
2683 		response = ahc_handle_msg_reject(ahc, devinfo);
2684 		/* FALLTHROUGH */
2685 	case MSG_NOOP:
2686 		done = MSGLOOP_MSGCOMPLETE;
2687 		break;
2688 	case MSG_EXTENDED:
2689 	{
2690 		/* Wait for enough of the message to begin validation */
2691 		if (ahc->msgin_index < 2)
2692 			break;
2693 		switch (ahc->msgin_buf[2]) {
2694 		case MSG_EXT_SDTR:
2695 		{
2696 			struct	 ahc_syncrate *syncrate;
2697 			u_int	 period;
2698 			u_int	 ppr_options;
2699 			u_int	 offset;
2700 			u_int	 saved_offset;
2701 
2702 			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
2703 				reject = TRUE;
2704 				break;
2705 			}
2706 
2707 			/*
2708 			 * Wait until we have both args before validating
2709 			 * and acting on this message.
2710 			 *
2711 			 * Add one to MSG_EXT_SDTR_LEN to account for
2712 			 * the extended message preamble.
2713 			 */
2714 			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
2715 				break;
2716 
2717 			period = ahc->msgin_buf[3];
2718 			ppr_options = 0;
2719 			saved_offset = offset = ahc->msgin_buf[4];
2720 			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2721 							   &ppr_options,
2722 							   devinfo->role);
2723 			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
2724 					    targ_scsirate & WIDEXFER,
2725 					    devinfo->role);
2726 			if (bootverbose) {
2727 				printf("(%s:%c:%d:%d): Received "
2728 				       "SDTR period %x, offset %x\n\t"
2729 				       "Filtered to period %x, offset %x\n",
2730 				       ahc_name(ahc), devinfo->channel,
2731 				       devinfo->target, devinfo->lun,
2732 				       ahc->msgin_buf[3], saved_offset,
2733 				       period, offset);
2734 			}
2735 			ahc_set_syncrate(ahc, devinfo,
2736 					 syncrate, period,
2737 					 offset, ppr_options,
2738 					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2739 					 /*paused*/TRUE);
2740 
2741 			/*
2742 			 * See if we initiated Sync Negotiation
2743 			 * and didn't have to fall down to async
2744 			 * transfers.
2745 			 */
2746 			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
2747 				/* We started it */
2748 				if (saved_offset != offset) {
2749 					/* Went too low - force async */
2750 					reject = TRUE;
2751 				}
2752 			} else {
2753 				/*
2754 				 * Send our own SDTR in reply
2755 				 */
2756 				if (bootverbose
2757 				 && devinfo->role == ROLE_INITIATOR) {
2758 					printf("(%s:%c:%d:%d): Target "
2759 					       "Initiated SDTR\n",
2760 					       ahc_name(ahc), devinfo->channel,
2761 					       devinfo->target, devinfo->lun);
2762 				}
2763 				ahc->msgout_index = 0;
2764 				ahc->msgout_len = 0;
2765 				ahc_construct_sdtr(ahc, devinfo,
2766 						   period, offset);
2767 				ahc->msgout_index = 0;
2768 				response = TRUE;
2769 			}
2770 			done = MSGLOOP_MSGCOMPLETE;
2771 			break;
2772 		}
2773 		case MSG_EXT_WDTR:
2774 		{
2775 			u_int bus_width;
2776 			u_int saved_width;
2777 			u_int sending_reply;
2778 
2779 			sending_reply = FALSE;
2780 			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
2781 				reject = TRUE;
2782 				break;
2783 			}
2784 
2785 			/*
2786 			 * Wait until we have our arg before validating
2787 			 * and acting on this message.
2788 			 *
2789 			 * Add one to MSG_EXT_WDTR_LEN to account for
2790 			 * the extended message preamble.
2791 			 */
2792 			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
2793 				break;
2794 
2795 			bus_width = ahc->msgin_buf[3];
2796 			saved_width = bus_width;
2797 			ahc_validate_width(ahc, tinfo, &bus_width,
2798 					   devinfo->role);
2799 			if (bootverbose) {
2800 				printf("(%s:%c:%d:%d): Received WDTR "
2801 				       "%x filtered to %x\n",
2802 				       ahc_name(ahc), devinfo->channel,
2803 				       devinfo->target, devinfo->lun,
2804 				       saved_width, bus_width);
2805 			}
2806 
2807 			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
2808 				/*
2809 				 * Don't send a WDTR back to the
2810 				 * target, since we asked first.
2811 				 * If the width went higher than our
2812 				 * request, reject it.
2813 				 */
2814 				if (saved_width > bus_width) {
2815 					reject = TRUE;
2816 					printf("(%s:%c:%d:%d): requested %dBit "
2817 					       "transfers.  Rejecting...\n",
2818 					       ahc_name(ahc), devinfo->channel,
2819 					       devinfo->target, devinfo->lun,
2820 					       8 * (0x01 << bus_width));
2821 					bus_width = 0;
2822 				}
2823 			} else {
2824 				/*
2825 				 * Send our own WDTR in reply
2826 				 */
2827 				if (bootverbose
2828 				 && devinfo->role == ROLE_INITIATOR) {
2829 					printf("(%s:%c:%d:%d): Target "
2830 					       "Initiated WDTR\n",
2831 					       ahc_name(ahc), devinfo->channel,
2832 					       devinfo->target, devinfo->lun);
2833 				}
2834 				ahc->msgout_index = 0;
2835 				ahc->msgout_len = 0;
2836 				ahc_construct_wdtr(ahc, devinfo, bus_width);
2837 				ahc->msgout_index = 0;
2838 				response = TRUE;
2839 				sending_reply = TRUE;
2840 			}
2841 			ahc_set_width(ahc, devinfo, bus_width,
2842 				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2843 				      /*paused*/TRUE);
2844 			/* After a wide message, we are async */
2845 			ahc_set_syncrate(ahc, devinfo,
2846 					 /*syncrate*/NULL, /*period*/0,
2847 					 /*offset*/0, /*ppr_options*/0,
2848 					 AHC_TRANS_ACTIVE, /*paused*/TRUE);
2849 			if (sending_reply == FALSE && reject == FALSE) {
2850 
2851 				if (tinfo->goal.period) {
2852 					ahc->msgout_index = 0;
2853 					ahc->msgout_len = 0;
2854 					ahc_build_transfer_msg(ahc, devinfo);
2855 					ahc->msgout_index = 0;
2856 					response = TRUE;
2857 				}
2858 			}
2859 			done = MSGLOOP_MSGCOMPLETE;
2860 			break;
2861 		}
2862 		case MSG_EXT_PPR:
2863 		{
2864 			struct	ahc_syncrate *syncrate;
2865 			u_int	period;
2866 			u_int	offset;
2867 			u_int	bus_width;
2868 			u_int	ppr_options;
2869 			u_int	saved_width;
2870 			u_int	saved_offset;
2871 			u_int	saved_ppr_options;
2872 
2873 			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
2874 				reject = TRUE;
2875 				break;
2876 			}
2877 
2878 			/*
2879 			 * Wait until we have all args before validating
2880 			 * and acting on this message.
2881 			 *
2882 			 * Add one to MSG_EXT_PPR_LEN to account for
2883 			 * the extended message preamble.
2884 			 */
2885 			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
2886 				break;
2887 
2888 			period = ahc->msgin_buf[3];
2889 			offset = ahc->msgin_buf[5];
2890 			bus_width = ahc->msgin_buf[6];
2891 			saved_width = bus_width;
2892 			ppr_options = ahc->msgin_buf[7];
2893 			/*
2894 			 * According to the spec, a DT-only
2895 			 * period factor with no DT option
2896 			 * set implies async.
2897 			 */
2898 			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
2899 			 && period == 9)
2900 				offset = 0;
2901 			saved_ppr_options = ppr_options;
2902 			saved_offset = offset;
2903 
2904 			/*
2905 			 * Mask out any options we don't support
2906 			 * on any controller.  Transfer options are
2907 			 * only available if we are negotiating wide.
2908 			 */
2909 			ppr_options &= MSG_EXT_PPR_DT_REQ;
2910 			if (bus_width == 0)
2911 				ppr_options = 0;
2912 
2913 			ahc_validate_width(ahc, tinfo, &bus_width,
2914 					   devinfo->role);
2915 			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2916 							   &ppr_options,
2917 							   devinfo->role);
2918 			ahc_validate_offset(ahc, tinfo, syncrate,
2919 					    &offset, bus_width,
2920 					    devinfo->role);
2921 
2922 			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
2923 				/*
2924 				 * If we are unable to do any of the
2925 				 * requested options (we went too low),
2926 				 * then we'll have to reject the message.
2927 				 */
2928 				if (saved_width > bus_width
2929 				 || saved_offset != offset
2930 				 || saved_ppr_options != ppr_options) {
2931 					reject = TRUE;
2932 					period = 0;
2933 					offset = 0;
2934 					bus_width = 0;
2935 					ppr_options = 0;
2936 					syncrate = NULL;
2937 				}
2938 			} else {
2939 				if (devinfo->role != ROLE_TARGET)
2940 					printf("(%s:%c:%d:%d): Target "
2941 					       "Initiated PPR\n",
2942 					       ahc_name(ahc), devinfo->channel,
2943 					       devinfo->target, devinfo->lun);
2944 				else
2945 					printf("(%s:%c:%d:%d): Initiator "
2946 					       "Initiated PPR\n",
2947 					       ahc_name(ahc), devinfo->channel,
2948 					       devinfo->target, devinfo->lun);
2949 				ahc->msgout_index = 0;
2950 				ahc->msgout_len = 0;
2951 				ahc_construct_ppr(ahc, devinfo, period, offset,
2952 						  bus_width, ppr_options);
2953 				ahc->msgout_index = 0;
2954 				response = TRUE;
2955 			}
2956 			if (bootverbose) {
2957 				printf("(%s:%c:%d:%d): Received PPR width %x, "
2958 				       "period %x, offset %x, options %x\n"
2959 				       "\tFiltered to width %x, period %x, "
2960 				       "offset %x, options %x\n",
2961 				       ahc_name(ahc), devinfo->channel,
2962 				       devinfo->target, devinfo->lun,
2963 				       saved_width, ahc->msgin_buf[3],
2964 				       saved_offset, saved_ppr_options,
2965 				       bus_width, period, offset, ppr_options);
2966 			}
2967 			ahc_set_width(ahc, devinfo, bus_width,
2968 				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2969 				      /*paused*/TRUE);
2970 			ahc_set_syncrate(ahc, devinfo,
2971 					 syncrate, period,
2972 					 offset, ppr_options,
2973 					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2974 					 /*paused*/TRUE);
2975 			done = MSGLOOP_MSGCOMPLETE;
2976 			break;
2977 		}
2978 		default:
2979 			/* Unknown extended message.  Reject it. */
2980 			reject = TRUE;
2981 			break;
2982 		}
2983 		break;
2984 	}
2985 	case MSG_BUS_DEV_RESET:
2986 		ahc_handle_devreset(ahc, devinfo,
2987 				    CAM_BDR_SENT,
2988 				    "Bus Device Reset Received",
2989 				    /*verbose_level*/0);
2990 		ahc_restart(ahc);
2991 		done = MSGLOOP_TERMINATED;
2992 		break;
2993 	case MSG_ABORT_TAG:
2994 	case MSG_ABORT:
2995 	case MSG_CLEAR_QUEUE:
2996 #ifdef AHC_TARGET_MODE
2997 		/* Target mode messages */
2998 		if (devinfo->role != ROLE_TARGET) {
2999 			reject = TRUE;
3000 			break;
3001 		}
3002 		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3003 			       devinfo->lun,
3004 			       ahc->msgin_buf[0] == MSG_ABORT_TAG
3005 						  ? SCB_LIST_NULL
3006 						  : ahc_inb(ahc, INITIATOR_TAG),
3007 			       ROLE_TARGET, CAM_REQ_ABORTED);
3008 
3009 		tstate = ahc->enabled_targets[devinfo->our_scsiid];
3010 		if (tstate != NULL) {
3011 			struct ahc_tmode_lstate* lstate;
3012 
3013 			lstate = tstate->enabled_luns[devinfo->lun];
3014 			if (lstate != NULL) {
3015 				ahc_queue_lstate_event(ahc, lstate,
3016 						       devinfo->our_scsiid,
3017 						       ahc->msgin_buf[0],
3018 						       /*arg*/0);
3019 				ahc_send_lstate_events(ahc, lstate);
3020 			}
3021 		}
3022 		done = MSGLOOP_MSGCOMPLETE;
3023 		break;
3024 #endif
3025 	case MSG_TERM_IO_PROC:
3026 	default:
3027 		reject = TRUE;
3028 		break;
3029 	}
3030 
3031 	if (reject) {
3032 		/*
3033 		 * Setup to reject the message.
3034 		 */
3035 		ahc->msgout_index = 0;
3036 		ahc->msgout_len = 1;
3037 		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
3038 		done = MSGLOOP_MSGCOMPLETE;
3039 		response = TRUE;
3040 	}
3041 
3042 	if (done != MSGLOOP_IN_PROG && !response)
3043 		/* Clear the outgoing message buffer */
3044 		ahc->msgout_len = 0;
3045 
3046 	return (done);
3047 }
3048 
3049 /*
3050  * Process a message reject message.
3051  */
3052 static int
3053 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3054 {
3055 	/*
3056 	 * What we care about here is if we had an
3057 	 * outstanding SDTR or WDTR message for this
3058 	 * target.  If we did, this is a signal that
3059 	 * the target is refusing negotiation.
3060 	 */
3061 	struct scb *scb;
3062 	struct ahc_initiator_tinfo *tinfo;
3063 	struct ahc_tmode_tstate *tstate;
3064 	u_int scb_index;
3065 	u_int last_msg;
3066 	int   response = 0;
3067 
3068 	scb_index = ahc_inb(ahc, SCB_TAG);
3069 	scb = ahc_lookup_scb(ahc, scb_index);
3070 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
3071 				    devinfo->our_scsiid,
3072 				    devinfo->target, &tstate);
3073 	/* Might be necessary */
3074 	last_msg = ahc_inb(ahc, LAST_MSG);
3075 
3076 	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
3077 		/*
3078 		 * Target does not support the PPR message.
3079 		 * Attempt to negotiate SPI-2 style.
3080 		 */
3081 		if (bootverbose) {
3082 			printf("(%s:%c:%d:%d): PPR Rejected. "
3083 			       "Trying WDTR/SDTR\n",
3084 			       ahc_name(ahc), devinfo->channel,
3085 			       devinfo->target, devinfo->lun);
3086 		}
3087 		tinfo->goal.ppr_options = 0;
3088 		tinfo->curr.transport_version = 2;
3089 		tinfo->goal.transport_version = 2;
3090 		ahc->msgout_index = 0;
3091 		ahc->msgout_len = 0;
3092 		ahc_build_transfer_msg(ahc, devinfo);
3093 		ahc->msgout_index = 0;
3094 		response = 1;
3095 	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
3096 
3097 		/* note 8bit xfers */
3098 		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
3099 		       "8bit transfers\n", ahc_name(ahc),
3100 		       devinfo->channel, devinfo->target, devinfo->lun);
3101 		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3102 			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3103 			      /*paused*/TRUE);
3104 		/*
3105 		 * No need to clear the sync rate.  If the target
3106 		 * did not accept the command, our syncrate is
3107 		 * unaffected.  If the target started the negotiation,
3108 		 * but rejected our response, we already cleared the
3109 		 * sync rate before sending our WDTR.
3110 		 */
3111 		if (tinfo->goal.period) {
3112 
3113 			/* Start the sync negotiation */
3114 			ahc->msgout_index = 0;
3115 			ahc->msgout_len = 0;
3116 			ahc_build_transfer_msg(ahc, devinfo);
3117 			ahc->msgout_index = 0;
3118 			response = 1;
3119 		}
3120 	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
3121 		/* note asynch xfers and clear flag */
3122 		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
3123 				 /*offset*/0, /*ppr_options*/0,
3124 				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3125 				 /*paused*/TRUE);
3126 		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
3127 		       "Using asynchronous transfers\n",
3128 		       ahc_name(ahc), devinfo->channel,
3129 		       devinfo->target, devinfo->lun);
3130 	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
3131 		int tag_type;
3132 		int mask;
3133 
3134 		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
3135 
3136 		if (tag_type == MSG_SIMPLE_TASK) {
3137 			printf("(%s:%c:%d:%d): refuses tagged commands.  "
3138 			       "Performing non-tagged I/O\n", ahc_name(ahc),
3139 			       devinfo->channel, devinfo->target, devinfo->lun);
3140 			ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE);
3141 			mask = ~0x23;
3142 		} else {
3143 			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
3144 			       "Performing simple queue tagged I/O only\n",
3145 			       ahc_name(ahc), devinfo->channel, devinfo->target,
3146 			       devinfo->lun, tag_type == MSG_ORDERED_TASK
3147 			       ? "ordered" : "head of queue");
3148 			ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC);
3149 			mask = ~0x03;
3150 		}
3151 
3152 		/*
3153 		 * Resend the identify for this CCB as the target
3154 		 * may believe that the selection is invalid otherwise.
3155 		 */
3156 		ahc_outb(ahc, SCB_CONTROL,
3157 			 ahc_inb(ahc, SCB_CONTROL) & mask);
3158 	 	scb->hscb->control &= mask;
3159 		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
3160 					/*type*/MSG_SIMPLE_TASK);
3161 		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3162 		ahc_assert_atn(ahc);
3163 
3164 		/*
3165 		 * This transaction is now at the head of
3166 		 * the untagged queue for this target.
3167 		 */
3168 		if ((ahc->flags & AHC_SCB_BTT) == 0) {
3169 			struct scb_tailq *untagged_q;
3170 
3171 			untagged_q =
3172 			    &(ahc->untagged_queues[devinfo->target_offset]);
3173 			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
3174 			scb->flags |= SCB_UNTAGGEDQ;
3175 		}
3176 		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
3177 			     scb->hscb->tag);
3178 
3179 		/*
3180 		 * Requeue all tagged commands for this target
3181 		 * currently in our possession so they can be
3182 		 * converted to untagged commands.
3183 		 */
3184 		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3185 				   SCB_GET_CHANNEL(ahc, scb),
3186 				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3187 				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
3188 				   SEARCH_COMPLETE);
3189 	} else {
3190 		/*
3191 		 * Otherwise, we ignore it.
3192 		 */
3193 		printf("%s:%c:%d: Message reject for %x -- ignored\n",
3194 		       ahc_name(ahc), devinfo->channel, devinfo->target,
3195 		       last_msg);
3196 	}
3197 	return (response);
3198 }
3199 
3200 /*
3201  * Process an ignore wide residue message.
3202  */
3203 static void
3204 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3205 {
3206 	u_int scb_index;
3207 	struct scb *scb;
3208 
3209 	scb_index = ahc_inb(ahc, SCB_TAG);
3210 	scb = ahc_lookup_scb(ahc, scb_index);
3211 	/*
3212 	 * XXX Actually check data direction in the sequencer?
3213 	 * Perhaps add datadir to some spare bits in the hscb?
3214 	 */
3215 	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
3216 	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
3217 		/*
3218 		 * Ignore the message if we haven't
3219 		 * seen an appropriate data phase yet.
3220 		 */
3221 	} else {
3222 		/*
3223 		 * If the residual occurred on the last
3224 		 * transfer and the transfer request was
3225 		 * expected to end on an odd count, do
3226 		 * nothing.  Otherwise, subtract a byte
3227 		 * and update the residual count accordingly.
3228 		 */
3229 		uint32_t sgptr;
3230 
3231 		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3232 		if ((sgptr & SG_LIST_NULL) != 0
3233 		 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
3234 			/*
3235 			 * If the residual occurred on the last
3236 			 * transfer and the transfer request was
3237 			 * expected to end on an odd count, do
3238 			 * nothing.
3239 			 */
3240 		} else {
3241 			struct ahc_dma_seg *sg;
3242 			uint32_t data_cnt;
3243 			uint32_t data_addr;
3244 			uint32_t sglen;
3245 
3246 			/* Pull in the rest of the sgptr */
3247 			sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3248 			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3249 			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
3250 			sgptr &= SG_PTR_MASK;
3251 			data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+3) << 24)
3252 				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
3253 				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
3254 				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
3255 
3256 			data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
3257 				  | (ahc_inb(ahc, SHADDR + 2) << 16)
3258 				  | (ahc_inb(ahc, SHADDR + 1) << 8)
3259 				  | (ahc_inb(ahc, SHADDR));
3260 
3261 			data_cnt += 1;
3262 			data_addr -= 1;
3263 
3264 			sg = ahc_sg_bus_to_virt(scb, sgptr);
3265 			/*
3266 			 * The residual sg ptr points to the next S/G
3267 			 * to load so we must go back one.
3268 			 */
3269 			sg--;
3270 			sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
3271 			if (sg != scb->sg_list
3272 			 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
3273 
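				/*
				 * The byte we are giving back belongs
				 * to the previous S/G segment, so back
				 * up one more entry and point at that
				 * segment's final byte.
				 */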
3274 				sg--;
3275 				sglen = ahc_le32toh(sg->len);
3276 				/*
3277 				 * Preserve High Address and SG_LIST bits
3278 				 * while setting the count to 1.
3279 				 */
3280 				data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
3281 				data_addr = ahc_le32toh(sg->addr)
3282 					  + (sglen & AHC_SG_LEN_MASK) - 1;
3283 
3284 				/*
3285 				 * Increment sg so it points to the
3286 				 * "next" sg.
3287 				 */
3288 				sg++;
3289 				sgptr = ahc_sg_virt_to_bus(scb, sg);
3290 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
3291 					 sgptr >> 24);
3292 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
3293 					 sgptr >> 16);
3294 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
3295 					 sgptr >> 8);
3296 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
3297 			}
3298 
3299 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
3300 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
3301 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
3302 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
3303 		}
3304 	}
3305 }
3306 
3307 
3308 /*
3309  * Reinitialize the data pointers for the active transfer
3310  * based on its current residual.
3311  */
3312 static void
3313 ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
3314 {
3315 	struct	 scb *scb;
3316 	struct	 ahc_dma_seg *sg;
3317 	u_int	 scb_index;
3318 	uint32_t sgptr;
3319 	uint32_t resid;
3320 	uint32_t dataptr;
3321 
3322 	scb_index = ahc_inb(ahc, SCB_TAG);
3323 	scb = ahc_lookup_scb(ahc, scb_index);
3324 	sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3325 	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3326 	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
3327 	      |	ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3328 
3329 	sgptr &= SG_PTR_MASK;
3330 	sg = ahc_sg_bus_to_virt(scb, sgptr);
3331 
3332 	/* The residual sg_ptr always points to the next sg */
3333 	sg--;
3334 
3335 	resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
3336 	      | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
3337 	      | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);
3338 
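	/*
	 * Resume the transfer at segment start + segment length minus
	 * the residual, i.e. just past the last byte successfully
	 * transferred.
	 */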
3339 	dataptr = ahc_le32toh(sg->addr)
3340 		+ (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
3341 		- resid;
3342 	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
3343 		u_int dscommand1;
3344 
3345 		dscommand1 = ahc_inb(ahc, DSCOMMAND1);
3346 		ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
3347 		ahc_outb(ahc, HADDR,
3348 			 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
3349 		ahc_outb(ahc, DSCOMMAND1, dscommand1);
3350 	}
3351 	ahc_outb(ahc, HADDR + 3, dataptr >> 24);
3352 	ahc_outb(ahc, HADDR + 2, dataptr >> 16);
3353 	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
3354 	ahc_outb(ahc, HADDR, dataptr);
3355 	ahc_outb(ahc, HCNT + 2, resid >> 16);
3356 	ahc_outb(ahc, HCNT + 1, resid >> 8);
3357 	ahc_outb(ahc, HCNT, resid);
3358 	if ((ahc->features & AHC_ULTRA2) == 0) {
3359 		ahc_outb(ahc, STCNT + 2, resid >> 16);
3360 		ahc_outb(ahc, STCNT + 1, resid >> 8);
3361 		ahc_outb(ahc, STCNT, resid);
3362 	}
3363 }
3364 
3365 /*
3366  * Handle the effects of issuing a bus device reset message.
3367  */
3368 static void
3369 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3370 		    cam_status status, char *message, int verbose_level)
3371 {
3372 #ifdef AHC_TARGET_MODE
3373 	struct ahc_tmode_tstate* tstate;
3374 	u_int lun;
3375 #endif
3376 	int found;
3377 
3378 	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3379 			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
3380 			       status);
3381 
3382 #ifdef AHC_TARGET_MODE
3383 	/*
3384 	 * Send an immediate notify ccb to all target mode peripheral
3385 	 * drivers affected by this action.
3386 	 */
3387 	tstate = ahc->enabled_targets[devinfo->our_scsiid];
3388 	if (tstate != NULL) {
3389 		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
3390 			struct ahc_tmode_lstate* lstate;
3391 
3392 			lstate = tstate->enabled_luns[lun];
3393 			if (lstate == NULL)
3394 				continue;
3395 
3396 			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
3397 					       MSG_BUS_DEV_RESET, /*arg*/0);
3398 			ahc_send_lstate_events(ahc, lstate);
3399 		}
3400 	}
3401 #endif
3402 
3403 	/*
3404 	 * Go back to async/narrow transfers and renegotiate.
3405 	 */
3406 	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3407 		      AHC_TRANS_CUR, /*paused*/TRUE);
3408 	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
3409 			 /*period*/0, /*offset*/0, /*ppr_options*/0,
3410 			 AHC_TRANS_CUR, /*paused*/TRUE);
3411 
3412 	ahc_send_async(ahc, devinfo->channel, devinfo->target,
3413 		       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
3414 
3415 	if (message != NULL
3416 	 && (verbose_level <= bootverbose))
3417 		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
3418 		       message, devinfo->channel, devinfo->target, found);
3419 }
3420 
3421 #ifdef AHC_TARGET_MODE
3422 static void
3423 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3424 		       struct scb *scb)
3425 {
3426 
3427 	/*
3428 	 * To facilitate adding multiple messages together,
3429 	 * each routine should increment the index and len
3430 	 * variables instead of setting them explicitly.
3431 	 */
3432 	ahc->msgout_index = 0;
3433 	ahc->msgout_len = 0;
3434 
3435 	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
3436 		ahc_build_transfer_msg(ahc, devinfo);
3437 	else
3438 		panic("ahc_intr: AWAITING target message with no message");
3439 
3440 	ahc->msgout_index = 0;
3441 	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3442 }
3443 #endif
3444 /**************************** Initialization **********************************/
3445 /*
3446  * Allocate a controller structure for a new device
3447  * and perform initial initialization.
3448  */
3449 struct ahc_softc *
3450 ahc_alloc(void *platform_arg, char *name)
3451 {
3452 	struct  ahc_softc *ahc;
3453 	int	i;
3454 
3455 #ifndef	__FreeBSD__
3456 	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
3457 	if (!ahc) {
3458 		printf("aic7xxx: cannot malloc softc!\n");
3459 		free(name, M_DEVBUF);
3460 		return NULL;
3461 	}
3462 #else
3463 	ahc = device_get_softc((device_t)platform_arg);
3464 #endif
3465 	memset(ahc, 0, sizeof(*ahc));
3466 	LIST_INIT(&ahc->pending_scbs);
3467 	/* We don't know our unit number until the OSM sets it */
3468 	ahc->name = name;
3469 	ahc->unit = -1;
3470 	ahc->description = NULL;
3471 	ahc->channel = 'A';
3472 	ahc->channel_b = 'B';
3473 	ahc->chip = AHC_NONE;
3474 	ahc->features = AHC_FENONE;
3475 	ahc->bugs = AHC_BUGNONE;
3476 	ahc->flags = AHC_FNONE;
3477 
3478 	for (i = 0; i < 16; i++)
3479 		TAILQ_INIT(&ahc->untagged_queues[i]);
3480 	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
3481 		ahc_free(ahc);
3482 		ahc = NULL;
3483 	}
3484 	return (ahc);
3485 }
3486 
3487 int
3488 ahc_softc_init(struct ahc_softc *ahc)
3489 {
3490 
3491 	/* The IRQMS bit is only valid on VL and EISA chips */
3492 	if ((ahc->chip & AHC_PCI) == 0)
3493 		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
3494 	else
3495 		ahc->unpause = 0;
3496 	ahc->pause = ahc->unpause | PAUSE;
3497 	/* XXX The shared scb data stuff should be deprecated */
3498 	if (ahc->scb_data == NULL) {
3499 		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
3500 				       M_DEVBUF, M_NOWAIT);
3501 		if (ahc->scb_data == NULL)
3502 			return (ENOMEM);
3503 		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
3504 	}
3505 
3506 	return (0);
3507 }
3508 
3509 void
3510 ahc_softc_insert(struct ahc_softc *ahc)
3511 {
3512 	struct ahc_softc *list_ahc;
3513 
3514 #if AHC_PCI_CONFIG > 0
3515 	/*
3516 	 * Second Function PCI devices need to inherit some
3517 	 * settings from function 0.
3518 	 */
3519 	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
3520 	 && (ahc->features & AHC_MULTI_FUNC) != 0) {
3521 		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
3522 			ahc_dev_softc_t list_pci;
3523 			ahc_dev_softc_t pci;
3524 
3525 			list_pci = list_ahc->dev_softc;
3526 			pci = ahc->dev_softc;
3527 			if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
3528 			 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) {
3529 				struct ahc_softc *master;
3530 				struct ahc_softc *slave;
3531 
3532 				if (ahc_get_pci_function(list_pci) == 0) {
3533 					master = list_ahc;
3534 					slave = ahc;
3535 				} else {
3536 					master = ahc;
3537 					slave = list_ahc;
3538 				}
3539 				slave->flags &= ~AHC_BIOS_ENABLED;
3540 				slave->flags |=
3541 				    master->flags & AHC_BIOS_ENABLED;
3542 				slave->flags &= ~AHC_PRIMARY_CHANNEL;
3543 				slave->flags |=
3544 				    master->flags & AHC_PRIMARY_CHANNEL;
3545 				break;
3546 			}
3547 		}
3548 	}
3549 #endif
3550 
3551 	/*
3552 	 * Insertion sort into our list of softcs.
3553 	 */
3554 	list_ahc = TAILQ_FIRST(&ahc_tailq);
3555 	while (list_ahc != NULL
3556 	    && ahc_softc_comp(list_ahc, ahc) <= 0)
3557 		list_ahc = TAILQ_NEXT(list_ahc, links);
3558 	if (list_ahc != NULL)
3559 		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
3560 	else
3561 		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
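	/*
	 * Record our progress; ahc_free() keys its teardown on
	 * init_level.
	 */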
3562 	ahc->init_level++;
3563 }
3564 
3565 void
3566 ahc_set_unit(struct ahc_softc *ahc, int unit)
3567 {
3568 	ahc->unit = unit;
3569 }
3570 
3571 void
3572 ahc_set_name(struct ahc_softc *ahc, char *name)
3573 {
3574 	if (ahc->name != NULL)
3575 		free(ahc->name, M_DEVBUF);
3576 	ahc->name = name;
3577 }
3578 
3579 void
3580 ahc_free(struct ahc_softc *ahc)
3581 {
3582 	int i;
3583 
3584 	ahc_fini_scbdata(ahc);
3585 	switch (ahc->init_level) {
3586 	default:
3587 	case 5:
3588 		ahc_shutdown(ahc);
3589 		TAILQ_REMOVE(&ahc_tailq, ahc, links);
3590 		/* FALLTHROUGH */
3591 	case 4:
3592 		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
3593 				  ahc->shared_data_dmamap);
3594 		/* FALLTHROUGH */
3595 	case 3:
3596 		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
3597 				ahc->shared_data_dmamap);
3598 		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
3599 				   ahc->shared_data_dmamap);
3600 		/* FALLTHROUGH */
3601 	case 2:
3602 		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
3603 	case 1:
3604 #ifndef __linux__
3605 		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
3606 #endif
3607 		break;
3608 	case 0:
3609 		break;
3610 	}
3611 
3612 #ifndef __linux__
3613 	ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
3614 #endif
3615 	ahc_platform_free(ahc);
3616 	for (i = 0; i < AHC_NUM_TARGETS; i++) {
3617 		struct ahc_tmode_tstate *tstate;
3618 
3619 		tstate = ahc->enabled_targets[i];
3620 		if (tstate != NULL) {
3621 #ifdef AHC_TARGET_MODE
3622 			int j;
3623 
3624 			for (j = 0; j < AHC_NUM_LUNS; j++) {
3625 				struct ahc_tmode_lstate *lstate;
3626 
3627 				lstate = tstate->enabled_luns[j];
3628 				if (lstate != NULL) {
3629 					xpt_free_path(lstate->path);
3630 					free(lstate, M_DEVBUF);
3631 				}
3632 			}
3633 #endif
3634 			free(tstate, M_DEVBUF);
3635 		}
3636 	}
3637 #ifdef AHC_TARGET_MODE
3638 	if (ahc->black_hole != NULL) {
3639 		xpt_free_path(ahc->black_hole->path);
3640 		free(ahc->black_hole, M_DEVBUF);
3641 	}
3642 #endif
3643 	if (ahc->name != NULL)
3644 		free(ahc->name, M_DEVBUF);
3645 #ifndef __FreeBSD__
3646 	free(ahc, M_DEVBUF);
3647 #endif
3648 	return;
3649 }
3650 
3651 void
3652 ahc_shutdown(void *arg)
3653 {
3654 	struct	ahc_softc *ahc;
3655 	int	i;
3656 
3657 	ahc = (struct ahc_softc *)arg;
3658 
3659 	/* This will reset most registers to 0, but not all */
3660 	ahc_reset(ahc);
3661 	ahc_outb(ahc, SCSISEQ, 0);
3662 	ahc_outb(ahc, SXFRCTL0, 0);
3663 	ahc_outb(ahc, DSPCISTATUS, 0);
3664 
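	/*
	 * Clear the per-target transfer settings and other driver
	 * state kept in scratch RAM.
	 */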
3665 	for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++)
3666 		ahc_outb(ahc, i, 0);
3667 }
3668 
3669 /*
3670  * Reset the controller and record some information about it
3671  * that is only available just after a reset.
3672  */
3673 int
3674 ahc_reset(struct ahc_softc *ahc)
3675 {
3676 	u_int	sblkctl;
3677 	u_int	sxfrctl1_a, sxfrctl1_b;
3678 	int	wait;
3679 
3680 	/*
3681 	 * Preserve the value of the SXFRCTL1 register for all channels.
3682 	 * It contains settings that affect termination and we don't want
3683 	 * to disturb the integrity of the bus.
3684 	 */
3685 	ahc_pause(ahc);
3686 	sxfrctl1_b = 0;
3687 	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
3688 		u_int sblkctl;
3689 
3690 		/*
3691 		 * Save channel B's settings in case this chip
3692 		 * is set up for TWIN channel operation.
3693 		 */
3694 		sblkctl = ahc_inb(ahc, SBLKCTL);
3695 		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3696 		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
3697 		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3698 	}
3699 	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
3700 
3701 	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
3702 
3703 	/*
3704 	 * Ensure that the reset has finished
3705 	 */
3706 	wait = 1000;
3707 	do {
3708 		ahc_delay(1000);
3709 	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
3710 
3711 	if (wait == 0) {
3712 		printf("%s: WARNING - Failed chip reset!  "
3713 		       "Trying to initialize anyway.\n", ahc_name(ahc));
3714 	}
3715 	ahc_outb(ahc, HCNTRL, ahc->pause);
3716 
3717 	/* Determine channel configuration */
3718 	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
3719 	/* No Twin Channel PCI cards */
3720 	if ((ahc->chip & AHC_PCI) != 0)
3721 		sblkctl &= ~SELBUSB;
3722 	switch (sblkctl) {
3723 	case 0:
3724 		/* Single Narrow Channel */
3725 		break;
3726 	case 2:
3727 		/* Wide Channel */
3728 		ahc->features |= AHC_WIDE;
3729 		break;
3730 	case 8:
3731 		/* Twin Channel */
3732 		ahc->features |= AHC_TWIN;
3733 		break;
3734 	default:
3735 		printf(" Unsupported adapter type.  Ignoring\n");
3736 		return(-1);
3737 	}
3738 
3739 	/*
3740 	 * Reload sxfrctl1.
3741 	 *
3742 	 * We must always initialize STPWEN to 1 before we
3743 	 * restore the saved values.  STPWEN is initialized
3744 	 * to a tri-state condition which can only be cleared
3745 	 * by turning it on.
3746 	 */
3747 	if ((ahc->features & AHC_TWIN) != 0) {
3748 		u_int sblkctl;
3749 
3750 		sblkctl = ahc_inb(ahc, SBLKCTL);
3751 		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3752 		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
3753 		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3754 	}
3755 	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
3756 
3757 #ifdef AHC_DUMP_SEQ
3758 	if (ahc->init_level == 0)
3759 		ahc_dumpseq(ahc);
3760 #endif
3761 
3762 	return (0);
3763 }
3764 
3765 /*
3766  * Determine the number of SCBs available on the controller
3767  */
3768 int
3769 ahc_probe_scbs(struct ahc_softc *ahc)
{
3770 	int i;
3771 
3772 	for (i = 0; i < AHC_SCB_MAX; i++) {
3773 
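		/*
		 * Write a test value to this SCB and verify both that
		 * it sticks and that SCB 0 still reads back as 0; a
		 * failure of either check means SCB storage ends (or
		 * aliases back onto SCB 0) at this index.
		 */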
3774 		ahc_outb(ahc, SCBPTR, i);
3775 		ahc_outb(ahc, SCB_BASE, i);
3776 		if (ahc_inb(ahc, SCB_BASE) != i)
3777 			break;
3778 		ahc_outb(ahc, SCBPTR, 0);
3779 		if (ahc_inb(ahc, SCB_BASE) != 0)
3780 			break;
3781 	}
3782 	return (i);
3783 }
3784 
3785 static void
3786 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3787 {
3788 	bus_addr_t *baddr;
3789 
3790 	baddr = (bus_addr_t *)arg;
3791 	*baddr = segs->ds_addr;
3792 }
3793 
3794 static void
3795 ahc_build_free_scb_list(struct ahc_softc *ahc)
3796 {
3797 	int i;
3798 
3799 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
3800 		ahc_outb(ahc, SCBPTR, i);
3801 
3802 		/* Clear the control byte. */
3803 		ahc_outb(ahc, SCB_CONTROL, 0);
3804 
3805 		/* Set the next pointer */
3806 		if ((ahc->flags & AHC_PAGESCBS) != 0)
3807 			ahc_outb(ahc, SCB_NEXT, i+1);
3808 		else
3809 			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
3810 
3811 		/* Make the tag number invalid */
3812 		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
3813 	}
3814 
3815 	/* Make sure that the last SCB terminates the free list */
3816 	ahc_outb(ahc, SCBPTR, i-1);
3817 	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
3818 
3819 	/* Ensure we clear the 0 SCB's control byte. */
3820 	ahc_outb(ahc, SCBPTR, 0);
3821 	ahc_outb(ahc, SCB_CONTROL, 0);
3822 }
3823 
3824 static int
3825 ahc_init_scbdata(struct ahc_softc *ahc)
3826 {
3827 	struct scb_data *scb_data;
3828 
3829 	scb_data = ahc->scb_data;
3830 	SLIST_INIT(&scb_data->free_scbs);
3831 	SLIST_INIT(&scb_data->sg_maps);
3832 
3833 	/* Allocate SCB resources */
3834 	scb_data->scbarray =
3835 	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX,
3836 				 M_DEVBUF, M_NOWAIT);
3837 	if (scb_data->scbarray == NULL)
3838 		return (ENOMEM);
3839 	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX);
3840 
3841 	/* Determine the number of hardware SCBs and initialize them */
3842 
3843 	scb_data->maxhscbs = ahc_probe_scbs(ahc);
3844 	if ((ahc->flags & AHC_PAGESCBS) != 0) {
3845 		/* SCB 0 heads the free list */
3846 		ahc_outb(ahc, FREE_SCBH, 0);
3847 	} else {
3848 		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
3849 	}
3850 
3851 	if (ahc->scb_data->maxhscbs == 0) {
3852 		printf("%s: No SCB space found\n", ahc_name(ahc));
3853 		return (ENXIO);
3854 	}
3855 
3856 	ahc_build_free_scb_list(ahc);
3857 
3858 	/*
3859 	 * Create our DMA tags.  These tags define the kinds of device
3860 	 * accessible memory allocations and memory mappings we will
3861 	 * need to perform during normal operation.
3862 	 *
3863 	 * Unless we need to further restrict the allocation, we rely
3864 	 * on the restrictions of the parent dmat, hence the common
3865 	 * use of MAXADDR and MAXSIZE.
3866 	 */
3867 
3868 	/* DMA tag for our hardware scb structures */
3869 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3870 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
3871 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
3872 			       /*highaddr*/BUS_SPACE_MAXADDR,
3873 			       /*filter*/NULL, /*filterarg*/NULL,
3874 			       AHC_SCB_MAX * sizeof(struct hardware_scb),
3875 			       /*nsegments*/1,
3876 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3877 			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
3878 		goto error_exit;
3879 	}
3880 
3881 	scb_data->init_level++;
3882 
3883 	/* Allocation for our hscbs */
3884 	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
3885 			     (void **)&scb_data->hscbs,
3886 			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
3887 		goto error_exit;
3888 	}
3889 
3890 	scb_data->init_level++;
3891 
3892 	/* And permanently map them */
3893 	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
3894 			scb_data->hscbs,
3895 			AHC_SCB_MAX * sizeof(struct hardware_scb),
3896 			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);
3897 
3898 	scb_data->init_level++;
3899 
3900 	/* DMA tag for our sense buffers */
3901 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3902 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
3903 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
3904 			       /*highaddr*/BUS_SPACE_MAXADDR,
3905 			       /*filter*/NULL, /*filterarg*/NULL,
3906 			       AHC_SCB_MAX * sizeof(struct scsi_sense_data),
3907 			       /*nsegments*/1,
3908 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3909 			       /*flags*/0, &scb_data->sense_dmat) != 0) {
3910 		goto error_exit;
3911 	}
3912 
3913 	scb_data->init_level++;
3914 
3915 	/* Allocate them */
3916 	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
3917 			     (void **)&scb_data->sense,
3918 			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
3919 		goto error_exit;
3920 	}
3921 
3922 	scb_data->init_level++;
3923 
3924 	/* And permanently map them */
3925 	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
3926 			scb_data->sense,
3927 			AHC_SCB_MAX * sizeof(struct scsi_sense_data),
3928 			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);
3929 
3930 	scb_data->init_level++;
3931 
3932 	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
3933 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3934 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
3935 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
3936 			       /*highaddr*/BUS_SPACE_MAXADDR,
3937 			       /*filter*/NULL, /*filterarg*/NULL,
3938 			       PAGE_SIZE, /*nsegments*/1,
3939 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3940 			       /*flags*/0, &scb_data->sg_dmat) != 0) {
3941 		goto error_exit;
3942 	}
3943 
3944 	scb_data->init_level++;
3945 
3946 	/* Perform initial CCB allocation */
3947 	memset(scb_data->hscbs, 0, AHC_SCB_MAX * sizeof(struct hardware_scb));
3948 	ahc_alloc_scbs(ahc);
3949 
3950 	if (scb_data->numscbs == 0) {
3951 		printf("%s: ahc_init_scbdata - "
3952 		       "Unable to allocate initial scbs\n",
3953 		       ahc_name(ahc));
3954 		goto error_exit;
3955 	}
3956 
3957 	/*
3958 	 * Tell the sequencer which SCB will be the next one it receives.
3959 	 */
3960 	ahc->next_queued_scb = ahc_get_scb(ahc);
3961 	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
3962 
3963 	/*
3964 	 * Note that we were successful.
3965 	 */
3966 	return (0);
3967 
3968 error_exit:
3969 
3970 	return (ENOMEM);
3971 }
3972 
3973 static void
3974 ahc_fini_scbdata(struct ahc_softc *ahc)
3975 {
3976 	struct scb_data *scb_data;
3977 
3978 	scb_data = ahc->scb_data;
3979 	if (scb_data == NULL)
3980 		return;
3981 
3982 	switch (scb_data->init_level) {
3983 	default:
3984 	case 7:
3985 	{
3986 		struct sg_map_node *sg_map;
3987 
3988 		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
3989 			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
3990 			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
3991 					  sg_map->sg_dmamap);
3992 			ahc_dmamem_free(ahc, scb_data->sg_dmat,
3993 					sg_map->sg_vaddr,
3994 					sg_map->sg_dmamap);
3995 			free(sg_map, M_DEVBUF);
3996 		}
3997 		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
3998 	}
3999 	case 6:
4000 		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
4001 				  scb_data->sense_dmamap);
4002 	case 5:
4003 		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
4004 				scb_data->sense_dmamap);
4005 		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
4006 				   scb_data->sense_dmamap);
4007 	case 4:
4008 		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
4009 	case 3:
4010 		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
4011 				  scb_data->hscb_dmamap);
4012 	case 2:
4013 		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
4014 				scb_data->hscb_dmamap);
4015 		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
4016 				   scb_data->hscb_dmamap);
4017 	case 1:
4018 		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
4019 		break;
4020 	case 0:
4021 		break;
4022 	}
4023 	if (scb_data->scbarray != NULL)
4024 		free(scb_data->scbarray, M_DEVBUF);
4025 }
4026 
4027 void
4028 ahc_alloc_scbs(struct ahc_softc *ahc)
4029 {
4030 	struct scb_data *scb_data;
4031 	struct scb *next_scb;
4032 	struct sg_map_node *sg_map;
4033 	bus_addr_t physaddr;
4034 	struct ahc_dma_seg *segs;
4035 	int newcount;
4036 	int i;
4037 
4038 	scb_data = ahc->scb_data;
4039 	if (scb_data->numscbs >= AHC_SCB_MAX)
4040 		/* Can't allocate any more */
4041 		return;
4042 
4043 	next_scb = &scb_data->scbarray[scb_data->numscbs];
4044 
4045 	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
4046 
4047 	if (sg_map == NULL)
4048 		return;
4049 
4050 	/* Allocate S/G space for the next batch of SCBS */
4051 	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
4052 			     (void **)&sg_map->sg_vaddr,
4053 			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
4054 		free(sg_map, M_DEVBUF);
4055 		return;
4056 	}
4057 
4058 	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
4059 
4060 	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
4061 			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
4062 			&sg_map->sg_physaddr, /*flags*/0);
4063 
4064 	segs = sg_map->sg_vaddr;
4065 	physaddr = sg_map->sg_physaddr;
4066 
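	/*
	 * Each SCB is given an AHC_NSEG entry S/G list, so one page
	 * of S/G space backs at most this many additional SCBs.
	 */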
4067 	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
4068 	for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) {
4069 		struct scb_platform_data *pdata;
4070 #ifndef __linux__
4071 		int error;
4072 #endif
4073 		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
4074 							   M_DEVBUF, M_NOWAIT);
4075 		if (pdata == NULL)
4076 			break;
4077 		next_scb->platform_data = pdata;
4078 		next_scb->sg_map = sg_map;
4079 		next_scb->sg_list = segs;
4080 		/*
4081 		 * The sequencer always starts with the second entry.
4082 		 * The first entry is embedded in the scb.
4083 		 */
4084 		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
4085 		next_scb->ahc_softc = ahc;
4086 		next_scb->flags = SCB_FREE;
4087 #ifndef __linux__
4088 		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
4089 					  &next_scb->dmamap);
4090 		if (error != 0)
4091 			break;
4092 #endif
4093 		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
4094 		next_scb->hscb->tag = ahc->scb_data->numscbs;
4095 		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
4096 				  next_scb, links.sle);
4097 		segs += AHC_NSEG;
4098 		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
4099 		next_scb++;
4100 		ahc->scb_data->numscbs++;
4101 	}
4102 }
4103 
4104 void
4105 ahc_controller_info(struct ahc_softc *ahc, char *buf)
4106 {
4107 	int len;
4108 
4109 	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
4110 	buf += len;
4111 	if ((ahc->features & AHC_TWIN) != 0)
4112  		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
4113 			      "B SCSI Id=%d, primary %c, ",
4114 			      ahc->our_id, ahc->our_id_b,
4115 			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
4116 	else {
4117 		const char *speed;
4118 		const char *type;
4119 
4120 		speed = "";
4121 		if ((ahc->features & AHC_ULTRA) != 0) {
4122 			speed = "Ultra ";
4123 		} else if ((ahc->features & AHC_DT) != 0) {
4124 			speed = "Ultra160 ";
4125 		} else if ((ahc->features & AHC_ULTRA2) != 0) {
4126 			speed = "Ultra2 ";
4127 		}
4128 		if ((ahc->features & AHC_WIDE) != 0) {
4129 			type = "Wide";
4130 		} else {
4131 			type = "Single";
4132 		}
4133 		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
4134 			      speed, type, ahc->channel, ahc->our_id);
4135 	}
4136 	buf += len;
4137 
4138 	if ((ahc->flags & AHC_PAGESCBS) != 0)
4139 		sprintf(buf, "%d/%d SCBs",
4140 			ahc->scb_data->maxhscbs, AHC_SCB_MAX);
4141 	else
4142 		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
4143 }
4144 
4145 /*
4146  * Start the board, ready for normal operation
4147  */
4148 int
4149 ahc_init(struct ahc_softc *ahc)
4150 {
4151 	int	 max_targ;
4152 	int	 i;
4153 	int	 term;
4154 	u_int	 scsi_conf;
4155 	u_int	 scsiseq_template;
4156 	u_int	 ultraenb;
4157 	u_int	 discenable;
4158 	u_int	 tagenable;
4159 	size_t	 driver_data_size;
4160 	uint32_t physaddr;
4161 
4162 #ifdef AHC_DEBUG_SEQUENCER
4163 	ahc->flags |= AHC_SEQUENCER_DEBUG;
4164 #endif
4165 
4166 #ifdef AHC_PRINT_SRAM
4167 	printf("Scratch Ram:");
4168 	for (i = 0x20; i < 0x5f; i++) {
4169 		if (((i % 8) == 0) && (i != 0)) {
4170 			printf ("\n              ");
4171 		}
4172 		printf (" 0x%x", ahc_inb(ahc, i));
4173 	}
4174 	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4175 		for (i = 0x70; i < 0x7f; i++) {
4176 			if (((i % 8) == 0) && (i != 0)) {
4177 				printf ("\n              ");
4178 			}
4179 			printf (" 0x%x", ahc_inb(ahc, i));
4180 		}
4181 	}
4182 	printf ("\n");
4183 #endif
4184 	max_targ = 15;
4185 
4186 	/*
4187 	 * Assume we have a board at this stage and it has been reset.
4188 	 */
4189 	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
4190 		ahc->our_id = ahc->our_id_b = 7;
4191 
4192 	/*
4193 	 * Default to allowing initiator operations.
4194 	 */
4195 	ahc->flags |= AHC_INITIATORROLE;
4196 
4197 	/*
4198 	 * Only allow target mode features if this unit has them enabled.
4199 	 */
4200 	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
4201 		ahc->features &= ~AHC_TARGETMODE;
4202 
4203 #ifndef __linux__
4204 	/* DMA tag for mapping buffers into device visible space. */
4205 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4206 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4207 			       /*lowaddr*/BUS_SPACE_MAXADDR,
4208 			       /*highaddr*/BUS_SPACE_MAXADDR,
4209 			       /*filter*/NULL, /*filterarg*/NULL,
4210 			       /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG,
4211 			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
4212 			       /*flags*/BUS_DMA_ALLOCNOW,
4213 			       &ahc->buffer_dmat) != 0) {
4214 		return (ENOMEM);
4215 	}
4216 #endif
4217 
4218 	ahc->init_level++;
4219 
4220 	/*
4221 	 * DMA tag for our command fifos and other data in system memory
4222 	 * the card's sequencer must be able to access.  For initiator
4223  * roles, we need to allocate space for the qinfifo and qoutfifo.
4224 	 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
4225 	 * When providing for the target mode role, we must additionally
4226 	 * provide space for the incoming target command fifo and an extra
4227 	 * byte to deal with a dma bug in some chip versions.
4228 	 */
4229 	driver_data_size = 2 * 256 * sizeof(uint8_t);
4230 	if ((ahc->features & AHC_TARGETMODE) != 0)
4231 		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
4232 				 + /*DMA WideOdd Bug Buffer*/1;
4233 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4234 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4235 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4236 			       /*highaddr*/BUS_SPACE_MAXADDR,
4237 			       /*filter*/NULL, /*filterarg*/NULL,
4238 			       driver_data_size,
4239 			       /*nsegments*/1,
4240 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4241 			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
4242 		return (ENOMEM);
4243 	}
4244 
4245 	ahc->init_level++;
4246 
4247 	/* Allocation of driver data */
4248 	if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
4249 			     (void **)&ahc->qoutfifo,
4250 			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
4251 		return (ENOMEM);
4252 	}
4253 
4254 	ahc->init_level++;
4255 
4256 	/* And permanently map it in */
4257 	ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
4258 			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
4259 			&ahc->shared_data_busaddr, /*flags*/0);
4260 
4261 	if ((ahc->features & AHC_TARGETMODE) != 0) {
4262 		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
4263 		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
4264 		ahc->dma_bug_buf = ahc->shared_data_busaddr
4265 				 + driver_data_size - 1;
4266 		/* All target command blocks start out invalid. */
4267 		for (i = 0; i < AHC_TMODE_CMDS; i++)
4268 			ahc->targetcmds[i].cmd_valid = 0;
4269 		ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
4270 		ahc->tqinfifonext = 1;
4271 		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
4272 		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
4273 		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4274 	}
4275 	ahc->qinfifo = &ahc->qoutfifo[256];
4276 
4277 	ahc->init_level++;
4278 
4279 	/* Allocate SCB data now that buffer_dmat is initialized */
4280 	if (ahc->scb_data->maxhscbs == 0)
4281 		if (ahc_init_scbdata(ahc) != 0)
4282 			return (ENOMEM);
4283 
4284 	/*
4285 	 * Allocate a tstate to house information for our
4286 	 * initiator presence on the bus as well as the user
4287 	 * data for any target mode initiator.
4288 	 */
4289 	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
4290 		printf("%s: unable to allocate ahc_tmode_tstate.  "
4291 		       "Failing attach\n", ahc_name(ahc));
4292 		return (ENOMEM);
4293 	}
4294 
4295 	if ((ahc->features & AHC_TWIN) != 0) {
4296 		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
4297 			printf("%s: unable to allocate ahc_tmode_tstate.  "
4298 			       "Failing attach\n", ahc_name(ahc));
4299 			return (ENOMEM);
4300 		}
4301 	}
4302 
4303 	ahc_outb(ahc, SEQ_FLAGS, 0);
4304 	ahc_outb(ahc, SEQ_FLAGS2, 0);
4305 
4306 	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) {
4307 		ahc->flags |= AHC_PAGESCBS;
4308 	} else {
4309 		ahc->flags &= ~AHC_PAGESCBS;
4310 	}
4311 
4312 #ifdef AHC_DEBUG
4313 	if (ahc_debug & AHC_SHOWMISC) {
4314 		printf("%s: hardware scb %d bytes; kernel scb %d bytes; "
4315 		       "ahc_dma %d bytes\n",
4316 			ahc_name(ahc),
4317 			(u_int)sizeof(struct hardware_scb),
4318 			(u_int)sizeof(struct scb),
4319 			(u_int)sizeof(struct ahc_dma_seg));
4320 	}
4321 #endif /* AHC_DEBUG */
4322 
4323 	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4324 	if (ahc->features & AHC_TWIN) {
4325 
4326 		/*
4327 		 * The device is gated to channel B after a chip reset,
4328 		 * so set those values first
4329 		 */
4330 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4331 		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4332 		ahc_outb(ahc, SCSIID, ahc->our_id_b);
4333 		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4334 		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4335 					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
4336 		if ((ahc->features & AHC_ULTRA2) != 0)
4337 			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4338 		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4339 		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4340 
4341 		if ((scsi_conf & RESET_SCSI) != 0
4342 		 && (ahc->flags & AHC_INITIATORROLE) != 0)
4343 			ahc->flags |= AHC_RESET_BUS_B;
4344 
4345 		/* Select Channel A */
4346 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4347 	}
4348 	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4349 	if ((ahc->features & AHC_ULTRA2) != 0)
4350 		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4351 	else
4352 		ahc_outb(ahc, SCSIID, ahc->our_id);
4353 	scsi_conf = ahc_inb(ahc, SCSICONF);
4354 	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4355 				|term|ahc->seltime
4356 				|ENSTIMER|ACTNEGEN);
4357 	if ((ahc->features & AHC_ULTRA2) != 0)
4358 		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4359 	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4360 	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4361 
4362 	if ((scsi_conf & RESET_SCSI) != 0
4363 	 && (ahc->flags & AHC_INITIATORROLE) != 0)
4364 		ahc->flags |= AHC_RESET_BUS_A;
4365 
4366 	/*
4367 	 * Look at the information that board initialization or
4368 	 * the board bios has left us.
4369 	 */
4370 	ultraenb = 0;
4371 	tagenable = ALL_TARGETS_MASK;
4372 
4373 	/* Grab the disconnection disable table and invert it for our needs */
4374 	if (ahc->flags & AHC_USEDEFAULTS) {
4375 		printf("%s: Host Adapter Bios disabled.  Using default SCSI "
4376 			"device parameters\n", ahc_name(ahc));
4377 		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
4378 			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
4379 		discenable = ALL_TARGETS_MASK;
4380 		if ((ahc->features & AHC_ULTRA) != 0)
4381 			ultraenb = ALL_TARGETS_MASK;
4382 	} else {
4383 		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
4384 			   | ahc_inb(ahc, DISC_DSB));
4385 		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
4386 			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
4387 				      | ahc_inb(ahc, ULTRA_ENB);
4388 	}
4389 
4390 	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
4391 		max_targ = 7;
4392 
4393 	for (i = 0; i <= max_targ; i++) {
4394 		struct ahc_initiator_tinfo *tinfo;
4395 		struct ahc_tmode_tstate *tstate;
4396 		u_int our_id;
4397 		u_int target_id;
4398 		char channel;
4399 
4400 		channel = 'A';
4401 		our_id = ahc->our_id;
4402 		target_id = i;
4403 		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
4404 			channel = 'B';
4405 			our_id = ahc->our_id_b;
4406 			target_id = i % 8;
4407 		}
4408 		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
4409 					    target_id, &tstate);
4410 		/* Default to async narrow across the board */
4411 		memset(tinfo, 0, sizeof(*tinfo));
4412 		if (ahc->flags & AHC_USEDEFAULTS) {
4413 			if ((ahc->features & AHC_WIDE) != 0)
4414 				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4415 
4416 			/*
4417 			 * These will be truncated when we determine the
4418 			 * connection type we have with the target.
4419 			 */
4420 			tinfo->user.period = ahc_syncrates->period;
4421 			tinfo->user.offset = ~0;
4422 		} else {
4423 			u_int scsirate;
4424 			uint16_t mask;
4425 
4426 			/* Take the settings leftover in scratch RAM. */
4427 			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
4428 			mask = (0x01 << i);
4429 			if ((ahc->features & AHC_ULTRA2) != 0) {
4430 				u_int offset;
4431 				u_int maxsync;
4432 
4433 				if ((scsirate & SOFS) == 0x0F) {
4434 					/*
4435 					 * Haven't negotiated yet,
4436 					 * so the format is different.
4437 					 */
4438 					scsirate = (scsirate & SXFR) >> 4
4439 						 | ((ultraenb & mask)
4440 						  ? 0x08 : 0x0)
4441 						 | (scsirate & WIDEXFER);
4442 					offset = MAX_OFFSET_ULTRA2;
4443 				} else
4444 					offset = ahc_inb(ahc, TARG_OFFSET + i);
4445 				if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
4446 					/* Set to the lowest sync rate, 5MHz */
4447 					scsirate |= 0x1c;
4448 				maxsync = AHC_SYNCRATE_ULTRA2;
4449 				if ((ahc->features & AHC_DT) != 0)
4450 					maxsync = AHC_SYNCRATE_DT;
4451 				tinfo->user.period =
4452 				    ahc_find_period(ahc, scsirate, maxsync);
4453 				if (offset == 0)
4454 					tinfo->user.period = 0;
4455 				else
4456 					tinfo->user.offset = ~0;
4457 				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
4458 				 && (ahc->features & AHC_DT) != 0)
4459 					tinfo->user.ppr_options =
4460 					    MSG_EXT_PPR_DT_REQ;
4461 			} else if ((scsirate & SOFS) != 0) {
4462 				if ((scsirate & SXFR) == 0x40
4463 				 && (ultraenb & mask) != 0) {
4464 					/* Treat 10MHz as a non-ultra speed */
4465 					scsirate &= ~SXFR;
4466 				 	ultraenb &= ~mask;
4467 				}
4468 				tinfo->user.period =
4469 				    ahc_find_period(ahc, scsirate,
4470 						    (ultraenb & mask)
4471 						   ? AHC_SYNCRATE_ULTRA
4472 						   : AHC_SYNCRATE_FAST);
4473 				if (tinfo->user.period != 0)
4474 					tinfo->user.offset = ~0;
4475 			}
4476 			if (tinfo->user.period == 0)
4477 				tinfo->user.offset = 0;
4478 			if ((scsirate & WIDEXFER) != 0
4479 			 && (ahc->features & AHC_WIDE) != 0)
4480 				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4481 			tinfo->user.protocol_version = 4;
4482 			if ((ahc->features & AHC_DT) != 0)
4483 				tinfo->user.transport_version = 3;
4484 			else
4485 				tinfo->user.transport_version = 2;
4486 			tinfo->goal.protocol_version = 2;
4487 			tinfo->goal.transport_version = 2;
4488 			tinfo->curr.protocol_version = 2;
4489 			tinfo->curr.transport_version = 2;
4490 		}
4491 		tstate->ultraenb = ultraenb;
4492 	}
4493 	ahc->user_discenable = discenable;
4494 	ahc->user_tagenable = tagenable;
4495 
4496 	/* There are no untagged SCBs active yet. */
4497 	for (i = 0; i < 16; i++) {
4498 		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
4499 		if ((ahc->flags & AHC_SCB_BTT) != 0) {
4500 			int lun;
4501 
4502 			/*
4503 			 * The SCB based BTT allows an entry per
4504 			 * target and lun pair.
4505 			 */
4506 			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
4507 				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
4508 		}
4509 	}
4510 
4511 	/* All of our queues are empty */
4512 	for (i = 0; i < 256; i++)
4513 		ahc->qoutfifo[i] = SCB_LIST_NULL;
4514 	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);
4515 
4516 	for (i = 0; i < 256; i++)
4517 		ahc->qinfifo[i] = SCB_LIST_NULL;
4518 
4519 	if ((ahc->features & AHC_MULTI_TID) != 0) {
4520 		ahc_outb(ahc, TARGID, 0);
4521 		ahc_outb(ahc, TARGID + 1, 0);
4522 	}
4523 
4524 	/*
4525 	 * Tell the sequencer where it can find our arrays in memory.
4526 	 */
4527 	physaddr = ahc->scb_data->hscb_busaddr;
4528 	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4529 	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4530 	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4531 	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4532 
4533 	physaddr = ahc->shared_data_busaddr;
4534 	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4535 	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4536 	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4537 	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4538 
4539 	/*
4540 	 * Initialize the group code to command length table.
4541 	 * This overrides the values in TARG_SCSIRATE, so only
4542  * set up the table after we have processed that information.
4543 	 */
4544 	ahc_outb(ahc, CMDSIZE_TABLE, 5);
4545 	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4546 	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4547 	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4548 	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4549 	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4550 	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4551 	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
4552 
4553 	/* Tell the sequencer of our initial queue positions */
4554 	ahc_outb(ahc, KERNEL_QINPOS, 0);
4555 	ahc_outb(ahc, QINPOS, 0);
4556 	ahc_outb(ahc, QOUTPOS, 0);
4557 
4558 	/*
4559 	 * Use the built in queue management registers
4560 	 * if they are available.
4561 	 */
4562 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4563 		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4564 		ahc_outb(ahc, SDSCB_QOFF, 0);
4565 		ahc_outb(ahc, SNSCB_QOFF, 0);
4566 		ahc_outb(ahc, HNSCB_QOFF, 0);
4567 	}
4568 
4569 
4570 	/* We don't have any waiting selections */
4571 	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4572 
4573 	/* Our disconnection list is empty too */
4574 	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4575 
4576 	/* Message out buffer starts empty */
4577 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4578 
4579 	/*
4580 	 * Setup the allowed SCSI Sequences based on operational mode.
4581 	 * If we are a target, we'll enalbe select in operations once
4582 	 * we've had a lun enabled.
4583 	 */
4584 	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4585 	if ((ahc->flags & AHC_INITIATORROLE) != 0)
4586 		scsiseq_template |= ENRSELI;
4587 	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4588 
4589 	/*
4590 	 * Load the Sequencer program and Enable the adapter
4591 	 * in "fast" mode.
4592 	 */
4593 	if (bootverbose)
4594 		printf("%s: Downloading Sequencer Program...",
4595 		       ahc_name(ahc));
4596 
4597 	ahc_loadseq(ahc);
4598 
4599 	if ((ahc->features & AHC_ULTRA2) != 0) {
4600 		int wait;
4601 
4602 		/*
4603 		 * Wait for up to 500ms for our transceivers
4604 		 * to settle.  If the adapter does not have
4605 		 * a cable attached, the transceivers may
4606 		 * never settle, so don't complain if we
4607 		 * fail here.
4608 		 */
4609 		ahc_pause(ahc);
4610 		for (wait = 5000;
4611 		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
4612 		     wait--)
4613 			ahc_delay(100);
4614 		ahc_unpause(ahc);
4615 	}
4616 	return (0);
4617 }
4618 
4619 void
4620 ahc_intr_enable(struct ahc_softc *ahc, int enable)
4621 {
4622 	u_int hcntrl;
4623 
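	/*
	 * Keep our cached pause/unpause copies of HCNTRL in sync with
	 * the interrupt enable state so that later pause and unpause
	 * writes do not clobber INTEN.
	 */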
4624 	hcntrl = ahc_inb(ahc, HCNTRL);
4625 	hcntrl &= ~INTEN;
4626 	ahc->pause &= ~INTEN;
4627 	ahc->unpause &= ~INTEN;
4628 	if (enable) {
4629 		hcntrl |= INTEN;
4630 		ahc->pause |= INTEN;
4631 		ahc->unpause |= INTEN;
4632 	}
4633 	ahc_outb(ahc, HCNTRL, hcntrl);
4634 }
4635 
4636 /*
4637  * Ensure that the card is paused in a location
4638  * outside of all critical sections and that all
4639  * pending work is completed prior to returning.
4640  * This routine should only be called from outside
4641  * an interrupt context.
4642  */
4643 void
4644 ahc_pause_and_flushwork(struct ahc_softc *ahc)
4645 {
4646 	int intstat;
4647 	int maxloops;
4648 
4649 	maxloops = 1000;
4650 	ahc->flags |= AHC_ALL_INTERRUPTS;
4651 	intstat = 0;
4652 	do {
4653 		ahc_intr(ahc);
4654 		ahc_pause(ahc);
4655 		ahc_clear_critical_section(ahc);
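		/*
		 * An INTSTAT of 0xFF usually means a removable
		 * controller has been pulled, so stop polling it.
		 */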
4656 		if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0)
4657 			break;
4658 		maxloops--;
4659 	} while (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) && --maxloops);
4660 	if (maxloops == 0) {
4661 		printf("Infinite interrupt loop, INTSTAT = %x",
4662 		      ahc_inb(ahc, INTSTAT));
4663 	}
4664 	ahc_platform_flushwork(ahc);
4665 	ahc->flags &= ~AHC_ALL_INTERRUPTS;
4666 }
4667 
4668 int
4669 ahc_suspend(struct ahc_softc *ahc)
4670 {
4671 	uint8_t *ptr;
4672 	int	 i;
4673 
4674 	ahc_pause_and_flushwork(ahc);
4675 
4676 	if (LIST_FIRST(&ahc->pending_scbs) != NULL)
4677 		return (EBUSY);
4678 
4679 #ifdef AHC_TARGET_MODE
4680 	/*
4681 	 * XXX What about ATIOs that have not yet been serviced?
4682 	 * Perhaps we should just refuse to be suspended if we
4683 	 * are acting in a target role.
4684 	 */
4685 	if (ahc->pending_device != NULL)
4686 		return (EBUSY);
4687 #endif
4688 
4689 	/* Save volatile registers */
4690 	if ((ahc->features & AHC_TWIN) != 0) {
4691 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4692 		ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ);
4693 		ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
4694 		ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
4695 		ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0);
4696 		ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1);
4697 		ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER);
4698 		ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL);
4699 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4700 	}
4701 	ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ);
4702 	ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
4703 	ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
4704 	ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0);
4705 	ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1);
4706 	ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER);
4707 	ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL);
4708 
4709 	if ((ahc->chip & AHC_PCI) != 0) {
4710 		ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
4711 		ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
4712 	}
4713 
4714 	if ((ahc->features & AHC_DT) != 0) {
4715 		u_int sfunct;
4716 
4717 		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
4718 		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
4719 		ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE);
4720 		ahc_outb(ahc, SFUNCT, sfunct);
4721 		ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1);
4722 	}
4723 
4724 	if ((ahc->features & AHC_MULTI_FUNC) != 0)
4725 		ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR);
4726 
4727 	if ((ahc->features & AHC_ULTRA2) != 0)
4728 		ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH);
4729 
4730 	ptr = ahc->suspend_state.scratch_ram;
4731 	for (i = 0; i < 64; i++)
4732 		*ptr++ = ahc_inb(ahc, SRAM_BASE + i);
4733 
4734 	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4735 		for (i = 0; i < 16; i++)
4736 			*ptr++ = ahc_inb(ahc, TARG_OFFSET + i);
4737 	}
4738 
4739 	ptr = ahc->suspend_state.btt;
4740 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
4741 		for (i = 0;i < AHC_NUM_TARGETS; i++) {
4742 			int j;
4743 
4744 			for (j = 0;j < AHC_NUM_LUNS; j++) {
4745 				u_int tcl;
4746 
4747 				tcl = BUILD_TCL(i << 4, j);
4748 				*ptr++ = ahc_index_busy_tcl(ahc, tcl);
4749 			}
4750 		}
4751 	}
4752 	ahc_shutdown(ahc);
4753 	return (0);
4754 }
4755 
4756 int
4757 ahc_resume(struct ahc_softc *ahc)
4758 {
4759 	uint8_t *ptr;
4760 	int	 i;
4761 
4762 	ahc_reset(ahc);
4763 
4764 	ahc_build_free_scb_list(ahc);
4765 
4766 	/* Restore volatile registers */
4767 	if ((ahc->features & AHC_TWIN) != 0) {
4768 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4769 		ahc_outb(ahc, SCSIID, ahc->our_id_b);
4770 		ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq);
4771 		ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0);
4772 		ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1);
4773 		ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0);
4774 		ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1);
4775 		ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer);
4776 		ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl);
4777 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4778 	}
4779 	ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq);
4780 	ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0);
4781 	ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1);
4782 	ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0);
4783 	ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1);
4784 	ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer);
4785 	ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl);
4786 	if ((ahc->features & AHC_ULTRA2) != 0)
4787 		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4788 	else
4789 		ahc_outb(ahc, SCSIID, ahc->our_id);
4790 
4791 	if ((ahc->chip & AHC_PCI) != 0) {
4792 		ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0);
4793 		ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus);
4794 	}
4795 
4796 	if ((ahc->features & AHC_DT) != 0) {
4797 		u_int sfunct;
4798 
4799 		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
4800 		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
4801 		ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode);
4802 		ahc_outb(ahc, SFUNCT, sfunct);
4803 		ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1);
4804 	}
4805 
4806 	if ((ahc->features & AHC_MULTI_FUNC) != 0)
4807 		ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr);
4808 
4809 	if ((ahc->features & AHC_ULTRA2) != 0)
4810 		ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh);
4811 
4812 	ptr = ahc->suspend_state.scratch_ram;
4813 	for (i = 0; i < 64; i++)
4814 		ahc_outb(ahc, SRAM_BASE + i, *ptr++);
4815 
4816 	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4817 		for (i = 0; i < 16; i++)
4818 			ahc_outb(ahc, TARG_OFFSET + i, *ptr++);
4819 	}
4820 
4821 	ptr = ahc->suspend_state.btt;
4822 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
4823 		for (i = 0;i < AHC_NUM_TARGETS; i++) {
4824 			int j;
4825 
4826 			for (j = 0;j < AHC_NUM_LUNS; j++) {
4827 				u_int tcl;
4828 
4829 				tcl = BUILD_TCL(i << 4, j);
4830 				ahc_busy_tcl(ahc, tcl, *ptr++);
4831 			}
4832 		}
4833 	}
4834 	return (0);
4835 }
4836 
4837 /************************** Busy Target Table *********************************/
4838 /*
4839  * Return the untagged transaction id for a given target/channel lun.
4840  * Optionally, clear the entry.
4841  */
4842 u_int
4843 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
4844 {
4845 	u_int scbid;
4846 	u_int target_offset;
4847 
4848 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
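		/*
		 * The busy target table lives in SCB space: the LUN
		 * selects the SCB and the target offset indexes the
		 * entry within it.
		 */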
4849 		u_int saved_scbptr;
4850 
4851 		saved_scbptr = ahc_inb(ahc, SCBPTR);
4852 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4853 		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
4854 		ahc_outb(ahc, SCBPTR, saved_scbptr);
4855 	} else {
4856 		target_offset = TCL_TARGET_OFFSET(tcl);
4857 		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
4858 	}
4859 
4860 	return (scbid);
4861 }
4862 
4863 void
4864 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
4865 {
4866 	u_int target_offset;
4867 
4868 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
4869 		u_int saved_scbptr;
4870 
4871 		saved_scbptr = ahc_inb(ahc, SCBPTR);
4872 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4873 		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
4874 		ahc_outb(ahc, SCBPTR, saved_scbptr);
4875 	} else {
4876 		target_offset = TCL_TARGET_OFFSET(tcl);
4877 		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
4878 	}
4879 }
4880 
4881 void
4882 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
4883 {
4884 	u_int target_offset;
4885 
4886 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
4887 		u_int saved_scbptr;
4888 
4889 		saved_scbptr = ahc_inb(ahc, SCBPTR);
4890 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4891 		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
4892 		ahc_outb(ahc, SCBPTR, saved_scbptr);
4893 	} else {
4894 		target_offset = TCL_TARGET_OFFSET(tcl);
4895 		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
4896 	}
4897 }
4898 
4899 /************************** SCB and SCB queue management **********************/
4900 int
4901 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
4902 	      char channel, int lun, u_int tag, role_t role)
4903 {
4904 	int targ = SCB_GET_TARGET(ahc, scb);
4905 	char chan = SCB_GET_CHANNEL(ahc, scb);
4906 	int slun = SCB_GET_LUN(scb);
4907 	int match;
4908 
4909 	match = ((chan == channel) || (channel == ALL_CHANNELS));
4910 	if (match != 0)
4911 		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
4912 	if (match != 0)
4913 		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
4914 	if (match != 0) {
4915 #ifdef AHC_TARGET_MODE
4916 		int group;
4917 
4918 		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
4919 		if (role == ROLE_INITIATOR) {
4920 			match = (group != XPT_FC_GROUP_TMODE)
4921 			      && ((tag == scb->hscb->tag)
4922 			       || (tag == SCB_LIST_NULL));
4923 		} else if (role == ROLE_TARGET) {
4924 			match = (group == XPT_FC_GROUP_TMODE)
4925 			      && ((tag == scb->io_ctx->csio.tag_id)
4926 			       || (tag == SCB_LIST_NULL));
4927 		}
4928 #else /* !AHC_TARGET_MODE */
4929 		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
4930 #endif /* AHC_TARGET_MODE */
4931 	}
4932 
4933 	return match;
4934 }
4935 
4936 void
4937 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
4938 {
4939 	int	target;
4940 	char	channel;
4941 	int	lun;
4942 
4943 	target = SCB_GET_TARGET(ahc, scb);
4944 	lun = SCB_GET_LUN(scb);
4945 	channel = SCB_GET_CHANNEL(ahc, scb);
4946 
4947 	ahc_search_qinfifo(ahc, target, channel, lun,
4948 			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
4949 			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
4950 
4951 	ahc_platform_freeze_devq(ahc, scb);
4952 }
4953 
4954 void
4955 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
4956 {
4957 	struct scb *prev_scb;
4958 
4959 	prev_scb = NULL;
4960 	if (ahc_qinfifo_count(ahc) != 0) {
4961 		u_int prev_tag;
4962 		uint8_t prev_pos;
4963 
4964 		prev_pos = ahc->qinfifonext - 1;
4965 		prev_tag = ahc->qinfifo[prev_pos];
4966 		prev_scb = ahc_lookup_scb(ahc, prev_tag);
4967 	}
4968 	ahc_qinfifo_requeue(ahc, prev_scb, scb);
4969 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4970 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4971 	} else {
4972 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4973 	}
4974 }
4975 
4976 static void
4977 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
4978 		    struct scb *scb)
4979 {
4980 	if (prev_scb == NULL) {
4981 		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
4982 	} else {
4983 		prev_scb->hscb->next = scb->hscb->tag;
4984 		ahc_sync_scb(ahc, prev_scb,
4985 			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4986 	}
4987 	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
4988 	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
4989 	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4990 }
4991 
4992 static int
4993 ahc_qinfifo_count(struct ahc_softc *ahc)
4994 {
4995 	u_int8_t qinpos;
4996 	u_int8_t diff;
4997 
4998 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4999 		qinpos = ahc_inb(ahc, SNSCB_QOFF);
5000 		ahc_outb(ahc, SNSCB_QOFF, qinpos);
5001 	} else
5002 		qinpos = ahc_inb(ahc, QINPOS);
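	/* Queue positions are 8 bits, so wraparound falls out of the math. */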
5003 	diff = ahc->qinfifonext - qinpos;
5004 	return (diff);
5005 }
5006 
5007 int
5008 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
5009 		   int lun, u_int tag, role_t role, uint32_t status,
5010 		   ahc_search_action action)
5011 {
5012 	struct	scb *scb;
5013 	struct	scb *prev_scb;
5014 	uint8_t qinstart;
5015 	uint8_t qinpos;
5016 	uint8_t qintail;
5017 	uint8_t next, prev;
5018 	uint8_t curscbptr;
5019 	int	found;
5020 	int	maxtarget;
5021 	int	i;
5022 	int	have_qregs;
5023 
5024 	qintail = ahc->qinfifonext;
5025 	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
5026 	if (have_qregs) {
5027 		qinstart = ahc_inb(ahc, SNSCB_QOFF);
5028 		ahc_outb(ahc, SNSCB_QOFF, qinstart);
5029 	} else
5030 		qinstart = ahc_inb(ahc, QINPOS);
5031 	qinpos = qinstart;
5032 	next = ahc_inb(ahc, NEXT_QUEUED_SCB);
5033 	found = 0;
5034 	prev_scb = NULL;
5035 
5036 	if (action == SEARCH_COMPLETE) {
5037 		/*
5038 		 * Don't attempt to run any queued untagged transactions
5039 		 * until we are done with the abort process.
5040 		 */
5041 		ahc_freeze_untagged_queues(ahc);
5042 	}
5043 
5044 	/*
5045 	 * Start with an empty queue.  Entries that are not chosen
5046 	 * for removal will be re-added to the queue as we go.
5047 	 */
5048 	ahc->qinfifonext = qinpos;
5049 	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
5050 
5051 	while (qinpos != qintail) {
5052 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
5053 		if (scb == NULL) {
5054 			printf("qinpos = %d, SCB index = %d\n",
5055 				qinpos, ahc->qinfifo[qinpos]);
5056 			panic("Loop 1\n");
5057 		}
5058 
5059 		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
5060 			/*
5061 			 * We found an scb that needs to be acted on.
5062 			 */
5063 			found++;
5064 			switch (action) {
5065 			case SEARCH_COMPLETE:
5066 			{
5067 				cam_status ostat;
5068 				cam_status cstat;
5069 
5070 				ostat = ahc_get_transaction_status(scb);
5071 				if (ostat == CAM_REQ_INPROG)
5072 					ahc_set_transaction_status(scb,
5073 								   status);
5074 				cstat = ahc_get_transaction_status(scb);
5075 				if (cstat != CAM_REQ_CMP)
5076 					ahc_freeze_scb(scb);
5077 				if ((scb->flags & SCB_ACTIVE) == 0)
5078 					printf("Inactive SCB in qinfifo\n");
5079 				ahc_done(ahc, scb);
5080 
5081 				/* FALLTHROUGH */
5082 			}
5083 			case SEARCH_REMOVE:
5084 				break;
5085 			case SEARCH_COUNT:
5086 				ahc_qinfifo_requeue(ahc, prev_scb, scb);
5087 				prev_scb = scb;
5088 				break;
5089 			}
5090 		} else {
5091 			ahc_qinfifo_requeue(ahc, prev_scb, scb);
5092 			prev_scb = scb;
5093 		}
5094 		qinpos++;
5095 	}
5096 
5097 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5098 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
5099 	} else {
5100 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
5101 	}
5102 
5103 	if (action != SEARCH_COUNT
5104 	 && (found != 0)
5105 	 && (qinstart != ahc->qinfifonext)) {
5106 		/*
5107 		 * The sequencer may be in the process of dmaing
5108 		 * down the SCB at the beginning of the queue.
5109 		 * This could be problematic if either the first,
5110 		 * or the second SCB is removed from the queue
5111 		 * (the first SCB includes a pointer to the "next"
5112 		 * SCB to dma). If we have removed any entries, swap
5113 		 * the first element in the queue with the next HSCB
5114 		 * so the sequencer will notice that NEXT_QUEUED_SCB
5115 		 * has changed during its dma attempt and will retry
5116 		 * the DMA.
5117 		 */
5118 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
5119 
5120 		if (scb == NULL) {
5121 			printf("found = %d, qinstart = %d, qinfifionext = %d\n",
5122 				found, qinstart, ahc->qinfifonext);
5123 			panic("First/Second Qinfifo fixup\n");
5124 		}
5125 		/*
5126 		 * ahc_swap_with_next_hscb forces our next pointer to
5127 		 * point to the reserved SCB for future commands.  Save
5128 		 * and restore our original next pointer to maintain
5129 		 * queue integrity.
5130 		 */
5131 		next = scb->hscb->next;
5132 		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
5133 		ahc_swap_with_next_hscb(ahc, scb);
5134 		scb->hscb->next = next;
5135 		ahc->qinfifo[qinstart] = scb->hscb->tag;
5136 
5137 		/* Tell the card about the new head of the qinfifo. */
5138 		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
5139 
5140 		/* Fixup the tail "next" pointer. */
5141 		qintail = ahc->qinfifonext - 1;
5142 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
5143 		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
5144 	}
5145 
5146 	/*
5147 	 * Search waiting for selection list.
5148 	 */
5149 	curscbptr = ahc_inb(ahc, SCBPTR);
5150 	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
5151 	prev = SCB_LIST_NULL;
5152 
5153 	while (next != SCB_LIST_NULL) {
5154 		uint8_t scb_index;
5155 
5156 		ahc_outb(ahc, SCBPTR, next);
5157 		scb_index = ahc_inb(ahc, SCB_TAG);
5158 		if (scb_index >= ahc->scb_data->numscbs) {
5159 			printf("Waiting List inconsistency. "
5160 			       "SCB index == %d, yet numscbs == %d.",
5161 			       scb_index, ahc->scb_data->numscbs);
5162 			ahc_dump_card_state(ahc);
5163 			panic("for safety");
5164 		}
5165 		scb = ahc_lookup_scb(ahc, scb_index);
5166 		if (scb == NULL) {
5167 			printf("scb_index = %d, next = %d\n",
5168 				scb_index, next);
5169 			panic("Waiting List traversal\n");
5170 		}
5171 		if (ahc_match_scb(ahc, scb, target, channel,
5172 				  lun, SCB_LIST_NULL, role)) {
5173 			/*
5174 			 * We found an scb that needs to be acted on.
5175 			 */
5176 			found++;
5177 			switch (action) {
5178 			case SEARCH_COMPLETE:
5179 			{
5180 				cam_status ostat;
5181 				cam_status cstat;
5182 
5183 				ostat = ahc_get_transaction_status(scb);
5184 				if (ostat == CAM_REQ_INPROG)
5185 					ahc_set_transaction_status(scb,
5186 								   status);
5187 				cstat = ahc_get_transaction_status(scb);
5188 				if (cstat != CAM_REQ_CMP)
5189 					ahc_freeze_scb(scb);
5190 				if ((scb->flags & SCB_ACTIVE) == 0)
5191 					printf("Inactive SCB in Waiting List\n");
5192 				ahc_done(ahc, scb);
5193 				/* FALLTHROUGH */
5194 			}
5195 			case SEARCH_REMOVE:
5196 				next = ahc_rem_wscb(ahc, next, prev);
5197 				break;
5198 			case SEARCH_COUNT:
5199 				prev = next;
5200 				next = ahc_inb(ahc, SCB_NEXT);
5201 				break;
5202 			}
5203 		} else {
5204 
5205 			prev = next;
5206 			next = ahc_inb(ahc, SCB_NEXT);
5207 		}
5208 	}
5209 	ahc_outb(ahc, SCBPTR, curscbptr);
5210 
5211 	/*
5212 	 * And lastly, the untagged holding queues.
5213 	 */
5214 	i = 0;
5215 	if ((ahc->flags & AHC_SCB_BTT) == 0) {
5216 
5217 		maxtarget = 16;
5218 		if (target != CAM_TARGET_WILDCARD) {
5219 
5220 			i = target;
5221 			if (channel == 'B')
5222 				i += 8;
5223 			maxtarget = i + 1;
5224 		}
5225 	} else {
5226 		maxtarget = 0;
5227 	}
5228 
5229 	for (; i < maxtarget; i++) {
5230 		struct scb_tailq *untagged_q;
5231 		struct scb *next_scb;
5232 
5233 		untagged_q = &(ahc->untagged_queues[i]);
5234 		next_scb = TAILQ_FIRST(untagged_q);
5235 		while (next_scb != NULL) {
5236 
5237 			scb = next_scb;
5238 			next_scb = TAILQ_NEXT(scb, links.tqe);
5239 
5240 			/*
5241 			 * The head of the list may be the currently
5242 			 * active untagged command for a device.
5243 			 * We're only searching for commands that
5244 			 * have not been started.  A transaction
5245 			 * marked active but still in the qinfifo
5246 			 * is removed by the qinfifo scanning code
5247 			 * above.
5248 			 */
5249 			if ((scb->flags & SCB_ACTIVE) != 0)
5250 				continue;
5251 
5252 			if (ahc_match_scb(ahc, scb, target, channel,
5253 					  lun, SCB_LIST_NULL, role)) {
5254 				/*
5255 				 * We found an scb that needs to be acted on.
5256 				 */
5257 				found++;
5258 				switch (action) {
5259 				case SEARCH_COMPLETE:
5260 				{
5261 					cam_status ostat;
5262 					cam_status cstat;
5263 
5264 					ostat = ahc_get_transaction_status(scb);
5265 					if (ostat == CAM_REQ_INPROG)
5266 						ahc_set_transaction_status(scb,
5267 								   status);
5268 					cstat = ahc_get_transaction_status(scb);
5269 					if (cstat != CAM_REQ_CMP)
5270 						ahc_freeze_scb(scb);
5271 					if ((scb->flags & SCB_ACTIVE) == 0)
5272 						printf("Inactive SCB in untaggedQ\n");
5273 					ahc_done(ahc, scb);
5274 					break;
5275 				}
5276 				case SEARCH_REMOVE:
5277 					TAILQ_REMOVE(untagged_q, scb,
5278 						     links.tqe);
5279 					break;
5280 				case SEARCH_COUNT:
5281 					break;
5282 				}
5283 			}
5284 		}
5285 	}
5286 
5287 	if (action == SEARCH_COMPLETE)
5288 		ahc_release_untagged_queues(ahc);
5289 	return (found);
5290 }
5291 
5292 int
5293 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
5294 		     int lun, u_int tag, int stop_on_first, int remove,
5295 		     int save_state)
5296 {
5297 	struct	scb *scbp;
5298 	u_int	next;
5299 	u_int	prev;
5300 	u_int	count;
5301 	u_int	active_scb;
5302 
5303 	count = 0;
5304 	next = ahc_inb(ahc, DISCONNECTED_SCBH);
5305 	prev = SCB_LIST_NULL;
5306 
5307 	if (save_state) {
5308 		/* restore this when we're done */
5309 		active_scb = ahc_inb(ahc, SCBPTR);
5310 	} else
5311 		/* Silence compiler */
5312 		active_scb = SCB_LIST_NULL;
5313 
5314 	while (next != SCB_LIST_NULL) {
5315 		u_int scb_index;
5316 
5317 		ahc_outb(ahc, SCBPTR, next);
5318 		scb_index = ahc_inb(ahc, SCB_TAG);
5319 		if (scb_index >= ahc->scb_data->numscbs) {
5320 			printf("Disconnected List inconsistency. "
5321 			       "SCB index == %d, yet numscbs == %d.\n",
5322 			       scb_index, ahc->scb_data->numscbs);
5323 			ahc_dump_card_state(ahc);
5324 			panic("for safety");
5325 		}
5326 
5327 		if (next == prev) {
5328 			panic("Disconnected List Loop. "
5329 			      "cur SCBPTR == %x, prev SCBPTR == %x.",
5330 			      next, prev);
5331 		}
5332 		scbp = ahc_lookup_scb(ahc, scb_index);
5333 		if (ahc_match_scb(ahc, scbp, target, channel, lun,
5334 				  tag, ROLE_INITIATOR)) {
5335 			count++;
5336 			if (remove) {
5337 				next =
5338 				    ahc_rem_scb_from_disc_list(ahc, prev, next);
5339 			} else {
5340 				prev = next;
5341 				next = ahc_inb(ahc, SCB_NEXT);
5342 			}
5343 			if (stop_on_first)
5344 				break;
5345 		} else {
5346 			prev = next;
5347 			next = ahc_inb(ahc, SCB_NEXT);
5348 		}
5349 	}
5350 	if (save_state)
5351 		ahc_outb(ahc, SCBPTR, active_scb);
5352 	return (count);
5353 }
5354 
5355 /*
5356  * Remove an SCB from the on chip list of disconnected transactions.
5357  * This list is empty/unused if we are not performing SCB paging.
5358  */
5359 static u_int
5360 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
5361 {
5362 	u_int next;
5363 
5364 	ahc_outb(ahc, SCBPTR, scbptr);
5365 	next = ahc_inb(ahc, SCB_NEXT);
5366 
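	/*
	 * Clear the control byte so this SCB no longer appears active,
	 * then return it to the sequencer's free list.
	 */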
5367 	ahc_outb(ahc, SCB_CONTROL, 0);
5368 
5369 	ahc_add_curscb_to_free_list(ahc);
5370 
5371 	if (prev != SCB_LIST_NULL) {
5372 		ahc_outb(ahc, SCBPTR, prev);
5373 		ahc_outb(ahc, SCB_NEXT, next);
5374 	} else
5375 		ahc_outb(ahc, DISCONNECTED_SCBH, next);
5376 
5377 	return (next);
5378 }
5379 
5380 /*
5381  * Add the SCB as selected by SCBPTR onto the on chip list of
5382  * free hardware SCBs.  This list is empty/unused if we are not
5383  * performing SCB paging.
5384  */
5385 static void
5386 ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5387 {
5388 	/*
5389 	 * Invalidate the tag so that our abort
5390 	 * routines don't think it's active.
5391 	 */
5392 	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
5393 
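	/* Only controllers that page SCBs maintain an on-chip free list. */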
5394 	if ((ahc->flags & AHC_PAGESCBS) != 0) {
5395 		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
5396 		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
5397 	}
5398 }
5399 
5400 /*
5401  * Manipulate the waiting for selection list and return the
5402  * scb that follows the one that we remove.
5403  */
5404 static u_int
5405 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5406 {
5407 	u_int curscb, next;
5408 
5409 	/*
5410 	 * Select the SCB we want to abort and
5411 	 * pull the next pointer out of it.
5412 	 */
5413 	curscb = ahc_inb(ahc, SCBPTR);
5414 	ahc_outb(ahc, SCBPTR, scbpos);
5415 	next = ahc_inb(ahc, SCB_NEXT);
5416 
5417 	/* Clear the necessary fields */
5418 	ahc_outb(ahc, SCB_CONTROL, 0);
5419 
5420 	ahc_add_curscb_to_free_list(ahc);
5421 
5422 	/* update the waiting list */
5423 	if (prev == SCB_LIST_NULL) {
5424 		/* First in the list */
5425 		ahc_outb(ahc, WAITING_SCBH, next);
5426 
5427 		/*
5428 		 * Ensure we aren't attempting to perform
5429 		 * selection for this entry.
5430 		 */
5431 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
5432 	} else {
5433 		/*
5434 		 * Select the scb that pointed to us
5435 		 * and update its next pointer.
5436 		 */
5437 		ahc_outb(ahc, SCBPTR, prev);
5438 		ahc_outb(ahc, SCB_NEXT, next);
5439 	}
5440 
5441 	/*
5442 	 * Point us back at the original scb position.
5443 	 */
5444 	ahc_outb(ahc, SCBPTR, curscb);
5445 	return (next);
5446 }
5447 
5448 /******************************** Error Handling ******************************/
5449 /*
5450  * Abort all SCBs that match the given description (target/channel/lun/tag),
5451  * setting their status to the passed in status if the status has not already
5452  * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
5453  * is paused before it is called.
5454  */
5455 int
5456 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
5457 	       int lun, u_int tag, role_t role, uint32_t status)
5458 {
5459 	struct	scb *scbp;
5460 	struct	scb *scbp_next;
5461 	u_int	active_scb;
5462 	int	i, j;
5463 	int	maxtarget;
5464 	int	minlun;
5465 	int	maxlun;
5466 
5467 	int	found;
5468 
5469 	/*
5470 	 * Don't attempt to run any queued untagged transactions
5471 	 * until we are done with the abort process.
5472 	 */
5473 	ahc_freeze_untagged_queues(ahc);
5474 
5475 	/* restore this when we're done */
5476 	active_scb = ahc_inb(ahc, SCBPTR);
5477 
5478 	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
5479 				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5480 
5481 	/*
5482 	 * Clean out the busy target table for any untagged commands.
5483 	 */
5484 	i = 0;
5485 	maxtarget = 16;
5486 	if (target != CAM_TARGET_WILDCARD) {
5487 		i = target;
5488 		if (channel == 'B')
5489 			i += 8;
5490 		maxtarget = i + 1;
5491 	}
5492 
5493 	if (lun == CAM_LUN_WILDCARD) {
5494 
5495 		/*
5496 		 * Unless we are using an SCB based
5497 		 * busy targets table, there is only
5498 		 * one table entry for all luns of
5499 		 * a target.
5500 		 */
5501 		minlun = 0;
5502 		maxlun = 1;
5503 		if ((ahc->flags & AHC_SCB_BTT) != 0)
5504 			maxlun = AHC_NUM_LUNS;
5505 	} else {
5506 		minlun = lun;
5507 		maxlun = lun + 1;
5508 	}
5509 
5510 	for (;i < maxtarget; i++) {
5511 		for (j = minlun;j < maxlun; j++)
5512 			ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
5513 	}
5514 
5515 	/*
5516 	 * Go through the disconnected list and remove any entries we
5517 	 * have queued for completion, 0'ing their control byte too.
5518 	 * We save the active SCB and restore it ourselves, so there
5519 	 * is no reason for this search to restore it too.
5520 	 */
5521 	ahc_search_disc_list(ahc, target, channel, lun, tag,
5522 			     /*stop_on_first*/FALSE, /*remove*/TRUE,
5523 			     /*save_state*/FALSE);
5524 
5525 	/*
5526 	 * Go through the hardware SCB array looking for commands that
5527 	 * were active but not on any list.
5528 	 */
5529 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
5530 		u_int scbid;
5531 
5532 		ahc_outb(ahc, SCBPTR, i);
5533 		scbid = ahc_inb(ahc, SCB_TAG);
5534 		scbp = ahc_lookup_scb(ahc, scbid);
5535 		if (scbp != NULL
5536 		 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))
5537 			ahc_add_curscb_to_free_list(ahc);
5538 	}
5539 
5540 	/*
5541 	 * Go through the pending CCB list and look for
5542 	 * commands for this target that are still active.
5543 	 * These are other tagged commands that were
5544 	 * disconnected when the reset occurred.
5545 	 */
5546 	scbp_next = LIST_FIRST(&ahc->pending_scbs);
5547 	while (scbp_next != NULL) {
5548 		scbp = scbp_next;
5549 		scbp_next = LIST_NEXT(scbp, pending_links);
5550 		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
5551 			cam_status ostat;
5552 
5553 			ostat = ahc_get_transaction_status(scbp);
5554 			if (ostat == CAM_REQ_INPROG)
5555 				ahc_set_transaction_status(scbp, status);
5556 			if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
5557 				ahc_freeze_scb(scbp);
5558 			if ((scbp->flags & SCB_ACTIVE) == 0)
5559 				printf("Inactive SCB on pending list\n");
5560 			ahc_done(ahc, scbp);
5561 			found++;
5562 		}
5563 	}
5564 	ahc_outb(ahc, SCBPTR, active_scb);
5565 	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
5566 	ahc_release_untagged_queues(ahc);
5567 	return (found);
5568 }
5569 
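/*
 * Reset the currently selected SCSI bus: mask the reset interrupt,
 * assert SCSIRSTO for AHC_BUSRESET_DELAY, deassert it, then clear the
 * resulting interrupt status and re-enable ENSCSIRST.
 */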
5570 static void
5571 ahc_reset_current_bus(struct ahc_softc *ahc)
5572 {
5573 	uint8_t scsiseq;
5574 
5575 	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
5576 	scsiseq = ahc_inb(ahc, SCSISEQ);
5577 	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
5578 	ahc_delay(AHC_BUSRESET_DELAY);
5579 	/* Turn off the bus reset */
5580 	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
5581 
5582 	ahc_clear_intstat(ahc);
5583 
5584 	/* Re-enable reset interrupts */
5585 	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
5586 }
5587 
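/*
 * Reset the given channel, aborting all pending transactions on it and
 * reverting its targets to asynchronous, narrow transfers until they
 * renegotiate.  Returns the number of SCBs aborted.
 */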
5588 int
5589 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
5590 {
5591 	struct	ahc_devinfo devinfo;
5592 	u_int	initiator, target, max_scsiid;
5593 	u_int	sblkctl;
5594 	u_int	scsiseq;
5595 	u_int	simode1;
5596 	int	found;
5597 	int	restart_needed;
5598 	char	cur_channel;
5599 
5600 	ahc->pending_device = NULL;
5601 
5602 	ahc_compile_devinfo(&devinfo,
5603 			    CAM_TARGET_WILDCARD,
5604 			    CAM_TARGET_WILDCARD,
5605 			    CAM_LUN_WILDCARD,
5606 			    channel, ROLE_UNKNOWN);
5607 	ahc_pause(ahc);
5608 
5609 	/* Make sure the sequencer is in a safe location. */
5610 	ahc_clear_critical_section(ahc);
5611 
5612 	/*
5613 	 * Run our command complete fifos to ensure that we perform
5614 	 * completion processing on any commands that 'completed'
5615 	 * before the reset occurred.
5616 	 */
5617 	ahc_run_qoutfifo(ahc);
5618 #ifdef AHC_TARGET_MODE
5619 	if ((ahc->flags & AHC_TARGETROLE) != 0) {
5620 		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
5621 	}
5622 #endif
5623 
5624 	/*
5625 	 * Reset the bus if we are initiating this reset
5626 	 */
5627 	sblkctl = ahc_inb(ahc, SBLKCTL);
5628 	cur_channel = 'A';
5629 	if ((ahc->features & AHC_TWIN) != 0
5630 	 && ((sblkctl & SELBUSB) != 0))
5631 	    cur_channel = 'B';
5632 	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
5633 	if (cur_channel != channel) {
5634 		/* Case 1: Command for another bus is active.
5635 		 * Stealthily reset the other bus without
5636 		 * upsetting the current bus.
5637 		 */
5638 		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
5639 		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
5640 		ahc_outb(ahc, SIMODE1, simode1);
5641 		if (initiate_reset)
5642 			ahc_reset_current_bus(ahc);
5643 		ahc_clear_intstat(ahc);
5644 #ifdef AHC_TARGET_MODE
5645 		/*
5646 		 * Bus resets clear ENSELI, so we cannot
5647 		 * defer re-enabling bus reset interrupts
5648 		 * if we are in target mode.
5649 		 */
5650 		if ((ahc->flags & AHC_TARGETROLE) != 0)
5651 			ahc_outb(ahc, SIMODE1, simode1 | ENSCSIRST);
5652 #endif
5653 		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
5654 		ahc_outb(ahc, SBLKCTL, sblkctl);
5655 		restart_needed = FALSE;
5656 	} else {
5657 		/* Case 2: A command from this bus is active or we're idle */
5658 		ahc_clear_msg_state(ahc);
5659 		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
5660 		ahc_outb(ahc, SIMODE1, simode1);
5661 		if (initiate_reset)
5662 			ahc_reset_current_bus(ahc);
5663 		ahc_clear_intstat(ahc);
5664 #ifdef AHC_TARGET_MODE
5665 		/*
5666 		 * Bus resets clear ENSELI, so we cannot
5667 		 * defer re-enabling bus reset interrupts
5668 		 * if we are in target mode.
5669 		 */
5670 		if ((ahc->flags & AHC_TARGETROLE) != 0)
5671 			ahc_outb(ahc, SIMODE1, simode1 | ENSCSIRST);
5672 #endif
5673 		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
5674 		restart_needed = TRUE;
5675 	}
5676 
5677 	/*
5678 	 * Clean up all the state information for the
5679 	 * pending transactions on this bus.
5680 	 */
5681 	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
5682 			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
5683 			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
5684 
5685 	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
5686 
5687 #ifdef AHC_TARGET_MODE
5688 	/*
5689 	 * Send an immediate notify ccb to all target mode peripheral
5690 	 * drivers affected by this action.
5691 	 */
5692 	for (target = 0; target <= max_scsiid; target++) {
5693 		struct ahc_tmode_tstate* tstate;
5694 		u_int lun;
5695 
5696 		tstate = ahc->enabled_targets[target];
5697 		if (tstate == NULL)
5698 			continue;
5699 		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
5700 			struct ahc_tmode_lstate* lstate;
5701 
5702 			lstate = tstate->enabled_luns[lun];
5703 			if (lstate == NULL)
5704 				continue;
5705 
5706 			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
5707 					       EVENT_TYPE_BUS_RESET, /*arg*/0);
5708 			ahc_send_lstate_events(ahc, lstate);
5709 		}
5710 	}
5711 #endif
5712 	/* Notify the XPT that a bus reset occurred */
5713 	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
5714 		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
5715 
5716 	/*
5717 	 * Revert to async/narrow transfers until we renegotiate.
5718 	 */
5719 	for (target = 0; target <= max_scsiid; target++) {
5720 
5721 		if (ahc->enabled_targets[target] == NULL)
5722 			continue;
5723 		for (initiator = 0; initiator <= max_scsiid; initiator++) {
5724 			struct ahc_devinfo devinfo;
5725 
5726 			ahc_compile_devinfo(&devinfo, target, initiator,
5727 					    CAM_LUN_WILDCARD,
5728 					    channel, ROLE_UNKNOWN);
5729 			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
5730 				      AHC_TRANS_CUR, /*paused*/TRUE);
5731 			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
5732 					 /*period*/0, /*offset*/0,
5733 					 /*ppr_options*/0, AHC_TRANS_CUR,
5734 					 /*paused*/TRUE);
5735 		}
5736 	}
5737 
5738 	if (restart_needed)
5739 		ahc_restart(ahc);
5740 	else
5741 		ahc_unpause(ahc);
5742 	return (found);
5743 }
5744 
5745 
5746 /***************************** Residual Processing ****************************/
5747 /*
5748  * Calculate the residual for a just completed SCB.
5749  */
5750 void
5751 ahc_calc_residual(struct scb *scb)
5752 {
5753 	struct hardware_scb *hscb;
5754 	struct status_pkt *spkt;
5755 	uint32_t sgptr;
5756 	uint32_t resid_sgptr;
5757 	uint32_t resid;
5758 
5759 	/*
5760 	 * 5 cases.
5761 	 * 1) No residual.
5762 	 *    SG_RESID_VALID clear in sgptr.
5763 	 * 2) Transferless command
5764 	 * 3) Never performed any transfers.
5765 	 *    sgptr has SG_FULL_RESID set.
5766 	 * 4) No residual but target did not
5767 	 *    save data pointers after the
5768 	 *    last transfer, so sgptr was
5769 	 *    never updated.
5770 	 * 5) We have a partial residual.
5771 	 *    Use residual_sgptr to determine
5772 	 *    where we are.
5773 	 */
5774 
5775 	hscb = scb->hscb;
5776 	sgptr = ahc_le32toh(hscb->sgptr);
5777 	if ((sgptr & SG_RESID_VALID) == 0)
5778 		/* Case 1 */
5779 		return;
5780 	sgptr &= ~SG_RESID_VALID;
5781 
5782 	if ((sgptr & SG_LIST_NULL) != 0)
5783 		/* Case 2 */
5784 		return;
5785 
5786 	spkt = &hscb->shared_data.status;
5787 	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
5788 	if ((sgptr & SG_FULL_RESID) != 0) {
5789 		/* Case 3 */
5790 		resid = ahc_get_transfer_length(scb);
5791 	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
5792 		/* Case 4 */
5793 		return;
5794 	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
5795 		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
5796 	} else {
5797 		struct ahc_dma_seg *sg;
5798 
5799 		/*
5800 		 * Remainder of the SG where the transfer
5801 		 * stopped.
5802 		 */
5803 		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
5804 		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
5805 
5806 		/* The residual sg_ptr always points to the next sg */
5807 		sg--;
5808 
5809 		/*
5810 		 * Add up the contents of all residual
5811 		 * SG segments that are after the SG where
5812 		 * the transfer stopped.
5813 		 */
5814 		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
5815 			sg++;
5816 			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
5817 		}
5818 	}
5819 	if ((scb->flags & SCB_SENSE) == 0)
5820 		ahc_set_residual(scb, resid);
5821 	else
5822 		ahc_set_sense_residual(scb, resid);
5823 
5824 #ifdef AHC_DEBUG
5825 	if ((ahc_debug & AHC_SHOWMISC) != 0) {
5826 		ahc_print_path(ahc, scb);
5827 		printf("Handled Residual of %d bytes\n", resid);
5828 	}
5829 #endif
5830 }
5831 
5832 /******************************* Target Mode **********************************/
5833 #ifdef AHC_TARGET_MODE
5834 /*
5835  * Add a target mode event to this lun's queue
5836  */
5837 static void
5838 ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
5839 		       u_int initiator_id, u_int event_type, u_int event_arg)
5840 {
5841 	struct ahc_tmode_event *event;
5842 	int pending;
5843 
5844 	xpt_freeze_devq(lstate->path, /*count*/1);
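	/*
	 * Count the events already queued in the circular event buffer
	 * so we can tell whether this one would overflow it.
	 */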
5845 	if (lstate->event_w_idx >= lstate->event_r_idx)
5846 		pending = lstate->event_w_idx - lstate->event_r_idx;
5847 	else
5848 		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
5849 			- (lstate->event_r_idx - lstate->event_w_idx);
5850 
5851 	if (event_type == EVENT_TYPE_BUS_RESET
5852 	 || event_type == MSG_BUS_DEV_RESET) {
5853 		/*
5854 		 * Any earlier events are irrelevant, so reset our buffer.
5855 		 * This has the effect of allowing us to deal with reset
5856 		 * floods (an external device holding down the reset line)
5857 		 * without losing the event that is really interesting.
5858 		 */
5859 		lstate->event_r_idx = 0;
5860 		lstate->event_w_idx = 0;
5861 		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
5862 	}
5863 
5864 	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
5865 		xpt_print_path(lstate->path);
5866 		printf("immediate event %x:%x lost\n",
5867 		       lstate->event_buffer[lstate->event_r_idx].event_type,
5868 		       lstate->event_buffer[lstate->event_r_idx].event_arg);
5869 		lstate->event_r_idx++;
5870 		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5871 			lstate->event_r_idx = 0;
5872 		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
5873 	}
5874 
5875 	event = &lstate->event_buffer[lstate->event_w_idx];
5876 	event->initiator_id = initiator_id;
5877 	event->event_type = event_type;
5878 	event->event_arg = event_arg;
5879 	lstate->event_w_idx++;
5880 	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5881 		lstate->event_w_idx = 0;
5882 }
5883 
5884 /*
5885  * Send any target mode events queued up waiting
5886  * for immediate notify resources.
5887  */
5888 void
5889 ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
5890 {
5891 	struct ccb_hdr *ccbh;
5892 	struct ccb_immed_notify *inot;
5893 
5894 	while (lstate->event_r_idx != lstate->event_w_idx
5895 	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
5896 		struct ahc_tmode_event *event;
5897 
5898 		event = &lstate->event_buffer[lstate->event_r_idx];
5899 		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
5900 		inot = (struct ccb_immed_notify *)ccbh;
5901 		switch (event->event_type) {
5902 		case EVENT_TYPE_BUS_RESET:
5903 			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
5904 			break;
5905 		default:
5906 			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
5907 			inot->message_args[0] = event->event_type;
5908 			inot->message_args[1] = event->event_arg;
5909 			break;
5910 		}
5911 		inot->initiator_id = event->initiator_id;
5912 		inot->sense_len = 0;
5913 		xpt_done((union ccb *)inot);
5914 		lstate->event_r_idx++;
5915 		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5916 			lstate->event_r_idx = 0;
5917 	}
5918 }
5919 #endif
5920 
5921 /******************** Sequencer Program Patching/Download *********************/
5922 
5923 #ifdef AHC_DUMP_SEQ
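/*
 * Read back and print the sequencer program currently loaded into
 * SEQRAM, one 32-bit instruction per line.
 */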
5924 void
5925 ahc_dumpseq(struct ahc_softc* ahc)
5926 {
5927 	int i;
5928 	int max_prog;
5929 
5930 	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
5931 		max_prog = 448;
5932 	else if ((ahc->features & AHC_ULTRA2) != 0)
5933 		max_prog = 768;
5934 	else
5935 		max_prog = 512;
5936 
5937 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5938 	ahc_outb(ahc, SEQADDR0, 0);
5939 	ahc_outb(ahc, SEQADDR1, 0);
5940 	for (i = 0; i < max_prog; i++) {
5941 		uint8_t ins_bytes[4];
5942 
5943 		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
5944 		printf("0x%08x\n", ins_bytes[0] << 24
5945 				 | ins_bytes[1] << 16
5946 				 | ins_bytes[2] << 8
5947 				 | ins_bytes[3]);
5948 	}
5949 }
5950 #endif
5951 
5952 static void
5953 ahc_loadseq(struct ahc_softc *ahc)
5954 {
5955 	struct	cs cs_table[num_critical_sections];
5956 	u_int	begin_set[num_critical_sections];
5957 	u_int	end_set[num_critical_sections];
5958 	struct	patch *cur_patch;
5959 	u_int	cs_count;
5960 	u_int	cur_cs;
5961 	u_int	i;
5962 	int	downloaded;
5963 	u_int	skip_addr;
5964 	u_int	sg_prefetch_cnt;
5965 	uint8_t	download_consts[7];
5966 
5967 	/*
5968 	 * Start out with 0 critical sections
5969 	 * that apply to this firmware load.
5970 	 */
5971 	cs_count = 0;
5972 	cur_cs = 0;
5973 	memset(begin_set, 0, sizeof(begin_set));
5974 	memset(end_set, 0, sizeof(end_set));
5975 
5976 	/* Setup downloadable constant table */
5977 	download_consts[QOUTFIFO_OFFSET] = 0;
5978 	if (ahc->targetcmds != NULL)
5979 		download_consts[QOUTFIFO_OFFSET] += 32;
5980 	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
5981 	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
5982 	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
5983 	sg_prefetch_cnt = ahc->pci_cachesize;
5984 	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
5985 		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
5986 	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
5987 	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
5988 	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
5989 
5990 	cur_patch = patches;
5991 	downloaded = 0;
5992 	skip_addr = 0;
5993 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5994 	ahc_outb(ahc, SEQADDR0, 0);
5995 	ahc_outb(ahc, SEQADDR1, 0);
5996 
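	/*
	 * Download the program one instruction at a time, skipping any
	 * instructions that belong to de-selected patches and recording
	 * critical section boundaries in terms of downloaded addresses.
	 */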
5997 	for (i = 0; i < sizeof(seqprog)/4; i++) {
5998 		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
5999 			/*
6000 			 * Don't download this instruction as it
6001 			 * is in a patch that was removed.
6002 			 */
6003 			continue;
6004 		}
6005 		/*
6006 		 * Move through the CS table until we find a CS
6007 		 * that might apply to this instruction.
6008 		 */
6009 		for (; cur_cs < num_critical_sections; cur_cs++) {
6010 			if (critical_sections[cur_cs].end <= i) {
6011 				if (begin_set[cs_count] == TRUE
6012 				 && end_set[cs_count] == FALSE) {
6013 					cs_table[cs_count].end = downloaded;
6014 				 	end_set[cs_count] = TRUE;
6015 					cs_count++;
6016 				}
6017 				continue;
6018 			}
6019 			if (critical_sections[cur_cs].begin <= i
6020 			 && begin_set[cs_count] == FALSE) {
6021 				cs_table[cs_count].begin = downloaded;
6022 				begin_set[cs_count] = TRUE;
6023 			}
6024 			break;
6025 		}
6026 		ahc_download_instr(ahc, i, download_consts);
6027 		downloaded++;
6028 	}
6029 
6030 	ahc->num_critical_sections = cs_count;
6031 	if (cs_count != 0) {
6032 
6033 		cs_count *= sizeof(struct cs);
6034 		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
6035 		if (ahc->critical_sections == NULL)
6036 			panic("ahc_loadseq: Could not malloc");
6037 		memcpy(ahc->critical_sections, cs_table, cs_count);
6038 	}
6039 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
6040 	ahc_restart(ahc);
6041 
6042 	if (bootverbose)
6043 		printf(" %d instructions downloaded\n", downloaded);
6044 }
6045 
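/*
 * Advance through any patches that begin at start_instr and decide
 * whether that instruction should be downloaded.  Returns zero if the
 * instruction lies in a rejected patch and must be skipped.
 */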
6046 static int
6047 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
6048 		u_int start_instr, u_int *skip_addr)
6049 {
6050 	struct	patch *cur_patch;
6051 	struct	patch *last_patch;
6052 	u_int	num_patches;
6053 
6054 	num_patches = sizeof(patches)/sizeof(struct patch);
6055 	last_patch = &patches[num_patches];
6056 	cur_patch = *start_patch;
6057 
6058 	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
6059 
6060 		if (cur_patch->patch_func(ahc) == 0) {
6061 
6062 			/* Start rejecting code */
6063 			*skip_addr = start_instr + cur_patch->skip_instr;
6064 			cur_patch += cur_patch->skip_patch;
6065 		} else {
6066 			/* Accepted this patch.  Advance to the next
6067 			 * one and wait for our instruction pointer to
6068 			 * hit this point.
6069 			 */
6070 			cur_patch++;
6071 		}
6072 	}
6073 
6074 	*start_patch = cur_patch;
6075 	if (start_instr < *skip_addr)
6076 		/* Still skipping */
6077 		return (0);
6078 
6079 	return (1);
6080 }
6081 
6082 static void
6083 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
6084 {
6085 	union	ins_formats instr;
6086 	struct	ins_format1 *fmt1_ins;
6087 	struct	ins_format3 *fmt3_ins;
6088 	u_int	opcode;
6089 
6090 	/*
6091 	 * The firmware is always compiled into a little endian format.
6092 	 */
6093 	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
6094 
6095 	fmt1_ins = &instr.format1;
6096 	fmt3_ins = NULL;
6097 
6098 	/* Pull the opcode */
6099 	opcode = instr.format1.opcode;
6100 	switch (opcode) {
6101 	case AIC_OP_JMP:
6102 	case AIC_OP_JC:
6103 	case AIC_OP_JNC:
6104 	case AIC_OP_CALL:
6105 	case AIC_OP_JNE:
6106 	case AIC_OP_JNZ:
6107 	case AIC_OP_JE:
6108 	case AIC_OP_JZ:
6109 	{
6110 		struct patch *cur_patch;
6111 		int address_offset;
6112 		u_int address;
6113 		u_int skip_addr;
6114 		u_int i;
6115 
6116 		fmt3_ins = &instr.format3;
6117 		address_offset = 0;
6118 		address = fmt3_ins->address;
6119 		cur_patch = patches;
6120 		skip_addr = 0;
6121 
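		/*
		 * The assembled branch target assumes the full program
		 * image.  Subtract the number of patched-out instructions
		 * that precede the target so the address matches what was
		 * actually downloaded.
		 */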
6122 		for (i = 0; i < address;) {
6123 
6124 			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
6125 
6126 			if (skip_addr > i) {
6127 				int end_addr;
6128 
6129 				end_addr = MIN(address, skip_addr);
6130 				address_offset += end_addr - i;
6131 				i = skip_addr;
6132 			} else {
6133 				i++;
6134 			}
6135 		}
6136 		address -= address_offset;
6137 		fmt3_ins->address = address;
6138 		/* FALLTHROUGH */
6139 	}
6140 	case AIC_OP_OR:
6141 	case AIC_OP_AND:
6142 	case AIC_OP_XOR:
6143 	case AIC_OP_ADD:
6144 	case AIC_OP_ADC:
6145 	case AIC_OP_BMOV:
6146 		if (fmt1_ins->parity != 0) {
6147 			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
6148 		}
6149 		fmt1_ins->parity = 0;
6150 		if ((ahc->features & AHC_CMD_CHAN) == 0
6151 		 && opcode == AIC_OP_BMOV) {
6152 			/*
6153 			 * Block move was added at the same time
6154 			 * as the command channel.  Verify that
6155 			 * this is only a move of a single element
6156 			 * and convert the BMOV to a MOV
6157 			 * (AND with an immediate of FF).
6158 			 */
6159 			if (fmt1_ins->immediate != 1)
6160 				panic("%s: BMOV not supported\n",
6161 				      ahc_name(ahc));
6162 			fmt1_ins->opcode = AIC_OP_AND;
6163 			fmt1_ins->immediate = 0xff;
6164 		}
6165 		/* FALLTHROUGH */
6166 	case AIC_OP_ROL:
6167 		if ((ahc->features & AHC_ULTRA2) != 0) {
6168 			int i, count;
6169 
6170 			/* Calculate odd parity for the instruction */
6171 			for (i = 0, count = 0; i < 31; i++) {
6172 				uint32_t mask;
6173 
6174 				mask = 0x01 << i;
6175 				if ((instr.integer & mask) != 0)
6176 					count++;
6177 			}
6178 			if ((count & 0x01) == 0)
6179 				instr.format1.parity = 1;
6180 		} else {
6181 			/* Compress the instruction for older sequencers */
6182 			if (fmt3_ins != NULL) {
6183 				instr.integer =
6184 					fmt3_ins->immediate
6185 				      | (fmt3_ins->source << 8)
6186 				      | (fmt3_ins->address << 16)
6187 				      |	(fmt3_ins->opcode << 25);
6188 			} else {
6189 				instr.integer =
6190 					fmt1_ins->immediate
6191 				      | (fmt1_ins->source << 8)
6192 				      | (fmt1_ins->destination << 16)
6193 				      |	(fmt1_ins->ret << 24)
6194 				      |	(fmt1_ins->opcode << 25);
6195 			}
6196 		}
6197 		/* The sequencer is a little endian cpu */
6198 		instr.integer = ahc_htole32(instr.integer);
6199 		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
6200 		break;
6201 	default:
6202 		panic("Unknown opcode encountered in seq program");
6203 		break;
6204 	}
6205 }
6206 
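/*
 * Dump a snapshot of controller registers and of every SCB queue
 * (qinfifo, waiting, disconnected, qoutfifo, sequencer and kernel free
 * lists, pending list, and untagged queues) for debugging.
 */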
6207 void
6208 ahc_dump_card_state(struct ahc_softc *ahc)
6209 {
6210 	struct scb *scb;
6211 	struct scb_tailq *untagged_q;
6212 	int target;
6213 	int maxtarget;
6214 	int i;
6215 	uint8_t last_phase;
6216 	uint8_t qinpos;
6217 	uint8_t qintail;
6218 	uint8_t qoutpos;
6219 	uint8_t scb_index;
6220 	uint8_t saved_scbptr;
6221 
6222 	saved_scbptr = ahc_inb(ahc, SCBPTR);
6223 
6224 	last_phase = ahc_inb(ahc, LASTPHASE);
6225 	printf("%s: Dumping Card State %s, at SEQADDR 0x%x\n",
6226 	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
6227 	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
6228 	printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
6229 	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
6230 	       ahc_inb(ahc, ARG_2));
6231 	printf("HCNT = 0x%x\n", ahc_inb(ahc, HCNT));
6232 	printf("SCSISEQ = 0x%x, SBLKCTL = 0x%x\n",
6233 	       ahc_inb(ahc, SCSISEQ), ahc_inb(ahc, SBLKCTL));
6234 	printf(" DFCNTRL = 0x%x, DFSTATUS = 0x%x\n",
6235 	       ahc_inb(ahc, DFCNTRL), ahc_inb(ahc, DFSTATUS));
6236 	printf("LASTPHASE = 0x%x, SCSISIGI = 0x%x, SXFRCTL0 = 0x%x\n",
6237 	       last_phase, ahc_inb(ahc, SCSISIGI), ahc_inb(ahc, SXFRCTL0));
6238 	printf("SSTAT0 = 0x%x, SSTAT1 = 0x%x\n",
6239 	       ahc_inb(ahc, SSTAT0), ahc_inb(ahc, SSTAT1));
6240 	if ((ahc->features & AHC_DT) != 0)
6241 		printf("SCSIPHASE = 0x%x\n", ahc_inb(ahc, SCSIPHASE));
6242 	printf("STACK == 0x%x, 0x%x, 0x%x, 0x%x\n",
6243 		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
6244 		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
6245 		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
6246 		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8));
6247 	printf("SCB count = %d\n", ahc->scb_data->numscbs);
6248 	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
6249 	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
6250 	/* QINFIFO */
6251 	printf("QINFIFO entries: ");
6252 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6253 		qinpos = ahc_inb(ahc, SNSCB_QOFF);
6254 		ahc_outb(ahc, SNSCB_QOFF, qinpos);
6255 	} else
6256 		qinpos = ahc_inb(ahc, QINPOS);
6257 	qintail = ahc->qinfifonext;
6258 	while (qinpos != qintail) {
6259 		printf("%d ", ahc->qinfifo[qinpos]);
6260 		qinpos++;
6261 	}
6262 	printf("\n");
6263 
6264 	printf("Waiting Queue entries: ");
6265 	scb_index = ahc_inb(ahc, WAITING_SCBH);
6266 	i = 0;
6267 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6268 		ahc_outb(ahc, SCBPTR, scb_index);
6269 		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6270 		scb_index = ahc_inb(ahc, SCB_NEXT);
6271 	}
6272 	printf("\n");
6273 
6274 	printf("Disconnected Queue entries: ");
6275 	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
6276 	i = 0;
6277 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6278 		ahc_outb(ahc, SCBPTR, scb_index);
6279 		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6280 		scb_index = ahc_inb(ahc, SCB_NEXT);
6281 	}
6282 	printf("\n");
6283 
6284 	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
6285 	printf("QOUTFIFO entries: ");
6286 	qoutpos = ahc->qoutfifonext;
6287 	i = 0;
6288 	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
6289 		printf("%d ", ahc->qoutfifo[qoutpos]);
6290 		qoutpos++;
6291 	}
6292 	printf("\n");
6293 
6294 	printf("Sequencer Free SCB List: ");
6295 	scb_index = ahc_inb(ahc, FREE_SCBH);
6296 	i = 0;
6297 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6298 		ahc_outb(ahc, SCBPTR, scb_index);
6299 		printf("%d ", scb_index);
6300 		scb_index = ahc_inb(ahc, SCB_NEXT);
6301 	}
6302 	printf("\n");
6303 
6304 	printf("Pending list: ");
6305 	i = 0;
6306 	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6307 		if (i++ > 256)
6308 			break;
6309 		if (scb != LIST_FIRST(&ahc->pending_scbs))
6310 			printf(", ");
6311 		printf("%d", scb->hscb->tag);
6312 		if ((ahc->flags & AHC_PAGESCBS) == 0) {
6313 			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
6314 			printf("(0x%x, 0x%x)", ahc_inb(ahc, SCB_CONTROL),
6315 			       ahc_inb(ahc, SCB_TAG));
6316 		}
6317 	}
6318 	printf("\n");
6319 
6320 	printf("Kernel Free SCB list: ");
6321 	i = 0;
6322 	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
6323 		if (i++ > 256)
6324 			break;
6325 		printf("%d ", scb->hscb->tag);
6326 	}
6327 	printf("\n");
6328 
6329 	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
6330 	for (target = 0; target <= maxtarget; target++) {
6331 		untagged_q = &ahc->untagged_queues[target];
6332 		if (TAILQ_FIRST(untagged_q) == NULL)
6333 			continue;
6334 		printf("Untagged Q(%d): ", target);
6335 		i = 0;
6336 		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
6337 			if (i++ > 256)
6338 				break;
6339 			printf("%d ", scb->hscb->tag);
6340 		}
6341 		printf("\n");
6342 	}
6343 
6344 	ahc_platform_dump_card_state(ahc);
6345 	ahc_outb(ahc, SCBPTR, saved_scbptr);
6346 }
6347 
6348 /************************* Target Mode ****************************************/
6349 #ifdef AHC_TARGET_MODE
6350 cam_status
6351 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
6352 		    struct ahc_tmode_tstate **tstate,
6353 		    struct ahc_tmode_lstate **lstate,
6354 		    int notfound_failure)
6355 {
6356 
6357 	if ((ahc->features & AHC_TARGETMODE) == 0)
6358 		return (CAM_REQ_INVALID);
6359 
6360 	/*
6361 	 * Handle the 'black hole' device that sucks up
6362 	 * requests to unattached luns on enabled targets.
6363 	 */
6364 	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
6365 	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
6366 		*tstate = NULL;
6367 		*lstate = ahc->black_hole;
6368 	} else {
6369 		u_int max_id;
6370 
6371 		max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
6372 		if (ccb->ccb_h.target_id > max_id)
6373 			return (CAM_TID_INVALID);
6374 
6375 		if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
6376 			return (CAM_LUN_INVALID);
6377 
6378 		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
6379 		*lstate = NULL;
6380 		if (*tstate != NULL)
6381 			*lstate =
6382 			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
6383 	}
6384 
6385 	if (notfound_failure != 0 && *lstate == NULL)
6386 		return (CAM_PATH_INVALID);
6387 
6388 	return (CAM_REQ_CMP);
6389 }
6390 
6391 void
6392 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
6393 {
6394 	struct	   ahc_tmode_tstate *tstate;
6395 	struct	   ahc_tmode_lstate *lstate;
6396 	struct	   ccb_en_lun *cel;
6397 	cam_status status;
6398 	u_int	   target;
6399 	u_int	   lun;
6400 	u_int	   target_mask;
6401 	u_long	   s;
6402 	char	   channel;
6403 
6404 	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
6405 				     /*notfound_failure*/FALSE);
6406 
6407 	if (status != CAM_REQ_CMP) {
6408 		ccb->ccb_h.status = status;
6409 		return;
6410 	}
6411 
6412 	if ((ahc->features & AHC_MULTIROLE) != 0) {
6413 		u_int	   our_id;
6414 
6415 		if (cam_sim_bus(sim) == 0)
6416 			our_id = ahc->our_id;
6417 		else
6418 			our_id = ahc->our_id_b;
6419 
6420 		if (ccb->ccb_h.target_id != our_id) {
6421 			if ((ahc->features & AHC_MULTI_TID) != 0
6422 		   	 && (ahc->flags & AHC_INITIATORROLE) != 0) {
6423 				/*
6424 				 * Only allow additional targets if
6425 				 * the initiator role is disabled.
6426 				 * The hardware cannot handle a re-select-in
6427 				 * on the initiator id during a re-select-out
6428 				 * on a different target id.
6429 				 */
6430 				status = CAM_TID_INVALID;
6431 			} else if ((ahc->flags & AHC_INITIATORROLE) != 0
6432 				|| ahc->enabled_luns > 0) {
6433 				/*
6434 				 * Only allow our target id to change
6435 				 * if the initiator role is not configured
6436 				 * and there are no enabled luns which
6437 				 * are attached to the currently registered
6438 				 * scsi id.
6439 				 */
6440 				status = CAM_TID_INVALID;
6441 			}
6442 		}
6443 	}
6444 
6445 	if (status != CAM_REQ_CMP) {
6446 		ccb->ccb_h.status = status;
6447 		return;
6448 	}
6449 
6450 	/*
6451 	 * We now have an id that is valid.
6452 	 * If we aren't in target mode, switch modes.
6453 	 */
6454 	if ((ahc->flags & AHC_TARGETROLE) == 0
6455 	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
6456 		u_long	s;
6457 
6458 		printf("Configuring Target Mode\n");
6459 		ahc_lock(ahc, &s);
6460 		if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
6461 			ccb->ccb_h.status = CAM_BUSY;
6462 			ahc_unlock(ahc, &s);
6463 			return;
6464 		}
6465 		ahc->flags |= AHC_TARGETROLE;
6466 		if ((ahc->features & AHC_MULTIROLE) == 0)
6467 			ahc->flags &= ~AHC_INITIATORROLE;
6468 		ahc_pause(ahc);
6469 		ahc_loadseq(ahc);
6470 		ahc_unlock(ahc, &s);
6471 	}
6472 	cel = &ccb->cel;
6473 	target = ccb->ccb_h.target_id;
6474 	lun = ccb->ccb_h.target_lun;
6475 	channel = SIM_CHANNEL(ahc, sim);
6476 	target_mask = 0x01 << target;
6477 	if (channel == 'B')
6478 		target_mask <<= 8;
6479 
6480 	if (cel->enable != 0) {
6481 		u_int scsiseq;
6482 
6483 		/* Are we already enabled?? */
6484 		if (lstate != NULL) {
6485 			xpt_print_path(ccb->ccb_h.path);
6486 			printf("Lun already enabled\n");
6487 			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
6488 			return;
6489 		}
6490 
6491 		if (cel->grp6_len != 0
6492 		 || cel->grp7_len != 0) {
6493 			/*
6494 			 * Don't (yet?) support vendor
6495 			 * specific commands.
6496 			 */
6497 			ccb->ccb_h.status = CAM_REQ_INVALID;
6498 			printf("Non-zero Group Codes\n");
6499 			return;
6500 		}
6501 
6502 		/*
6503 		 * Seems to be okay.
6504 		 * Setup our data structures.
6505 		 */
6506 		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
6507 			tstate = ahc_alloc_tstate(ahc, target, channel);
6508 			if (tstate == NULL) {
6509 				xpt_print_path(ccb->ccb_h.path);
6510 				printf("Couldn't allocate tstate\n");
6511 				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6512 				return;
6513 			}
6514 		}
6515 		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
6516 		if (lstate == NULL) {
6517 			xpt_print_path(ccb->ccb_h.path);
6518 			printf("Couldn't allocate lstate\n");
6519 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6520 			return;
6521 		}
6522 		memset(lstate, 0, sizeof(*lstate));
6523 		status = xpt_create_path(&lstate->path, /*periph*/NULL,
6524 					 xpt_path_path_id(ccb->ccb_h.path),
6525 					 xpt_path_target_id(ccb->ccb_h.path),
6526 					 xpt_path_lun_id(ccb->ccb_h.path));
6527 		if (status != CAM_REQ_CMP) {
6528 			free(lstate, M_DEVBUF);
6529 			xpt_print_path(ccb->ccb_h.path);
6530 			printf("Couldn't allocate path\n");
6531 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6532 			return;
6533 		}
6534 		SLIST_INIT(&lstate->accept_tios);
6535 		SLIST_INIT(&lstate->immed_notifies);
6536 		ahc_lock(ahc, &s);
6537 		ahc_pause(ahc);
6538 		if (target != CAM_TARGET_WILDCARD) {
6539 			tstate->enabled_luns[lun] = lstate;
6540 			ahc->enabled_luns++;
6541 
6542 			if ((ahc->features & AHC_MULTI_TID) != 0) {
6543 				u_int targid_mask;
6544 
6545 				targid_mask = ahc_inb(ahc, TARGID)
6546 					    | (ahc_inb(ahc, TARGID + 1) << 8);
6547 
6548 				targid_mask |= target_mask;
6549 				ahc_outb(ahc, TARGID, targid_mask);
6550 				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
6551 
6552 				ahc_update_scsiid(ahc, targid_mask);
6553 			} else {
6554 				u_int our_id;
6555 				char  channel;
6556 
6557 				channel = SIM_CHANNEL(ahc, sim);
6558 				our_id = SIM_SCSI_ID(ahc, sim);
6559 
6560 				/*
6561 				 * This can only happen if selections
6562 				 * are not enabled
6563 				 */
6564 				if (target != our_id) {
6565 					u_int sblkctl;
6566 					char  cur_channel;
6567 					int   swap;
6568 
6569 					sblkctl = ahc_inb(ahc, SBLKCTL);
6570 					cur_channel = (sblkctl & SELBUSB)
6571 						    ? 'B' : 'A';
6572 					if ((ahc->features & AHC_TWIN) == 0)
6573 						cur_channel = 'A';
6574 					swap = cur_channel != channel;
6575 					if (channel == 'A')
6576 						ahc->our_id = target;
6577 					else
6578 						ahc->our_id_b = target;
6579 
6580 					if (swap)
6581 						ahc_outb(ahc, SBLKCTL,
6582 							 sblkctl ^ SELBUSB);
6583 
6584 					ahc_outb(ahc, SCSIID, target);
6585 
6586 					if (swap)
6587 						ahc_outb(ahc, SBLKCTL, sblkctl);
6588 				}
6589 			}
6590 		} else
6591 			ahc->black_hole = lstate;
6592 		/* Allow select-in operations */
6593 		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
6594 			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6595 			scsiseq |= ENSELI;
6596 			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
6597 			scsiseq = ahc_inb(ahc, SCSISEQ);
6598 			scsiseq |= ENSELI;
6599 			ahc_outb(ahc, SCSISEQ, scsiseq);
6600 		}
6601 		ahc_unpause(ahc);
6602 		ahc_unlock(ahc, &s);
6603 		ccb->ccb_h.status = CAM_REQ_CMP;
6604 		xpt_print_path(ccb->ccb_h.path);
6605 		printf("Lun now enabled for target mode\n");
6606 	} else {
6607 		struct scb *scb;
6608 		int i, empty;
6609 
6610 		if (lstate == NULL) {
6611 			ccb->ccb_h.status = CAM_LUN_INVALID;
6612 			return;
6613 		}
6614 
6615 		ahc_lock(ahc, &s);
6616 
6617 		ccb->ccb_h.status = CAM_REQ_CMP;
6618 		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6619 			struct ccb_hdr *ccbh;
6620 
6621 			ccbh = &scb->io_ctx->ccb_h;
6622 			if (ccbh->func_code == XPT_CONT_TARGET_IO
6623 			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
6624 				printf("CTIO pending\n");
6625 				ccb->ccb_h.status = CAM_REQ_INVALID;
6626 				ahc_unlock(ahc, &s);
6627 				return;
6628 			}
6629 		}
6630 
6631 		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
6632 			printf("ATIOs pending\n");
6633 			ccb->ccb_h.status = CAM_REQ_INVALID;
6634 		}
6635 
6636 		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
6637 			printf("INOTs pending\n");
6638 			ccb->ccb_h.status = CAM_REQ_INVALID;
6639 		}
6640 
6641 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
6642 			ahc_unlock(ahc, &s);
6643 			return;
6644 		}
6645 
6646 		xpt_print_path(ccb->ccb_h.path);
6647 		printf("Target mode disabled\n");
6648 		xpt_free_path(lstate->path);
6649 		free(lstate, M_DEVBUF);
6650 
6651 		ahc_pause(ahc);
6652 		/* Can we clean up the target too? */
6653 		if (target != CAM_TARGET_WILDCARD) {
6654 			tstate->enabled_luns[lun] = NULL;
6655 			ahc->enabled_luns--;
6656 			for (empty = 1, i = 0; i < 8; i++)
6657 				if (tstate->enabled_luns[i] != NULL) {
6658 					empty = 0;
6659 					break;
6660 				}
6661 
6662 			if (empty) {
6663 				ahc_free_tstate(ahc, target, channel,
6664 						/*force*/FALSE);
6665 				if (ahc->features & AHC_MULTI_TID) {
6666 					u_int targid_mask;
6667 
6668 					targid_mask = ahc_inb(ahc, TARGID)
6669 						    | (ahc_inb(ahc, TARGID + 1)
6670 						       << 8);
6671 
6672 					targid_mask &= ~target_mask;
6673 					ahc_outb(ahc, TARGID, targid_mask);
6674 					ahc_outb(ahc, TARGID+1,
6675 					 	 (targid_mask >> 8));
6676 					ahc_update_scsiid(ahc, targid_mask);
6677 				}
6678 			}
6679 		} else {
6680 
6681 			ahc->black_hole = NULL;
6682 
6683 			/*
6684 			 * We can't allow selections without
6685 			 * our black hole device.
6686 			 */
6687 			empty = TRUE;
6688 		}
6689 		if (ahc->enabled_luns == 0) {
6690 			/* Disallow select-in */
6691 			u_int scsiseq;
6692 
6693 			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6694 			scsiseq &= ~ENSELI;
6695 			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
6696 			scsiseq = ahc_inb(ahc, SCSISEQ);
6697 			scsiseq &= ~ENSELI;
6698 			ahc_outb(ahc, SCSISEQ, scsiseq);
6699 
6700 			if ((ahc->features & AHC_MULTIROLE) == 0) {
6701 				printf("Configuring Initiator Mode\n");
6702 				ahc->flags &= ~AHC_TARGETROLE;
6703 				ahc->flags |= AHC_INITIATORROLE;
6704 				ahc_pause(ahc);
6705 				ahc_loadseq(ahc);
6706 			}
6707 		}
6708 		ahc_unpause(ahc);
6709 		ahc_unlock(ahc, &s);
6710 	}
6711 }
6712 
6713 static void
6714 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
6715 {
6716 	u_int scsiid_mask;
6717 	u_int scsiid;
6718 
6719 	if ((ahc->features & AHC_MULTI_TID) == 0)
6720 		panic("ahc_update_scsiid called on non-multitid unit\n");
6721 
6722 	/*
6723 	 * Since we will rely on the TARGID mask
6724 	 * for selection enables, ensure that OID
6725 	 * in SCSIID is not set to some other ID
6726 	 * that we don't want to allow selections on.
6727 	 */
6728 	if ((ahc->features & AHC_ULTRA2) != 0)
6729 		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
6730 	else
6731 		scsiid = ahc_inb(ahc, SCSIID);
6732 	scsiid_mask = 0x1 << (scsiid & OID);
6733 	if ((targid_mask & scsiid_mask) == 0) {
6734 		u_int our_id;
6735 
6736 		/* ffs counts from 1 */
6737 		our_id = ffs(targid_mask);
6738 		if (our_id == 0)
6739 			our_id = ahc->our_id;
6740 		else
6741 			our_id--;
6742 		scsiid &= TID;
6743 		scsiid |= our_id;
6744 	}
6745 	if ((ahc->features & AHC_ULTRA2) != 0)
6746 		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
6747 	else
6748 		ahc_outb(ahc, SCSIID, scsiid);
6749 }
6750 
6751 void
6752 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
6753 {
6754 	struct target_cmd *cmd;
6755 
6756 	/*
6757 	 * If the card supports auto-access pause,
6758 	 * we can access the card directly regardless
6759 	 * of whether it is paused or not.
6760 	 */
6761 	if ((ahc->features & AHC_AUTOPAUSE) != 0)
6762 		paused = TRUE;
6763 
6764 	ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
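	/* Process each valid command descriptor in the incoming target queue. */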
6765 	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
6766 
6767 		/*
6768 		 * Only advance through the queue if we
6769 		 * have the resources to process the command.
6770 		 */
6771 		if (ahc_handle_target_cmd(ahc, cmd) != 0)
6772 			break;
6773 
6774 		cmd->cmd_valid = 0;
6775 		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
6776 				ahc->shared_data_dmamap,
6777 				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
6778 				sizeof(struct target_cmd),
6779 				BUS_DMASYNC_PREREAD);
6780 		ahc->tqinfifonext++;
6781 
6782 		/*
6783 		 * Lazily update our position in the target mode incoming
6784 		 * command queue as seen by the sequencer.
6785 		 */
6786 		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
6787 			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
6788 				u_int hs_mailbox;
6789 
6790 				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
6791 				hs_mailbox &= ~HOST_TQINPOS;
6792 				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
6793 				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
6794 			} else {
6795 				if (!paused)
6796 					ahc_pause(ahc);
6797 				ahc_outb(ahc, KERNEL_TQINPOS,
6798 					 ahc->tqinfifonext & HOST_TQINPOS);
6799 				if (!paused)
6800 					ahc_unpause(ahc);
6801 			}
6802 		}
6803 	}
6804 }
6805 
6806 static int
6807 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
6808 {
6809 	struct	  ahc_tmode_tstate *tstate;
6810 	struct	  ahc_tmode_lstate *lstate;
6811 	struct	  ccb_accept_tio *atio;
6812 	uint8_t *byte;
6813 	int	  initiator;
6814 	int	  target;
6815 	int	  lun;
6816 
6817 	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
6818 	target = SCSIID_OUR_ID(cmd->scsiid);
6819 	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
6820 
6821 	byte = cmd->bytes;
6822 	tstate = ahc->enabled_targets[target];
6823 	lstate = NULL;
6824 	if (tstate != NULL)
6825 		lstate = tstate->enabled_luns[lun];
6826 
6827 	/*
6828 	 * Commands for disabled luns go to the black hole driver.
6829 	 */
6830 	if (lstate == NULL)
6831 		lstate = ahc->black_hole;
6832 
6833 	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
6834 	if (atio == NULL) {
6835 		ahc->flags |= AHC_TQINFIFO_BLOCKED;
6836 		/*
6837 		 * Wait for more ATIOs from the peripheral driver for this lun.
6838 		 */
6839 		return (1);
6840 	} else
6841 		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
6842 #if 0
6843 	printf("Incoming command from %d for %d:%d%s\n",
6844 	       initiator, target, lun,
6845 	       lstate == ahc->black_hole ? "(Black Holed)" : "");
6846 #endif
6847 	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
6848 
6849 	if (lstate == ahc->black_hole) {
6850 		/* Fill in the wildcards */
6851 		atio->ccb_h.target_id = target;
6852 		atio->ccb_h.target_lun = lun;
6853 	}
6854 
6855 	/*
6856 	 * Package it up and send it off to
6857 	 * whoever has this lun enabled.
6858 	 */
6859 	atio->sense_len = 0;
6860 	atio->init_id = initiator;
6861 	if (byte[0] != 0xFF) {
6862 		/* Tag was included */
6863 		atio->tag_action = *byte++;
6864 		atio->tag_id = *byte++;
6865 		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
6866 	} else {
6867 		atio->ccb_h.flags = 0;
6868 	}
6869 	byte++;
6870 
6871 	/* Okay.  Now determine the cdb size based on the command code */
6872 	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
6873 	case 0:
6874 		atio->cdb_len = 6;
6875 		break;
6876 	case 1:
6877 	case 2:
6878 		atio->cdb_len = 10;
6879 		break;
6880 	case 4:
6881 		atio->cdb_len = 16;
6882 		break;
6883 	case 5:
6884 		atio->cdb_len = 12;
6885 		break;
6886 	case 3:
6887 	default:
6888 		/* Only copy the opcode. */
6889 		atio->cdb_len = 1;
6890 		printf("Reserved or VU command code type encountered\n");
6891 		break;
6892 	}
6893 
6894 	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
6895 
6896 	atio->ccb_h.status |= CAM_CDB_RECVD;
6897 
6898 	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
6899 		/*
6900 		 * We weren't allowed to disconnect.
6901 		 * We're hanging on the bus until a
6902 		 * continue target I/O comes in response
6903 		 * to this accept tio.
6904 		 */
6905 #if 0
6906 		printf("Received Immediate Command %d:%d:%d - %p\n",
6907 		       initiator, target, lun, ahc->pending_device);
6908 #endif
6909 		ahc->pending_device = lstate;
6910 		ahc_freeze_ccb((union ccb *)atio);
6911 		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
6912 	}
6913 	xpt_done((union ccb*)atio);
6914 	return (0);
6915 }
6916 
6917 #endif
6918