xref: /freebsd/sys/dev/aic7xxx/aic7xxx.c (revision d056fa046c6a91b90cd98165face0e42a33a5173)
1 /*-
2  * Core routines and tables shareable across OS platforms.
3  *
4  * Copyright (c) 1994-2002 Justin T. Gibbs.
5  * Copyright (c) 2000-2002 Adaptec Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15  *    substantially similar to the "NO WARRANTY" disclaimer below
16  *    ("Disclaimer") and any redistribution must be conditioned upon
17  *    including a substantially similar Disclaimer requirement for further
18  *    binary redistribution.
19  * 3. Neither the names of the above-listed copyright holders nor the names
20  *    of any contributors may be used to endorse or promote products derived
21  *    from this software without specific prior written permission.
22  *
23  * Alternatively, this software may be distributed under the terms of the
24  * GNU General Public License ("GPL") version 2 as published by the Free
25  * Software Foundation.
26  *
27  * NO WARRANTY
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
36  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
37  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGES.
39  *
40  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
41  */
42 
43 #ifdef __linux__
44 #include "aic7xxx_osm.h"
45 #include "aic7xxx_inline.h"
46 #include "aicasm/aicasm_insformat.h"
47 #else
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
50 #include <dev/aic7xxx/aic7xxx_osm.h>
51 #include <dev/aic7xxx/aic7xxx_inline.h>
52 #include <dev/aic7xxx/aicasm/aicasm_insformat.h>
53 #endif
54 
55 /****************************** Softc Data ************************************/
56 struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);
57 
58 /***************************** Lookup Tables **********************************/
59 char *ahc_chip_names[] =
60 {
61 	"NONE",
62 	"aic7770",
63 	"aic7850",
64 	"aic7855",
65 	"aic7859",
66 	"aic7860",
67 	"aic7870",
68 	"aic7880",
69 	"aic7895",
70 	"aic7895C",
71 	"aic7890/91",
72 	"aic7896/97",
73 	"aic7892",
74 	"aic7899"
75 };
76 static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);
77 
78 /*
79  * Hardware error codes.
80  */
81 struct ahc_hard_error_entry {
82         uint8_t errno;
83 	char *errmesg;
84 };
85 
86 static struct ahc_hard_error_entry ahc_hard_errors[] = {
87 	{ ILLHADDR,	"Illegal Host Access" },
88 	{ ILLSADDR,	"Illegal Sequencer Address referrenced" },
89 	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
90 	{ SQPARERR,	"Sequencer Parity Error" },
91 	{ DPARERR,	"Data-path Parity Error" },
92 	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
93 	{ PCIERRSTAT,	"PCI Error detected" },
94 	{ CIOPARERR,	"CIOBUS Parity Error" },
95 };
96 static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);
97 
98 static struct ahc_phase_table_entry ahc_phase_table[] =
99 {
100 	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
101 	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
102 	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
103 	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
104 	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
105 	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
106 	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
107 	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
108 	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
109 	{ 0,		MSG_NOOP,		"in unknown phase"	}
110 };
111 
112 /*
113  * In most cases we only wish to itterate over real phases, so
114  * exclude the last element from the count.
115  */
116 static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;
117 
118 /*
119  * Valid SCSIRATE values.  (p. 3-17)
120  * Provides a mapping of tranfer periods in ns to the proper value to
121  * stick in the scsixfer reg.
122  */
123 static struct ahc_syncrate ahc_syncrates[] =
124 {
125       /* ultra2    fast/ultra  period     rate */
126 	{ 0x42,      0x000,      9,      "80.0" },
127 	{ 0x03,      0x000,     10,      "40.0" },
128 	{ 0x04,      0x000,     11,      "33.0" },
129 	{ 0x05,      0x100,     12,      "20.0" },
130 	{ 0x06,      0x110,     15,      "16.0" },
131 	{ 0x07,      0x120,     18,      "13.4" },
132 	{ 0x08,      0x000,     25,      "10.0" },
133 	{ 0x19,      0x010,     31,      "8.0"  },
134 	{ 0x1a,      0x020,     37,      "6.67" },
135 	{ 0x1b,      0x030,     43,      "5.7"  },
136 	{ 0x1c,      0x040,     50,      "5.0"  },
137 	{ 0x00,      0x050,     56,      "4.4"  },
138 	{ 0x00,      0x060,     62,      "4.0"  },
139 	{ 0x00,      0x070,     68,      "3.6"  },
140 	{ 0x00,      0x000,      0,      NULL   }
141 };
142 
143 /* Our Sequencer Program */
144 #include "aic7xxx_seq.h"
145 
146 /**************************** Function Declarations ***************************/
147 static void		ahc_force_renegotiation(struct ahc_softc *ahc,
148 						struct ahc_devinfo *devinfo);
149 static struct ahc_tmode_tstate*
150 			ahc_alloc_tstate(struct ahc_softc *ahc,
151 					 u_int scsi_id, char channel);
152 #ifdef AHC_TARGET_MODE
153 static void		ahc_free_tstate(struct ahc_softc *ahc,
154 					u_int scsi_id, char channel, int force);
155 #endif
156 static struct ahc_syncrate*
157 			ahc_devlimited_syncrate(struct ahc_softc *ahc,
158 					        struct ahc_initiator_tinfo *,
159 						u_int *period,
160 						u_int *ppr_options,
161 						role_t role);
162 static void		ahc_update_pending_scbs(struct ahc_softc *ahc);
163 static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
164 					  struct ahc_devinfo *devinfo);
165 static void		ahc_scb_devinfo(struct ahc_softc *ahc,
166 					struct ahc_devinfo *devinfo,
167 					struct scb *scb);
168 static void		ahc_assert_atn(struct ahc_softc *ahc);
169 static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
170 						   struct ahc_devinfo *devinfo,
171 						   struct scb *scb);
172 static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
173 					       struct ahc_devinfo *devinfo);
174 static void		ahc_construct_sdtr(struct ahc_softc *ahc,
175 					   struct ahc_devinfo *devinfo,
176 					   u_int period, u_int offset);
177 static void		ahc_construct_wdtr(struct ahc_softc *ahc,
178 					   struct ahc_devinfo *devinfo,
179 					   u_int bus_width);
180 static void		ahc_construct_ppr(struct ahc_softc *ahc,
181 					  struct ahc_devinfo *devinfo,
182 					  u_int period, u_int offset,
183 					  u_int bus_width, u_int ppr_options);
184 static void		ahc_clear_msg_state(struct ahc_softc *ahc);
185 static void		ahc_handle_proto_violation(struct ahc_softc *ahc);
186 static void		ahc_handle_message_phase(struct ahc_softc *ahc);
187 typedef enum {
188 	AHCMSG_1B,
189 	AHCMSG_2B,
190 	AHCMSG_EXT
191 } ahc_msgtype;
192 static int		ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
193 				     u_int msgval, int full);
194 static int		ahc_parse_msg(struct ahc_softc *ahc,
195 				      struct ahc_devinfo *devinfo);
196 static int		ahc_handle_msg_reject(struct ahc_softc *ahc,
197 					      struct ahc_devinfo *devinfo);
198 static void		ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
199 						struct ahc_devinfo *devinfo);
200 static void		ahc_reinitialize_dataptrs(struct ahc_softc *ahc);
201 static void		ahc_handle_devreset(struct ahc_softc *ahc,
202 					    struct ahc_devinfo *devinfo,
203 					    cam_status status, char *message,
204 					    int verbose_level);
205 #ifdef AHC_TARGET_MODE
206 static void		ahc_setup_target_msgin(struct ahc_softc *ahc,
207 					       struct ahc_devinfo *devinfo,
208 					       struct scb *scb);
209 #endif
210 
211 static bus_dmamap_callback_t	ahc_dmamap_cb;
212 static void			ahc_build_free_scb_list(struct ahc_softc *ahc);
213 static int			ahc_init_scbdata(struct ahc_softc *ahc);
214 static void			ahc_fini_scbdata(struct ahc_softc *ahc);
215 static void		ahc_qinfifo_requeue(struct ahc_softc *ahc,
216 					    struct scb *prev_scb,
217 					    struct scb *scb);
218 static int		ahc_qinfifo_count(struct ahc_softc *ahc);
219 static u_int		ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
220 						   u_int prev, u_int scbptr);
221 static void		ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
222 static u_int		ahc_rem_wscb(struct ahc_softc *ahc,
223 				     u_int scbpos, u_int prev);
224 static void		ahc_reset_current_bus(struct ahc_softc *ahc);
225 #ifdef AHC_DUMP_SEQ
226 static void		ahc_dumpseq(struct ahc_softc *ahc);
227 #endif
228 static int		ahc_loadseq(struct ahc_softc *ahc);
229 static int		ahc_check_patch(struct ahc_softc *ahc,
230 					struct patch **start_patch,
231 					u_int start_instr, u_int *skip_addr);
232 static void		ahc_download_instr(struct ahc_softc *ahc,
233 					   u_int instrptr, uint8_t *dconsts);
234 static int		ahc_other_scb_timeout(struct ahc_softc *ahc,
235 					      struct scb *scb,
236 					      struct scb *other_scb);
237 #ifdef AHC_TARGET_MODE
238 static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
239 					       struct ahc_tmode_lstate *lstate,
240 					       u_int initiator_id,
241 					       u_int event_type,
242 					       u_int event_arg);
243 static void		ahc_update_scsiid(struct ahc_softc *ahc,
244 					  u_int targid_mask);
245 static int		ahc_handle_target_cmd(struct ahc_softc *ahc,
246 					      struct target_cmd *cmd);
247 #endif
248 /************************* Sequencer Execution Control ************************/
249 /*
250  * Restart the sequencer program from address zero
251  */
252 void
253 ahc_restart(struct ahc_softc *ahc)
254 {
255 
256 	ahc_pause(ahc);
257 
258 	/* No more pending messages. */
259 	ahc_clear_msg_state(ahc);
260 
261 	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
262 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
263 	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
264 	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
265 	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
266 	ahc_outb(ahc, SAVED_LUN, 0xFF);
267 
268 	/*
269 	 * Ensure that the sequencer's idea of TQINPOS
270 	 * matches our own.  The sequencer increments TQINPOS
271 	 * only after it sees a DMA complete and a reset could
272 	 * occur before the increment leaving the kernel to believe
273 	 * the command arrived but the sequencer to not.
274 	 */
275 	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
276 
277 	/* Always allow reselection */
278 	ahc_outb(ahc, SCSISEQ,
279 		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
280 	if ((ahc->features & AHC_CMD_CHAN) != 0) {
281 		/* Ensure that no DMA operations are in progress */
282 		ahc_outb(ahc, CCSCBCNT, 0);
283 		ahc_outb(ahc, CCSGCTL, 0);
284 		ahc_outb(ahc, CCSCBCTL, 0);
285 	}
286 	/*
287 	 * If we were in the process of DMA'ing SCB data into
288 	 * an SCB, replace that SCB on the free list.  This prevents
289 	 * an SCB leak.
290 	 */
291 	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
292 		ahc_add_curscb_to_free_list(ahc);
293 		ahc_outb(ahc, SEQ_FLAGS2,
294 			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
295 	}
296 
297 	/*
298 	 * Clear any pending sequencer interrupt.  It is no
299 	 * longer relevant since we're resetting the Program
300 	 * Counter.
301 	 */
302 	ahc_outb(ahc, CLRINT, CLRSEQINT);
303 
304 	ahc_outb(ahc, MWI_RESIDUAL, 0);
305 	ahc_outb(ahc, SEQCTL, ahc->seqctl);
306 	ahc_outb(ahc, SEQADDR0, 0);
307 	ahc_outb(ahc, SEQADDR1, 0);
308 
309 	ahc_unpause(ahc);
310 }
311 
312 /************************* Input/Output Queues ********************************/
313 void
314 ahc_run_qoutfifo(struct ahc_softc *ahc)
315 {
316 	struct scb *scb;
317 	u_int  scb_index;
318 
319 	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
320 	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
321 
322 		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
323 		if ((ahc->qoutfifonext & 0x03) == 0x03) {
324 			u_int modnext;
325 
326 			/*
327 			 * Clear 32bits of QOUTFIFO at a time
328 			 * so that we don't clobber an incoming
329 			 * byte DMA to the array on architectures
330 			 * that only support 32bit load and store
331 			 * operations.
332 			 */
333 			modnext = ahc->qoutfifonext & ~0x3;
334 			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
335 			aic_dmamap_sync(ahc, ahc->shared_data_dmat,
336 					ahc->shared_data_dmamap,
337 					/*offset*/modnext, /*len*/4,
338 					BUS_DMASYNC_PREREAD);
339 		}
340 		ahc->qoutfifonext++;
341 
342 		scb = ahc_lookup_scb(ahc, scb_index);
343 		if (scb == NULL) {
344 			printf("%s: WARNING no command for scb %d "
345 			       "(cmdcmplt)\nQOUTPOS = %d\n",
346 			       ahc_name(ahc), scb_index,
347 			       (ahc->qoutfifonext - 1) & 0xFF);
348 			continue;
349 		}
350 
351 		/*
352 		 * Save off the residual
353 		 * if there is one.
354 		 */
355 		ahc_update_residual(ahc, scb);
356 		ahc_done(ahc, scb);
357 	}
358 }
359 
360 void
361 ahc_run_untagged_queues(struct ahc_softc *ahc)
362 {
363 	int i;
364 
365 	for (i = 0; i < 16; i++)
366 		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
367 }
368 
369 void
370 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
371 {
372 	struct scb *scb;
373 
374 	if (ahc->untagged_queue_lock != 0)
375 		return;
376 
377 	if ((scb = TAILQ_FIRST(queue)) != NULL
378 	 && (scb->flags & SCB_ACTIVE) == 0) {
379 		scb->flags |= SCB_ACTIVE;
380 		/*
381 		 * Timers are disabled while recovery is in progress.
382 		 */
383 		aic_scb_timer_start(scb);
384 		ahc_queue_scb(ahc, scb);
385 	}
386 }
387 
388 /************************* Interrupt Handling *********************************/
389 void
390 ahc_handle_brkadrint(struct ahc_softc *ahc)
391 {
392 	/*
393 	 * We upset the sequencer :-(
394 	 * Lookup the error message
395 	 */
396 	int i;
397 	int error;
398 
399 	error = ahc_inb(ahc, ERROR);
400 	for (i = 0; error != 1 && i < num_errors; i++)
401 		error >>= 1;
402 	printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
403 	       ahc_name(ahc), ahc_hard_errors[i].errmesg,
404 	       ahc_inb(ahc, SEQADDR0) |
405 	       (ahc_inb(ahc, SEQADDR1) << 8));
406 
407 	ahc_dump_card_state(ahc);
408 
409 	/* Tell everyone that this HBA is no longer available */
410 	ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
411 		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
412 		       CAM_NO_HBA);
413 
414 	/* Disable all interrupt sources by resetting the controller */
415 	ahc_shutdown(ahc);
416 }
417 
418 void
419 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
420 {
421 	struct scb *scb;
422 	struct ahc_devinfo devinfo;
423 
424 	ahc_fetch_devinfo(ahc, &devinfo);
425 
426 	/*
427 	 * Clear the upper byte that holds SEQINT status
428 	 * codes and clear the SEQINT bit. We will unpause
429 	 * the sequencer, if appropriate, after servicing
430 	 * the request.
431 	 */
432 	ahc_outb(ahc, CLRINT, CLRSEQINT);
433 	switch (intstat & SEQINT_MASK) {
434 	case BAD_STATUS:
435 	{
436 		u_int  scb_index;
437 		struct hardware_scb *hscb;
438 
439 		/*
440 		 * Set the default return value to 0 (don't
441 		 * send sense).  The sense code will change
442 		 * this if needed.
443 		 */
444 		ahc_outb(ahc, RETURN_1, 0);
445 
446 		/*
447 		 * The sequencer will notify us when a command
448 		 * has an error that would be of interest to
449 		 * the kernel.  This allows us to leave the sequencer
450 		 * running in the common case of command completes
451 		 * without error.  The sequencer will already have
452 		 * dma'd the SCB back up to us, so we can reference
453 		 * the in kernel copy directly.
454 		 */
455 		scb_index = ahc_inb(ahc, SCB_TAG);
456 		scb = ahc_lookup_scb(ahc, scb_index);
457 		if (scb == NULL) {
458 			ahc_print_devinfo(ahc, &devinfo);
459 			printf("ahc_intr - referenced scb "
460 			       "not valid during seqint 0x%x scb(%d)\n",
461 			       intstat, scb_index);
462 			ahc_dump_card_state(ahc);
463 			panic("for safety");
464 			goto unpause;
465 		}
466 
467 		hscb = scb->hscb;
468 
469 		/* Don't want to clobber the original sense code */
470 		if ((scb->flags & SCB_SENSE) != 0) {
471 			/*
472 			 * Clear the SCB_SENSE Flag and have
473 			 * the sequencer do a normal command
474 			 * complete.
475 			 */
476 			scb->flags &= ~SCB_SENSE;
477 			aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
478 			break;
479 		}
480 		aic_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
481 		/* Freeze the queue until the client sees the error. */
482 		ahc_freeze_devq(ahc, scb);
483 		aic_freeze_scb(scb);
484 		aic_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
485 		switch (hscb->shared_data.status.scsi_status) {
486 		case SCSI_STATUS_OK:
487 			printf("%s: Interrupted for staus of 0???\n",
488 			       ahc_name(ahc));
489 			break;
490 		case SCSI_STATUS_CMD_TERMINATED:
491 		case SCSI_STATUS_CHECK_COND:
492 		{
493 			struct ahc_dma_seg *sg;
494 			struct scsi_sense *sc;
495 			struct ahc_initiator_tinfo *targ_info;
496 			struct ahc_tmode_tstate *tstate;
497 			struct ahc_transinfo *tinfo;
498 #ifdef AHC_DEBUG
499 			if (ahc_debug & AHC_SHOW_SENSE) {
500 				ahc_print_path(ahc, scb);
501 				printf("SCB %d: requests Check Status\n",
502 				       scb->hscb->tag);
503 			}
504 #endif
505 
506 			if (aic_perform_autosense(scb) == 0)
507 				break;
508 
509 			targ_info = ahc_fetch_transinfo(ahc,
510 							devinfo.channel,
511 							devinfo.our_scsiid,
512 							devinfo.target,
513 							&tstate);
514 			tinfo = &targ_info->curr;
515 			sg = scb->sg_list;
516 			sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
517 			/*
518 			 * Save off the residual if there is one.
519 			 */
520 			ahc_update_residual(ahc, scb);
521 #ifdef AHC_DEBUG
522 			if (ahc_debug & AHC_SHOW_SENSE) {
523 				ahc_print_path(ahc, scb);
524 				printf("Sending Sense\n");
525 			}
526 #endif
527 			sg->addr = ahc_get_sense_bufaddr(ahc, scb);
528 			sg->len = aic_get_sense_bufsize(ahc, scb);
529 			sg->len |= AHC_DMA_LAST_SEG;
530 
531 			/* Fixup byte order */
532 			sg->addr = aic_htole32(sg->addr);
533 			sg->len = aic_htole32(sg->len);
534 
535 			sc->opcode = REQUEST_SENSE;
536 			sc->byte2 = 0;
537 			if (tinfo->protocol_version <= SCSI_REV_2
538 			 && SCB_GET_LUN(scb) < 8)
539 				sc->byte2 = SCB_GET_LUN(scb) << 5;
540 			sc->unused[0] = 0;
541 			sc->unused[1] = 0;
542 			sc->length = sg->len;
543 			sc->control = 0;
544 
545 			/*
546 			 * We can't allow the target to disconnect.
547 			 * This will be an untagged transaction and
548 			 * having the target disconnect will make this
549 			 * transaction indestinguishable from outstanding
550 			 * tagged transactions.
551 			 */
552 			hscb->control = 0;
553 
554 			/*
555 			 * This request sense could be because the
556 			 * the device lost power or in some other
557 			 * way has lost our transfer negotiations.
558 			 * Renegotiate if appropriate.  Unit attention
559 			 * errors will be reported before any data
560 			 * phases occur.
561 			 */
562 			if (aic_get_residual(scb)
563 			 == aic_get_transfer_length(scb)) {
564 				ahc_update_neg_request(ahc, &devinfo,
565 						       tstate, targ_info,
566 						       AHC_NEG_IF_NON_ASYNC);
567 			}
568 			if (tstate->auto_negotiate & devinfo.target_mask) {
569 				hscb->control |= MK_MESSAGE;
570 				scb->flags &= ~SCB_NEGOTIATE;
571 				scb->flags |= SCB_AUTO_NEGOTIATE;
572 			}
573 			hscb->cdb_len = sizeof(*sc);
574 			hscb->dataptr = sg->addr;
575 			hscb->datacnt = sg->len;
576 			hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
577 			hscb->sgptr = aic_htole32(hscb->sgptr);
578 			scb->sg_count = 1;
579 			scb->flags |= SCB_SENSE;
580 			ahc_qinfifo_requeue_tail(ahc, scb);
581 			ahc_outb(ahc, RETURN_1, SEND_SENSE);
582 			/*
583 			 * Ensure we have enough time to actually
584 			 * retrieve the sense, but only schedule
585 			 * the timer if we are not in recovery or
586 			 * this is a recovery SCB that is allowed
587 			 * to have an active timer.
588 			 */
589 			if (ahc->scb_data->recovery_scbs == 0
590 			 || (scb->flags & SCB_RECOVERY_SCB) != 0)
591 				aic_scb_timer_reset(scb, 5 * 1000);
592 			break;
593 		}
594 		default:
595 			break;
596 		}
597 		break;
598 	}
599 	case NO_MATCH:
600 	{
601 		/* Ensure we don't leave the selection hardware on */
602 		ahc_outb(ahc, SCSISEQ,
603 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
604 
605 		printf("%s:%c:%d: no active SCB for reconnecting "
606 		       "target - issuing BUS DEVICE RESET\n",
607 		       ahc_name(ahc), devinfo.channel, devinfo.target);
608 		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
609 		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
610 		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
611 		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
612 		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
613 		       "SINDEX == 0x%x\n",
614 		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
615 		       ahc_index_busy_tcl(ahc,
616 			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
617 				      ahc_inb(ahc, SAVED_LUN))),
618 		       ahc_inb(ahc, SINDEX));
619 		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
620 		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
621 		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
622 		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
623 		       ahc_inb(ahc, SCB_CONTROL));
624 		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
625 		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
626 		printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
627 		printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
628 		ahc_dump_card_state(ahc);
629 		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
630 		ahc->msgout_len = 1;
631 		ahc->msgout_index = 0;
632 		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
633 		ahc_outb(ahc, MSG_OUT, HOST_MSG);
634 		ahc_assert_atn(ahc);
635 		break;
636 	}
637 	case SEND_REJECT:
638 	{
639 		u_int rejbyte = ahc_inb(ahc, ACCUM);
640 		printf("%s:%c:%d: Warning - unknown message received from "
641 		       "target (0x%x).  Rejecting\n",
642 		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
643 		break;
644 	}
645 	case PROTO_VIOLATION:
646 	{
647 		ahc_handle_proto_violation(ahc);
648 		break;
649 	}
650 	case IGN_WIDE_RES:
651 		ahc_handle_ign_wide_residue(ahc, &devinfo);
652 		break;
653 	case PDATA_REINIT:
654 		ahc_reinitialize_dataptrs(ahc);
655 		break;
656 	case BAD_PHASE:
657 	{
658 		u_int lastphase;
659 
660 		lastphase = ahc_inb(ahc, LASTPHASE);
661 		printf("%s:%c:%d: unknown scsi bus phase %x, "
662 		       "lastphase = 0x%x.  Attempting to continue\n",
663 		       ahc_name(ahc), devinfo.channel, devinfo.target,
664 		       lastphase, ahc_inb(ahc, SCSISIGI));
665 		break;
666 	}
667 	case MISSED_BUSFREE:
668 	{
669 		u_int lastphase;
670 
671 		lastphase = ahc_inb(ahc, LASTPHASE);
672 		printf("%s:%c:%d: Missed busfree. "
673 		       "Lastphase = 0x%x, Curphase = 0x%x\n",
674 		       ahc_name(ahc), devinfo.channel, devinfo.target,
675 		       lastphase, ahc_inb(ahc, SCSISIGI));
676 		ahc_restart(ahc);
677 		return;
678 	}
679 	case HOST_MSG_LOOP:
680 	{
681 		/*
682 		 * The sequencer has encountered a message phase
683 		 * that requires host assistance for completion.
684 		 * While handling the message phase(s), we will be
685 		 * notified by the sequencer after each byte is
686 		 * transfered so we can track bus phase changes.
687 		 *
688 		 * If this is the first time we've seen a HOST_MSG_LOOP
689 		 * interrupt, initialize the state of the host message
690 		 * loop.
691 		 */
692 		if (ahc->msg_type == MSG_TYPE_NONE) {
693 			struct scb *scb;
694 			u_int scb_index;
695 			u_int bus_phase;
696 
697 			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
698 			if (bus_phase != P_MESGIN
699 			 && bus_phase != P_MESGOUT) {
700 				printf("ahc_intr: HOST_MSG_LOOP bad "
701 				       "phase 0x%x\n",
702 				      bus_phase);
703 				/*
704 				 * Probably transitioned to bus free before
705 				 * we got here.  Just punt the message.
706 				 */
707 				ahc_clear_intstat(ahc);
708 				ahc_restart(ahc);
709 				return;
710 			}
711 
712 			scb_index = ahc_inb(ahc, SCB_TAG);
713 			scb = ahc_lookup_scb(ahc, scb_index);
714 			if (devinfo.role == ROLE_INITIATOR) {
715 				if (scb == NULL)
716 					panic("HOST_MSG_LOOP with "
717 					      "invalid SCB %x\n", scb_index);
718 
719 				if (bus_phase == P_MESGOUT)
720 					ahc_setup_initiator_msgout(ahc,
721 								   &devinfo,
722 								   scb);
723 				else {
724 					ahc->msg_type =
725 					    MSG_TYPE_INITIATOR_MSGIN;
726 					ahc->msgin_index = 0;
727 				}
728 			}
729 #ifdef AHC_TARGET_MODE
730 			else {
731 				if (bus_phase == P_MESGOUT) {
732 					ahc->msg_type =
733 					    MSG_TYPE_TARGET_MSGOUT;
734 					ahc->msgin_index = 0;
735 				}
736 				else
737 					ahc_setup_target_msgin(ahc,
738 							       &devinfo,
739 							       scb);
740 			}
741 #endif
742 		}
743 
744 		ahc_handle_message_phase(ahc);
745 		break;
746 	}
747 	case PERR_DETECTED:
748 	{
749 		/*
750 		 * If we've cleared the parity error interrupt
751 		 * but the sequencer still believes that SCSIPERR
752 		 * is true, it must be that the parity error is
753 		 * for the currently presented byte on the bus,
754 		 * and we are not in a phase (data-in) where we will
755 		 * eventually ack this byte.  Ack the byte and
756 		 * throw it away in the hope that the target will
757 		 * take us to message out to deliver the appropriate
758 		 * error message.
759 		 */
760 		if ((intstat & SCSIINT) == 0
761 		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
762 
763 			if ((ahc->features & AHC_DT) == 0) {
764 				u_int curphase;
765 
766 				/*
767 				 * The hardware will only let you ack bytes
768 				 * if the expected phase in SCSISIGO matches
769 				 * the current phase.  Make sure this is
770 				 * currently the case.
771 				 */
772 				curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
773 				ahc_outb(ahc, LASTPHASE, curphase);
774 				ahc_outb(ahc, SCSISIGO, curphase);
775 			}
776 			if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) {
777 				int wait;
778 
779 				/*
780 				 * In a data phase.  Faster to bitbucket
781 				 * the data than to individually ack each
782 				 * byte.  This is also the only strategy
783 				 * that will work with AUTOACK enabled.
784 				 */
785 				ahc_outb(ahc, SXFRCTL1,
786 					 ahc_inb(ahc, SXFRCTL1) | BITBUCKET);
787 				wait = 5000;
788 				while (--wait != 0) {
789 					if ((ahc_inb(ahc, SCSISIGI)
790 					  & (CDI|MSGI)) != 0)
791 						break;
792 					aic_delay(100);
793 				}
794 				ahc_outb(ahc, SXFRCTL1,
795 					 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
796 				if (wait == 0) {
797 					struct	scb *scb;
798 					u_int	scb_index;
799 
800 					ahc_print_devinfo(ahc, &devinfo);
801 					printf("Unable to clear parity error.  "
802 					       "Resetting bus.\n");
803 					scb_index = ahc_inb(ahc, SCB_TAG);
804 					scb = ahc_lookup_scb(ahc, scb_index);
805 					if (scb != NULL)
806 						aic_set_transaction_status(scb,
807 						    CAM_UNCOR_PARITY);
808 					ahc_reset_channel(ahc, devinfo.channel,
809 							  /*init reset*/TRUE);
810 				}
811 			} else {
812 				ahc_inb(ahc, SCSIDATL);
813 			}
814 		}
815 		break;
816 	}
817 	case DATA_OVERRUN:
818 	{
819 		/*
820 		 * When the sequencer detects an overrun, it
821 		 * places the controller in "BITBUCKET" mode
822 		 * and allows the target to complete its transfer.
823 		 * Unfortunately, none of the counters get updated
824 		 * when the controller is in this mode, so we have
825 		 * no way of knowing how large the overrun was.
826 		 */
827 		u_int scbindex = ahc_inb(ahc, SCB_TAG);
828 		u_int lastphase = ahc_inb(ahc, LASTPHASE);
829 		u_int i;
830 
831 		scb = ahc_lookup_scb(ahc, scbindex);
832 		for (i = 0; i < num_phases; i++) {
833 			if (lastphase == ahc_phase_table[i].phase)
834 				break;
835 		}
836 		ahc_print_path(ahc, scb);
837 		printf("data overrun detected %s."
838 		       "  Tag == 0x%x.\n",
839 		       ahc_phase_table[i].phasemsg,
840   		       scb->hscb->tag);
841 		ahc_print_path(ahc, scb);
842 		printf("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
843 		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
844 		       aic_get_transfer_length(scb), scb->sg_count);
845 		if (scb->sg_count > 0) {
846 			for (i = 0; i < scb->sg_count; i++) {
847 
848 				printf("sg[%d] - Addr 0x%x%x : Length %d\n",
849 				       i,
850 				       (aic_le32toh(scb->sg_list[i].len) >> 24
851 				        & SG_HIGH_ADDR_BITS),
852 				       aic_le32toh(scb->sg_list[i].addr),
853 				       aic_le32toh(scb->sg_list[i].len)
854 				       & AHC_SG_LEN_MASK);
855 			}
856 		}
857 		/*
858 		 * Set this and it will take effect when the
859 		 * target does a command complete.
860 		 */
861 		ahc_freeze_devq(ahc, scb);
862 		if ((scb->flags & SCB_SENSE) == 0) {
863 			aic_set_transaction_status(scb, CAM_DATA_RUN_ERR);
864 		} else {
865 			scb->flags &= ~SCB_SENSE;
866 			aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
867 		}
868 		aic_freeze_scb(scb);
869 
870 		if ((ahc->features & AHC_ULTRA2) != 0) {
871 			/*
872 			 * Clear the channel in case we return
873 			 * to data phase later.
874 			 */
875 			ahc_outb(ahc, SXFRCTL0,
876 				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
877 			ahc_outb(ahc, SXFRCTL0,
878 				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
879 		}
880 		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
881 			u_int dscommand1;
882 
883 			/* Ensure HHADDR is 0 for future DMA operations. */
884 			dscommand1 = ahc_inb(ahc, DSCOMMAND1);
885 			ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
886 			ahc_outb(ahc, HADDR, 0);
887 			ahc_outb(ahc, DSCOMMAND1, dscommand1);
888 		}
889 		break;
890 	}
891 	case MKMSG_FAILED:
892 	{
893 		u_int scbindex;
894 
895 		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
896 		       ahc_name(ahc), devinfo.channel, devinfo.target,
897 		       devinfo.lun);
898 		scbindex = ahc_inb(ahc, SCB_TAG);
899 		scb = ahc_lookup_scb(ahc, scbindex);
900 		if (scb != NULL
901 		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
902 			/*
903 			 * Ensure that we didn't put a second instance of this
904 			 * SCB into the QINFIFO.
905 			 */
906 			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
907 					   SCB_GET_CHANNEL(ahc, scb),
908 					   SCB_GET_LUN(scb), scb->hscb->tag,
909 					   ROLE_INITIATOR, /*status*/0,
910 					   SEARCH_REMOVE);
911 		break;
912 	}
913 	case NO_FREE_SCB:
914 	{
915 		printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
916 		ahc_dump_card_state(ahc);
917 		panic("for safety");
918 		break;
919 	}
920 	case SCB_MISMATCH:
921 	{
922 		u_int scbptr;
923 
924 		scbptr = ahc_inb(ahc, SCBPTR);
925 		printf("Bogus TAG after DMA.  SCBPTR %d, tag %d, our tag %d\n",
926 		       scbptr, ahc_inb(ahc, ARG_1),
927 		       ahc->scb_data->hscbs[scbptr].tag);
928 		ahc_dump_card_state(ahc);
929 		panic("for saftey");
930 		break;
931 	}
932 	case OUT_OF_RANGE:
933 	{
934 		printf("%s: BTT calculation out of range\n", ahc_name(ahc));
935 		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
936 		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
937 		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
938 		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
939 		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
940 		       "SINDEX == 0x%x\n, A == 0x%x\n",
941 		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
942 		       ahc_index_busy_tcl(ahc,
943 			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
944 				      ahc_inb(ahc, SAVED_LUN))),
945 		       ahc_inb(ahc, SINDEX),
946 		       ahc_inb(ahc, ACCUM));
947 		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
948 		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
949 		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
950 		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
951 		       ahc_inb(ahc, SCB_CONTROL));
952 		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
953 		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
954 		ahc_dump_card_state(ahc);
955 		panic("for safety");
956 		break;
957 	}
958 	default:
959 		printf("ahc_intr: seqint, "
960 		       "intstat == 0x%x, scsisigi = 0x%x\n",
961 		       intstat, ahc_inb(ahc, SCSISIGI));
962 		break;
963 	}
964 unpause:
965 	/*
966 	 *  The sequencer is paused immediately on
967 	 *  a SEQINT, so we should restart it when
968 	 *  we're done.
969 	 */
970 	ahc_unpause(ahc);
971 }
972 
973 void
974 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
975 {
976 	u_int	scb_index;
977 	u_int	status0;
978 	u_int	status;
979 	struct	scb *scb;
980 	char	cur_channel;
981 	char	intr_channel;
982 
983 	if ((ahc->features & AHC_TWIN) != 0
984 	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
985 		cur_channel = 'B';
986 	else
987 		cur_channel = 'A';
988 	intr_channel = cur_channel;
989 
990 	if ((ahc->features & AHC_ULTRA2) != 0)
991 		status0 = ahc_inb(ahc, SSTAT0) & IOERR;
992 	else
993 		status0 = 0;
994 	status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
995 	if (status == 0 && status0 == 0) {
996 		if ((ahc->features & AHC_TWIN) != 0) {
997 			/* Try the other channel */
998 		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
999 			status = ahc_inb(ahc, SSTAT1)
1000 			       & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
1001 			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
1002 		}
1003 		if (status == 0) {
1004 			printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
1005 			ahc_outb(ahc, CLRINT, CLRSCSIINT);
1006 			ahc_unpause(ahc);
1007 			return;
1008 		}
1009 	}
1010 
1011 	/* Make sure the sequencer is in a safe location. */
1012 	ahc_clear_critical_section(ahc);
1013 
1014 	scb_index = ahc_inb(ahc, SCB_TAG);
1015 	scb = ahc_lookup_scb(ahc, scb_index);
1016 	if (scb != NULL
1017 	 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
1018 		scb = NULL;
1019 
1020 	if ((ahc->features & AHC_ULTRA2) != 0
1021 	 && (status0 & IOERR) != 0) {
1022 		int now_lvd;
1023 
1024 		now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
1025 		printf("%s: Transceiver State Has Changed to %s mode\n",
1026 		       ahc_name(ahc), now_lvd ? "LVD" : "SE");
1027 		ahc_outb(ahc, CLRSINT0, CLRIOERR);
1028 		/*
1029 		 * When transitioning to SE mode, the reset line
1030 		 * glitches, triggering an arbitration bug in some
1031 		 * Ultra2 controllers.  This bug is cleared when we
1032 		 * assert the reset line.  Since a reset glitch has
1033 		 * already occurred with this transition and a
1034 		 * transceiver state change is handled just like
1035 		 * a bus reset anyway, asserting the reset line
1036 		 * ourselves is safe.
1037 		 */
1038 		ahc_reset_channel(ahc, intr_channel,
1039 				 /*Initiate Reset*/now_lvd == 0);
1040 	} else if ((status & SCSIRSTI) != 0) {
1041 		printf("%s: Someone reset channel %c\n",
1042 			ahc_name(ahc), intr_channel);
1043 		if (intr_channel != cur_channel)
1044 		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
1045 		ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
1046 	} else if ((status & SCSIPERR) != 0) {
1047 		/*
1048 		 * Determine the bus phase and queue an appropriate message.
1049 		 * SCSIPERR is latched true as soon as a parity error
1050 		 * occurs.  If the sequencer acked the transfer that
1051 		 * caused the parity error and the currently presented
1052 		 * transfer on the bus has correct parity, SCSIPERR will
1053 		 * be cleared by CLRSCSIPERR.  Use this to determine if
1054 		 * we should look at the last phase the sequencer recorded,
1055 		 * or the current phase presented on the bus.
1056 		 */
1057 		struct	ahc_devinfo devinfo;
1058 		u_int	mesg_out;
1059 		u_int	curphase;
1060 		u_int	errorphase;
1061 		u_int	lastphase;
1062 		u_int	scsirate;
1063 		u_int	i;
1064 		u_int	sstat2;
1065 		int	silent;
1066 
1067 		lastphase = ahc_inb(ahc, LASTPHASE);
1068 		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
1069 		sstat2 = ahc_inb(ahc, SSTAT2);
1070 		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
1071 		/*
1072 		 * For all phases save DATA, the sequencer won't
1073 		 * automatically ack a byte that has a parity error
1074 		 * in it.  So the only way that the current phase
1075 		 * could be 'data-in' is if the parity error is for
1076 		 * an already acked byte in the data phase.  During
1077 		 * synchronous data-in transfers, we may actually
1078 		 * ack bytes before latching the current phase in
1079 		 * LASTPHASE, leading to the discrepancy between
1080 		 * curphase and lastphase.
1081 		 */
1082 		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
1083 		 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
1084 			errorphase = curphase;
1085 		else
1086 			errorphase = lastphase;
1087 
1088 		for (i = 0; i < num_phases; i++) {
1089 			if (errorphase == ahc_phase_table[i].phase)
1090 				break;
1091 		}
1092 		mesg_out = ahc_phase_table[i].mesg_out;
1093 		silent = FALSE;
1094 		if (scb != NULL) {
1095 			if (SCB_IS_SILENT(scb))
1096 				silent = TRUE;
1097 			else
1098 				ahc_print_path(ahc, scb);
1099 			scb->flags |= SCB_TRANSMISSION_ERROR;
1100 		} else
1101 			printf("%s:%c:%d: ", ahc_name(ahc), intr_channel,
1102 			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
1103 		scsirate = ahc_inb(ahc, SCSIRATE);
1104 		if (silent == FALSE) {
1105 			printf("parity error detected %s. "
1106 			       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
1107 			       ahc_phase_table[i].phasemsg,
1108 			       ahc_inw(ahc, SEQADDR0),
1109 			       scsirate);
1110 			if ((ahc->features & AHC_DT) != 0) {
1111 				if ((sstat2 & CRCVALERR) != 0)
1112 					printf("\tCRC Value Mismatch\n");
1113 				if ((sstat2 & CRCENDERR) != 0)
1114 					printf("\tNo terminal CRC packet "
1115 					       "recevied\n");
1116 				if ((sstat2 & CRCREQERR) != 0)
1117 					printf("\tIllegal CRC packet "
1118 					       "request\n");
1119 				if ((sstat2 & DUAL_EDGE_ERR) != 0)
1120 					printf("\tUnexpected %sDT Data Phase\n",
1121 					       (scsirate & SINGLE_EDGE)
1122 					     ? "" : "non-");
1123 			}
1124 		}
1125 
1126 		if ((ahc->features & AHC_DT) != 0
1127 		 && (sstat2 & DUAL_EDGE_ERR) != 0) {
1128 			/*
1129 			 * This error applies regardless of
1130 			 * data direction, so ignore the value
1131 			 * in the phase table.
1132 			 */
1133 			mesg_out = MSG_INITIATOR_DET_ERR;
1134 		}
1135 
1136 		/*
1137 		 * We've set the hardware to assert ATN if we
1138 		 * get a parity error on "in" phases, so all we
1139 		 * need to do is stuff the message buffer with
1140 		 * the appropriate message.  "In" phases have set
1141 		 * mesg_out to something other than MSG_NOP.
1142 		 */
1143 		if (mesg_out != MSG_NOOP) {
1144 			if (ahc->msg_type != MSG_TYPE_NONE)
1145 				ahc->send_msg_perror = TRUE;
1146 			else
1147 				ahc_outb(ahc, MSG_OUT, mesg_out);
1148 		}
1149 		/*
1150 		 * Force a renegotiation with this target just in
1151 		 * case we are out of sync for some external reason
1152 		 * unknown (or unreported) by the target.
1153 		 */
1154 		ahc_fetch_devinfo(ahc, &devinfo);
1155 		ahc_force_renegotiation(ahc, &devinfo);
1156 
1157 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1158 		ahc_unpause(ahc);
1159 	} else if ((status & SELTO) != 0) {
1160 		u_int	scbptr;
1161 
1162 		/* Stop the selection */
1163 		ahc_outb(ahc, SCSISEQ, 0);
1164 
1165 		/* No more pending messages */
1166 		ahc_clear_msg_state(ahc);
1167 
1168 		/* Clear interrupt state */
1169 		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1170 		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1171 
1172 		/*
1173 		 * Although the driver does not care about the
1174 		 * 'Selection in Progress' status bit, the busy
1175 		 * LED does.  SELINGO is only cleared by a sucessfull
1176 		 * selection, so we must manually clear it to insure
1177 		 * the LED turns off just incase no future successful
1178 		 * selections occur (e.g. no devices on the bus).
1179 		 */
1180 		ahc_outb(ahc, CLRSINT0, CLRSELINGO);
1181 
1182 		scbptr = ahc_inb(ahc, WAITING_SCBH);
1183 		ahc_outb(ahc, SCBPTR, scbptr);
1184 		scb_index = ahc_inb(ahc, SCB_TAG);
1185 
1186 		scb = ahc_lookup_scb(ahc, scb_index);
1187 		if (scb == NULL) {
1188 			printf("%s: ahc_intr - referenced scb not "
1189 			       "valid during SELTO scb(%d, %d)\n",
1190 			       ahc_name(ahc), scbptr, scb_index);
1191 			ahc_dump_card_state(ahc);
1192 		} else {
1193 			struct ahc_devinfo devinfo;
1194 #ifdef AHC_DEBUG
1195 			if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
1196 				ahc_print_path(ahc, scb);
1197 				printf("Saw Selection Timeout for SCB 0x%x\n",
1198 				       scb_index);
1199 			}
1200 #endif
1201 			ahc_scb_devinfo(ahc, &devinfo, scb);
1202 			aic_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1203 			ahc_freeze_devq(ahc, scb);
1204 
1205 			/*
1206 			 * Cancel any pending transactions on the device
1207 			 * now that it seems to be missing.  This will
1208 			 * also revert us to async/narrow transfers until
1209 			 * we can renegotiate with the device.
1210 			 */
1211 			ahc_handle_devreset(ahc, &devinfo,
1212 					    CAM_SEL_TIMEOUT,
1213 					    "Selection Timeout",
1214 					    /*verbose_level*/1);
1215 		}
1216 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1217 		ahc_restart(ahc);
1218 	} else if ((status & BUSFREE) != 0
1219 		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
1220 		struct	ahc_devinfo devinfo;
1221 		u_int	lastphase;
1222 		u_int	saved_scsiid;
1223 		u_int	saved_lun;
1224 		u_int	target;
1225 		u_int	initiator_role_id;
1226 		char	channel;
1227 		int	printerror;
1228 
1229 		/*
1230 		 * Clear our selection hardware as soon as possible.
1231 		 * We may have an entry in the waiting Q for this target,
1232 		 * that is affected by this busfree and we don't want to
1233 		 * go about selecting the target while we handle the event.
1234 		 */
1235 		ahc_outb(ahc, SCSISEQ,
1236 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
1237 
1238 		/*
1239 		 * Disable busfree interrupts and clear the busfree
1240 		 * interrupt status.  We do this here so that several
1241 		 * bus transactions occur prior to clearing the SCSIINT
1242 		 * latch.  It can take a bit for the clearing to take effect.
1243 		 */
1244 		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1245 		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
1246 
1247 		/*
1248 		 * Look at what phase we were last in.
1249 		 * If its message out, chances are pretty good
1250 		 * that the busfree was in response to one of
1251 		 * our abort requests.
1252 		 */
1253 		lastphase = ahc_inb(ahc, LASTPHASE);
1254 		saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1255 		saved_lun = ahc_inb(ahc, SAVED_LUN);
1256 		target = SCSIID_TARGET(ahc, saved_scsiid);
1257 		initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
1258 		channel = SCSIID_CHANNEL(ahc, saved_scsiid);
1259 		ahc_compile_devinfo(&devinfo, initiator_role_id,
1260 				    target, saved_lun, channel, ROLE_INITIATOR);
1261 		printerror = 1;
1262 
1263 		if (lastphase == P_MESGOUT) {
1264 			u_int tag;
1265 
1266 			tag = SCB_LIST_NULL;
1267 			if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
1268 			 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
1269 				if (ahc->msgout_buf[ahc->msgout_index - 1]
1270 				 == MSG_ABORT_TAG)
1271 					tag = scb->hscb->tag;
1272 				ahc_print_path(ahc, scb);
1273 				printf("SCB %d - Abort%s Completed.\n",
1274 				       scb->hscb->tag, tag == SCB_LIST_NULL ?
1275 				       "" : " Tag");
1276 				ahc_abort_scbs(ahc, target, channel,
1277 					       saved_lun, tag,
1278 					       ROLE_INITIATOR,
1279 					       CAM_REQ_ABORTED);
1280 				printerror = 0;
1281 			} else if (ahc_sent_msg(ahc, AHCMSG_1B,
1282 						MSG_BUS_DEV_RESET, TRUE)) {
1283 #ifdef __FreeBSD__
1284 				/*
1285 				 * Don't mark the user's request for this BDR
1286 				 * as completing with CAM_BDR_SENT.  CAM3
1287 				 * specifies CAM_REQ_CMP.
1288 				 */
1289 				if (scb != NULL
1290 				 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
1291 				 && ahc_match_scb(ahc, scb, target, channel,
1292 						  CAM_LUN_WILDCARD,
1293 						  SCB_LIST_NULL,
1294 						  ROLE_INITIATOR)) {
1295 					aic_set_transaction_status(scb, CAM_REQ_CMP);
1296 				}
1297 #endif
1298 				ahc_compile_devinfo(&devinfo,
1299 						    initiator_role_id,
1300 						    target,
1301 						    CAM_LUN_WILDCARD,
1302 						    channel,
1303 						    ROLE_INITIATOR);
1304 				ahc_handle_devreset(ahc, &devinfo,
1305 						    CAM_BDR_SENT,
1306 						    "Bus Device Reset",
1307 						    /*verbose_level*/0);
1308 				printerror = 0;
1309 			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1310 						MSG_EXT_PPR, FALSE)) {
1311 				struct ahc_initiator_tinfo *tinfo;
1312 				struct ahc_tmode_tstate *tstate;
1313 
1314 				/*
1315 				 * PPR Rejected.  Try non-ppr negotiation
1316 				 * and retry command.
1317 				 */
1318 				tinfo = ahc_fetch_transinfo(ahc,
1319 							    devinfo.channel,
1320 							    devinfo.our_scsiid,
1321 							    devinfo.target,
1322 							    &tstate);
1323 				tinfo->curr.transport_version = 2;
1324 				tinfo->goal.transport_version = 2;
1325 				tinfo->goal.ppr_options = 0;
1326 				ahc_qinfifo_requeue_tail(ahc, scb);
1327 				printerror = 0;
1328 			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1329 						MSG_EXT_WDTR, FALSE)) {
1330 				/*
1331 				 * Negotiation Rejected.  Go-narrow and
1332 				 * retry command.
1333 				 */
1334 				ahc_set_width(ahc, &devinfo,
1335 					      MSG_EXT_WDTR_BUS_8_BIT,
1336 					      AHC_TRANS_CUR|AHC_TRANS_GOAL,
1337 					      /*paused*/TRUE);
1338 				ahc_qinfifo_requeue_tail(ahc, scb);
1339 				printerror = 0;
1340 			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1341 						MSG_EXT_SDTR, FALSE)) {
1342 				/*
1343 				 * Negotiation Rejected.  Go-async and
1344 				 * retry command.
1345 				 */
1346 				ahc_set_syncrate(ahc, &devinfo,
1347 						/*syncrate*/NULL,
1348 						/*period*/0, /*offset*/0,
1349 						/*ppr_options*/0,
1350 						AHC_TRANS_CUR|AHC_TRANS_GOAL,
1351 						/*paused*/TRUE);
1352 				ahc_qinfifo_requeue_tail(ahc, scb);
1353 				printerror = 0;
1354 			}
1355 		}
1356 		if (printerror != 0) {
1357 			u_int i;
1358 
1359 			if (scb != NULL) {
1360 				u_int tag;
1361 
1362 				if ((scb->hscb->control & TAG_ENB) != 0)
1363 					tag = scb->hscb->tag;
1364 				else
1365 					tag = SCB_LIST_NULL;
1366 				ahc_print_path(ahc, scb);
1367 				ahc_abort_scbs(ahc, target, channel,
1368 					       SCB_GET_LUN(scb), tag,
1369 					       ROLE_INITIATOR,
1370 					       CAM_UNEXP_BUSFREE);
1371 			} else {
1372 				/*
1373 				 * We had not fully identified this connection,
1374 				 * so we cannot abort anything.
1375 				 */
1376 				printf("%s: ", ahc_name(ahc));
1377 			}
1378 			for (i = 0; i < num_phases; i++) {
1379 				if (lastphase == ahc_phase_table[i].phase)
1380 					break;
1381 			}
1382 			if (lastphase != P_BUSFREE) {
1383 				/*
1384 				 * Renegotiate with this device at the
1385 				 * next oportunity just in case this busfree
1386 				 * is due to a negotiation mismatch with the
1387 				 * device.
1388 				 */
1389 				ahc_force_renegotiation(ahc, &devinfo);
1390 			}
1391 			printf("Unexpected busfree %s\n"
1392 			       "SEQADDR == 0x%x\n",
1393 			       ahc_phase_table[i].phasemsg,
1394 			       ahc_inb(ahc, SEQADDR0)
1395 				| (ahc_inb(ahc, SEQADDR1) << 8));
1396 		}
1397 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1398 		ahc_restart(ahc);
1399 	} else {
1400 		printf("%s: Missing case in ahc_handle_scsiint. status = %x\n",
1401 		       ahc_name(ahc), status);
1402 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1403 	}
1404 }
1405 
1406 /*
1407  * Force renegotiation to occur the next time we initiate
1408  * a command to the current device.
1409  */
1410 static void
1411 ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1412 {
1413 	struct	ahc_initiator_tinfo *targ_info;
1414 	struct	ahc_tmode_tstate *tstate;
1415 
1416 	targ_info = ahc_fetch_transinfo(ahc,
1417 					devinfo->channel,
1418 					devinfo->our_scsiid,
1419 					devinfo->target,
1420 					&tstate);
1421 	ahc_update_neg_request(ahc, devinfo, tstate,
1422 			       targ_info, AHC_NEG_IF_NON_ASYNC);
1423 }
1424 
1425 #define AHC_MAX_STEPS 2000
1426 void
1427 ahc_clear_critical_section(struct ahc_softc *ahc)
1428 {
1429 	int	stepping;
1430 	int	steps;
1431 	u_int	simode0;
1432 	u_int	simode1;
1433 
1434 	if (ahc->num_critical_sections == 0)
1435 		return;
1436 
1437 	stepping = FALSE;
1438 	steps = 0;
1439 	simode0 = 0;
1440 	simode1 = 0;
1441 	for (;;) {
1442 		struct	cs *cs;
1443 		u_int	seqaddr;
1444 		u_int	i;
1445 
1446 		seqaddr = ahc_inb(ahc, SEQADDR0)
1447 			| (ahc_inb(ahc, SEQADDR1) << 8);
1448 
1449 		/*
1450 		 * Seqaddr represents the next instruction to execute,
1451 		 * so we are really executing the instruction just
1452 		 * before it.
1453 		 */
1454 		cs = ahc->critical_sections;
1455 		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
1456 
1457 			if (cs->begin < seqaddr && cs->end >= seqaddr)
1458 				break;
1459 		}
1460 
1461 		if (i == ahc->num_critical_sections)
1462 			break;
1463 
1464 		if (steps > AHC_MAX_STEPS) {
1465 			printf("%s: Infinite loop in critical section\n",
1466 			       ahc_name(ahc));
1467 			ahc_dump_card_state(ahc);
1468 			panic("critical section loop");
1469 		}
1470 
1471 		steps++;
1472 		if (stepping == FALSE) {
1473 
1474 			/*
1475 			 * Disable all interrupt sources so that the
1476 			 * sequencer will not be stuck by a pausing
1477 			 * interrupt condition while we attempt to
1478 			 * leave a critical section.
1479 			 */
1480 			simode0 = ahc_inb(ahc, SIMODE0);
1481 			ahc_outb(ahc, SIMODE0, 0);
1482 			simode1 = ahc_inb(ahc, SIMODE1);
1483 			if ((ahc->features & AHC_DT) != 0)
1484 				/*
1485 				 * On DT class controllers, we
1486 				 * use the enhanced busfree logic.
1487 				 * Unfortunately we cannot re-enable
1488 				 * busfree detection within the
1489 				 * current connection, so we must
1490 				 * leave it on while single stepping.
1491 				 */
1492 				ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE);
1493 			else
1494 				ahc_outb(ahc, SIMODE1, 0);
1495 			ahc_outb(ahc, CLRINT, CLRSCSIINT);
1496 			ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP);
1497 			stepping = TRUE;
1498 		}
1499 		if ((ahc->features & AHC_DT) != 0) {
1500 			ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
1501 			ahc_outb(ahc, CLRINT, CLRSCSIINT);
1502 		}
1503 		ahc_outb(ahc, HCNTRL, ahc->unpause);
1504 		while (!ahc_is_paused(ahc))
1505 			aic_delay(200);
1506 	}
1507 	if (stepping) {
1508 		ahc_outb(ahc, SIMODE0, simode0);
1509 		ahc_outb(ahc, SIMODE1, simode1);
1510 		ahc_outb(ahc, SEQCTL, ahc->seqctl);
1511 	}
1512 }
1513 
1514 /*
1515  * Clear any pending interrupt status.
1516  */
1517 void
1518 ahc_clear_intstat(struct ahc_softc *ahc)
1519 {
1520 	/* Clear any interrupt conditions this may have caused */
1521 	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
1522 				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
1523 				CLRREQINIT);
1524 	ahc_flush_device_writes(ahc);
1525 	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
1526  	ahc_flush_device_writes(ahc);
1527 	ahc_outb(ahc, CLRINT, CLRSCSIINT);
1528 	ahc_flush_device_writes(ahc);
1529 }
1530 
1531 /**************************** Debugging Routines ******************************/
1532 #ifdef AHC_DEBUG
1533 uint32_t ahc_debug = AHC_DEBUG_OPTS;
1534 #endif
1535 
1536 void
1537 ahc_print_scb(struct scb *scb)
1538 {
1539 	int i;
1540 
1541 	struct hardware_scb *hscb = scb->hscb;
1542 
1543 	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
1544 	       (void *)scb,
1545 	       hscb->control,
1546 	       hscb->scsiid,
1547 	       hscb->lun,
1548 	       hscb->cdb_len);
1549 	printf("Shared Data: ");
1550 	for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
1551 		printf("%#02x", hscb->shared_data.cdb[i]);
1552 	printf("        dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
1553 		aic_le32toh(hscb->dataptr),
1554 		aic_le32toh(hscb->datacnt),
1555 		aic_le32toh(hscb->sgptr),
1556 		hscb->tag);
1557 	if (scb->sg_count > 0) {
1558 		for (i = 0; i < scb->sg_count; i++) {
1559 			printf("sg[%d] - Addr 0x%x%x : Length %d\n",
1560 			       i,
1561 			       (aic_le32toh(scb->sg_list[i].len) >> 24
1562 			        & SG_HIGH_ADDR_BITS),
1563 			       aic_le32toh(scb->sg_list[i].addr),
1564 			       aic_le32toh(scb->sg_list[i].len));
1565 		}
1566 	}
1567 }
1568 
1569 /************************* Transfer Negotiation *******************************/
1570 /*
1571  * Allocate per target mode instance (ID we respond to as a target)
1572  * transfer negotiation data structures.
1573  */
1574 static struct ahc_tmode_tstate *
1575 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1576 {
1577 	struct ahc_tmode_tstate *master_tstate;
1578 	struct ahc_tmode_tstate *tstate;
1579 	int i;
1580 
1581 	master_tstate = ahc->enabled_targets[ahc->our_id];
1582 	if (channel == 'B') {
1583 		scsi_id += 8;
1584 		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
1585 	}
1586 	if (ahc->enabled_targets[scsi_id] != NULL
1587 	 && ahc->enabled_targets[scsi_id] != master_tstate)
1588 		panic("%s: ahc_alloc_tstate - Target already allocated",
1589 		      ahc_name(ahc));
1590 	tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate),
1591 						   M_DEVBUF, M_NOWAIT);
1592 	if (tstate == NULL)
1593 		return (NULL);
1594 
1595 	/*
1596 	 * If we have allocated a master tstate, copy user settings from
1597 	 * the master tstate (taken from SRAM or the EEPROM) for this
1598 	 * channel, but reset our current and goal settings to async/narrow
1599 	 * until an initiator talks to us.
1600 	 */
1601 	if (master_tstate != NULL) {
1602 		memcpy(tstate, master_tstate, sizeof(*tstate));
1603 		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
1604 		tstate->ultraenb = 0;
1605 		for (i = 0; i < AHC_NUM_TARGETS; i++) {
1606 			memset(&tstate->transinfo[i].curr, 0,
1607 			      sizeof(tstate->transinfo[i].curr));
1608 			memset(&tstate->transinfo[i].goal, 0,
1609 			      sizeof(tstate->transinfo[i].goal));
1610 		}
1611 	} else
1612 		memset(tstate, 0, sizeof(*tstate));
1613 	ahc->enabled_targets[scsi_id] = tstate;
1614 	return (tstate);
1615 }
1616 
1617 #ifdef AHC_TARGET_MODE
1618 /*
1619  * Free per target mode instance (ID we respond to as a target)
1620  * transfer negotiation data structures.
1621  */
1622 static void
1623 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
1624 {
1625 	struct ahc_tmode_tstate *tstate;
1626 
1627 	/*
1628 	 * Don't clean up our "master" tstate.
1629 	 * It has our default user settings.
1630 	 */
1631 	if (((channel == 'B' && scsi_id == ahc->our_id_b)
1632 	  || (channel == 'A' && scsi_id == ahc->our_id))
1633 	 && force == FALSE)
1634 		return;
1635 
1636 	if (channel == 'B')
1637 		scsi_id += 8;
1638 	tstate = ahc->enabled_targets[scsi_id];
1639 	if (tstate != NULL)
1640 		free(tstate, M_DEVBUF);
1641 	ahc->enabled_targets[scsi_id] = NULL;
1642 }
1643 #endif
1644 
1645 /*
1646  * Called when we have an active connection to a target on the bus,
1647  * this function finds the nearest syncrate to the input period limited
1648  * by the capabilities of the bus connectivity of and sync settings for
1649  * the target.
1650  */
1651 struct ahc_syncrate *
1652 ahc_devlimited_syncrate(struct ahc_softc *ahc,
1653 			struct ahc_initiator_tinfo *tinfo,
1654 			u_int *period, u_int *ppr_options, role_t role)
1655 {
1656 	struct	ahc_transinfo *transinfo;
1657 	u_int	maxsync;
1658 
1659 	if ((ahc->features & AHC_ULTRA2) != 0) {
1660 		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1661 		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1662 			maxsync = AHC_SYNCRATE_DT;
1663 		} else {
1664 			maxsync = AHC_SYNCRATE_ULTRA;
1665 			/* Can't do DT on an SE bus */
1666 			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1667 		}
1668 	} else if ((ahc->features & AHC_ULTRA) != 0) {
1669 		maxsync = AHC_SYNCRATE_ULTRA;
1670 	} else {
1671 		maxsync = AHC_SYNCRATE_FAST;
1672 	}
1673 	/*
1674 	 * Never allow a value higher than our current goal
1675 	 * period otherwise we may allow a target initiated
1676 	 * negotiation to go above the limit as set by the
1677 	 * user.  In the case of an initiator initiated
1678 	 * sync negotiation, we limit based on the user
1679 	 * setting.  This allows the system to still accept
1680 	 * incoming negotiations even if target initiated
1681 	 * negotiation is not performed.
1682 	 */
1683 	if (role == ROLE_TARGET)
1684 		transinfo = &tinfo->user;
1685 	else
1686 		transinfo = &tinfo->goal;
1687 	*ppr_options &= transinfo->ppr_options;
1688 	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
1689 		maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2);
1690 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1691 	}
1692 	if (transinfo->period == 0) {
1693 		*period = 0;
1694 		*ppr_options = 0;
1695 		return (NULL);
1696 	}
1697 	*period = MAX(*period, transinfo->period);
1698 	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1699 }
1700 
1701 /*
1702  * Look up the valid period to SCSIRATE conversion in our table.
1703  * Return the period and offset that should be sent to the target
1704  * if this was the beginning of an SDTR.
1705  */
1706 struct ahc_syncrate *
1707 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1708 		  u_int *ppr_options, u_int maxsync)
1709 {
1710 	struct ahc_syncrate *syncrate;
1711 
1712 	if ((ahc->features & AHC_DT) == 0)
1713 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1714 
1715 	/* Skip all DT only entries if DT is not available */
1716 	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1717 	 && maxsync < AHC_SYNCRATE_ULTRA2)
1718 		maxsync = AHC_SYNCRATE_ULTRA2;
1719 
1720 	for (syncrate = &ahc_syncrates[maxsync];
1721 	     syncrate->rate != NULL;
1722 	     syncrate++) {
1723 
1724 		/*
1725 		 * The Ultra2 table doesn't go as low
1726 		 * as for the Fast/Ultra cards.
1727 		 */
1728 		if ((ahc->features & AHC_ULTRA2) != 0
1729 		 && (syncrate->sxfr_u2 == 0))
1730 			break;
1731 
1732 		if (*period <= syncrate->period) {
1733 			/*
1734 			 * When responding to a target that requests
1735 			 * sync, the requested rate may fall between
1736 			 * two rates that we can output, but still be
1737 			 * a rate that we can receive.  Because of this,
1738 			 * we want to respond to the target with
1739 			 * the same rate that it sent to us even
1740 			 * if the period we use to send data to it
1741 			 * is lower.  Only lower the response period
1742 			 * if we must.
1743 			 */
1744 			if (syncrate == &ahc_syncrates[maxsync])
1745 				*period = syncrate->period;
1746 
1747 			/*
1748 			 * At some speeds, we only support
1749 			 * ST transfers.
1750 			 */
1751 		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1752 				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1753 			break;
1754 		}
1755 	}
1756 
1757 	if ((*period == 0)
1758 	 || (syncrate->rate == NULL)
1759 	 || ((ahc->features & AHC_ULTRA2) != 0
1760 	  && (syncrate->sxfr_u2 == 0))) {
1761 		/* Use asynchronous transfers. */
1762 		*period = 0;
1763 		syncrate = NULL;
1764 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1765 	}
1766 	return (syncrate);
1767 }
1768 
1769 /*
1770  * Convert from an entry in our syncrate table to the SCSI equivalent
1771  * sync "period" factor.
1772  */
1773 u_int
1774 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1775 {
1776 	struct ahc_syncrate *syncrate;
1777 
1778 	if ((ahc->features & AHC_ULTRA2) != 0)
1779 		scsirate &= SXFR_ULTRA2;
1780 	else
1781 		scsirate &= SXFR;
1782 
1783 	syncrate = &ahc_syncrates[maxsync];
1784 	while (syncrate->rate != NULL) {
1785 
1786 		if ((ahc->features & AHC_ULTRA2) != 0) {
1787 			if (syncrate->sxfr_u2 == 0)
1788 				break;
1789 			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1790 				return (syncrate->period);
1791 		} else if (scsirate == (syncrate->sxfr & SXFR)) {
1792 				return (syncrate->period);
1793 		}
1794 		syncrate++;
1795 	}
1796 	return (0); /* async */
1797 }
1798 
1799 /*
1800  * Truncate the given synchronous offset to a value the
1801  * current adapter type and syncrate are capable of.
1802  */
1803 void
1804 ahc_validate_offset(struct ahc_softc *ahc,
1805 		    struct ahc_initiator_tinfo *tinfo,
1806 		    struct ahc_syncrate *syncrate,
1807 		    u_int *offset, int wide, role_t role)
1808 {
1809 	u_int maxoffset;
1810 
1811 	/* Limit offset to what we can do */
1812 	if (syncrate == NULL) {
1813 		maxoffset = 0;
1814 	} else if ((ahc->features & AHC_ULTRA2) != 0) {
1815 		maxoffset = MAX_OFFSET_ULTRA2;
1816 	} else {
1817 		if (wide)
1818 			maxoffset = MAX_OFFSET_16BIT;
1819 		else
1820 			maxoffset = MAX_OFFSET_8BIT;
1821 	}
1822 	*offset = MIN(*offset, maxoffset);
1823 	if (tinfo != NULL) {
1824 		if (role == ROLE_TARGET)
1825 			*offset = MIN(*offset, tinfo->user.offset);
1826 		else
1827 			*offset = MIN(*offset, tinfo->goal.offset);
1828 	}
1829 }
1830 
1831 /*
1832  * Truncate the given transfer width parameter to a value the
1833  * current adapter type is capable of.
1834  */
1835 void
1836 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1837 		   u_int *bus_width, role_t role)
1838 {
1839 	switch (*bus_width) {
1840 	default:
1841 		if (ahc->features & AHC_WIDE) {
1842 			/* Respond Wide */
1843 			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1844 			break;
1845 		}
1846 		/* FALLTHROUGH */
1847 	case MSG_EXT_WDTR_BUS_8_BIT:
1848 		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1849 		break;
1850 	}
1851 	if (tinfo != NULL) {
1852 		if (role == ROLE_TARGET)
1853 			*bus_width = MIN(tinfo->user.width, *bus_width);
1854 		else
1855 			*bus_width = MIN(tinfo->goal.width, *bus_width);
1856 	}
1857 }
1858 
1859 /*
1860  * Update the bitmask of targets for which the controller should
1861  * negotiate with at the next convenient oportunity.  This currently
1862  * means the next time we send the initial identify messages for
1863  * a new transaction.
1864  */
1865 int
1866 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1867 		       struct ahc_tmode_tstate *tstate,
1868 		       struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type)
1869 {
1870 	u_int auto_negotiate_orig;
1871 
1872 	auto_negotiate_orig = tstate->auto_negotiate;
1873 	if (neg_type == AHC_NEG_ALWAYS) {
1874 		/*
1875 		 * Force our "current" settings to be
1876 		 * unknown so that unless a bus reset
1877 		 * occurs the need to renegotiate is
1878 		 * recorded persistently.
1879 		 */
1880 		if ((ahc->features & AHC_WIDE) != 0)
1881 			tinfo->curr.width = AHC_WIDTH_UNKNOWN;
1882 		tinfo->curr.period = AHC_PERIOD_UNKNOWN;
1883 		tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
1884 	}
1885 	if (tinfo->curr.period != tinfo->goal.period
1886 	 || tinfo->curr.width != tinfo->goal.width
1887 	 || tinfo->curr.offset != tinfo->goal.offset
1888 	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
1889 	 || (neg_type == AHC_NEG_IF_NON_ASYNC
1890 	  && (tinfo->goal.offset != 0
1891 	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
1892 	   || tinfo->goal.ppr_options != 0)))
1893 		tstate->auto_negotiate |= devinfo->target_mask;
1894 	else
1895 		tstate->auto_negotiate &= ~devinfo->target_mask;
1896 
1897 	return (auto_negotiate_orig != tstate->auto_negotiate);
1898 }
1899 
1900 /*
1901  * Update the user/goal/curr tables of synchronous negotiation
1902  * parameters as well as, in the case of a current or active update,
1903  * any data structures on the host controller.  In the case of an
1904  * active update, the specified target is currently talking to us on
1905  * the bus, so the transfer parameter update must take effect
1906  * immediately.
1907  */
1908 void
1909 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1910 		 struct ahc_syncrate *syncrate, u_int period,
1911 		 u_int offset, u_int ppr_options, u_int type, int paused)
1912 {
1913 	struct	ahc_initiator_tinfo *tinfo;
1914 	struct	ahc_tmode_tstate *tstate;
1915 	u_int	old_period;
1916 	u_int	old_offset;
1917 	u_int	old_ppr;
1918 	int	active;
1919 	int	update_needed;
1920 
1921 	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1922 	update_needed = 0;
1923 
1924 	if (syncrate == NULL) {
1925 		period = 0;
1926 		offset = 0;
1927 	}
1928 
1929 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1930 				    devinfo->target, &tstate);
1931 
1932 	if ((type & AHC_TRANS_USER) != 0) {
1933 		tinfo->user.period = period;
1934 		tinfo->user.offset = offset;
1935 		tinfo->user.ppr_options = ppr_options;
1936 	}
1937 
1938 	if ((type & AHC_TRANS_GOAL) != 0) {
1939 		tinfo->goal.period = period;
1940 		tinfo->goal.offset = offset;
1941 		tinfo->goal.ppr_options = ppr_options;
1942 	}
1943 
1944 	old_period = tinfo->curr.period;
1945 	old_offset = tinfo->curr.offset;
1946 	old_ppr	   = tinfo->curr.ppr_options;
1947 
1948 	if ((type & AHC_TRANS_CUR) != 0
1949 	 && (old_period != period
1950 	  || old_offset != offset
1951 	  || old_ppr != ppr_options)) {
1952 		u_int	scsirate;
1953 
1954 		update_needed++;
1955 		scsirate = tinfo->scsirate;
1956 		if ((ahc->features & AHC_ULTRA2) != 0) {
1957 
1958 			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
1959 			if (syncrate != NULL) {
1960 				scsirate |= syncrate->sxfr_u2;
1961 				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
1962 					scsirate |= ENABLE_CRC;
1963 				else
1964 					scsirate |= SINGLE_EDGE;
1965 			}
1966 		} else {
1967 
1968 			scsirate &= ~(SXFR|SOFS);
1969 			/*
1970 			 * Ensure Ultra mode is set properly for
1971 			 * this target.
1972 			 */
1973 			tstate->ultraenb &= ~devinfo->target_mask;
1974 			if (syncrate != NULL) {
1975 				if (syncrate->sxfr & ULTRA_SXFR) {
1976 					tstate->ultraenb |=
1977 						devinfo->target_mask;
1978 				}
1979 				scsirate |= syncrate->sxfr & SXFR;
1980 				scsirate |= offset & SOFS;
1981 			}
1982 			if (active) {
1983 				u_int sxfrctl0;
1984 
1985 				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
1986 				sxfrctl0 &= ~FAST20;
1987 				if (tstate->ultraenb & devinfo->target_mask)
1988 					sxfrctl0 |= FAST20;
1989 				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
1990 			}
1991 		}
1992 		if (active) {
1993 			ahc_outb(ahc, SCSIRATE, scsirate);
1994 			if ((ahc->features & AHC_ULTRA2) != 0)
1995 				ahc_outb(ahc, SCSIOFFSET, offset);
1996 		}
1997 
1998 		tinfo->scsirate = scsirate;
1999 		tinfo->curr.period = period;
2000 		tinfo->curr.offset = offset;
2001 		tinfo->curr.ppr_options = ppr_options;
2002 
2003 		ahc_send_async(ahc, devinfo->channel, devinfo->target,
2004 			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
2005 		if (bootverbose) {
2006 			if (offset != 0) {
2007 				printf("%s: target %d synchronous at %sMHz%s, "
2008 				       "offset = 0x%x\n", ahc_name(ahc),
2009 				       devinfo->target, syncrate->rate,
2010 				       (ppr_options & MSG_EXT_PPR_DT_REQ)
2011 				       ? " DT" : "", offset);
2012 			} else {
2013 				printf("%s: target %d using "
2014 				       "asynchronous transfers\n",
2015 				       ahc_name(ahc), devinfo->target);
2016 			}
2017 		}
2018 	}
2019 
2020 	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
2021 						tinfo, AHC_NEG_TO_GOAL);
2022 
2023 	if (update_needed)
2024 		ahc_update_pending_scbs(ahc);
2025 }
2026 
2027 /*
2028  * Update the user/goal/curr tables of wide negotiation
2029  * parameters as well as, in the case of a current or active update,
2030  * any data structures on the host controller.  In the case of an
2031  * active update, the specified target is currently talking to us on
2032  * the bus, so the transfer parameter update must take effect
2033  * immediately.
2034  */
2035 void
2036 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2037 	      u_int width, u_int type, int paused)
2038 {
2039 	struct	ahc_initiator_tinfo *tinfo;
2040 	struct	ahc_tmode_tstate *tstate;
2041 	u_int	oldwidth;
2042 	int	active;
2043 	int	update_needed;
2044 
2045 	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
2046 	update_needed = 0;
2047 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2048 				    devinfo->target, &tstate);
2049 
2050 	if ((type & AHC_TRANS_USER) != 0)
2051 		tinfo->user.width = width;
2052 
2053 	if ((type & AHC_TRANS_GOAL) != 0)
2054 		tinfo->goal.width = width;
2055 
2056 	oldwidth = tinfo->curr.width;
2057 	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
2058 		u_int	scsirate;
2059 
2060 		update_needed++;
2061 		scsirate =  tinfo->scsirate;
2062 		scsirate &= ~WIDEXFER;
2063 		if (width == MSG_EXT_WDTR_BUS_16_BIT)
2064 			scsirate |= WIDEXFER;
2065 
2066 		tinfo->scsirate = scsirate;
2067 
2068 		if (active)
2069 			ahc_outb(ahc, SCSIRATE, scsirate);
2070 
2071 		tinfo->curr.width = width;
2072 
2073 		ahc_send_async(ahc, devinfo->channel, devinfo->target,
2074 			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
2075 		if (bootverbose) {
2076 			printf("%s: target %d using %dbit transfers\n",
2077 			       ahc_name(ahc), devinfo->target,
2078 			       8 * (0x01 << width));
2079 		}
2080 	}
2081 
2082 	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
2083 						tinfo, AHC_NEG_TO_GOAL);
2084 	if (update_needed)
2085 		ahc_update_pending_scbs(ahc);
2086 }
2087 
2088 /*
2089  * Update the current state of tagged queuing for a given target.
2090  */
2091 void
2092 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2093 	     ahc_queue_alg alg)
2094 {
2095  	ahc_platform_set_tags(ahc, devinfo, alg);
2096  	ahc_send_async(ahc, devinfo->channel, devinfo->target,
2097  		       devinfo->lun, AC_TRANSFER_NEG, &alg);
2098 }
2099 
2100 /*
2101  * When the transfer settings for a connection change, update any
2102  * in-transit SCBs to contain the new data so the hardware will
2103  * be set correctly during future (re)selections.
2104  */
2105 static void
2106 ahc_update_pending_scbs(struct ahc_softc *ahc)
2107 {
2108 	struct	scb *pending_scb;
2109 	int	pending_scb_count;
2110 	int	i;
2111 	int	paused;
2112 	u_int	saved_scbptr;
2113 
2114 	/*
2115 	 * Traverse the pending SCB list and ensure that all of the
2116 	 * SCBs there have the proper settings.
2117 	 */
2118 	pending_scb_count = 0;
2119 	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
2120 		struct ahc_devinfo devinfo;
2121 		struct hardware_scb *pending_hscb;
2122 		struct ahc_initiator_tinfo *tinfo;
2123 		struct ahc_tmode_tstate *tstate;
2124 
2125 		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
2126 		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
2127 					    devinfo.our_scsiid,
2128 					    devinfo.target, &tstate);
2129 		pending_hscb = pending_scb->hscb;
2130 		pending_hscb->control &= ~ULTRAENB;
2131 		if ((tstate->ultraenb & devinfo.target_mask) != 0)
2132 			pending_hscb->control |= ULTRAENB;
2133 		pending_hscb->scsirate = tinfo->scsirate;
2134 		pending_hscb->scsioffset = tinfo->curr.offset;
2135 		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
2136 		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
2137 			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
2138 			pending_hscb->control &= ~MK_MESSAGE;
2139 		}
2140 		ahc_sync_scb(ahc, pending_scb,
2141 			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2142 		pending_scb_count++;
2143 	}
2144 
2145 	if (pending_scb_count == 0)
2146 		return;
2147 
2148 	if (ahc_is_paused(ahc)) {
2149 		paused = 1;
2150 	} else {
2151 		paused = 0;
2152 		ahc_pause(ahc);
2153 	}
2154 
2155 	saved_scbptr = ahc_inb(ahc, SCBPTR);
2156 	/* Ensure that the hscbs down on the card match the new information */
2157 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
2158 		struct	hardware_scb *pending_hscb;
2159 		u_int	control;
2160 		u_int	scb_tag;
2161 
2162 		ahc_outb(ahc, SCBPTR, i);
2163 		scb_tag = ahc_inb(ahc, SCB_TAG);
2164 		pending_scb = ahc_lookup_scb(ahc, scb_tag);
2165 		if (pending_scb == NULL)
2166 			continue;
2167 
2168 		pending_hscb = pending_scb->hscb;
2169 		control = ahc_inb(ahc, SCB_CONTROL);
2170 		control &= ~(ULTRAENB|MK_MESSAGE);
2171 		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
2172 		ahc_outb(ahc, SCB_CONTROL, control);
2173 		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
2174 		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
2175 	}
2176 	ahc_outb(ahc, SCBPTR, saved_scbptr);
2177 
2178 	if (paused == 0)
2179 		ahc_unpause(ahc);
2180 }
2181 
2182 /**************************** Pathing Information *****************************/
2183 static void
2184 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2185 {
2186 	u_int	saved_scsiid;
2187 	role_t	role;
2188 	int	our_id;
2189 
2190 	if (ahc_inb(ahc, SSTAT0) & TARGET)
2191 		role = ROLE_TARGET;
2192 	else
2193 		role = ROLE_INITIATOR;
2194 
2195 	if (role == ROLE_TARGET
2196 	 && (ahc->features & AHC_MULTI_TID) != 0
2197 	 && (ahc_inb(ahc, SEQ_FLAGS)
2198  	   & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
2199 		/* We were selected, so pull our id from TARGIDIN */
2200 		our_id = ahc_inb(ahc, TARGIDIN) & OID;
2201 	} else if ((ahc->features & AHC_ULTRA2) != 0)
2202 		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
2203 	else
2204 		our_id = ahc_inb(ahc, SCSIID) & OID;
2205 
2206 	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
2207 	ahc_compile_devinfo(devinfo,
2208 			    our_id,
2209 			    SCSIID_TARGET(ahc, saved_scsiid),
2210 			    ahc_inb(ahc, SAVED_LUN),
2211 			    SCSIID_CHANNEL(ahc, saved_scsiid),
2212 			    role);
2213 }
2214 
2215 struct ahc_phase_table_entry*
2216 ahc_lookup_phase_entry(int phase)
2217 {
2218 	struct ahc_phase_table_entry *entry;
2219 	struct ahc_phase_table_entry *last_entry;
2220 
2221 	/*
2222 	 * num_phases doesn't include the default entry which
2223 	 * will be returned if the phase doesn't match.
2224 	 */
2225 	last_entry = &ahc_phase_table[num_phases];
2226 	for (entry = ahc_phase_table; entry < last_entry; entry++) {
2227 		if (phase == entry->phase)
2228 			break;
2229 	}
2230 	return (entry);
2231 }
2232 
2233 void
2234 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
2235 		    u_int lun, char channel, role_t role)
2236 {
2237 	devinfo->our_scsiid = our_id;
2238 	devinfo->target = target;
2239 	devinfo->lun = lun;
2240 	devinfo->target_offset = target;
2241 	devinfo->channel = channel;
2242 	devinfo->role = role;
2243 	if (channel == 'B')
2244 		devinfo->target_offset += 8;
2245 	devinfo->target_mask = (0x01 << devinfo->target_offset);
2246 }
2247 
2248 void
2249 ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2250 {
2251 	printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
2252 	       devinfo->target, devinfo->lun);
2253 }
2254 
2255 static void
2256 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2257 		struct scb *scb)
2258 {
2259 	role_t	role;
2260 	int	our_id;
2261 
2262 	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
2263 	role = ROLE_INITIATOR;
2264 	if ((scb->flags & SCB_TARGET_SCB) != 0)
2265 		role = ROLE_TARGET;
2266 	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
2267 			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
2268 }
2269 
2270 
2271 /************************ Message Phase Processing ****************************/
2272 static void
2273 ahc_assert_atn(struct ahc_softc *ahc)
2274 {
2275 	u_int scsisigo;
2276 
2277 	scsisigo = ATNO;
2278 	if ((ahc->features & AHC_DT) == 0)
2279 		scsisigo |= ahc_inb(ahc, SCSISIGI);
2280 	ahc_outb(ahc, SCSISIGO, scsisigo);
2281 }
2282 
2283 /*
2284  * When an initiator transaction with the MK_MESSAGE flag either reconnects
2285  * or enters the initial message out phase, we are interrupted.  Fill our
2286  * outgoing message buffer with the appropriate message and beging handing
2287  * the message phase(s) manually.
2288  */
2289 static void
2290 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2291 			   struct scb *scb)
2292 {
2293 	/*
2294 	 * To facilitate adding multiple messages together,
2295 	 * each routine should increment the index and len
2296 	 * variables instead of setting them explicitly.
2297 	 */
2298 	ahc->msgout_index = 0;
2299 	ahc->msgout_len = 0;
2300 
2301 	if ((scb->flags & SCB_DEVICE_RESET) == 0
2302 	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
2303 		u_int identify_msg;
2304 
2305 		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
2306 		if ((scb->hscb->control & DISCENB) != 0)
2307 			identify_msg |= MSG_IDENTIFY_DISCFLAG;
2308 		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
2309 		ahc->msgout_len++;
2310 
2311 		if ((scb->hscb->control & TAG_ENB) != 0) {
2312 			ahc->msgout_buf[ahc->msgout_index++] =
2313 			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
2314 			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
2315 			ahc->msgout_len += 2;
2316 		}
2317 	}
2318 
2319 	if (scb->flags & SCB_DEVICE_RESET) {
2320 		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
2321 		ahc->msgout_len++;
2322 		ahc_print_path(ahc, scb);
2323 		printf("Bus Device Reset Message Sent\n");
2324 		/*
2325 		 * Clear our selection hardware in advance of
2326 		 * the busfree.  We may have an entry in the waiting
2327 		 * Q for this target, and we don't want to go about
2328 		 * selecting while we handle the busfree and blow it
2329 		 * away.
2330 		 */
2331 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2332 	} else if ((scb->flags & SCB_ABORT) != 0) {
2333 		if ((scb->hscb->control & TAG_ENB) != 0)
2334 			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
2335 		else
2336 			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
2337 		ahc->msgout_len++;
2338 		ahc_print_path(ahc, scb);
2339 		printf("Abort%s Message Sent\n",
2340 		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
2341 		/*
2342 		 * Clear our selection hardware in advance of
2343 		 * the busfree.  We may have an entry in the waiting
2344 		 * Q for this target, and we don't want to go about
2345 		 * selecting while we handle the busfree and blow it
2346 		 * away.
2347 		 */
2348 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2349 	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
2350 		ahc_build_transfer_msg(ahc, devinfo);
2351 	} else {
2352 		printf("ahc_intr: AWAITING_MSG for an SCB that "
2353 		       "does not have a waiting message\n");
2354 		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
2355 		       devinfo->target_mask);
2356 		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
2357 		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
2358 		      ahc_inb(ahc, MSG_OUT), scb->flags);
2359 	}
2360 
2361 	/*
2362 	 * Clear the MK_MESSAGE flag from the SCB so we aren't
2363 	 * asked to send this message again.
2364 	 */
2365 	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
2366 	scb->hscb->control &= ~MK_MESSAGE;
2367 	ahc->msgout_index = 0;
2368 	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2369 }
2370 
2371 /*
2372  * Build an appropriate transfer negotiation message for the
2373  * currently active target.
2374  */
2375 static void
2376 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2377 {
2378 	/*
2379 	 * We need to initiate transfer negotiations.
2380 	 * If our current and goal settings are identical,
2381 	 * we want to renegotiate due to a check condition.
2382 	 */
2383 	struct	ahc_initiator_tinfo *tinfo;
2384 	struct	ahc_tmode_tstate *tstate;
2385 	struct	ahc_syncrate *rate;
2386 	int	dowide;
2387 	int	dosync;
2388 	int	doppr;
2389 	u_int	period;
2390 	u_int	ppr_options;
2391 	u_int	offset;
2392 
2393 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2394 				    devinfo->target, &tstate);
2395 	/*
2396 	 * Filter our period based on the current connection.
2397 	 * If we can't perform DT transfers on this segment (not in LVD
2398 	 * mode for instance), then our decision to issue a PPR message
2399 	 * may change.
2400 	 */
2401 	period = tinfo->goal.period;
2402 	offset = tinfo->goal.offset;
2403 	ppr_options = tinfo->goal.ppr_options;
2404 	/* Target initiated PPR is not allowed in the SCSI spec */
2405 	if (devinfo->role == ROLE_TARGET)
2406 		ppr_options = 0;
2407 	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2408 				       &ppr_options, devinfo->role);
2409 	dowide = tinfo->curr.width != tinfo->goal.width;
2410 	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
2411 	/*
2412 	 * Only use PPR if we have options that need it, even if the device
2413 	 * claims to support it.  There might be an expander in the way
2414 	 * that doesn't.
2415 	 */
2416 	doppr = ppr_options != 0;
2417 
2418 	if (!dowide && !dosync && !doppr) {
2419 		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
2420 		dosync = tinfo->goal.offset != 0;
2421 	}
2422 
2423 	if (!dowide && !dosync && !doppr) {
2424 		/*
2425 		 * Force async with a WDTR message if we have a wide bus,
2426 		 * or just issue an SDTR with a 0 offset.
2427 		 */
2428 		if ((ahc->features & AHC_WIDE) != 0)
2429 			dowide = 1;
2430 		else
2431 			dosync = 1;
2432 
2433 		if (bootverbose) {
2434 			ahc_print_devinfo(ahc, devinfo);
2435 			printf("Ensuring async\n");
2436 		}
2437 	}
2438 
2439 	/* Target initiated PPR is not allowed in the SCSI spec */
2440 	if (devinfo->role == ROLE_TARGET)
2441 		doppr = 0;
2442 
2443 	/*
2444 	 * Both the PPR message and SDTR message require the
2445 	 * goal syncrate to be limited to what the target device
2446 	 * is capable of handling (based on whether an LVD->SE
2447 	 * expander is on the bus), so combine these two cases.
2448 	 * Regardless, guarantee that if we are using WDTR and SDTR
2449 	 * messages that WDTR comes first.
2450 	 */
2451 	if (doppr || (dosync && !dowide)) {
2452 
2453 		offset = tinfo->goal.offset;
2454 		ahc_validate_offset(ahc, tinfo, rate, &offset,
2455 				    doppr ? tinfo->goal.width
2456 					  : tinfo->curr.width,
2457 				    devinfo->role);
2458 		if (doppr) {
2459 			ahc_construct_ppr(ahc, devinfo, period, offset,
2460 					  tinfo->goal.width, ppr_options);
2461 		} else {
2462 			ahc_construct_sdtr(ahc, devinfo, period, offset);
2463 		}
2464 	} else {
2465 		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
2466 	}
2467 }
2468 
2469 /*
2470  * Build a synchronous negotiation message in our message
2471  * buffer based on the input parameters.
2472  */
2473 static void
2474 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2475 		   u_int period, u_int offset)
2476 {
2477 	if (offset == 0)
2478 		period = AHC_ASYNC_XFER_PERIOD;
2479 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2480 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
2481 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
2482 	ahc->msgout_buf[ahc->msgout_index++] = period;
2483 	ahc->msgout_buf[ahc->msgout_index++] = offset;
2484 	ahc->msgout_len += 5;
2485 	if (bootverbose) {
2486 		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
2487 		       ahc_name(ahc), devinfo->channel, devinfo->target,
2488 		       devinfo->lun, period, offset);
2489 	}
2490 }
2491 
2492 /*
2493  * Build a wide negotiation message in our message
2494  * buffer based on the input parameters.
2495  */
2496 static void
2497 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2498 		   u_int bus_width)
2499 {
2500 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2501 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
2502 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
2503 	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2504 	ahc->msgout_len += 4;
2505 	if (bootverbose) {
2506 		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
2507 		       ahc_name(ahc), devinfo->channel, devinfo->target,
2508 		       devinfo->lun, bus_width);
2509 	}
2510 }
2511 
2512 /*
2513  * Build a parallel protocol request message in our message
2514  * buffer based on the input parameters.
2515  */
2516 static void
2517 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2518 		  u_int period, u_int offset, u_int bus_width,
2519 		  u_int ppr_options)
2520 {
2521 	if (offset == 0)
2522 		period = AHC_ASYNC_XFER_PERIOD;
2523 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2524 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
2525 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
2526 	ahc->msgout_buf[ahc->msgout_index++] = period;
2527 	ahc->msgout_buf[ahc->msgout_index++] = 0;
2528 	ahc->msgout_buf[ahc->msgout_index++] = offset;
2529 	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2530 	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
2531 	ahc->msgout_len += 8;
2532 	if (bootverbose) {
2533 		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
2534 		       "offset %x, ppr_options %x\n", ahc_name(ahc),
2535 		       devinfo->channel, devinfo->target, devinfo->lun,
2536 		       bus_width, period, offset, ppr_options);
2537 	}
2538 }
2539 
2540 /*
2541  * Clear any active message state.
2542  */
2543 static void
2544 ahc_clear_msg_state(struct ahc_softc *ahc)
2545 {
2546 	ahc->msgout_len = 0;
2547 	ahc->msgin_index = 0;
2548 	ahc->msg_type = MSG_TYPE_NONE;
2549 	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
2550 		/*
2551 		 * The target didn't care to respond to our
2552 		 * message request, so clear ATN.
2553 		 */
2554 		ahc_outb(ahc, CLRSINT1, CLRATNO);
2555 	}
2556 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
2557 	ahc_outb(ahc, SEQ_FLAGS2,
2558 		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
2559 }
2560 
2561 static void
2562 ahc_handle_proto_violation(struct ahc_softc *ahc)
2563 {
2564 	struct	ahc_devinfo devinfo;
2565 	struct	scb *scb;
2566 	u_int	scbid;
2567 	u_int	seq_flags;
2568 	u_int	curphase;
2569 	u_int	lastphase;
2570 	int	found;
2571 
2572 	ahc_fetch_devinfo(ahc, &devinfo);
2573 	scbid = ahc_inb(ahc, SCB_TAG);
2574 	scb = ahc_lookup_scb(ahc, scbid);
2575 	seq_flags = ahc_inb(ahc, SEQ_FLAGS);
2576 	curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2577 	lastphase = ahc_inb(ahc, LASTPHASE);
2578 	if ((seq_flags & NOT_IDENTIFIED) != 0) {
2579 
2580 		/*
2581 		 * The reconnecting target either did not send an
2582 		 * identify message, or did, but we didn't find an SCB
2583 		 * to match.
2584 		 */
2585 		ahc_print_devinfo(ahc, &devinfo);
2586 		printf("Target did not send an IDENTIFY message. "
2587 		       "LASTPHASE = 0x%x.\n", lastphase);
2588 		scb = NULL;
2589 	} else if (scb == NULL) {
2590 		/*
2591 		 * We don't seem to have an SCB active for this
2592 		 * transaction.  Print an error and reset the bus.
2593 		 */
2594 		ahc_print_devinfo(ahc, &devinfo);
2595 		printf("No SCB found during protocol violation\n");
2596 		goto proto_violation_reset;
2597 	} else {
2598 		aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
2599 		if ((seq_flags & NO_CDB_SENT) != 0) {
2600 			ahc_print_path(ahc, scb);
2601 			printf("No or incomplete CDB sent to device.\n");
2602 		} else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
2603 			/*
2604 			 * The target never bothered to provide status to
2605 			 * us prior to completing the command.  Since we don't
2606 			 * know the disposition of this command, we must attempt
2607 			 * to abort it.  Assert ATN and prepare to send an abort
2608 			 * message.
2609 			 */
2610 			ahc_print_path(ahc, scb);
2611 			printf("Completed command without status.\n");
2612 		} else {
2613 			ahc_print_path(ahc, scb);
2614 			printf("Unknown protocol violation.\n");
2615 			ahc_dump_card_state(ahc);
2616 		}
2617 	}
2618 	if ((lastphase & ~P_DATAIN_DT) == 0
2619 	 || lastphase == P_COMMAND) {
2620 proto_violation_reset:
2621 		/*
2622 		 * Target either went directly to data/command
2623 		 * phase or didn't respond to our ATN.
2624 		 * The only safe thing to do is to blow
2625 		 * it away with a bus reset.
2626 		 */
2627 		found = ahc_reset_channel(ahc, 'A', TRUE);
2628 		printf("%s: Issued Channel %c Bus Reset. "
2629 		       "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
2630 	} else {
2631 		/*
2632 		 * Leave the selection hardware off in case
2633 		 * this abort attempt will affect yet to
2634 		 * be sent commands.
2635 		 */
2636 		ahc_outb(ahc, SCSISEQ,
2637 			 ahc_inb(ahc, SCSISEQ) & ~ENSELO);
2638 		ahc_assert_atn(ahc);
2639 		ahc_outb(ahc, MSG_OUT, HOST_MSG);
2640 		if (scb == NULL) {
2641 			ahc_print_devinfo(ahc, &devinfo);
2642 			ahc->msgout_buf[0] = MSG_ABORT_TASK;
2643 			ahc->msgout_len = 1;
2644 			ahc->msgout_index = 0;
2645 			ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2646 		} else {
2647 			ahc_print_path(ahc, scb);
2648 			scb->flags |= SCB_ABORT;
2649 		}
2650 		printf("Protocol violation %s.  Attempting to abort.\n",
2651 		       ahc_lookup_phase_entry(curphase)->phasemsg);
2652 	}
2653 }
2654 
2655 /*
2656  * Manual message loop handler.
2657  */
2658 static void
2659 ahc_handle_message_phase(struct ahc_softc *ahc)
2660 {
2661 	struct	ahc_devinfo devinfo;
2662 	u_int	bus_phase;
2663 	int	end_session;
2664 
2665 	ahc_fetch_devinfo(ahc, &devinfo);
2666 	end_session = FALSE;
2667 	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2668 
2669 reswitch:
2670 	switch (ahc->msg_type) {
2671 	case MSG_TYPE_INITIATOR_MSGOUT:
2672 	{
2673 		int lastbyte;
2674 		int phasemis;
2675 		int msgdone;
2676 
2677 		if (ahc->msgout_len == 0)
2678 			panic("HOST_MSG_LOOP interrupt with no active message");
2679 
2680 #ifdef AHC_DEBUG
2681 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2682 			ahc_print_devinfo(ahc, &devinfo);
2683 			printf("INITIATOR_MSG_OUT");
2684 		}
2685 #endif
2686 		phasemis = bus_phase != P_MESGOUT;
2687 		if (phasemis) {
2688 #ifdef AHC_DEBUG
2689 			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2690 				printf(" PHASEMIS %s\n",
2691 				       ahc_lookup_phase_entry(bus_phase)
2692 							     ->phasemsg);
2693 			}
2694 #endif
2695 			if (bus_phase == P_MESGIN) {
2696 				/*
2697 				 * Change gears and see if
2698 				 * this messages is of interest to
2699 				 * us or should be passed back to
2700 				 * the sequencer.
2701 				 */
2702 				ahc_outb(ahc, CLRSINT1, CLRATNO);
2703 				ahc->send_msg_perror = FALSE;
2704 				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
2705 				ahc->msgin_index = 0;
2706 				goto reswitch;
2707 			}
2708 			end_session = TRUE;
2709 			break;
2710 		}
2711 
2712 		if (ahc->send_msg_perror) {
2713 			ahc_outb(ahc, CLRSINT1, CLRATNO);
2714 			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2715 #ifdef AHC_DEBUG
2716 			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2717 				printf(" byte 0x%x\n", ahc->send_msg_perror);
2718 #endif
2719 			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
2720 			break;
2721 		}
2722 
2723 		msgdone	= ahc->msgout_index == ahc->msgout_len;
2724 		if (msgdone) {
2725 			/*
2726 			 * The target has requested a retry.
2727 			 * Re-assert ATN, reset our message index to
2728 			 * 0, and try again.
2729 			 */
2730 			ahc->msgout_index = 0;
2731 			ahc_assert_atn(ahc);
2732 		}
2733 
2734 		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
2735 		if (lastbyte) {
2736 			/* Last byte is signified by dropping ATN */
2737 			ahc_outb(ahc, CLRSINT1, CLRATNO);
2738 		}
2739 
2740 		/*
2741 		 * Clear our interrupt status and present
2742 		 * the next byte on the bus.
2743 		 */
2744 		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2745 #ifdef AHC_DEBUG
2746 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2747 			printf(" byte 0x%x\n",
2748 			       ahc->msgout_buf[ahc->msgout_index]);
2749 #endif
2750 		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2751 		break;
2752 	}
2753 	case MSG_TYPE_INITIATOR_MSGIN:
2754 	{
2755 		int phasemis;
2756 		int message_done;
2757 
2758 #ifdef AHC_DEBUG
2759 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2760 			ahc_print_devinfo(ahc, &devinfo);
2761 			printf("INITIATOR_MSG_IN");
2762 		}
2763 #endif
2764 		phasemis = bus_phase != P_MESGIN;
2765 		if (phasemis) {
2766 #ifdef AHC_DEBUG
2767 			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2768 				printf(" PHASEMIS %s\n",
2769 				       ahc_lookup_phase_entry(bus_phase)
2770 							     ->phasemsg);
2771 			}
2772 #endif
2773 			ahc->msgin_index = 0;
2774 			if (bus_phase == P_MESGOUT
2775 			 && (ahc->send_msg_perror == TRUE
2776 			  || (ahc->msgout_len != 0
2777 			   && ahc->msgout_index == 0))) {
2778 				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2779 				goto reswitch;
2780 			}
2781 			end_session = TRUE;
2782 			break;
2783 		}
2784 
2785 		/* Pull the byte in without acking it */
2786 		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
2787 #ifdef AHC_DEBUG
2788 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2789 			printf(" byte 0x%x\n",
2790 			       ahc->msgin_buf[ahc->msgin_index]);
2791 #endif
2792 
2793 		message_done = ahc_parse_msg(ahc, &devinfo);
2794 
2795 		if (message_done) {
2796 			/*
2797 			 * Clear our incoming message buffer in case there
2798 			 * is another message following this one.
2799 			 */
2800 			ahc->msgin_index = 0;
2801 
2802 			/*
2803 			 * If this message illicited a response,
2804 			 * assert ATN so the target takes us to the
2805 			 * message out phase.
2806 			 */
2807 			if (ahc->msgout_len != 0) {
2808 #ifdef AHC_DEBUG
2809 				if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2810 					ahc_print_devinfo(ahc, &devinfo);
2811 					printf("Asserting ATN for response\n");
2812 				}
2813 #endif
2814 				ahc_assert_atn(ahc);
2815 			}
2816 		} else
2817 			ahc->msgin_index++;
2818 
2819 		if (message_done == MSGLOOP_TERMINATED) {
2820 			end_session = TRUE;
2821 		} else {
2822 			/* Ack the byte */
2823 			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2824 			ahc_inb(ahc, SCSIDATL);
2825 		}
2826 		break;
2827 	}
2828 	case MSG_TYPE_TARGET_MSGIN:
2829 	{
2830 		int msgdone;
2831 
2832 		if (ahc->msgout_len == 0)
2833 			panic("Target MSGIN with no active message");
2834 
2835 #ifdef AHC_DEBUG
2836 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2837 			ahc_print_devinfo(ahc, &devinfo);
2838 			printf("TARGET_MSG_IN");
2839 		}
2840 #endif
2841 
2842 		/*
2843 		 * If we interrupted a mesgout session, the initiator
2844 		 * will not know this until our first REQ.  So, we
2845 		 * only honor mesgout requests after we've sent our
2846 		 * first byte.
2847 		 */
2848 		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
2849 		 && ahc->msgout_index > 0) {
2850 
2851 			/*
2852 			 * Change gears and see if this messages is
2853 			 * of interest to us or should be passed back
2854 			 * to the sequencer.
2855 			 */
2856 #ifdef AHC_DEBUG
2857 			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2858 				printf(" Honoring ATN Request.\n");
2859 #endif
2860 			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
2861 
2862 			/*
2863 			 * Disable SCSI Programmed I/O during the
2864 			 * phase change so as to avoid phantom REQs.
2865 			 */
2866 			ahc_outb(ahc, SXFRCTL0,
2867 				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2868 
2869 			/*
2870 			 * Since SPIORDY asserts when ACK is asserted
2871 			 * for P_MSGOUT, and SPIORDY's assertion triggered
2872 			 * our entry into this routine, wait for ACK to
2873 			 * *de-assert* before changing phases.
2874 			 */
2875 			while ((ahc_inb(ahc, SCSISIGI) & ACKI) != 0)
2876 				;
2877 
2878 			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
2879 
2880 			/*
2881 			 * All phase line changes require a bus
2882 			 * settle delay before REQ is asserted.
2883 			 * [SCSI SPI4 10.7.1]
2884 			 */
2885 			ahc_flush_device_writes(ahc);
2886 			aic_delay(AHC_BUSSETTLE_DELAY);
2887 
2888 			ahc->msgin_index = 0;
2889 			/* Enable SCSI Programmed I/O to REQ for first byte */
2890 			ahc_outb(ahc, SXFRCTL0,
2891 				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2892 			break;
2893 		}
2894 
2895 		msgdone = ahc->msgout_index == ahc->msgout_len;
2896 		if (msgdone) {
2897 			ahc_outb(ahc, SXFRCTL0,
2898 				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2899 			end_session = TRUE;
2900 			break;
2901 		}
2902 
2903 		/*
2904 		 * Present the next byte on the bus.
2905 		 */
2906 #ifdef AHC_DEBUG
2907 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2908 			printf(" byte 0x%x\n",
2909 			       ahc->msgout_buf[ahc->msgout_index]);
2910 #endif
2911 		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2912 		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2913 		break;
2914 	}
2915 	case MSG_TYPE_TARGET_MSGOUT:
2916 	{
2917 		int lastbyte;
2918 		int msgdone;
2919 
2920 #ifdef AHC_DEBUG
2921 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2922 			ahc_print_devinfo(ahc, &devinfo);
2923 			printf("TARGET_MSG_OUT");
2924 		}
2925 #endif
2926 		/*
2927 		 * The initiator signals that this is
2928 		 * the last byte by dropping ATN.
2929 		 */
2930 		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
2931 
2932 		/*
2933 		 * Read the latched byte, but turn off SPIOEN first
2934 		 * so that we don't inadvertently cause a REQ for the
2935 		 * next byte.
2936 		 */
2937 		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2938 		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
2939 
2940 #ifdef AHC_DEBUG
2941 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2942 			printf(" byte 0x%x\n",
2943 			       ahc->msgin_buf[ahc->msgin_index]);
2944 #endif
2945 
2946 		msgdone = ahc_parse_msg(ahc, &devinfo);
2947 		if (msgdone == MSGLOOP_TERMINATED) {
2948 			/*
2949 			 * The message is *really* done in that it caused
2950 			 * us to go to bus free.  The sequencer has already
2951 			 * been reset at this point, so pull the ejection
2952 			 * handle.
2953 			 */
2954 			return;
2955 		}
2956 
2957 		ahc->msgin_index++;
2958 
2959 		/*
2960 		 * XXX Read spec about initiator dropping ATN too soon
2961 		 *     and use msgdone to detect it.
2962 		 */
2963 		if (msgdone == MSGLOOP_MSGCOMPLETE) {
2964 			ahc->msgin_index = 0;
2965 
2966 			/*
2967 			 * If this message illicited a response, transition
2968 			 * to the Message in phase and send it.
2969 			 */
2970 			if (ahc->msgout_len != 0) {
2971 #ifdef AHC_DEBUG
2972 				if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2973 					ahc_print_devinfo(ahc, &devinfo);
2974 					printf(" preparing response.\n");
2975 				}
2976 #endif
2977 				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
2978 
2979 				/*
2980 				 * All phase line changes require a bus
2981 				 * settle delay before REQ is asserted.
2982 				 * [SCSI SPI4 10.7.1]  When transitioning
2983 				 * from an OUT to an IN phase, we must
2984 				 * also wait a data release delay to allow
2985 				 * the initiator time to release the data
2986 				 * lines. [SCSI SPI4 10.12]
2987 				 */
2988 				ahc_flush_device_writes(ahc);
2989 				aic_delay(AHC_BUSSETTLE_DELAY
2990 					+ AHC_DATARELEASE_DELAY);
2991 
2992 				/*
2993 				 * Enable SCSI Programmed I/O.  This will
2994 				 * immediately cause SPIORDY to assert,
2995 				 * and the sequencer will call our message
2996 				 * loop again.
2997 				 */
2998 				ahc_outb(ahc, SXFRCTL0,
2999 					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
3000 				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3001 				ahc->msgin_index = 0;
3002 				break;
3003 			}
3004 		}
3005 
3006 		if (lastbyte)
3007 			end_session = TRUE;
3008 		else {
3009 			/* Ask for the next byte. */
3010 			ahc_outb(ahc, SXFRCTL0,
3011 				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
3012 		}
3013 
3014 		break;
3015 	}
3016 	default:
3017 		panic("Unknown REQINIT message type");
3018 	}
3019 
3020 	if (end_session) {
3021 		ahc_clear_msg_state(ahc);
3022 		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
3023 	} else
3024 		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
3025 }
3026 
3027 /*
3028  * See if we sent a particular extended message to the target.
3029  * If "full" is true, return true only if the target saw the full
3030  * message.  If "full" is false, return true if the target saw at
3031  * least the first byte of the message.
3032  */
3033 static int
3034 ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
3035 {
3036 	int found;
3037 	u_int index;
3038 
3039 	found = FALSE;
3040 	index = 0;
3041 
3042 	while (index < ahc->msgout_len) {
3043 		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
3044 			u_int end_index;
3045 
3046 			end_index = index + 1 + ahc->msgout_buf[index + 1];
3047 			if (ahc->msgout_buf[index+2] == msgval
3048 			 && type == AHCMSG_EXT) {
3049 
3050 				if (full) {
3051 					if (ahc->msgout_index > end_index)
3052 						found = TRUE;
3053 				} else if (ahc->msgout_index > index)
3054 					found = TRUE;
3055 			}
3056 			index = end_index;
3057 		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
3058 			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
3059 
3060 			/* Skip tag type and tag id or residue param*/
3061 			index += 2;
3062 		} else {
3063 			/* Single byte message */
3064 			if (type == AHCMSG_1B
3065 			 && ahc->msgout_buf[index] == msgval
3066 			 && ahc->msgout_index > index)
3067 				found = TRUE;
3068 			index++;
3069 		}
3070 
3071 		if (found)
3072 			break;
3073 	}
3074 	return (found);
3075 }
3076 
3077 /*
3078  * Wait for a complete incoming message, parse it, and respond accordingly.
3079  */
3080 static int
3081 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3082 {
3083 	struct	ahc_initiator_tinfo *tinfo;
3084 	struct	ahc_tmode_tstate *tstate;
3085 	int	reject;
3086 	int	done;
3087 	int	response;
3088 	u_int	targ_scsirate;
3089 
3090 	done = MSGLOOP_IN_PROG;
3091 	response = FALSE;
3092 	reject = FALSE;
3093 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
3094 				    devinfo->target, &tstate);
3095 	targ_scsirate = tinfo->scsirate;
3096 
3097 	/*
3098 	 * Parse as much of the message as is available,
3099 	 * rejecting it if we don't support it.  When
3100 	 * the entire message is available and has been
3101 	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
3102 	 * that we have parsed an entire message.
3103 	 *
3104 	 * In the case of extended messages, we accept the length
3105 	 * byte outright and perform more checking once we know the
3106 	 * extended message type.
3107 	 */
3108 	switch (ahc->msgin_buf[0]) {
3109 	case MSG_DISCONNECT:
3110 	case MSG_SAVEDATAPOINTER:
3111 	case MSG_CMDCOMPLETE:
3112 	case MSG_RESTOREPOINTERS:
3113 	case MSG_IGN_WIDE_RESIDUE:
3114 		/*
3115 		 * End our message loop as these are messages
3116 		 * the sequencer handles on its own.
3117 		 */
3118 		done = MSGLOOP_TERMINATED;
3119 		break;
3120 	case MSG_MESSAGE_REJECT:
3121 		response = ahc_handle_msg_reject(ahc, devinfo);
3122 		/* FALLTHROUGH */
3123 	case MSG_NOOP:
3124 		done = MSGLOOP_MSGCOMPLETE;
3125 		break;
3126 	case MSG_EXTENDED:
3127 	{
3128 		/* Wait for enough of the message to begin validation */
3129 		if (ahc->msgin_index < 2)
3130 			break;
3131 		switch (ahc->msgin_buf[2]) {
3132 		case MSG_EXT_SDTR:
3133 		{
3134 			struct	 ahc_syncrate *syncrate;
3135 			u_int	 period;
3136 			u_int	 ppr_options;
3137 			u_int	 offset;
3138 			u_int	 saved_offset;
3139 
3140 			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
3141 				reject = TRUE;
3142 				break;
3143 			}
3144 
3145 			/*
3146 			 * Wait until we have both args before validating
3147 			 * and acting on this message.
3148 			 *
3149 			 * Add one to MSG_EXT_SDTR_LEN to account for
3150 			 * the extended message preamble.
3151 			 */
3152 			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
3153 				break;
3154 
3155 			period = ahc->msgin_buf[3];
3156 			ppr_options = 0;
3157 			saved_offset = offset = ahc->msgin_buf[4];
3158 			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
3159 							   &ppr_options,
3160 							   devinfo->role);
3161 			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
3162 					    targ_scsirate & WIDEXFER,
3163 					    devinfo->role);
3164 			if (bootverbose) {
3165 				printf("(%s:%c:%d:%d): Received "
3166 				       "SDTR period %x, offset %x\n\t"
3167 				       "Filtered to period %x, offset %x\n",
3168 				       ahc_name(ahc), devinfo->channel,
3169 				       devinfo->target, devinfo->lun,
3170 				       ahc->msgin_buf[3], saved_offset,
3171 				       period, offset);
3172 			}
3173 			ahc_set_syncrate(ahc, devinfo,
3174 					 syncrate, period,
3175 					 offset, ppr_options,
3176 					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3177 					 /*paused*/TRUE);
3178 
3179 			/*
3180 			 * See if we initiated Sync Negotiation
3181 			 * and didn't have to fall down to async
3182 			 * transfers.
3183 			 */
3184 			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
3185 				/* We started it */
3186 				if (saved_offset != offset) {
3187 					/* Went too low - force async */
3188 					reject = TRUE;
3189 				}
3190 			} else {
3191 				/*
3192 				 * Send our own SDTR in reply
3193 				 */
3194 				if (bootverbose
3195 				 && devinfo->role == ROLE_INITIATOR) {
3196 					printf("(%s:%c:%d:%d): Target "
3197 					       "Initiated SDTR\n",
3198 					       ahc_name(ahc), devinfo->channel,
3199 					       devinfo->target, devinfo->lun);
3200 				}
3201 				ahc->msgout_index = 0;
3202 				ahc->msgout_len = 0;
3203 				ahc_construct_sdtr(ahc, devinfo,
3204 						   period, offset);
3205 				ahc->msgout_index = 0;
3206 				response = TRUE;
3207 			}
3208 			done = MSGLOOP_MSGCOMPLETE;
3209 			break;
3210 		}
3211 		case MSG_EXT_WDTR:
3212 		{
3213 			u_int bus_width;
3214 			u_int saved_width;
3215 			u_int sending_reply;
3216 
3217 			sending_reply = FALSE;
3218 			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
3219 				reject = TRUE;
3220 				break;
3221 			}
3222 
3223 			/*
3224 			 * Wait until we have our arg before validating
3225 			 * and acting on this message.
3226 			 *
3227 			 * Add one to MSG_EXT_WDTR_LEN to account for
3228 			 * the extended message preamble.
3229 			 */
3230 			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
3231 				break;
3232 
3233 			bus_width = ahc->msgin_buf[3];
3234 			saved_width = bus_width;
3235 			ahc_validate_width(ahc, tinfo, &bus_width,
3236 					   devinfo->role);
3237 			if (bootverbose) {
3238 				printf("(%s:%c:%d:%d): Received WDTR "
3239 				       "%x filtered to %x\n",
3240 				       ahc_name(ahc), devinfo->channel,
3241 				       devinfo->target, devinfo->lun,
3242 				       saved_width, bus_width);
3243 			}
3244 
3245 			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
3246 				/*
3247 				 * Don't send a WDTR back to the
3248 				 * target, since we asked first.
3249 				 * If the width went higher than our
3250 				 * request, reject it.
3251 				 */
3252 				if (saved_width > bus_width) {
3253 					reject = TRUE;
3254 					printf("(%s:%c:%d:%d): requested %dBit "
3255 					       "transfers.  Rejecting...\n",
3256 					       ahc_name(ahc), devinfo->channel,
3257 					       devinfo->target, devinfo->lun,
3258 					       8 * (0x01 << bus_width));
3259 					bus_width = 0;
3260 				}
3261 			} else {
3262 				/*
3263 				 * Send our own WDTR in reply
3264 				 */
3265 				if (bootverbose
3266 				 && devinfo->role == ROLE_INITIATOR) {
3267 					printf("(%s:%c:%d:%d): Target "
3268 					       "Initiated WDTR\n",
3269 					       ahc_name(ahc), devinfo->channel,
3270 					       devinfo->target, devinfo->lun);
3271 				}
3272 				ahc->msgout_index = 0;
3273 				ahc->msgout_len = 0;
3274 				ahc_construct_wdtr(ahc, devinfo, bus_width);
3275 				ahc->msgout_index = 0;
3276 				response = TRUE;
3277 				sending_reply = TRUE;
3278 			}
3279 			/*
3280 			 * After a wide message, we are async, but
3281 			 * some devices don't seem to honor this portion
3282 			 * of the spec.  Force a renegotiation of the
3283 			 * sync component of our transfer agreement even
3284 			 * if our goal is async.  By updating our width
3285 			 * after forcing the negotiation, we avoid
3286 			 * renegotiating for width.
3287 			 */
3288 			ahc_update_neg_request(ahc, devinfo, tstate,
3289 					       tinfo, AHC_NEG_ALWAYS);
3290 			ahc_set_width(ahc, devinfo, bus_width,
3291 				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3292 				      /*paused*/TRUE);
3293 			if (sending_reply == FALSE && reject == FALSE) {
3294 
3295 				/*
3296 				 * We will always have an SDTR to send.
3297 				 */
3298 				ahc->msgout_index = 0;
3299 				ahc->msgout_len = 0;
3300 				ahc_build_transfer_msg(ahc, devinfo);
3301 				ahc->msgout_index = 0;
3302 				response = TRUE;
3303 			}
3304 			done = MSGLOOP_MSGCOMPLETE;
3305 			break;
3306 		}
3307 		case MSG_EXT_PPR:
3308 		{
3309 			struct	ahc_syncrate *syncrate;
3310 			u_int	period;
3311 			u_int	offset;
3312 			u_int	bus_width;
3313 			u_int	ppr_options;
3314 			u_int	saved_width;
3315 			u_int	saved_offset;
3316 			u_int	saved_ppr_options;
3317 
3318 			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
3319 				reject = TRUE;
3320 				break;
3321 			}
3322 
3323 			/*
3324 			 * Wait until we have all args before validating
3325 			 * and acting on this message.
3326 			 *
3327 			 * Add one to MSG_EXT_PPR_LEN to account for
3328 			 * the extended message preamble.
3329 			 */
3330 			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
3331 				break;
3332 
3333 			period = ahc->msgin_buf[3];
3334 			offset = ahc->msgin_buf[5];
3335 			bus_width = ahc->msgin_buf[6];
3336 			saved_width = bus_width;
3337 			ppr_options = ahc->msgin_buf[7];
3338 			/*
3339 			 * According to the spec, a DT only
3340 			 * period factor with no DT option
3341 			 * set implies async.
3342 			 */
3343 			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
3344 			 && period == 9)
3345 				offset = 0;
3346 			saved_ppr_options = ppr_options;
3347 			saved_offset = offset;
3348 
3349 			/*
3350 			 * Mask out any options we don't support
3351 			 * on any controller.  Transfer options are
3352 			 * only available if we are negotiating wide.
3353 			 */
3354 			ppr_options &= MSG_EXT_PPR_DT_REQ;
3355 			if (bus_width == 0)
3356 				ppr_options = 0;
3357 
3358 			ahc_validate_width(ahc, tinfo, &bus_width,
3359 					   devinfo->role);
3360 			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
3361 							   &ppr_options,
3362 							   devinfo->role);
3363 			ahc_validate_offset(ahc, tinfo, syncrate,
3364 					    &offset, bus_width,
3365 					    devinfo->role);
3366 
3367 			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
3368 				/*
3369 				 * If we are unable to do any of the
3370 				 * requested options (we went too low),
3371 				 * then we'll have to reject the message.
3372 				 */
3373 				if (saved_width > bus_width
3374 				 || saved_offset != offset
3375 				 || saved_ppr_options != ppr_options) {
3376 					reject = TRUE;
3377 					period = 0;
3378 					offset = 0;
3379 					bus_width = 0;
3380 					ppr_options = 0;
3381 					syncrate = NULL;
3382 				}
3383 			} else {
3384 				if (devinfo->role != ROLE_TARGET)
3385 					printf("(%s:%c:%d:%d): Target "
3386 					       "Initiated PPR\n",
3387 					       ahc_name(ahc), devinfo->channel,
3388 					       devinfo->target, devinfo->lun);
3389 				else
3390 					printf("(%s:%c:%d:%d): Initiator "
3391 					       "Initiated PPR\n",
3392 					       ahc_name(ahc), devinfo->channel,
3393 					       devinfo->target, devinfo->lun);
3394 				ahc->msgout_index = 0;
3395 				ahc->msgout_len = 0;
3396 				ahc_construct_ppr(ahc, devinfo, period, offset,
3397 						  bus_width, ppr_options);
3398 				ahc->msgout_index = 0;
3399 				response = TRUE;
3400 			}
3401 			if (bootverbose) {
3402 				printf("(%s:%c:%d:%d): Received PPR width %x, "
3403 				       "period %x, offset %x,options %x\n"
3404 				       "\tFiltered to width %x, period %x, "
3405 				       "offset %x, options %x\n",
3406 				       ahc_name(ahc), devinfo->channel,
3407 				       devinfo->target, devinfo->lun,
3408 				       saved_width, ahc->msgin_buf[3],
3409 				       saved_offset, saved_ppr_options,
3410 				       bus_width, period, offset, ppr_options);
3411 			}
3412 			ahc_set_width(ahc, devinfo, bus_width,
3413 				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3414 				      /*paused*/TRUE);
3415 			ahc_set_syncrate(ahc, devinfo,
3416 					 syncrate, period,
3417 					 offset, ppr_options,
3418 					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3419 					 /*paused*/TRUE);
3420 			done = MSGLOOP_MSGCOMPLETE;
3421 			break;
3422 		}
3423 		default:
3424 			/* Unknown extended message.  Reject it. */
3425 			reject = TRUE;
3426 			break;
3427 		}
3428 		break;
3429 	}
3430 #ifdef AHC_TARGET_MODE
3431 	case MSG_BUS_DEV_RESET:
3432 		ahc_handle_devreset(ahc, devinfo,
3433 				    CAM_BDR_SENT,
3434 				    "Bus Device Reset Received",
3435 				    /*verbose_level*/0);
3436 		ahc_restart(ahc);
3437 		done = MSGLOOP_TERMINATED;
3438 		break;
3439 	case MSG_ABORT_TAG:
3440 	case MSG_ABORT:
3441 	case MSG_CLEAR_QUEUE:
3442 	{
3443 		int tag;
3444 
3445 		/* Target mode messages */
3446 		if (devinfo->role != ROLE_TARGET) {
3447 			reject = TRUE;
3448 			break;
3449 		}
3450 		tag = SCB_LIST_NULL;
3451 		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
3452 			tag = ahc_inb(ahc, INITIATOR_TAG);
3453 		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3454 			       devinfo->lun, tag, ROLE_TARGET,
3455 			       CAM_REQ_ABORTED);
3456 
3457 		tstate = ahc->enabled_targets[devinfo->our_scsiid];
3458 		if (tstate != NULL) {
3459 			struct ahc_tmode_lstate* lstate;
3460 
3461 			lstate = tstate->enabled_luns[devinfo->lun];
3462 			if (lstate != NULL) {
3463 				ahc_queue_lstate_event(ahc, lstate,
3464 						       devinfo->our_scsiid,
3465 						       ahc->msgin_buf[0],
3466 						       /*arg*/tag);
3467 				ahc_send_lstate_events(ahc, lstate);
3468 			}
3469 		}
3470 		ahc_restart(ahc);
3471 		done = MSGLOOP_TERMINATED;
3472 		break;
3473 	}
3474 #endif
3475 	case MSG_TERM_IO_PROC:
3476 	default:
3477 		reject = TRUE;
3478 		break;
3479 	}
3480 
3481 	if (reject) {
3482 		/*
3483 		 * Setup to reject the message.
3484 		 */
3485 		ahc->msgout_index = 0;
3486 		ahc->msgout_len = 1;
3487 		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
3488 		done = MSGLOOP_MSGCOMPLETE;
3489 		response = TRUE;
3490 	}
3491 
3492 	if (done != MSGLOOP_IN_PROG && !response)
3493 		/* Clear the outgoing message buffer */
3494 		ahc->msgout_len = 0;
3495 
3496 	return (done);
3497 }
3498 
3499 /*
3500  * Process a message reject message.
3501  */
3502 static int
3503 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3504 {
3505 	/*
3506 	 * What we care about here is if we had an
3507 	 * outstanding SDTR or WDTR message for this
3508 	 * target.  If we did, this is a signal that
3509 	 * the target is refusing negotiation.
3510 	 */
3511 	struct scb *scb;
3512 	struct ahc_initiator_tinfo *tinfo;
3513 	struct ahc_tmode_tstate *tstate;
3514 	u_int scb_index;
3515 	u_int last_msg;
3516 	int   response = 0;
3517 
3518 	scb_index = ahc_inb(ahc, SCB_TAG);
3519 	scb = ahc_lookup_scb(ahc, scb_index);
3520 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
3521 				    devinfo->our_scsiid,
3522 				    devinfo->target, &tstate);
3523 	/* Might be necessary */
3524 	last_msg = ahc_inb(ahc, LAST_MSG);
3525 
3526 	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
3527 		/*
3528 		 * Target does not support the PPR message.
3529 		 * Attempt to negotiate SPI-2 style.
3530 		 */
3531 		if (bootverbose) {
3532 			printf("(%s:%c:%d:%d): PPR Rejected. "
3533 			       "Trying WDTR/SDTR\n",
3534 			       ahc_name(ahc), devinfo->channel,
3535 			       devinfo->target, devinfo->lun);
3536 		}
3537 		tinfo->goal.ppr_options = 0;
3538 		tinfo->curr.transport_version = 2;
3539 		tinfo->goal.transport_version = 2;
3540 		ahc->msgout_index = 0;
3541 		ahc->msgout_len = 0;
3542 		ahc_build_transfer_msg(ahc, devinfo);
3543 		ahc->msgout_index = 0;
3544 		response = 1;
3545 	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
3546 
3547 		/* note 8bit xfers */
3548 		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
3549 		       "8bit transfers\n", ahc_name(ahc),
3550 		       devinfo->channel, devinfo->target, devinfo->lun);
3551 		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3552 			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3553 			      /*paused*/TRUE);
3554 		/*
3555 		 * No need to clear the sync rate.  If the target
3556 		 * did not accept the command, our syncrate is
3557 		 * unaffected.  If the target started the negotiation,
3558 		 * but rejected our response, we already cleared the
3559 		 * sync rate before sending our WDTR.
3560 		 */
3561 		if (tinfo->goal.offset != tinfo->curr.offset) {
3562 
3563 			/* Start the sync negotiation */
3564 			ahc->msgout_index = 0;
3565 			ahc->msgout_len = 0;
3566 			ahc_build_transfer_msg(ahc, devinfo);
3567 			ahc->msgout_index = 0;
3568 			response = 1;
3569 		}
3570 	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
3571 		/* note asynch xfers and clear flag */
3572 		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
3573 				 /*offset*/0, /*ppr_options*/0,
3574 				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3575 				 /*paused*/TRUE);
3576 		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
3577 		       "Using asynchronous transfers\n",
3578 		       ahc_name(ahc), devinfo->channel,
3579 		       devinfo->target, devinfo->lun);
3580 	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
3581 		int tag_type;
3582 		int mask;
3583 
3584 		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
3585 
3586 		if (tag_type == MSG_SIMPLE_TASK) {
3587 			printf("(%s:%c:%d:%d): refuses tagged commands.  "
3588 			       "Performing non-tagged I/O\n", ahc_name(ahc),
3589 			       devinfo->channel, devinfo->target, devinfo->lun);
3590 			ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE);
3591 			mask = ~0x23;
3592 		} else {
3593 			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
3594 			       "Performing simple queue tagged I/O only\n",
3595 			       ahc_name(ahc), devinfo->channel, devinfo->target,
3596 			       devinfo->lun, tag_type == MSG_ORDERED_TASK
3597 			       ? "ordered" : "head of queue");
3598 			ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC);
3599 			mask = ~0x03;
3600 		}
3601 
3602 		/*
3603 		 * Resend the identify for this CCB as the target
3604 		 * may believe that the selection is invalid otherwise.
3605 		 */
3606 		ahc_outb(ahc, SCB_CONTROL,
3607 			 ahc_inb(ahc, SCB_CONTROL) & mask);
3608 	 	scb->hscb->control &= mask;
3609 		aic_set_transaction_tag(scb, /*enabled*/FALSE,
3610 					/*type*/MSG_SIMPLE_TASK);
3611 		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3612 		ahc_assert_atn(ahc);
3613 
3614 		/*
3615 		 * This transaction is now at the head of
3616 		 * the untagged queue for this target.
3617 		 */
3618 		if ((ahc->flags & AHC_SCB_BTT) == 0) {
3619 			struct scb_tailq *untagged_q;
3620 
3621 			untagged_q =
3622 			    &(ahc->untagged_queues[devinfo->target_offset]);
3623 			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
3624 			scb->flags |= SCB_UNTAGGEDQ;
3625 		}
3626 		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
3627 			     scb->hscb->tag);
3628 
3629 		/*
3630 		 * Requeue all tagged commands for this target
3631 		 * currently in our posession so they can be
3632 		 * converted to untagged commands.
3633 		 */
3634 		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3635 				   SCB_GET_CHANNEL(ahc, scb),
3636 				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3637 				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
3638 				   SEARCH_COMPLETE);
3639 	} else {
3640 		/*
3641 		 * Otherwise, we ignore it.
3642 		 */
3643 		printf("%s:%c:%d: Message reject for %x -- ignored\n",
3644 		       ahc_name(ahc), devinfo->channel, devinfo->target,
3645 		       last_msg);
3646 	}
3647 	return (response);
3648 }
3649 
3650 /*
3651  * Process an ingnore wide residue message.
3652  */
3653 static void
3654 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3655 {
3656 	u_int scb_index;
3657 	struct scb *scb;
3658 
3659 	scb_index = ahc_inb(ahc, SCB_TAG);
3660 	scb = ahc_lookup_scb(ahc, scb_index);
3661 	/*
3662 	 * XXX Actually check data direction in the sequencer?
3663 	 * Perhaps add datadir to some spare bits in the hscb?
3664 	 */
3665 	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
3666 	 || aic_get_transfer_dir(scb) != CAM_DIR_IN) {
3667 		/*
3668 		 * Ignore the message if we haven't
3669 		 * seen an appropriate data phase yet.
3670 		 */
3671 	} else {
3672 		/*
3673 		 * If the residual occurred on the last
3674 		 * transfer and the transfer request was
3675 		 * expected to end on an odd count, do
3676 		 * nothing.  Otherwise, subtract a byte
3677 		 * and update the residual count accordingly.
3678 		 */
3679 		uint32_t sgptr;
3680 
3681 		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3682 		if ((sgptr & SG_LIST_NULL) != 0
3683 		 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) {
3684 			/*
3685 			 * If the residual occurred on the last
3686 			 * transfer and the transfer request was
3687 			 * expected to end on an odd count, do
3688 			 * nothing.
3689 			 */
3690 		} else {
3691 			struct ahc_dma_seg *sg;
3692 			uint32_t data_cnt;
3693 			uint32_t data_addr;
3694 			uint32_t sglen;
3695 
3696 			/* Pull in all of the sgptr */
3697 			sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR);
3698 			data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT);
3699 
3700 			if ((sgptr & SG_LIST_NULL) != 0) {
3701 				/*
3702 				 * The residual data count is not updated
3703 				 * for the command run to completion case.
3704 				 * Explicitly zero the count.
3705 				 */
3706 				data_cnt &= ~AHC_SG_LEN_MASK;
3707 			}
3708 
3709 			data_addr = ahc_inl(ahc, SHADDR);
3710 
3711 			data_cnt += 1;
3712 			data_addr -= 1;
3713 			sgptr &= SG_PTR_MASK;
3714 
3715 			sg = ahc_sg_bus_to_virt(scb, sgptr);
3716 
3717 			/*
3718 			 * The residual sg ptr points to the next S/G
3719 			 * to load so we must go back one.
3720 			 */
3721 			sg--;
3722 			sglen = aic_le32toh(sg->len) & AHC_SG_LEN_MASK;
3723 			if (sg != scb->sg_list
3724 			 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
3725 
3726 				sg--;
3727 				sglen = aic_le32toh(sg->len);
3728 				/*
3729 				 * Preserve High Address and SG_LIST bits
3730 				 * while setting the count to 1.
3731 				 */
3732 				data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
3733 				data_addr = aic_le32toh(sg->addr)
3734 					  + (sglen & AHC_SG_LEN_MASK) - 1;
3735 
3736 				/*
3737 				 * Increment sg so it points to the
3738 				 * "next" sg.
3739 				 */
3740 				sg++;
3741 				sgptr = ahc_sg_virt_to_bus(scb, sg);
3742 			}
3743 			ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr);
3744 			ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
3745 			/*
3746 			 * Toggle the "oddness" of the transfer length
3747 			 * to handle this mid-transfer ignore wide
3748 			 * residue.  This ensures that the oddness is
3749 			 * correct for subsequent data transfers.
3750 			 */
3751 			ahc_outb(ahc, SCB_LUN,
3752 				 ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD);
3753 		}
3754 	}
3755 }
3756 
3757 
3758 /*
3759  * Reinitialize the data pointers for the active transfer
3760  * based on its current residual.
3761  */
3762 static void
3763 ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
3764 {
3765 	struct	 scb *scb;
3766 	struct	 ahc_dma_seg *sg;
3767 	u_int	 scb_index;
3768 	uint32_t sgptr;
3769 	uint32_t resid;
3770 	uint32_t dataptr;
3771 
3772 	scb_index = ahc_inb(ahc, SCB_TAG);
3773 	scb = ahc_lookup_scb(ahc, scb_index);
3774 	sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3775 	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3776 	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
3777 	      |	ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3778 
3779 	sgptr &= SG_PTR_MASK;
3780 	sg = ahc_sg_bus_to_virt(scb, sgptr);
3781 
3782 	/* The residual sg_ptr always points to the next sg */
3783 	sg--;
3784 
3785 	resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
3786 	      | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
3787 	      | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);
3788 
3789 	dataptr = aic_le32toh(sg->addr)
3790 		+ (aic_le32toh(sg->len) & AHC_SG_LEN_MASK)
3791 		- resid;
3792 	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
3793 		u_int dscommand1;
3794 
3795 		dscommand1 = ahc_inb(ahc, DSCOMMAND1);
3796 		ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
3797 		ahc_outb(ahc, HADDR,
3798 			 (aic_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
3799 		ahc_outb(ahc, DSCOMMAND1, dscommand1);
3800 	}
3801 	ahc_outb(ahc, HADDR + 3, dataptr >> 24);
3802 	ahc_outb(ahc, HADDR + 2, dataptr >> 16);
3803 	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
3804 	ahc_outb(ahc, HADDR, dataptr);
3805 	ahc_outb(ahc, HCNT + 2, resid >> 16);
3806 	ahc_outb(ahc, HCNT + 1, resid >> 8);
3807 	ahc_outb(ahc, HCNT, resid);
3808 	if ((ahc->features & AHC_ULTRA2) == 0) {
3809 		ahc_outb(ahc, STCNT + 2, resid >> 16);
3810 		ahc_outb(ahc, STCNT + 1, resid >> 8);
3811 		ahc_outb(ahc, STCNT, resid);
3812 	}
3813 }
3814 
3815 /*
3816  * Handle the effects of issuing a bus device reset message.
3817  */
3818 static void
3819 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3820 		    cam_status status, char *message, int verbose_level)
3821 {
3822 #ifdef AHC_TARGET_MODE
3823 	struct ahc_tmode_tstate* tstate;
3824 	u_int lun;
3825 #endif
3826 	int found;
3827 
3828 	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3829 			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
3830 			       status);
3831 
3832 #ifdef AHC_TARGET_MODE
3833 	/*
3834 	 * Send an immediate notify ccb to all target mord peripheral
3835 	 * drivers affected by this action.
3836 	 */
3837 	tstate = ahc->enabled_targets[devinfo->our_scsiid];
3838 	if (tstate != NULL) {
3839 		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
3840 			struct ahc_tmode_lstate* lstate;
3841 
3842 			lstate = tstate->enabled_luns[lun];
3843 			if (lstate == NULL)
3844 				continue;
3845 
3846 			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
3847 					       MSG_BUS_DEV_RESET, /*arg*/0);
3848 			ahc_send_lstate_events(ahc, lstate);
3849 		}
3850 	}
3851 #endif
3852 
3853 	/*
3854 	 * Go back to async/narrow transfers and renegotiate.
3855 	 */
3856 	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3857 		      AHC_TRANS_CUR, /*paused*/TRUE);
3858 	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
3859 			 /*period*/0, /*offset*/0, /*ppr_options*/0,
3860 			 AHC_TRANS_CUR, /*paused*/TRUE);
3861 
3862 	if (status != CAM_SEL_TIMEOUT)
3863 		ahc_send_async(ahc, devinfo->channel, devinfo->target,
3864 			       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
3865 
3866 	if (message != NULL
3867 	 && (verbose_level <= bootverbose))
3868 		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
3869 		       message, devinfo->channel, devinfo->target, found);
3870 }
3871 
3872 #ifdef AHC_TARGET_MODE
3873 static void
3874 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3875 		       struct scb *scb)
3876 {
3877 
3878 	/*
3879 	 * To facilitate adding multiple messages together,
3880 	 * each routine should increment the index and len
3881 	 * variables instead of setting them explicitly.
3882 	 */
3883 	ahc->msgout_index = 0;
3884 	ahc->msgout_len = 0;
3885 
3886 	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
3887 		ahc_build_transfer_msg(ahc, devinfo);
3888 	else
3889 		panic("ahc_intr: AWAITING target message with no message");
3890 
3891 	ahc->msgout_index = 0;
3892 	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3893 }
3894 #endif
3895 /**************************** Initialization **********************************/
3896 /*
3897  * Allocate a controller structure for a new device
3898  * and perform initial initializion.
3899  */
3900 struct ahc_softc *
3901 ahc_alloc(void *platform_arg, char *name)
3902 {
3903 	struct  ahc_softc *ahc;
3904 	int	i;
3905 
3906 #ifndef	__FreeBSD__
3907 	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
3908 	if (!ahc) {
3909 		printf("aic7xxx: cannot malloc softc!\n");
3910 		free(name, M_DEVBUF);
3911 		return NULL;
3912 	}
3913 #else
3914 	ahc = device_get_softc((device_t)platform_arg);
3915 #endif
3916 	memset(ahc, 0, sizeof(*ahc));
3917 	ahc->seep_config = malloc(sizeof(*ahc->seep_config),
3918 				  M_DEVBUF, M_NOWAIT);
3919 	if (ahc->seep_config == NULL) {
3920 #ifndef	__FreeBSD__
3921 		free(ahc, M_DEVBUF);
3922 #endif
3923 		free(name, M_DEVBUF);
3924 		return (NULL);
3925 	}
3926 	LIST_INIT(&ahc->pending_scbs);
3927 	LIST_INIT(&ahc->timedout_scbs);
3928 	/* We don't know our unit number until the OSM sets it */
3929 	ahc->name = name;
3930 	ahc->unit = -1;
3931 	ahc->description = NULL;
3932 	ahc->channel = 'A';
3933 	ahc->channel_b = 'B';
3934 	ahc->chip = AHC_NONE;
3935 	ahc->features = AHC_FENONE;
3936 	ahc->bugs = AHC_BUGNONE;
3937 	ahc->flags = AHC_FNONE;
3938 	/*
3939 	 * Default to all error reporting enabled with the
3940 	 * sequencer operating at its fastest speed.
3941 	 * The bus attach code may modify this.
3942 	 */
3943 	ahc->seqctl = FASTMODE;
3944 
3945 	for (i = 0; i < AHC_NUM_TARGETS; i++)
3946 		TAILQ_INIT(&ahc->untagged_queues[i]);
3947 	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
3948 		ahc_free(ahc);
3949 		ahc = NULL;
3950 	}
3951 	return (ahc);
3952 }
3953 
3954 int
3955 ahc_softc_init(struct ahc_softc *ahc)
3956 {
3957 
3958 	/* The IRQMS bit is only valid on VL and EISA chips */
3959 	if ((ahc->chip & AHC_PCI) == 0)
3960 		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
3961 	else
3962 		ahc->unpause = 0;
3963 	ahc->pause = ahc->unpause | PAUSE;
3964 	/* XXX The shared scb data stuff should be deprecated */
3965 	if (ahc->scb_data == NULL) {
3966 		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
3967 				       M_DEVBUF, M_NOWAIT);
3968 		if (ahc->scb_data == NULL)
3969 			return (ENOMEM);
3970 		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
3971 	}
3972 
3973 	return (0);
3974 }
3975 
3976 void
3977 ahc_softc_insert(struct ahc_softc *ahc)
3978 {
3979 	struct ahc_softc *list_ahc;
3980 
3981 #if AIC_PCI_CONFIG > 0
3982 	/*
3983 	 * Second Function PCI devices need to inherit some
3984 	 * settings from function 0.
3985 	 */
3986 	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
3987 	 && (ahc->features & AHC_MULTI_FUNC) != 0) {
3988 		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
3989 			aic_dev_softc_t list_pci;
3990 			aic_dev_softc_t pci;
3991 
3992 			list_pci = list_ahc->dev_softc;
3993 			pci = ahc->dev_softc;
3994 			if (aic_get_pci_slot(list_pci) == aic_get_pci_slot(pci)
3995 			 && aic_get_pci_bus(list_pci) == aic_get_pci_bus(pci)) {
3996 				struct ahc_softc *master;
3997 				struct ahc_softc *slave;
3998 
3999 				if (aic_get_pci_function(list_pci) == 0) {
4000 					master = list_ahc;
4001 					slave = ahc;
4002 				} else {
4003 					master = ahc;
4004 					slave = list_ahc;
4005 				}
4006 				slave->flags &= ~AHC_BIOS_ENABLED;
4007 				slave->flags |=
4008 				    master->flags & AHC_BIOS_ENABLED;
4009 				slave->flags &= ~AHC_PRIMARY_CHANNEL;
4010 				slave->flags |=
4011 				    master->flags & AHC_PRIMARY_CHANNEL;
4012 				break;
4013 			}
4014 		}
4015 	}
4016 #endif
4017 
4018 	/*
4019 	 * Insertion sort into our list of softcs.
4020 	 */
4021 	list_ahc = TAILQ_FIRST(&ahc_tailq);
4022 	while (list_ahc != NULL
4023 	    && ahc_softc_comp(ahc, list_ahc) <= 0)
4024 		list_ahc = TAILQ_NEXT(list_ahc, links);
4025 	if (list_ahc != NULL)
4026 		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
4027 	else
4028 		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
4029 	ahc->init_level++;
4030 }
4031 
4032 /*
4033  * Verify that the passed in softc pointer is for a
4034  * controller that is still configured.
4035  */
4036 struct ahc_softc *
4037 ahc_find_softc(struct ahc_softc *ahc)
4038 {
4039 	struct ahc_softc *list_ahc;
4040 
4041 	TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
4042 		if (list_ahc == ahc)
4043 			return (ahc);
4044 	}
4045 	return (NULL);
4046 }
4047 
4048 void
4049 ahc_set_unit(struct ahc_softc *ahc, int unit)
4050 {
4051 	ahc->unit = unit;
4052 }
4053 
4054 void
4055 ahc_set_name(struct ahc_softc *ahc, char *name)
4056 {
4057 	if (ahc->name != NULL)
4058 		free(ahc->name, M_DEVBUF);
4059 	ahc->name = name;
4060 }
4061 
4062 void
4063 ahc_free(struct ahc_softc *ahc)
4064 {
4065 	int i;
4066 
4067 	ahc_terminate_recovery_thread(ahc);
4068 	switch (ahc->init_level) {
4069 	default:
4070 	case 5:
4071 		ahc_shutdown(ahc);
4072 		/* FALLTHROUGH */
4073 	case 4:
4074 		aic_dmamap_unload(ahc, ahc->shared_data_dmat,
4075 				  ahc->shared_data_dmamap);
4076 		/* FALLTHROUGH */
4077 	case 3:
4078 		aic_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
4079 				ahc->shared_data_dmamap);
4080 		aic_dmamap_destroy(ahc, ahc->shared_data_dmat,
4081 				   ahc->shared_data_dmamap);
4082 		/* FALLTHROUGH */
4083 	case 2:
4084 		aic_dma_tag_destroy(ahc, ahc->shared_data_dmat);
4085 	case 1:
4086 #ifndef __linux__
4087 		aic_dma_tag_destroy(ahc, ahc->buffer_dmat);
4088 #endif
4089 		break;
4090 	case 0:
4091 		break;
4092 	}
4093 
4094 #ifndef __linux__
4095 	aic_dma_tag_destroy(ahc, ahc->parent_dmat);
4096 #endif
4097 	ahc_platform_free(ahc);
4098 	ahc_fini_scbdata(ahc);
4099 	for (i = 0; i < AHC_NUM_TARGETS; i++) {
4100 		struct ahc_tmode_tstate *tstate;
4101 
4102 		tstate = ahc->enabled_targets[i];
4103 		if (tstate != NULL) {
4104 #ifdef AHC_TARGET_MODE
4105 			int j;
4106 
4107 			for (j = 0; j < AHC_NUM_LUNS; j++) {
4108 				struct ahc_tmode_lstate *lstate;
4109 
4110 				lstate = tstate->enabled_luns[j];
4111 				if (lstate != NULL) {
4112 					xpt_free_path(lstate->path);
4113 					free(lstate, M_DEVBUF);
4114 				}
4115 			}
4116 #endif
4117 			free(tstate, M_DEVBUF);
4118 		}
4119 	}
4120 #ifdef AHC_TARGET_MODE
4121 	if (ahc->black_hole != NULL) {
4122 		xpt_free_path(ahc->black_hole->path);
4123 		free(ahc->black_hole, M_DEVBUF);
4124 	}
4125 #endif
4126 	if (ahc->name != NULL)
4127 		free(ahc->name, M_DEVBUF);
4128 	if (ahc->seep_config != NULL)
4129 		free(ahc->seep_config, M_DEVBUF);
4130 #ifndef __FreeBSD__
4131 	free(ahc, M_DEVBUF);
4132 #endif
4133 	return;
4134 }
4135 
4136 void
4137 ahc_shutdown(void *arg)
4138 {
4139 	struct	ahc_softc *ahc;
4140 	int	i;
4141 
4142 	ahc = (struct ahc_softc *)arg;
4143 
4144 	/* This will reset most registers to 0, but not all */
4145 	ahc_reset(ahc, /*reinit*/FALSE);
4146 	ahc_outb(ahc, SCSISEQ, 0);
4147 	ahc_outb(ahc, SXFRCTL0, 0);
4148 	ahc_outb(ahc, DSPCISTATUS, 0);
4149 
4150 	for (i = TARG_SCSIRATE; i < SCSICONF; i++)
4151 		ahc_outb(ahc, i, 0);
4152 }
4153 
4154 /*
4155  * Reset the controller and record some information about it
4156  * that is only available just after a reset.  If "reinit" is
4157  * non-zero, this reset occured after initial configuration
4158  * and the caller requests that the chip be fully reinitialized
4159  * to a runable state.  Chip interrupts are *not* enabled after
4160  * a reinitialization.  The caller must enable interrupts via
4161  * ahc_intr_enable().
4162  */
4163 int
4164 ahc_reset(struct ahc_softc *ahc, int reinit)
4165 {
4166 	u_int	sblkctl;
4167 	u_int	sxfrctl1_a, sxfrctl1_b;
4168 	int	error;
4169 	int	wait;
4170 
4171 	/*
4172 	 * Preserve the value of the SXFRCTL1 register for all channels.
4173 	 * It contains settings that affect termination and we don't want
4174 	 * to disturb the integrity of the bus.
4175 	 */
4176 	ahc_pause(ahc);
4177 	sxfrctl1_b = 0;
4178 	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
4179 		u_int sblkctl;
4180 
4181 		/*
4182 		 * Save channel B's settings in case this chip
4183 		 * is setup for TWIN channel operation.
4184 		 */
4185 		sblkctl = ahc_inb(ahc, SBLKCTL);
4186 		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
4187 		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
4188 		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
4189 	}
4190 	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
4191 
4192 	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
4193 
4194 	/*
4195 	 * Ensure that the reset has finished.  We delay 1000us
4196 	 * prior to reading the register to make sure the chip
4197 	 * has sufficiently completed its reset to handle register
4198 	 * accesses.
4199 	 */
4200 	wait = 1000;
4201 	do {
4202 		aic_delay(1000);
4203 	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
4204 
4205 	if (wait == 0) {
4206 		printf("%s: WARNING - Failed chip reset!  "
4207 		       "Trying to initialize anyway.\n", ahc_name(ahc));
4208 	}
4209 	ahc_outb(ahc, HCNTRL, ahc->pause);
4210 
4211 	/* Determine channel configuration */
4212 	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
4213 	/* No Twin Channel PCI cards */
4214 	if ((ahc->chip & AHC_PCI) != 0)
4215 		sblkctl &= ~SELBUSB;
4216 	switch (sblkctl) {
4217 	case 0:
4218 		/* Single Narrow Channel */
4219 		break;
4220 	case 2:
4221 		/* Wide Channel */
4222 		ahc->features |= AHC_WIDE;
4223 		break;
4224 	case 8:
4225 		/* Twin Channel */
4226 		ahc->features |= AHC_TWIN;
4227 		break;
4228 	default:
4229 		printf(" Unsupported adapter type.  Ignoring\n");
4230 		return(-1);
4231 	}
4232 
4233 	/*
4234 	 * Reload sxfrctl1.
4235 	 *
4236 	 * We must always initialize STPWEN to 1 before we
4237 	 * restore the saved values.  STPWEN is initialized
4238 	 * to a tri-state condition which can only be cleared
4239 	 * by turning it on.
4240 	 */
4241 	if ((ahc->features & AHC_TWIN) != 0) {
4242 		u_int sblkctl;
4243 
4244 		sblkctl = ahc_inb(ahc, SBLKCTL);
4245 		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
4246 		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
4247 		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
4248 	}
4249 	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
4250 
4251 	error = 0;
4252 	if (reinit != 0)
4253 		/*
4254 		 * If a recovery action has forced a chip reset,
4255 		 * re-initialize the chip to our liking.
4256 		 */
4257 		error = ahc->bus_chip_init(ahc);
4258 #ifdef AHC_DUMP_SEQ
4259 	else
4260 		ahc_dumpseq(ahc);
4261 #endif
4262 
4263 	return (error);
4264 }
4265 
4266 /*
4267  * Determine the number of SCBs available on the controller
4268  */
4269 int
4270 ahc_probe_scbs(struct ahc_softc *ahc) {
4271 	int i;
4272 
4273 	for (i = 0; i < AHC_SCB_MAX; i++) {
4274 
4275 		ahc_outb(ahc, SCBPTR, i);
4276 		ahc_outb(ahc, SCB_BASE, i);
4277 		if (ahc_inb(ahc, SCB_BASE) != i)
4278 			break;
4279 		ahc_outb(ahc, SCBPTR, 0);
4280 		if (ahc_inb(ahc, SCB_BASE) != 0)
4281 			break;
4282 	}
4283 	return (i);
4284 }
4285 
4286 static void
4287 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
4288 {
4289 	bus_addr_t *baddr;
4290 
4291 	baddr = (bus_addr_t *)arg;
4292 	*baddr = segs->ds_addr;
4293 }
4294 
4295 static void
4296 ahc_build_free_scb_list(struct ahc_softc *ahc)
4297 {
4298 	int scbsize;
4299 	int i;
4300 
4301 	scbsize = 32;
4302 	if ((ahc->flags & AHC_LSCBS_ENABLED) != 0)
4303 		scbsize = 64;
4304 
4305 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
4306 		int j;
4307 
4308 		ahc_outb(ahc, SCBPTR, i);
4309 
4310 		/*
4311 		 * Touch all SCB bytes to avoid parity errors
4312 		 * should one of our debugging routines read
4313 		 * an otherwise uninitiatlized byte.
4314 		 */
4315 		for (j = 0; j < scbsize; j++)
4316 			ahc_outb(ahc, SCB_BASE+j, 0xFF);
4317 
4318 		/* Clear the control byte. */
4319 		ahc_outb(ahc, SCB_CONTROL, 0);
4320 
4321 		/* Set the next pointer */
4322 		if ((ahc->flags & AHC_PAGESCBS) != 0)
4323 			ahc_outb(ahc, SCB_NEXT, i+1);
4324 		else
4325 			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
4326 
4327 		/* Make the tag number, SCSIID, and lun invalid */
4328 		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
4329 		ahc_outb(ahc, SCB_SCSIID, 0xFF);
4330 		ahc_outb(ahc, SCB_LUN, 0xFF);
4331 	}
4332 
4333 	if ((ahc->flags & AHC_PAGESCBS) != 0) {
4334 		/* SCB 0 heads the free list. */
4335 		ahc_outb(ahc, FREE_SCBH, 0);
4336 	} else {
4337 		/* No free list. */
4338 		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
4339 	}
4340 
4341 	/* Make sure that the last SCB terminates the free list */
4342 	ahc_outb(ahc, SCBPTR, i-1);
4343 	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
4344 }
4345 
4346 static int
4347 ahc_init_scbdata(struct ahc_softc *ahc)
4348 {
4349 	struct scb_data *scb_data;
4350 
4351 	scb_data = ahc->scb_data;
4352 	SLIST_INIT(&scb_data->free_scbs);
4353 	SLIST_INIT(&scb_data->sg_maps);
4354 
4355 	/* Allocate SCB resources */
4356 	scb_data->scbarray =
4357 	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
4358 				 M_DEVBUF, M_NOWAIT);
4359 	if (scb_data->scbarray == NULL)
4360 		return (ENOMEM);
4361 	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
4362 
4363 	/* Determine the number of hardware SCBs and initialize them */
4364 
4365 	scb_data->maxhscbs = ahc_probe_scbs(ahc);
4366 	if (ahc->scb_data->maxhscbs == 0) {
4367 		printf("%s: No SCB space found\n", ahc_name(ahc));
4368 		return (ENXIO);
4369 	}
4370 
4371 	/*
4372 	 * Create our DMA tags.  These tags define the kinds of device
4373 	 * accessible memory allocations and memory mappings we will
4374 	 * need to perform during normal operation.
4375 	 *
4376 	 * Unless we need to further restrict the allocation, we rely
4377 	 * on the restrictions of the parent dmat, hence the common
4378 	 * use of MAXADDR and MAXSIZE.
4379 	 */
4380 
4381 	/* DMA tag for our hardware scb structures */
4382 	if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4383 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4384 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4385 			       /*highaddr*/BUS_SPACE_MAXADDR,
4386 			       /*filter*/NULL, /*filterarg*/NULL,
4387 			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
4388 			       /*nsegments*/1,
4389 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4390 			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
4391 		goto error_exit;
4392 	}
4393 
4394 	scb_data->init_level++;
4395 
4396 	/* Allocation for our hscbs */
4397 	if (aic_dmamem_alloc(ahc, scb_data->hscb_dmat,
4398 			     (void **)&scb_data->hscbs,
4399 			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
4400 		goto error_exit;
4401 	}
4402 
4403 	scb_data->init_level++;
4404 
4405 	/* And permanently map them */
4406 	aic_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
4407 			scb_data->hscbs,
4408 			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
4409 			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);
4410 
4411 	scb_data->init_level++;
4412 
4413 	/* DMA tag for our sense buffers */
4414 	if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4415 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4416 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4417 			       /*highaddr*/BUS_SPACE_MAXADDR,
4418 			       /*filter*/NULL, /*filterarg*/NULL,
4419 			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
4420 			       /*nsegments*/1,
4421 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4422 			       /*flags*/0, &scb_data->sense_dmat) != 0) {
4423 		goto error_exit;
4424 	}
4425 
4426 	scb_data->init_level++;
4427 
4428 	/* Allocate them */
4429 	if (aic_dmamem_alloc(ahc, scb_data->sense_dmat,
4430 			     (void **)&scb_data->sense,
4431 			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
4432 		goto error_exit;
4433 	}
4434 
4435 	scb_data->init_level++;
4436 
4437 	/* And permanently map them */
4438 	aic_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
4439 			scb_data->sense,
4440 			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
4441 			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);
4442 
4443 	scb_data->init_level++;
4444 
4445 	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
4446 	if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
4447 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4448 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4449 			       /*highaddr*/BUS_SPACE_MAXADDR,
4450 			       /*filter*/NULL, /*filterarg*/NULL,
4451 			       PAGE_SIZE, /*nsegments*/1,
4452 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4453 			       /*flags*/0, &scb_data->sg_dmat) != 0) {
4454 		goto error_exit;
4455 	}
4456 
4457 	scb_data->init_level++;
4458 
4459 	/* Perform initial CCB allocation */
4460 	memset(scb_data->hscbs, 0,
4461 	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
4462 	ahc_alloc_scbs(ahc);
4463 
4464 	if (scb_data->numscbs == 0) {
4465 		printf("%s: ahc_init_scbdata - "
4466 		       "Unable to allocate initial scbs\n",
4467 		       ahc_name(ahc));
4468 		goto error_exit;
4469 	}
4470 
4471 	/*
4472 	 * Reserve the next queued SCB.
4473 	 */
4474 	ahc->next_queued_scb = ahc_get_scb(ahc);
4475 
4476 	/*
4477 	 * Note that we were successfull
4478 	 */
4479 	return (0);
4480 
4481 error_exit:
4482 
4483 	return (ENOMEM);
4484 }
4485 
4486 static void
4487 ahc_fini_scbdata(struct ahc_softc *ahc)
4488 {
4489 	struct scb_data *scb_data;
4490 
4491 	scb_data = ahc->scb_data;
4492 	if (scb_data == NULL)
4493 		return;
4494 
4495 	switch (scb_data->init_level) {
4496 	default:
4497 	case 7:
4498 	{
4499 		struct sg_map_node *sg_map;
4500 
4501 		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
4502 			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
4503 			aic_dmamap_unload(ahc, scb_data->sg_dmat,
4504 					  sg_map->sg_dmamap);
4505 			aic_dmamem_free(ahc, scb_data->sg_dmat,
4506 					sg_map->sg_vaddr,
4507 					sg_map->sg_dmamap);
4508 			free(sg_map, M_DEVBUF);
4509 		}
4510 		aic_dma_tag_destroy(ahc, scb_data->sg_dmat);
4511 	}
4512 	case 6:
4513 		aic_dmamap_unload(ahc, scb_data->sense_dmat,
4514 				  scb_data->sense_dmamap);
4515 	case 5:
4516 		aic_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
4517 				scb_data->sense_dmamap);
4518 		aic_dmamap_destroy(ahc, scb_data->sense_dmat,
4519 				   scb_data->sense_dmamap);
4520 	case 4:
4521 		aic_dma_tag_destroy(ahc, scb_data->sense_dmat);
4522 	case 3:
4523 		aic_dmamap_unload(ahc, scb_data->hscb_dmat,
4524 				  scb_data->hscb_dmamap);
4525 	case 2:
4526 		aic_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
4527 				scb_data->hscb_dmamap);
4528 		aic_dmamap_destroy(ahc, scb_data->hscb_dmat,
4529 				   scb_data->hscb_dmamap);
4530 	case 1:
4531 		aic_dma_tag_destroy(ahc, scb_data->hscb_dmat);
4532 		break;
4533 	case 0:
4534 		break;
4535 	}
4536 	if (scb_data->scbarray != NULL)
4537 		free(scb_data->scbarray, M_DEVBUF);
4538 }
4539 
4540 void
4541 ahc_alloc_scbs(struct ahc_softc *ahc)
4542 {
4543 	struct scb_data *scb_data;
4544 	struct scb *next_scb;
4545 	struct sg_map_node *sg_map;
4546 	bus_addr_t physaddr;
4547 	struct ahc_dma_seg *segs;
4548 	int newcount;
4549 	int i;
4550 
4551 	scb_data = ahc->scb_data;
4552 	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
4553 		/* Can't allocate any more */
4554 		return;
4555 
4556 	next_scb = &scb_data->scbarray[scb_data->numscbs];
4557 
4558 	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
4559 
4560 	if (sg_map == NULL)
4561 		return;
4562 
4563 	/* Allocate S/G space for the next batch of SCBS */
4564 	if (aic_dmamem_alloc(ahc, scb_data->sg_dmat,
4565 			     (void **)&sg_map->sg_vaddr,
4566 			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
4567 		free(sg_map, M_DEVBUF);
4568 		return;
4569 	}
4570 
4571 	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
4572 
4573 	aic_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
4574 			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
4575 			&sg_map->sg_physaddr, /*flags*/0);
4576 
4577 	segs = sg_map->sg_vaddr;
4578 	physaddr = sg_map->sg_physaddr;
4579 
4580 	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
4581 	newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
4582 	for (i = 0; i < newcount; i++) {
4583 		struct scb_platform_data *pdata;
4584 #ifndef __linux__
4585 		int error;
4586 #endif
4587 		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
4588 							   M_DEVBUF, M_NOWAIT);
4589 		if (pdata == NULL)
4590 			break;
4591 		next_scb->platform_data = pdata;
4592 		next_scb->sg_map = sg_map;
4593 		next_scb->sg_list = segs;
4594 		/*
4595 		 * The sequencer always starts with the second entry.
4596 		 * The first entry is embedded in the scb.
4597 		 */
4598 		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
4599 		next_scb->ahc_softc = ahc;
4600 		next_scb->flags = SCB_FLAG_NONE;
4601 #ifndef __linux__
4602 		error = aic_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
4603 					  &next_scb->dmamap);
4604 		if (error != 0)
4605 			break;
4606 #endif
4607 		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
4608 		next_scb->hscb->tag = ahc->scb_data->numscbs;
4609 		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
4610 				  next_scb, links.sle);
4611 		segs += AHC_NSEG;
4612 		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
4613 		next_scb++;
4614 		ahc->scb_data->numscbs++;
4615 	}
4616 }
4617 
4618 void
4619 ahc_controller_info(struct ahc_softc *ahc, char *buf)
4620 {
4621 	int len;
4622 
4623 	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
4624 	buf += len;
4625 	if ((ahc->features & AHC_TWIN) != 0)
4626  		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
4627 			      "B SCSI Id=%d, primary %c, ",
4628 			      ahc->our_id, ahc->our_id_b,
4629 			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
4630 	else {
4631 		const char *speed;
4632 		const char *type;
4633 
4634 		speed = "";
4635 		if ((ahc->features & AHC_ULTRA) != 0) {
4636 			speed = "Ultra ";
4637 		} else if ((ahc->features & AHC_DT) != 0) {
4638 			speed = "Ultra160 ";
4639 		} else if ((ahc->features & AHC_ULTRA2) != 0) {
4640 			speed = "Ultra2 ";
4641 		}
4642 		if ((ahc->features & AHC_WIDE) != 0) {
4643 			type = "Wide";
4644 		} else {
4645 			type = "Single";
4646 		}
4647 		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
4648 			      speed, type, ahc->channel, ahc->our_id);
4649 	}
4650 	buf += len;
4651 
4652 	if ((ahc->flags & AHC_PAGESCBS) != 0)
4653 		sprintf(buf, "%d/%d SCBs",
4654 			ahc->scb_data->maxhscbs, AHC_MAX_QUEUE);
4655 	else
4656 		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
4657 }
4658 
4659 int
4660 ahc_chip_init(struct ahc_softc *ahc)
4661 {
4662 	int	 term;
4663 	int	 error;
4664 	u_int	 i;
4665 	u_int	 scsi_conf;
4666 	u_int	 scsiseq_template;
4667 	uint32_t physaddr;
4668 
4669 	ahc_outb(ahc, SEQ_FLAGS, 0);
4670 	ahc_outb(ahc, SEQ_FLAGS2, 0);
4671 
4672 	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4673 	if (ahc->features & AHC_TWIN) {
4674 
4675 		/*
4676 		 * Setup Channel B first.
4677 		 */
4678 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4679 		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4680 		ahc_outb(ahc, SCSIID, ahc->our_id_b);
4681 		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4682 		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4683 					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
4684 		if ((ahc->features & AHC_ULTRA2) != 0)
4685 			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4686 		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4687 		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4688 
4689 		/* Select Channel A */
4690 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4691 	}
4692 	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4693 	if ((ahc->features & AHC_ULTRA2) != 0)
4694 		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4695 	else
4696 		ahc_outb(ahc, SCSIID, ahc->our_id);
4697 	scsi_conf = ahc_inb(ahc, SCSICONF);
4698 	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4699 				|term|ahc->seltime
4700 				|ENSTIMER|ACTNEGEN);
4701 	if ((ahc->features & AHC_ULTRA2) != 0)
4702 		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4703 	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4704 	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4705 
4706 	/* There are no untagged SCBs active yet. */
4707 	for (i = 0; i < 16; i++) {
4708 		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
4709 		if ((ahc->flags & AHC_SCB_BTT) != 0) {
4710 			int lun;
4711 
4712 			/*
4713 			 * The SCB based BTT allows an entry per
4714 			 * target and lun pair.
4715 			 */
4716 			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
4717 				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
4718 		}
4719 	}
4720 
4721 	/* All of our queues are empty */
4722 	for (i = 0; i < 256; i++)
4723 		ahc->qoutfifo[i] = SCB_LIST_NULL;
4724 	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);
4725 
4726 	for (i = 0; i < 256; i++)
4727 		ahc->qinfifo[i] = SCB_LIST_NULL;
4728 
4729 	if ((ahc->features & AHC_MULTI_TID) != 0) {
4730 		ahc_outb(ahc, TARGID, 0);
4731 		ahc_outb(ahc, TARGID + 1, 0);
4732 	}
4733 
4734 	/*
4735 	 * Tell the sequencer where it can find our arrays in memory.
4736 	 */
4737 	physaddr = ahc->scb_data->hscb_busaddr;
4738 	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4739 	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4740 	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4741 	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4742 
4743 	physaddr = ahc->shared_data_busaddr;
4744 	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4745 	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4746 	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4747 	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4748 
4749 	/*
4750 	 * Initialize the group code to command length table.
4751 	 * This overrides the values in TARG_SCSIRATE, so only
4752 	 * setup the table after we have processed that information.
4753 	 */
4754 	ahc_outb(ahc, CMDSIZE_TABLE, 5);
4755 	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4756 	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4757 	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4758 	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4759 	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4760 	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4761 	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
4762 
4763 	if ((ahc->features & AHC_HS_MAILBOX) != 0)
4764 		ahc_outb(ahc, HS_MAILBOX, 0);
4765 
4766 	/* Tell the sequencer of our initial queue positions */
4767 	if ((ahc->features & AHC_TARGETMODE) != 0) {
4768 		ahc->tqinfifonext = 1;
4769 		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
4770 		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
4771 	}
4772 	ahc->qinfifonext = 0;
4773 	ahc->qoutfifonext = 0;
4774 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4775 		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4776 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4777 		ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext);
4778 		ahc_outb(ahc, SDSCB_QOFF, 0);
4779 	} else {
4780 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4781 		ahc_outb(ahc, QINPOS, ahc->qinfifonext);
4782 		ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext);
4783 	}
4784 
4785 	/* We don't have any waiting selections */
4786 	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4787 
4788 	/* Our disconnection list is empty too */
4789 	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4790 
4791 	/* Message out buffer starts empty */
4792 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4793 
4794 	/*
4795 	 * Setup the allowed SCSI Sequences based on operational mode.
4796 	 * If we are a target, we'll enalbe select in operations once
4797 	 * we've had a lun enabled.
4798 	 */
4799 	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4800 	if ((ahc->flags & AHC_INITIATORROLE) != 0)
4801 		scsiseq_template |= ENRSELI;
4802 	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4803 
4804 	/* Initialize our list of free SCBs. */
4805 	ahc_build_free_scb_list(ahc);
4806 
4807 	/*
4808 	 * Tell the sequencer which SCB will be the next one it receives.
4809 	 */
4810 	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
4811 
4812 	/*
4813 	 * Load the Sequencer program and Enable the adapter
4814 	 * in "fast" mode.
4815 	 */
4816 	if (bootverbose)
4817 		printf("%s: Downloading Sequencer Program...",
4818 		       ahc_name(ahc));
4819 
4820 	error = ahc_loadseq(ahc);
4821 	if (error != 0)
4822 		return (error);
4823 
4824 	if ((ahc->features & AHC_ULTRA2) != 0) {
4825 		int wait;
4826 
4827 		/*
4828 		 * Wait for up to 500ms for our transceivers
4829 		 * to settle.  If the adapter does not have
4830 		 * a cable attached, the transceivers may
4831 		 * never settle, so don't complain if we
4832 		 * fail here.
4833 		 */
4834 		for (wait = 5000;
4835 		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
4836 		     wait--)
4837 			aic_delay(100);
4838 	}
4839 	ahc_restart(ahc);
4840 	return (0);
4841 }
4842 
4843 /*
4844  * Start the board, ready for normal operation
4845  */
4846 int
4847 ahc_init(struct ahc_softc *ahc)
4848 {
4849 	int	 max_targ;
4850 	int	 error;
4851 	u_int	 i;
4852 	u_int	 scsi_conf;
4853 	u_int	 ultraenb;
4854 	u_int	 discenable;
4855 	u_int	 tagenable;
4856 	size_t	 driver_data_size;
4857 
4858 #ifdef AHC_DEBUG
4859 	if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0)
4860 		ahc->flags |= AHC_SEQUENCER_DEBUG;
4861 #endif
4862 
4863 #ifdef AHC_PRINT_SRAM
4864 	printf("Scratch Ram:");
4865 	for (i = 0x20; i < 0x5f; i++) {
4866 		if (((i % 8) == 0) && (i != 0)) {
4867 			printf ("\n              ");
4868 		}
4869 		printf (" 0x%x", ahc_inb(ahc, i));
4870 	}
4871 	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4872 		for (i = 0x70; i < 0x7f; i++) {
4873 			if (((i % 8) == 0) && (i != 0)) {
4874 				printf ("\n              ");
4875 			}
4876 			printf (" 0x%x", ahc_inb(ahc, i));
4877 		}
4878 	}
4879 	printf ("\n");
4880 	/*
4881 	 * Reading uninitialized scratch ram may
4882 	 * generate parity errors.
4883 	 */
4884 	ahc_outb(ahc, CLRINT, CLRPARERR);
4885 	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
4886 #endif
4887 	max_targ = 15;
4888 
4889 	/*
4890 	 * Assume we have a board at this stage and it has been reset.
4891 	 */
4892 	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
4893 		ahc->our_id = ahc->our_id_b = 7;
4894 
4895 	/*
4896 	 * Default to allowing initiator operations.
4897 	 */
4898 	ahc->flags |= AHC_INITIATORROLE;
4899 
4900 	/*
4901 	 * Only allow target mode features if this unit has them enabled.
4902 	 */
4903 	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
4904 		ahc->features &= ~AHC_TARGETMODE;
4905 
4906 #ifndef __linux__
4907 	/* DMA tag for mapping buffers into device visible space. */
4908 	if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4909 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4910 			       /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
4911 					? (bus_addr_t)0x7FFFFFFFFFULL
4912 					: BUS_SPACE_MAXADDR_32BIT,
4913 			       /*highaddr*/BUS_SPACE_MAXADDR,
4914 			       /*filter*/NULL, /*filterarg*/NULL,
4915 			       /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
4916 			       /*nsegments*/AHC_NSEG,
4917 			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
4918 			       /*flags*/BUS_DMA_ALLOCNOW,
4919 			       &ahc->buffer_dmat) != 0) {
4920 		return (ENOMEM);
4921 	}
4922 #endif
4923 
4924 	ahc->init_level++;
4925 
4926 	/*
4927 	 * DMA tag for our command fifos and other data in system memory
4928 	 * the card's sequencer must be able to access.  For initiator
4929 	 * roles, we need to allocate space for the qinfifo and qoutfifo.
4930 	 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
4931 	 * When providing for the target mode role, we must additionally
4932 	 * provide space for the incoming target command fifo and an extra
4933 	 * byte to deal with a dma bug in some chip versions.
4934 	 */
4935 	driver_data_size = 2 * 256 * sizeof(uint8_t);
4936 	if ((ahc->features & AHC_TARGETMODE) != 0)
4937 		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
4938 				 + /*DMA WideOdd Bug Buffer*/1;
4939 	if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4940 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4941 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4942 			       /*highaddr*/BUS_SPACE_MAXADDR,
4943 			       /*filter*/NULL, /*filterarg*/NULL,
4944 			       driver_data_size,
4945 			       /*nsegments*/1,
4946 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4947 			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
4948 		return (ENOMEM);
4949 	}
4950 
4951 	ahc->init_level++;
4952 
4953 	/* Allocation of driver data */
4954 	if (aic_dmamem_alloc(ahc, ahc->shared_data_dmat,
4955 			     (void **)&ahc->qoutfifo,
4956 			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
4957 		return (ENOMEM);
4958 	}
4959 
4960 	ahc->init_level++;
4961 
4962 	/* And permanently map it in */
4963 	aic_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
4964 			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
4965 			&ahc->shared_data_busaddr, /*flags*/0);
4966 
4967 	if ((ahc->features & AHC_TARGETMODE) != 0) {
4968 		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
4969 		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
4970 		ahc->dma_bug_buf = ahc->shared_data_busaddr
4971 				 + driver_data_size - 1;
4972 		/* All target command blocks start out invalid. */
4973 		for (i = 0; i < AHC_TMODE_CMDS; i++)
4974 			ahc->targetcmds[i].cmd_valid = 0;
4975 		ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
4976 		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4977 	}
4978 	ahc->qinfifo = &ahc->qoutfifo[256];
4979 
4980 	ahc->init_level++;
4981 
4982 	/* Allocate SCB data now that buffer_dmat is initialized */
4983 	if (ahc->scb_data->maxhscbs == 0)
4984 		if (ahc_init_scbdata(ahc) != 0)
4985 			return (ENOMEM);
4986 
4987 	/*
4988 	 * Allocate a tstate to house information for our
4989 	 * initiator presence on the bus as well as the user
4990 	 * data for any target mode initiator.
4991 	 */
4992 	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
4993 		printf("%s: unable to allocate ahc_tmode_tstate.  "
4994 		       "Failing attach\n", ahc_name(ahc));
4995 		return (ENOMEM);
4996 	}
4997 
4998 	if ((ahc->features & AHC_TWIN) != 0) {
4999 		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
5000 			printf("%s: unable to allocate ahc_tmode_tstate.  "
5001 			       "Failing attach\n", ahc_name(ahc));
5002 			return (ENOMEM);
5003 		}
5004 	}
5005 
5006 	/*
5007 	 * Fire up a recovery thread for this controller.
5008 	 */
5009 	error = ahc_spawn_recovery_thread(ahc);
5010 	if (error != 0)
5011 		return (error);
5012 
5013 	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) {
5014 		ahc->flags |= AHC_PAGESCBS;
5015 	} else {
5016 		ahc->flags &= ~AHC_PAGESCBS;
5017 	}
5018 
5019 #ifdef AHC_DEBUG
5020 	if (ahc_debug & AHC_SHOW_MISC) {
5021 		printf("%s: hardware scb %u bytes; kernel scb %u bytes; "
5022 		       "ahc_dma %u bytes\n",
5023 			ahc_name(ahc),
5024 			(u_int)sizeof(struct hardware_scb),
5025 			(u_int)sizeof(struct scb),
5026 			(u_int)sizeof(struct ahc_dma_seg));
5027 	}
5028 #endif /* AHC_DEBUG */
5029 
5030 	/*
5031 	 * Look at the information that board initialization or
5032 	 * the board bios has left us.
5033 	 */
5034 	if (ahc->features & AHC_TWIN) {
5035 		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
5036 		if ((scsi_conf & RESET_SCSI) != 0
5037 		 && (ahc->flags & AHC_INITIATORROLE) != 0)
5038 			ahc->flags |= AHC_RESET_BUS_B;
5039 	}
5040 
5041 	scsi_conf = ahc_inb(ahc, SCSICONF);
5042 	if ((scsi_conf & RESET_SCSI) != 0
5043 	 && (ahc->flags & AHC_INITIATORROLE) != 0)
5044 		ahc->flags |= AHC_RESET_BUS_A;
5045 
5046 	ultraenb = 0;
5047 	tagenable = ALL_TARGETS_MASK;
5048 
5049 	/* Grab the disconnection disable table and invert it for our needs */
5050 	if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
5051 		printf("%s: Host Adapter Bios disabled.  Using default SCSI "
5052 			"device parameters\n", ahc_name(ahc));
5053 		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
5054 			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
5055 		discenable = ALL_TARGETS_MASK;
5056 		if ((ahc->features & AHC_ULTRA) != 0)
5057 			ultraenb = ALL_TARGETS_MASK;
5058 	} else {
5059 		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
5060 			   | ahc_inb(ahc, DISC_DSB));
5061 		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
5062 			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
5063 				      | ahc_inb(ahc, ULTRA_ENB);
5064 	}
5065 
5066 	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
5067 		max_targ = 7;
5068 
5069 	for (i = 0; i <= max_targ; i++) {
5070 		struct ahc_initiator_tinfo *tinfo;
5071 		struct ahc_tmode_tstate *tstate;
5072 		u_int our_id;
5073 		u_int target_id;
5074 		char channel;
5075 
5076 		channel = 'A';
5077 		our_id = ahc->our_id;
5078 		target_id = i;
5079 		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
5080 			channel = 'B';
5081 			our_id = ahc->our_id_b;
5082 			target_id = i % 8;
5083 		}
5084 		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
5085 					    target_id, &tstate);
5086 		/* Default to async narrow across the board */
5087 		memset(tinfo, 0, sizeof(*tinfo));
5088 		if (ahc->flags & AHC_USEDEFAULTS) {
5089 			if ((ahc->features & AHC_WIDE) != 0)
5090 				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
5091 
5092 			/*
5093 			 * These will be truncated when we determine the
5094 			 * connection type we have with the target.
5095 			 */
5096 			tinfo->user.period = ahc_syncrates->period;
5097 			tinfo->user.offset = MAX_OFFSET;
5098 		} else {
5099 			u_int scsirate;
5100 			uint16_t mask;
5101 
5102 			/* Take the settings leftover in scratch RAM. */
5103 			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
5104 			mask = (0x01 << i);
5105 			if ((ahc->features & AHC_ULTRA2) != 0) {
5106 				u_int offset;
5107 				u_int maxsync;
5108 
5109 				if ((scsirate & SOFS) == 0x0F) {
5110 					/*
5111 					 * Haven't negotiated yet,
5112 					 * so the format is different.
5113 					 */
5114 					scsirate = (scsirate & SXFR) >> 4
5115 						 | (ultraenb & mask)
5116 						  ? 0x08 : 0x0
5117 						 | (scsirate & WIDEXFER);
5118 					offset = MAX_OFFSET_ULTRA2;
5119 				} else
5120 					offset = ahc_inb(ahc, TARG_OFFSET + i);
5121 				if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
5122 					/* Set to the lowest sync rate, 5MHz */
5123 					scsirate |= 0x1c;
5124 				maxsync = AHC_SYNCRATE_ULTRA2;
5125 				if ((ahc->features & AHC_DT) != 0)
5126 					maxsync = AHC_SYNCRATE_DT;
5127 				tinfo->user.period =
5128 				    ahc_find_period(ahc, scsirate, maxsync);
5129 				if (offset == 0)
5130 					tinfo->user.period = 0;
5131 				else
5132 					tinfo->user.offset = MAX_OFFSET;
5133 				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
5134 				 && (ahc->features & AHC_DT) != 0)
5135 					tinfo->user.ppr_options =
5136 					    MSG_EXT_PPR_DT_REQ;
5137 			} else if ((scsirate & SOFS) != 0) {
5138 				if ((scsirate & SXFR) == 0x40
5139 				 && (ultraenb & mask) != 0) {
5140 					/* Treat 10MHz as a non-ultra speed */
5141 					scsirate &= ~SXFR;
5142 				 	ultraenb &= ~mask;
5143 				}
5144 				tinfo->user.period =
5145 				    ahc_find_period(ahc, scsirate,
5146 						    (ultraenb & mask)
5147 						   ? AHC_SYNCRATE_ULTRA
5148 						   : AHC_SYNCRATE_FAST);
5149 				if (tinfo->user.period != 0)
5150 					tinfo->user.offset = MAX_OFFSET;
5151 			}
5152 			if (tinfo->user.period == 0)
5153 				tinfo->user.offset = 0;
5154 			if ((scsirate & WIDEXFER) != 0
5155 			 && (ahc->features & AHC_WIDE) != 0)
5156 				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
5157 			tinfo->user.protocol_version = 4;
5158 			if ((ahc->features & AHC_DT) != 0)
5159 				tinfo->user.transport_version = 3;
5160 			else
5161 				tinfo->user.transport_version = 2;
5162 			tinfo->goal.protocol_version = 2;
5163 			tinfo->goal.transport_version = 2;
5164 			tinfo->curr.protocol_version = 2;
5165 			tinfo->curr.transport_version = 2;
5166 		}
5167 		tstate->ultraenb = 0;
5168 	}
5169 	ahc->user_discenable = discenable;
5170 	ahc->user_tagenable = tagenable;
5171 
5172 	return (ahc->bus_chip_init(ahc));
5173 }
5174 
5175 void
5176 ahc_intr_enable(struct ahc_softc *ahc, int enable)
5177 {
5178 	u_int hcntrl;
5179 
5180 	hcntrl = ahc_inb(ahc, HCNTRL);
5181 	hcntrl &= ~INTEN;
5182 	ahc->pause &= ~INTEN;
5183 	ahc->unpause &= ~INTEN;
5184 	if (enable) {
5185 		hcntrl |= INTEN;
5186 		ahc->pause |= INTEN;
5187 		ahc->unpause |= INTEN;
5188 	}
5189 	ahc_outb(ahc, HCNTRL, hcntrl);
5190 }
5191 
5192 /*
5193  * Ensure that the card is paused in a location
5194  * outside of all critical sections and that all
5195  * pending work is completed prior to returning.
5196  * This routine should only be called from outside
5197  * an interrupt context.
5198  */
5199 void
5200 ahc_pause_and_flushwork(struct ahc_softc *ahc)
5201 {
5202 	int intstat;
5203 	int maxloops;
5204 	int paused;
5205 
5206 	maxloops = 1000;
5207 	ahc->flags |= AHC_ALL_INTERRUPTS;
5208 	paused = FALSE;
5209 	do {
5210 		if (paused) {
5211 			ahc_unpause(ahc);
5212 			/*
5213 			 * Give the sequencer some time to service
5214 			 * any active selections.
5215 			 */
5216 			aic_delay(500);
5217 		}
5218 		ahc_intr(ahc);
5219 		ahc_pause(ahc);
5220 		paused = TRUE;
5221 		ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
5222 		intstat = ahc_inb(ahc, INTSTAT);
5223 		if ((intstat & INT_PEND) == 0) {
5224 			ahc_clear_critical_section(ahc);
5225 			intstat = ahc_inb(ahc, INTSTAT);
5226 		}
5227 	} while (--maxloops
5228 	      && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
5229 	      && ((intstat & INT_PEND) != 0
5230 	       || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
5231 	if (maxloops == 0) {
5232 		printf("Infinite interrupt loop, INTSTAT = %x",
5233 		       ahc_inb(ahc, INTSTAT));
5234 	}
5235 	ahc_platform_flushwork(ahc);
5236 	ahc->flags &= ~AHC_ALL_INTERRUPTS;
5237 }
5238 
5239 int
5240 ahc_suspend(struct ahc_softc *ahc)
5241 {
5242 
5243 	ahc_pause_and_flushwork(ahc);
5244 
5245 	if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
5246 		ahc_unpause(ahc);
5247 		return (EBUSY);
5248 	}
5249 
5250 #ifdef AHC_TARGET_MODE
5251 	/*
5252 	 * XXX What about ATIOs that have not yet been serviced?
5253 	 * Perhaps we should just refuse to be suspended if we
5254 	 * are acting in a target role.
5255 	 */
5256 	if (ahc->pending_device != NULL) {
5257 		ahc_unpause(ahc);
5258 		return (EBUSY);
5259 	}
5260 #endif
5261 	ahc_shutdown(ahc);
5262 	return (0);
5263 }
5264 
5265 int
5266 ahc_resume(struct ahc_softc *ahc)
5267 {
5268 
5269 	ahc_reset(ahc, /*reinit*/TRUE);
5270 	ahc_intr_enable(ahc, TRUE);
5271 	ahc_restart(ahc);
5272 	return (0);
5273 }
5274 
5275 /************************** Busy Target Table *********************************/
5276 /*
5277  * Return the untagged transaction id for a given target/channel lun.
5278  * Optionally, clear the entry.
5279  */
5280 u_int
5281 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
5282 {
5283 	u_int scbid;
5284 	u_int target_offset;
5285 
5286 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5287 		u_int saved_scbptr;
5288 
5289 		saved_scbptr = ahc_inb(ahc, SCBPTR);
5290 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5291 		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
5292 		ahc_outb(ahc, SCBPTR, saved_scbptr);
5293 	} else {
5294 		target_offset = TCL_TARGET_OFFSET(tcl);
5295 		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
5296 	}
5297 
5298 	return (scbid);
5299 }
5300 
5301 void
5302 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
5303 {
5304 	u_int target_offset;
5305 
5306 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5307 		u_int saved_scbptr;
5308 
5309 		saved_scbptr = ahc_inb(ahc, SCBPTR);
5310 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5311 		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
5312 		ahc_outb(ahc, SCBPTR, saved_scbptr);
5313 	} else {
5314 		target_offset = TCL_TARGET_OFFSET(tcl);
5315 		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
5316 	}
5317 }
5318 
5319 void
5320 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
5321 {
5322 	u_int target_offset;
5323 
5324 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5325 		u_int saved_scbptr;
5326 
5327 		saved_scbptr = ahc_inb(ahc, SCBPTR);
5328 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5329 		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
5330 		ahc_outb(ahc, SCBPTR, saved_scbptr);
5331 	} else {
5332 		target_offset = TCL_TARGET_OFFSET(tcl);
5333 		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
5334 	}
5335 }
5336 
5337 /************************** SCB and SCB queue management **********************/
5338 int
5339 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
5340 	      char channel, int lun, u_int tag, role_t role)
5341 {
5342 	int targ = SCB_GET_TARGET(ahc, scb);
5343 	char chan = SCB_GET_CHANNEL(ahc, scb);
5344 	int slun = SCB_GET_LUN(scb);
5345 	int match;
5346 
5347 	match = ((chan == channel) || (channel == ALL_CHANNELS));
5348 	if (match != 0)
5349 		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
5350 	if (match != 0)
5351 		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
5352 	if (match != 0) {
5353 #ifdef AHC_TARGET_MODE
5354 		int group;
5355 
5356 		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
5357 		if (role == ROLE_INITIATOR) {
5358 			match = (group != XPT_FC_GROUP_TMODE)
5359 			      && ((tag == scb->hscb->tag)
5360 			       || (tag == SCB_LIST_NULL));
5361 		} else if (role == ROLE_TARGET) {
5362 			match = (group == XPT_FC_GROUP_TMODE)
5363 			      && ((tag == scb->io_ctx->csio.tag_id)
5364 			       || (tag == SCB_LIST_NULL));
5365 		}
5366 #else /* !AHC_TARGET_MODE */
5367 		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
5368 #endif /* AHC_TARGET_MODE */
5369 	}
5370 
5371 	return match;
5372 }
5373 
5374 void
5375 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
5376 {
5377 	int	target;
5378 	char	channel;
5379 	int	lun;
5380 
5381 	target = SCB_GET_TARGET(ahc, scb);
5382 	lun = SCB_GET_LUN(scb);
5383 	channel = SCB_GET_CHANNEL(ahc, scb);
5384 
5385 	ahc_search_qinfifo(ahc, target, channel, lun,
5386 			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
5387 			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5388 
5389 	ahc_platform_freeze_devq(ahc, scb);
5390 }
5391 
5392 void
5393 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
5394 {
5395 	struct scb *prev_scb;
5396 
5397 	prev_scb = NULL;
5398 	if (ahc_qinfifo_count(ahc) != 0) {
5399 		u_int prev_tag;
5400 		uint8_t prev_pos;
5401 
5402 		prev_pos = ahc->qinfifonext - 1;
5403 		prev_tag = ahc->qinfifo[prev_pos];
5404 		prev_scb = ahc_lookup_scb(ahc, prev_tag);
5405 	}
5406 	ahc_qinfifo_requeue(ahc, prev_scb, scb);
5407 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5408 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
5409 	} else {
5410 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
5411 	}
5412 }
5413 
5414 static void
5415 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
5416 		    struct scb *scb)
5417 {
5418 	if (prev_scb == NULL) {
5419 		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
5420 	} else {
5421 		prev_scb->hscb->next = scb->hscb->tag;
5422 		ahc_sync_scb(ahc, prev_scb,
5423 			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5424 	}
5425 	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
5426 	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
5427 	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5428 }
5429 
5430 static int
5431 ahc_qinfifo_count(struct ahc_softc *ahc)
5432 {
5433 	uint8_t qinpos;
5434 	uint8_t diff;
5435 
5436 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5437 		qinpos = ahc_inb(ahc, SNSCB_QOFF);
5438 		ahc_outb(ahc, SNSCB_QOFF, qinpos);
5439 	} else
5440 		qinpos = ahc_inb(ahc, QINPOS);
5441 	diff = ahc->qinfifonext - qinpos;
5442 	return (diff);
5443 }
5444 
5445 int
5446 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
5447 		   int lun, u_int tag, role_t role, uint32_t status,
5448 		   ahc_search_action action)
5449 {
5450 	struct	scb *scb;
5451 	struct	scb *prev_scb;
5452 	uint8_t qinstart;
5453 	uint8_t qinpos;
5454 	uint8_t qintail;
5455 	uint8_t next;
5456 	uint8_t prev;
5457 	uint8_t curscbptr;
5458 	int	found;
5459 	int	have_qregs;
5460 
5461 	qintail = ahc->qinfifonext;
5462 	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
5463 	if (have_qregs) {
5464 		qinstart = ahc_inb(ahc, SNSCB_QOFF);
5465 		ahc_outb(ahc, SNSCB_QOFF, qinstart);
5466 	} else
5467 		qinstart = ahc_inb(ahc, QINPOS);
5468 	qinpos = qinstart;
5469 	found = 0;
5470 	prev_scb = NULL;
5471 
5472 	if (action == SEARCH_COMPLETE) {
5473 		/*
5474 		 * Don't attempt to run any queued untagged transactions
5475 		 * until we are done with the abort process.
5476 		 */
5477 		ahc_freeze_untagged_queues(ahc);
5478 	}
5479 
5480 	/*
5481 	 * Start with an empty queue.  Entries that are not chosen
5482 	 * for removal will be re-added to the queue as we go.
5483 	 */
5484 	ahc->qinfifonext = qinpos;
5485 	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
5486 
5487 	while (qinpos != qintail) {
5488 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
5489 		if (scb == NULL) {
5490 			printf("qinpos = %d, SCB index = %d\n",
5491 				qinpos, ahc->qinfifo[qinpos]);
5492 			panic("Loop 1\n");
5493 		}
5494 
5495 		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
5496 			/*
5497 			 * We found an scb that needs to be acted on.
5498 			 */
5499 			found++;
5500 			switch (action) {
5501 			case SEARCH_COMPLETE:
5502 			{
5503 				cam_status ostat;
5504 				cam_status cstat;
5505 
5506 				ostat = aic_get_transaction_status(scb);
5507 				if (ostat == CAM_REQ_INPROG)
5508 					aic_set_transaction_status(scb, status);
5509 				cstat = aic_get_transaction_status(scb);
5510 				if (cstat != CAM_REQ_CMP)
5511 					aic_freeze_scb(scb);
5512 				if ((scb->flags & SCB_ACTIVE) == 0)
5513 					printf("Inactive SCB in qinfifo\n");
5514 				ahc_done(ahc, scb);
5515 
5516 				/* FALLTHROUGH */
5517 			}
5518 			case SEARCH_REMOVE:
5519 				break;
5520 			case SEARCH_COUNT:
5521 				ahc_qinfifo_requeue(ahc, prev_scb, scb);
5522 				prev_scb = scb;
5523 				break;
5524 			}
5525 		} else {
5526 			ahc_qinfifo_requeue(ahc, prev_scb, scb);
5527 			prev_scb = scb;
5528 		}
5529 		qinpos++;
5530 	}
5531 
5532 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5533 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
5534 	} else {
5535 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
5536 	}
5537 
5538 	if (action != SEARCH_COUNT
5539 	 && (found != 0)
5540 	 && (qinstart != ahc->qinfifonext)) {
5541 		/*
5542 		 * The sequencer may be in the process of dmaing
5543 		 * down the SCB at the beginning of the queue.
5544 		 * This could be problematic if either the first,
5545 		 * or the second SCB is removed from the queue
5546 		 * (the first SCB includes a pointer to the "next"
5547 		 * SCB to dma). If we have removed any entries, swap
5548 		 * the first element in the queue with the next HSCB
5549 		 * so the sequencer will notice that NEXT_QUEUED_SCB
5550 		 * has changed during its dma attempt and will retry
5551 		 * the DMA.
5552 		 */
5553 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
5554 
5555 		if (scb == NULL) {
5556 			printf("found = %d, qinstart = %d, qinfifionext = %d\n",
5557 				found, qinstart, ahc->qinfifonext);
5558 			panic("First/Second Qinfifo fixup\n");
5559 		}
5560 		/*
5561 		 * ahc_swap_with_next_hscb forces our next pointer to
5562 		 * point to the reserved SCB for future commands.  Save
5563 		 * and restore our original next pointer to maintain
5564 		 * queue integrity.
5565 		 */
5566 		next = scb->hscb->next;
5567 		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
5568 		ahc_swap_with_next_hscb(ahc, scb);
5569 		scb->hscb->next = next;
5570 		ahc->qinfifo[qinstart] = scb->hscb->tag;
5571 
5572 		/* Tell the card about the new head of the qinfifo. */
5573 		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
5574 
5575 		/* Fixup the tail "next" pointer. */
5576 		qintail = ahc->qinfifonext - 1;
5577 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
5578 		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
5579 	}
5580 
5581 	/*
5582 	 * Search waiting for selection list.
5583 	 */
5584 	curscbptr = ahc_inb(ahc, SCBPTR);
5585 	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
5586 	prev = SCB_LIST_NULL;
5587 
5588 	while (next != SCB_LIST_NULL) {
5589 		uint8_t scb_index;
5590 
5591 		ahc_outb(ahc, SCBPTR, next);
5592 		scb_index = ahc_inb(ahc, SCB_TAG);
5593 		if (scb_index >= ahc->scb_data->numscbs) {
5594 			printf("Waiting List inconsistency. "
5595 			       "SCB index == %d, yet numscbs == %d.",
5596 			       scb_index, ahc->scb_data->numscbs);
5597 			ahc_dump_card_state(ahc);
5598 			panic("for safety");
5599 		}
5600 		scb = ahc_lookup_scb(ahc, scb_index);
5601 		if (scb == NULL) {
5602 			printf("scb_index = %d, next = %d\n",
5603 				scb_index, next);
5604 			panic("Waiting List traversal\n");
5605 		}
5606 		if (ahc_match_scb(ahc, scb, target, channel,
5607 				  lun, SCB_LIST_NULL, role)) {
5608 			/*
5609 			 * We found an scb that needs to be acted on.
5610 			 */
5611 			found++;
5612 			switch (action) {
5613 			case SEARCH_COMPLETE:
5614 			{
5615 				cam_status ostat;
5616 				cam_status cstat;
5617 
5618 				ostat = aic_get_transaction_status(scb);
5619 				if (ostat == CAM_REQ_INPROG)
5620 					aic_set_transaction_status(scb,
5621 								   status);
5622 				cstat = aic_get_transaction_status(scb);
5623 				if (cstat != CAM_REQ_CMP)
5624 					aic_freeze_scb(scb);
5625 				if ((scb->flags & SCB_ACTIVE) == 0)
5626 					printf("Inactive SCB in Wait List\n");
5627 				ahc_done(ahc, scb);
5628 				/* FALLTHROUGH */
5629 			}
5630 			case SEARCH_REMOVE:
5631 				next = ahc_rem_wscb(ahc, next, prev);
5632 				break;
5633 			case SEARCH_COUNT:
5634 				prev = next;
5635 				next = ahc_inb(ahc, SCB_NEXT);
5636 				break;
5637 			}
5638 		} else {
5639 
5640 			prev = next;
5641 			next = ahc_inb(ahc, SCB_NEXT);
5642 		}
5643 	}
5644 	ahc_outb(ahc, SCBPTR, curscbptr);
5645 
5646 	found += ahc_search_untagged_queues(ahc, /*aic_io_ctx_t*/NULL, target,
5647 					    channel, lun, status, action);
5648 
5649 	if (action == SEARCH_COMPLETE)
5650 		ahc_release_untagged_queues(ahc);
5651 	return (found);
5652 }
5653 
5654 int
5655 ahc_search_untagged_queues(struct ahc_softc *ahc, aic_io_ctx_t ctx,
5656 			   int target, char channel, int lun, uint32_t status,
5657 			   ahc_search_action action)
5658 {
5659 	struct	scb *scb;
5660 	int	maxtarget;
5661 	int	found;
5662 	int	i;
5663 
5664 	if (action == SEARCH_COMPLETE) {
5665 		/*
5666 		 * Don't attempt to run any queued untagged transactions
5667 		 * until we are done with the abort process.
5668 		 */
5669 		ahc_freeze_untagged_queues(ahc);
5670 	}
5671 
5672 	found = 0;
5673 	i = 0;
5674 	if ((ahc->flags & AHC_SCB_BTT) == 0) {
5675 
5676 		maxtarget = 16;
5677 		if (target != CAM_TARGET_WILDCARD) {
5678 
5679 			i = target;
5680 			if (channel == 'B')
5681 				i += 8;
5682 			maxtarget = i + 1;
5683 		}
5684 	} else {
5685 		maxtarget = 0;
5686 	}
5687 
5688 	for (; i < maxtarget; i++) {
5689 		struct scb_tailq *untagged_q;
5690 		struct scb *next_scb;
5691 
5692 		untagged_q = &(ahc->untagged_queues[i]);
5693 		next_scb = TAILQ_FIRST(untagged_q);
5694 		while (next_scb != NULL) {
5695 
5696 			scb = next_scb;
5697 			next_scb = TAILQ_NEXT(scb, links.tqe);
5698 
5699 			/*
5700 			 * The head of the list may be the currently
5701 			 * active untagged command for a device.
5702 			 * We're only searching for commands that
5703 			 * have not been started.  A transaction
5704 			 * marked active but still in the qinfifo
5705 			 * is removed by the qinfifo scanning code
5706 			 * above.
5707 			 */
5708 			if ((scb->flags & SCB_ACTIVE) != 0)
5709 				continue;
5710 
5711 			if (ahc_match_scb(ahc, scb, target, channel, lun,
5712 					  SCB_LIST_NULL, ROLE_INITIATOR) == 0
5713 			 || (ctx != NULL && ctx != scb->io_ctx))
5714 				continue;
5715 
5716 			/*
5717 			 * We found an scb that needs to be acted on.
5718 			 */
5719 			found++;
5720 			switch (action) {
5721 			case SEARCH_COMPLETE:
5722 			{
5723 				cam_status ostat;
5724 				cam_status cstat;
5725 
5726 				ostat = aic_get_transaction_status(scb);
5727 				if (ostat == CAM_REQ_INPROG)
5728 					aic_set_transaction_status(scb, status);
5729 				cstat = aic_get_transaction_status(scb);
5730 				if (cstat != CAM_REQ_CMP)
5731 					aic_freeze_scb(scb);
5732 				ahc_done(ahc, scb);
5733 				break;
5734 			}
5735 			case SEARCH_REMOVE:
5736 				scb->flags &= ~SCB_UNTAGGEDQ;
5737 				TAILQ_REMOVE(untagged_q, scb, links.tqe);
5738 				break;
5739 			case SEARCH_COUNT:
5740 				break;
5741 			}
5742 		}
5743 	}
5744 
5745 	if (action == SEARCH_COMPLETE)
5746 		ahc_release_untagged_queues(ahc);
5747 	return (found);
5748 }
5749 
5750 int
5751 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
5752 		     int lun, u_int tag, int stop_on_first, int remove,
5753 		     int save_state)
5754 {
5755 	struct	scb *scbp;
5756 	u_int	next;
5757 	u_int	prev;
5758 	u_int	count;
5759 	u_int	active_scb;
5760 
5761 	count = 0;
5762 	next = ahc_inb(ahc, DISCONNECTED_SCBH);
5763 	prev = SCB_LIST_NULL;
5764 
5765 	if (save_state) {
5766 		/* restore this when we're done */
5767 		active_scb = ahc_inb(ahc, SCBPTR);
5768 	} else
5769 		/* Silence compiler */
5770 		active_scb = SCB_LIST_NULL;
5771 
5772 	while (next != SCB_LIST_NULL) {
5773 		u_int scb_index;
5774 
5775 		ahc_outb(ahc, SCBPTR, next);
5776 		scb_index = ahc_inb(ahc, SCB_TAG);
5777 		if (scb_index >= ahc->scb_data->numscbs) {
5778 			printf("Disconnected List inconsistency. "
5779 			       "SCB index == %d, yet numscbs == %d.",
5780 			       scb_index, ahc->scb_data->numscbs);
5781 			ahc_dump_card_state(ahc);
5782 			panic("for safety");
5783 		}
5784 
5785 		if (next == prev) {
5786 			panic("Disconnected List Loop. "
5787 			      "cur SCBPTR == %x, prev SCBPTR == %x.",
5788 			      next, prev);
5789 		}
5790 		scbp = ahc_lookup_scb(ahc, scb_index);
5791 		if (ahc_match_scb(ahc, scbp, target, channel, lun,
5792 				  tag, ROLE_INITIATOR)) {
5793 			count++;
5794 			if (remove) {
5795 				next =
5796 				    ahc_rem_scb_from_disc_list(ahc, prev, next);
5797 			} else {
5798 				prev = next;
5799 				next = ahc_inb(ahc, SCB_NEXT);
5800 			}
5801 			if (stop_on_first)
5802 				break;
5803 		} else {
5804 			prev = next;
5805 			next = ahc_inb(ahc, SCB_NEXT);
5806 		}
5807 	}
5808 	if (save_state)
5809 		ahc_outb(ahc, SCBPTR, active_scb);
5810 	return (count);
5811 }
5812 
5813 /*
5814  * Remove an SCB from the on chip list of disconnected transactions.
5815  * This is empty/unused if we are not performing SCB paging.
5816  */
5817 static u_int
5818 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
5819 {
5820 	u_int next;
5821 
5822 	ahc_outb(ahc, SCBPTR, scbptr);
5823 	next = ahc_inb(ahc, SCB_NEXT);
5824 
5825 	ahc_outb(ahc, SCB_CONTROL, 0);
5826 
5827 	ahc_add_curscb_to_free_list(ahc);
5828 
5829 	if (prev != SCB_LIST_NULL) {
5830 		ahc_outb(ahc, SCBPTR, prev);
5831 		ahc_outb(ahc, SCB_NEXT, next);
5832 	} else
5833 		ahc_outb(ahc, DISCONNECTED_SCBH, next);
5834 
5835 	return (next);
5836 }
5837 
5838 /*
5839  * Add the SCB as selected by SCBPTR onto the on chip list of
5840  * free hardware SCBs.  This list is empty/unused if we are not
5841  * performing SCB paging.
5842  */
5843 static void
5844 ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5845 {
5846 	/*
5847 	 * Invalidate the tag so that our abort
5848 	 * routines don't think it's active.
5849 	 */
5850 	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
5851 
5852 	if ((ahc->flags & AHC_PAGESCBS) != 0) {
5853 		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
5854 		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
5855 	}
5856 }
5857 
5858 /*
5859  * Manipulate the waiting for selection list and return the
5860  * scb that follows the one that we remove.
5861  */
5862 static u_int
5863 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5864 {
5865 	u_int curscb, next;
5866 
5867 	/*
5868 	 * Select the SCB we want to abort and
5869 	 * pull the next pointer out of it.
5870 	 */
5871 	curscb = ahc_inb(ahc, SCBPTR);
5872 	ahc_outb(ahc, SCBPTR, scbpos);
5873 	next = ahc_inb(ahc, SCB_NEXT);
5874 
5875 	/* Clear the necessary fields */
5876 	ahc_outb(ahc, SCB_CONTROL, 0);
5877 
5878 	ahc_add_curscb_to_free_list(ahc);
5879 
5880 	/* update the waiting list */
5881 	if (prev == SCB_LIST_NULL) {
5882 		/* First in the list */
5883 		ahc_outb(ahc, WAITING_SCBH, next);
5884 
5885 		/*
5886 		 * Ensure we aren't attempting to perform
5887 		 * selection for this entry.
5888 		 */
5889 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
5890 	} else {
5891 		/*
5892 		 * Select the scb that pointed to us
5893 		 * and update its next pointer.
5894 		 */
5895 		ahc_outb(ahc, SCBPTR, prev);
5896 		ahc_outb(ahc, SCB_NEXT, next);
5897 	}
5898 
5899 	/*
5900 	 * Point us back at the original scb position.
5901 	 */
5902 	ahc_outb(ahc, SCBPTR, curscb);
5903 	return next;
5904 }
5905 
5906 /******************************** Error Handling ******************************/
5907 /*
5908  * Abort all SCBs that match the given description (target/channel/lun/tag),
5909  * setting their status to the passed in status if the status has not already
5910  * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
5911  * is paused before it is called.
5912  */
5913 int
5914 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
5915 	       int lun, u_int tag, role_t role, uint32_t status)
5916 {
5917 	struct	scb *scbp;
5918 	struct	scb *scbp_next;
5919 	u_int	active_scb;
5920 	int	i, j;
5921 	int	maxtarget;
5922 	int	minlun;
5923 	int	maxlun;
5924 
5925 	int	found;
5926 
5927 	/*
5928 	 * Don't attempt to run any queued untagged transactions
5929 	 * until we are done with the abort process.
5930 	 */
5931 	ahc_freeze_untagged_queues(ahc);
5932 
5933 	/* restore this when we're done */
5934 	active_scb = ahc_inb(ahc, SCBPTR);
5935 
5936 	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
5937 				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5938 
5939 	/*
5940 	 * Clean out the busy target table for any untagged commands.
5941 	 */
5942 	i = 0;
5943 	maxtarget = 16;
5944 	if (target != CAM_TARGET_WILDCARD) {
5945 		i = target;
5946 		if (channel == 'B')
5947 			i += 8;
5948 		maxtarget = i + 1;
5949 	}
5950 
5951 	if (lun == CAM_LUN_WILDCARD) {
5952 
5953 		/*
5954 		 * Unless we are using an SCB based
5955 		 * busy targets table, there is only
5956 		 * one table entry for all luns of
5957 		 * a target.
5958 		 */
5959 		minlun = 0;
5960 		maxlun = 1;
5961 		if ((ahc->flags & AHC_SCB_BTT) != 0)
5962 			maxlun = AHC_NUM_LUNS;
5963 	} else {
5964 		minlun = lun;
5965 		maxlun = lun + 1;
5966 	}
5967 
5968 	if (role != ROLE_TARGET) {
5969 		for (;i < maxtarget; i++) {
5970 			for (j = minlun;j < maxlun; j++) {
5971 				u_int scbid;
5972 				u_int tcl;
5973 
5974 				tcl = BUILD_TCL(i << 4, j);
5975 				scbid = ahc_index_busy_tcl(ahc, tcl);
5976 				scbp = ahc_lookup_scb(ahc, scbid);
5977 				if (scbp == NULL
5978 				 || ahc_match_scb(ahc, scbp, target, channel,
5979 						  lun, tag, role) == 0)
5980 					continue;
5981 				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
5982 			}
5983 		}
5984 
5985 		/*
5986 		 * Go through the disconnected list and remove any entries we
5987 		 * have queued for completion, 0'ing their control byte too.
5988 		 * We save the active SCB and restore it ourselves, so there
5989 		 * is no reason for this search to restore it too.
5990 		 */
5991 		ahc_search_disc_list(ahc, target, channel, lun, tag,
5992 				     /*stop_on_first*/FALSE, /*remove*/TRUE,
5993 				     /*save_state*/FALSE);
5994 	}
5995 
5996 	/*
5997 	 * Go through the hardware SCB array looking for commands that
5998 	 * were active but not on any list.  In some cases, these remnants
5999 	 * might not still have mappings in the scbindex array (e.g. unexpected
6000 	 * bus free with the same scb queued for an abort).  Don't hold this
6001 	 * against them.
6002 	 */
6003 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
6004 		u_int scbid;
6005 
6006 		ahc_outb(ahc, SCBPTR, i);
6007 		scbid = ahc_inb(ahc, SCB_TAG);
6008 		scbp = ahc_lookup_scb(ahc, scbid);
6009 		if ((scbp == NULL && scbid != SCB_LIST_NULL)
6010 		 || (scbp != NULL
6011 		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
6012 			ahc_add_curscb_to_free_list(ahc);
6013 	}
6014 
6015 	/*
6016 	 * Go through the pending CCB list and look for
6017 	 * commands for this target that are still active.
6018 	 * These are other tagged commands that were
6019 	 * disconnected when the reset occurred.
6020 	 */
6021 	scbp_next = LIST_FIRST(&ahc->pending_scbs);
6022 	while (scbp_next != NULL) {
6023 		scbp = scbp_next;
6024 		scbp_next = LIST_NEXT(scbp, pending_links);
6025 		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
6026 			cam_status ostat;
6027 
6028 			ostat = aic_get_transaction_status(scbp);
6029 			if (ostat == CAM_REQ_INPROG)
6030 				aic_set_transaction_status(scbp, status);
6031 			if (aic_get_transaction_status(scbp) != CAM_REQ_CMP)
6032 				aic_freeze_scb(scbp);
6033 			if ((scbp->flags & SCB_ACTIVE) == 0)
6034 				printf("Inactive SCB on pending list\n");
6035 			ahc_done(ahc, scbp);
6036 			found++;
6037 		}
6038 	}
6039 	ahc_outb(ahc, SCBPTR, active_scb);
6040 	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
6041 	ahc_release_untagged_queues(ahc);
6042 	return found;
6043 }
6044 
6045 static void
6046 ahc_reset_current_bus(struct ahc_softc *ahc)
6047 {
6048 	uint8_t scsiseq;
6049 
6050 	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
6051 	scsiseq = ahc_inb(ahc, SCSISEQ);
6052 	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
6053 	ahc_flush_device_writes(ahc);
6054 	aic_delay(AHC_BUSRESET_DELAY);
6055 	/* Turn off the bus reset */
6056 	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
6057 
6058 	ahc_clear_intstat(ahc);
6059 
6060 	/* Re-enable reset interrupts */
6061 	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
6062 }
6063 
6064 int
6065 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
6066 {
6067 	struct	ahc_devinfo devinfo;
6068 	u_int	initiator, target, max_scsiid;
6069 	u_int	sblkctl;
6070 	u_int	scsiseq;
6071 	u_int	simode1;
6072 	int	found;
6073 	int	restart_needed;
6074 	char	cur_channel;
6075 
6076 	ahc->pending_device = NULL;
6077 
6078 	ahc_compile_devinfo(&devinfo,
6079 			    CAM_TARGET_WILDCARD,
6080 			    CAM_TARGET_WILDCARD,
6081 			    CAM_LUN_WILDCARD,
6082 			    channel, ROLE_UNKNOWN);
6083 	ahc_pause(ahc);
6084 
6085 	/* Make sure the sequencer is in a safe location. */
6086 	ahc_clear_critical_section(ahc);
6087 
6088 	/*
6089 	 * Run our command complete fifos to ensure that we perform
6090 	 * completion processing on any commands that 'completed'
6091 	 * before the reset occurred.
6092 	 */
6093 	ahc_run_qoutfifo(ahc);
6094 #ifdef AHC_TARGET_MODE
6095 	/*
6096 	 * XXX - In Twin mode, the tqinfifo may have commands
6097 	 *	 for an unaffected channel in it.  However, if
6098 	 *	 we have run out of ATIO resources to drain that
6099 	 *	 queue, we may not get them all out here.  Further,
6100 	 *	 the blocked transactions for the reset channel
6101 	 *	 should just be killed off, irrespecitve of whether
6102 	 *	 we are blocked on ATIO resources.  Write a routine
6103 	 *	 to compact the tqinfifo appropriately.
6104 	 */
6105 	if ((ahc->flags & AHC_TARGETROLE) != 0) {
6106 		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
6107 	}
6108 #endif
6109 
6110 	/*
6111 	 * Reset the bus if we are initiating this reset
6112 	 */
6113 	sblkctl = ahc_inb(ahc, SBLKCTL);
6114 	cur_channel = 'A';
6115 	if ((ahc->features & AHC_TWIN) != 0
6116 	 && ((sblkctl & SELBUSB) != 0))
6117 	    cur_channel = 'B';
6118 	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6119 	if (cur_channel != channel) {
6120 		/* Case 1: Command for another bus is active
6121 		 * Stealthily reset the other bus without
6122 		 * upsetting the current bus.
6123 		 */
6124 		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
6125 		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
6126 #ifdef AHC_TARGET_MODE
6127 		/*
6128 		 * Bus resets clear ENSELI, so we cannot
6129 		 * defer re-enabling bus reset interrupts
6130 		 * if we are in target mode.
6131 		 */
6132 		if ((ahc->flags & AHC_TARGETROLE) != 0)
6133 			simode1 |= ENSCSIRST;
6134 #endif
6135 		ahc_outb(ahc, SIMODE1, simode1);
6136 		if (initiate_reset)
6137 			ahc_reset_current_bus(ahc);
6138 		ahc_clear_intstat(ahc);
6139 		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
6140 		ahc_outb(ahc, SBLKCTL, sblkctl);
6141 		restart_needed = FALSE;
6142 	} else {
6143 		/* Case 2: A command from this bus is active or we're idle */
6144 		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
6145 #ifdef AHC_TARGET_MODE
6146 		/*
6147 		 * Bus resets clear ENSELI, so we cannot
6148 		 * defer re-enabling bus reset interrupts
6149 		 * if we are in target mode.
6150 		 */
6151 		if ((ahc->flags & AHC_TARGETROLE) != 0)
6152 			simode1 |= ENSCSIRST;
6153 #endif
6154 		ahc_outb(ahc, SIMODE1, simode1);
6155 		if (initiate_reset)
6156 			ahc_reset_current_bus(ahc);
6157 		ahc_clear_intstat(ahc);
6158 		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
6159 		restart_needed = TRUE;
6160 	}
6161 
6162 	/*
6163 	 * Clean up all the state information for the
6164 	 * pending transactions on this bus.
6165 	 */
6166 	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
6167 			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
6168 			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
6169 
6170 	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
6171 
6172 #ifdef AHC_TARGET_MODE
6173 	/*
6174 	 * Send an immediate notify ccb to all target more peripheral
6175 	 * drivers affected by this action.
6176 	 */
6177 	for (target = 0; target <= max_scsiid; target++) {
6178 		struct ahc_tmode_tstate* tstate;
6179 		u_int lun;
6180 
6181 		tstate = ahc->enabled_targets[target];
6182 		if (tstate == NULL)
6183 			continue;
6184 		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
6185 			struct ahc_tmode_lstate* lstate;
6186 
6187 			lstate = tstate->enabled_luns[lun];
6188 			if (lstate == NULL)
6189 				continue;
6190 
6191 			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
6192 					       EVENT_TYPE_BUS_RESET, /*arg*/0);
6193 			ahc_send_lstate_events(ahc, lstate);
6194 		}
6195 	}
6196 #endif
6197 	/* Notify the XPT that a bus reset occurred */
6198 	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
6199 		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
6200 
6201 	/*
6202 	 * Revert to async/narrow transfers until we renegotiate.
6203 	 */
6204 	for (target = 0; target <= max_scsiid; target++) {
6205 
6206 		if (ahc->enabled_targets[target] == NULL)
6207 			continue;
6208 		for (initiator = 0; initiator <= max_scsiid; initiator++) {
6209 			struct ahc_devinfo devinfo;
6210 
6211 			ahc_compile_devinfo(&devinfo, target, initiator,
6212 					    CAM_LUN_WILDCARD,
6213 					    channel, ROLE_UNKNOWN);
6214 			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6215 				      AHC_TRANS_CUR, /*paused*/TRUE);
6216 			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
6217 					 /*period*/0, /*offset*/0,
6218 					 /*ppr_options*/0, AHC_TRANS_CUR,
6219 					 /*paused*/TRUE);
6220 		}
6221 	}
6222 
6223 	if (restart_needed)
6224 		ahc_restart(ahc);
6225 	else
6226 		ahc_unpause(ahc);
6227 	return found;
6228 }
6229 
6230 
6231 /***************************** Residual Processing ****************************/
6232 /*
6233  * Calculate the residual for a just completed SCB.
6234  */
6235 void
6236 ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
6237 {
6238 	struct hardware_scb *hscb;
6239 	struct status_pkt *spkt;
6240 	uint32_t sgptr;
6241 	uint32_t resid_sgptr;
6242 	uint32_t resid;
6243 
6244 	/*
6245 	 * 5 cases.
6246 	 * 1) No residual.
6247 	 *    SG_RESID_VALID clear in sgptr.
6248 	 * 2) Transferless command
6249 	 * 3) Never performed any transfers.
6250 	 *    sgptr has SG_FULL_RESID set.
6251 	 * 4) No residual but target did not
6252 	 *    save data pointers after the
6253 	 *    last transfer, so sgptr was
6254 	 *    never updated.
6255 	 * 5) We have a partial residual.
6256 	 *    Use residual_sgptr to determine
6257 	 *    where we are.
6258 	 */
6259 
6260 	hscb = scb->hscb;
6261 	sgptr = aic_le32toh(hscb->sgptr);
6262 	if ((sgptr & SG_RESID_VALID) == 0)
6263 		/* Case 1 */
6264 		return;
6265 	sgptr &= ~SG_RESID_VALID;
6266 
6267 	if ((sgptr & SG_LIST_NULL) != 0)
6268 		/* Case 2 */
6269 		return;
6270 
6271 	spkt = &hscb->shared_data.status;
6272 	resid_sgptr = aic_le32toh(spkt->residual_sg_ptr);
6273 	if ((sgptr & SG_FULL_RESID) != 0) {
6274 		/* Case 3 */
6275 		resid = aic_get_transfer_length(scb);
6276 	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
6277 		/* Case 4 */
6278 		return;
6279 	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
6280 		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
6281 		/* NOTREACHED */
6282 		return;
6283 	} else {
6284 		struct ahc_dma_seg *sg;
6285 
6286 		/*
6287 		 * Remainder of the SG where the transfer
6288 		 * stopped.
6289 		 */
6290 		resid = aic_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
6291 		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
6292 
6293 		/* The residual sg_ptr always points to the next sg */
6294 		sg--;
6295 
6296 		/*
6297 		 * Add up the contents of all residual
6298 		 * SG segments that are after the SG where
6299 		 * the transfer stopped.
6300 		 */
6301 		while ((aic_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
6302 			sg++;
6303 			resid += aic_le32toh(sg->len) & AHC_SG_LEN_MASK;
6304 		}
6305 	}
6306 	if ((scb->flags & SCB_SENSE) == 0)
6307 		aic_set_residual(scb, resid);
6308 	else
6309 		aic_set_sense_residual(scb, resid);
6310 
6311 #ifdef AHC_DEBUG
6312 	if ((ahc_debug & AHC_SHOW_MISC) != 0) {
6313 		ahc_print_path(ahc, scb);
6314 		printf("Handled %sResidual of %d bytes\n",
6315 		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
6316 	}
6317 #endif
6318 }
6319 
6320 /******************************* Target Mode **********************************/
6321 #ifdef AHC_TARGET_MODE
6322 /*
6323  * Add a target mode event to this lun's queue
6324  */
6325 static void
6326 ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
6327 		       u_int initiator_id, u_int event_type, u_int event_arg)
6328 {
6329 	struct ahc_tmode_event *event;
6330 	int pending;
6331 
6332 	xpt_freeze_devq(lstate->path, /*count*/1);
6333 	if (lstate->event_w_idx >= lstate->event_r_idx)
6334 		pending = lstate->event_w_idx - lstate->event_r_idx;
6335 	else
6336 		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
6337 			- (lstate->event_r_idx - lstate->event_w_idx);
6338 
6339 	if (event_type == EVENT_TYPE_BUS_RESET
6340 	 || event_type == MSG_BUS_DEV_RESET) {
6341 		/*
6342 		 * Any earlier events are irrelevant, so reset our buffer.
6343 		 * This has the effect of allowing us to deal with reset
6344 		 * floods (an external device holding down the reset line)
6345 		 * without losing the event that is really interesting.
6346 		 */
6347 		lstate->event_r_idx = 0;
6348 		lstate->event_w_idx = 0;
6349 		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
6350 	}
6351 
6352 	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
6353 		xpt_print_path(lstate->path);
6354 		printf("immediate event %x:%x lost\n",
6355 		       lstate->event_buffer[lstate->event_r_idx].event_type,
6356 		       lstate->event_buffer[lstate->event_r_idx].event_arg);
6357 		lstate->event_r_idx++;
6358 		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6359 			lstate->event_r_idx = 0;
6360 		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
6361 	}
6362 
6363 	event = &lstate->event_buffer[lstate->event_w_idx];
6364 	event->initiator_id = initiator_id;
6365 	event->event_type = event_type;
6366 	event->event_arg = event_arg;
6367 	lstate->event_w_idx++;
6368 	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6369 		lstate->event_w_idx = 0;
6370 }
6371 
6372 /*
6373  * Send any target mode events queued up waiting
6374  * for immediate notify resources.
6375  */
6376 void
6377 ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
6378 {
6379 	struct ccb_hdr *ccbh;
6380 	struct ccb_immed_notify *inot;
6381 
6382 	while (lstate->event_r_idx != lstate->event_w_idx
6383 	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
6384 		struct ahc_tmode_event *event;
6385 
6386 		event = &lstate->event_buffer[lstate->event_r_idx];
6387 		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
6388 		inot = (struct ccb_immed_notify *)ccbh;
6389 		switch (event->event_type) {
6390 		case EVENT_TYPE_BUS_RESET:
6391 			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
6392 			break;
6393 		default:
6394 			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
6395 			inot->message_args[0] = event->event_type;
6396 			inot->message_args[1] = event->event_arg;
6397 			break;
6398 		}
6399 		inot->initiator_id = event->initiator_id;
6400 		inot->sense_len = 0;
6401 		xpt_done((union ccb *)inot);
6402 		lstate->event_r_idx++;
6403 		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6404 			lstate->event_r_idx = 0;
6405 	}
6406 }
6407 #endif
6408 
6409 /******************** Sequencer Program Patching/Download *********************/
6410 
6411 #ifdef AHC_DUMP_SEQ
6412 void
6413 ahc_dumpseq(struct ahc_softc* ahc)
6414 {
6415 	int i;
6416 
6417 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
6418 	ahc_outb(ahc, SEQADDR0, 0);
6419 	ahc_outb(ahc, SEQADDR1, 0);
6420 	for (i = 0; i < ahc->instruction_ram_size; i++) {
6421 		uint8_t ins_bytes[4];
6422 
6423 		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
6424 		printf("0x%08x\n", ins_bytes[0] << 24
6425 				 | ins_bytes[1] << 16
6426 				 | ins_bytes[2] << 8
6427 				 | ins_bytes[3]);
6428 	}
6429 }
6430 #endif
6431 
6432 static int
6433 ahc_loadseq(struct ahc_softc *ahc)
6434 {
6435 	struct	cs cs_table[num_critical_sections];
6436 	u_int	begin_set[num_critical_sections];
6437 	u_int	end_set[num_critical_sections];
6438 	struct	patch *cur_patch;
6439 	u_int	cs_count;
6440 	u_int	cur_cs;
6441 	u_int	i;
6442 	u_int	skip_addr;
6443 	u_int	sg_prefetch_cnt;
6444 	int	downloaded;
6445 	uint8_t	download_consts[7];
6446 
6447 	/*
6448 	 * Start out with 0 critical sections
6449 	 * that apply to this firmware load.
6450 	 */
6451 	cs_count = 0;
6452 	cur_cs = 0;
6453 	memset(begin_set, 0, sizeof(begin_set));
6454 	memset(end_set, 0, sizeof(end_set));
6455 
6456 	/* Setup downloadable constant table */
6457 	download_consts[QOUTFIFO_OFFSET] = 0;
6458 	if (ahc->targetcmds != NULL)
6459 		download_consts[QOUTFIFO_OFFSET] += 32;
6460 	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
6461 	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
6462 	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
6463 	sg_prefetch_cnt = ahc->pci_cachesize;
6464 	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
6465 		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
6466 	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
6467 	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
6468 	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
6469 
6470 	cur_patch = patches;
6471 	downloaded = 0;
6472 	skip_addr = 0;
6473 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
6474 	ahc_outb(ahc, SEQADDR0, 0);
6475 	ahc_outb(ahc, SEQADDR1, 0);
6476 
6477 	for (i = 0; i < sizeof(seqprog)/4; i++) {
6478 		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
6479 			/*
6480 			 * Don't download this instruction as it
6481 			 * is in a patch that was removed.
6482 			 */
6483 			continue;
6484 		}
6485 
6486 		if (downloaded == ahc->instruction_ram_size) {
6487 			/*
6488 			 * We're about to exceed the instruction
6489 			 * storage capacity for this chip.  Fail
6490 			 * the load.
6491 			 */
6492 			printf("\n%s: Program too large for instruction memory "
6493 			       "size of %d!\n", ahc_name(ahc),
6494 			       ahc->instruction_ram_size);
6495 			return (ENOMEM);
6496 		}
6497 
6498 		/*
6499 		 * Move through the CS table until we find a CS
6500 		 * that might apply to this instruction.
6501 		 */
6502 		for (; cur_cs < num_critical_sections; cur_cs++) {
6503 			if (critical_sections[cur_cs].end <= i) {
6504 				if (begin_set[cs_count] == TRUE
6505 				 && end_set[cs_count] == FALSE) {
6506 					cs_table[cs_count].end = downloaded;
6507 				 	end_set[cs_count] = TRUE;
6508 					cs_count++;
6509 				}
6510 				continue;
6511 			}
6512 			if (critical_sections[cur_cs].begin <= i
6513 			 && begin_set[cs_count] == FALSE) {
6514 				cs_table[cs_count].begin = downloaded;
6515 				begin_set[cs_count] = TRUE;
6516 			}
6517 			break;
6518 		}
6519 		ahc_download_instr(ahc, i, download_consts);
6520 		downloaded++;
6521 	}
6522 
6523 	ahc->num_critical_sections = cs_count;
6524 	if (cs_count != 0) {
6525 
6526 		cs_count *= sizeof(struct cs);
6527 		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
6528 		if (ahc->critical_sections == NULL)
6529 			panic("ahc_loadseq: Could not malloc");
6530 		memcpy(ahc->critical_sections, cs_table, cs_count);
6531 	}
6532 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
6533 
6534 	if (bootverbose) {
6535 		printf(" %d instructions downloaded\n", downloaded);
6536 		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
6537 		       ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
6538 	}
6539 	return (0);
6540 }
6541 
6542 static int
6543 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
6544 		u_int start_instr, u_int *skip_addr)
6545 {
6546 	struct	patch *cur_patch;
6547 	struct	patch *last_patch;
6548 	u_int	num_patches;
6549 
6550 	num_patches = sizeof(patches)/sizeof(struct patch);
6551 	last_patch = &patches[num_patches];
6552 	cur_patch = *start_patch;
6553 
6554 	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
6555 
6556 		if (cur_patch->patch_func(ahc) == 0) {
6557 
6558 			/* Start rejecting code */
6559 			*skip_addr = start_instr + cur_patch->skip_instr;
6560 			cur_patch += cur_patch->skip_patch;
6561 		} else {
6562 			/* Accepted this patch.  Advance to the next
6563 			 * one and wait for our intruction pointer to
6564 			 * hit this point.
6565 			 */
6566 			cur_patch++;
6567 		}
6568 	}
6569 
6570 	*start_patch = cur_patch;
6571 	if (start_instr < *skip_addr)
6572 		/* Still skipping */
6573 		return (0);
6574 
6575 	return (1);
6576 }
6577 
6578 static void
6579 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
6580 {
6581 	union	ins_formats instr;
6582 	struct	ins_format1 *fmt1_ins;
6583 	struct	ins_format3 *fmt3_ins;
6584 	u_int	opcode;
6585 
6586 	/*
6587 	 * The firmware is always compiled into a little endian format.
6588 	 */
6589 	instr.integer = aic_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
6590 
6591 	fmt1_ins = &instr.format1;
6592 	fmt3_ins = NULL;
6593 
6594 	/* Pull the opcode */
6595 	opcode = instr.format1.opcode;
6596 	switch (opcode) {
6597 	case AIC_OP_JMP:
6598 	case AIC_OP_JC:
6599 	case AIC_OP_JNC:
6600 	case AIC_OP_CALL:
6601 	case AIC_OP_JNE:
6602 	case AIC_OP_JNZ:
6603 	case AIC_OP_JE:
6604 	case AIC_OP_JZ:
6605 	{
6606 		struct patch *cur_patch;
6607 		int address_offset;
6608 		u_int address;
6609 		u_int skip_addr;
6610 		u_int i;
6611 
6612 		fmt3_ins = &instr.format3;
6613 		address_offset = 0;
6614 		address = fmt3_ins->address;
6615 		cur_patch = patches;
6616 		skip_addr = 0;
6617 
6618 		for (i = 0; i < address;) {
6619 
6620 			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
6621 
6622 			if (skip_addr > i) {
6623 				int end_addr;
6624 
6625 				end_addr = MIN(address, skip_addr);
6626 				address_offset += end_addr - i;
6627 				i = skip_addr;
6628 			} else {
6629 				i++;
6630 			}
6631 		}
6632 		address -= address_offset;
6633 		fmt3_ins->address = address;
6634 		/* FALLTHROUGH */
6635 	}
6636 	case AIC_OP_OR:
6637 	case AIC_OP_AND:
6638 	case AIC_OP_XOR:
6639 	case AIC_OP_ADD:
6640 	case AIC_OP_ADC:
6641 	case AIC_OP_BMOV:
6642 		if (fmt1_ins->parity != 0) {
6643 			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
6644 		}
6645 		fmt1_ins->parity = 0;
6646 		if ((ahc->features & AHC_CMD_CHAN) == 0
6647 		 && opcode == AIC_OP_BMOV) {
6648 			/*
6649 			 * Block move was added at the same time
6650 			 * as the command channel.  Verify that
6651 			 * this is only a move of a single element
6652 			 * and convert the BMOV to a MOV
6653 			 * (AND with an immediate of FF).
6654 			 */
6655 			if (fmt1_ins->immediate != 1)
6656 				panic("%s: BMOV not supported\n",
6657 				      ahc_name(ahc));
6658 			fmt1_ins->opcode = AIC_OP_AND;
6659 			fmt1_ins->immediate = 0xff;
6660 		}
6661 		/* FALLTHROUGH */
6662 	case AIC_OP_ROL:
6663 		if ((ahc->features & AHC_ULTRA2) != 0) {
6664 			int i, count;
6665 
6666 			/* Calculate odd parity for the instruction */
6667 			for (i = 0, count = 0; i < 31; i++) {
6668 				uint32_t mask;
6669 
6670 				mask = 0x01 << i;
6671 				if ((instr.integer & mask) != 0)
6672 					count++;
6673 			}
6674 			if ((count & 0x01) == 0)
6675 				instr.format1.parity = 1;
6676 		} else {
6677 			/* Compress the instruction for older sequencers */
6678 			if (fmt3_ins != NULL) {
6679 				instr.integer =
6680 					fmt3_ins->immediate
6681 				      | (fmt3_ins->source << 8)
6682 				      | (fmt3_ins->address << 16)
6683 				      |	(fmt3_ins->opcode << 25);
6684 			} else {
6685 				instr.integer =
6686 					fmt1_ins->immediate
6687 				      | (fmt1_ins->source << 8)
6688 				      | (fmt1_ins->destination << 16)
6689 				      |	(fmt1_ins->ret << 24)
6690 				      |	(fmt1_ins->opcode << 25);
6691 			}
6692 		}
6693 		/* The sequencer is a little endian cpu */
6694 		instr.integer = aic_htole32(instr.integer);
6695 		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
6696 		break;
6697 	default:
6698 		panic("Unknown opcode encountered in seq program");
6699 		break;
6700 	}
6701 }
6702 
6703 int
6704 ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries,
6705 		   const char *name, u_int address, u_int value,
6706 		   u_int *cur_column, u_int wrap_point)
6707 {
6708 	int	printed;
6709 	u_int	printed_mask;
6710 	u_int	dummy_column;
6711 
6712 	if (cur_column == NULL) {
6713 		dummy_column = 0;
6714 		cur_column = &dummy_column;
6715 	}
6716 
6717 	if (*cur_column >= wrap_point) {
6718 		printf("\n");
6719 		*cur_column = 0;
6720 	}
6721 	printed = printf("%s[0x%x]", name, value);
6722 	if (table == NULL) {
6723 		printed += printf(" ");
6724 		*cur_column += printed;
6725 		return (printed);
6726 	}
6727 	printed_mask = 0;
6728 	while (printed_mask != 0xFF) {
6729 		int entry;
6730 
6731 		for (entry = 0; entry < num_entries; entry++) {
6732 			if (((value & table[entry].mask)
6733 			  != table[entry].value)
6734 			 || ((printed_mask & table[entry].mask)
6735 			  == table[entry].mask))
6736 				continue;
6737 
6738 			printed += printf("%s%s",
6739 					  printed_mask == 0 ? ":(" : "|",
6740 					  table[entry].name);
6741 			printed_mask |= table[entry].mask;
6742 
6743 			break;
6744 		}
6745 		if (entry >= num_entries)
6746 			break;
6747 	}
6748 	if (printed_mask != 0)
6749 		printed += printf(") ");
6750 	else
6751 		printed += printf(" ");
6752 	if (cur_column != NULL)
6753 		*cur_column += printed;
6754 	return (printed);
6755 }
6756 
6757 void
6758 ahc_dump_card_state(struct ahc_softc *ahc)
6759 {
6760 	struct	scb *scb;
6761 	struct	scb_tailq *untagged_q;
6762 	u_int	cur_col;
6763 	int	paused;
6764 	int	target;
6765 	int	maxtarget;
6766 	int	i;
6767 	uint8_t last_phase;
6768 	uint8_t qinpos;
6769 	uint8_t qintail;
6770 	uint8_t qoutpos;
6771 	uint8_t scb_index;
6772 	uint8_t saved_scbptr;
6773 
6774 	if (ahc_is_paused(ahc)) {
6775 		paused = 1;
6776 	} else {
6777 		paused = 0;
6778 		ahc_pause(ahc);
6779 	}
6780 
6781 	saved_scbptr = ahc_inb(ahc, SCBPTR);
6782 	last_phase = ahc_inb(ahc, LASTPHASE);
6783 	printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
6784 	       "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
6785 	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
6786 	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
6787 	if (paused)
6788 		printf("Card was paused\n");
6789 	printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
6790 	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
6791 	       ahc_inb(ahc, ARG_2));
6792 	printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
6793 	       ahc_inb(ahc, SCBPTR));
6794 	cur_col = 0;
6795 	if ((ahc->features & AHC_DT) != 0)
6796 		ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50);
6797 	ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50);
6798 	ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50);
6799 	ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50);
6800 	ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50);
6801 	ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50);
6802 	ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50);
6803 	ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50);
6804 	ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50);
6805 	ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50);
6806 	ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50);
6807 	ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50);
6808 	ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50);
6809 	ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50);
6810 	ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50);
6811 	ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50);
6812 	ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50);
6813 	ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50);
6814 	ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50);
6815 	if (cur_col != 0)
6816 		printf("\n");
6817 	printf("STACK:");
6818 	for (i = 0; i < STACK_SIZE; i++)
6819 	       printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8));
6820 	printf("\nSCB count = %d\n", ahc->scb_data->numscbs);
6821 	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
6822 	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
6823 	/* QINFIFO */
6824 	printf("QINFIFO entries: ");
6825 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6826 		qinpos = ahc_inb(ahc, SNSCB_QOFF);
6827 		ahc_outb(ahc, SNSCB_QOFF, qinpos);
6828 	} else
6829 		qinpos = ahc_inb(ahc, QINPOS);
6830 	qintail = ahc->qinfifonext;
6831 	while (qinpos != qintail) {
6832 		printf("%d ", ahc->qinfifo[qinpos]);
6833 		qinpos++;
6834 	}
6835 	printf("\n");
6836 
6837 	printf("Waiting Queue entries: ");
6838 	scb_index = ahc_inb(ahc, WAITING_SCBH);
6839 	i = 0;
6840 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6841 		ahc_outb(ahc, SCBPTR, scb_index);
6842 		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6843 		scb_index = ahc_inb(ahc, SCB_NEXT);
6844 	}
6845 	printf("\n");
6846 
6847 	printf("Disconnected Queue entries: ");
6848 	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
6849 	i = 0;
6850 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6851 		ahc_outb(ahc, SCBPTR, scb_index);
6852 		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6853 		scb_index = ahc_inb(ahc, SCB_NEXT);
6854 	}
6855 	printf("\n");
6856 
6857 	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
6858 	printf("QOUTFIFO entries: ");
6859 	qoutpos = ahc->qoutfifonext;
6860 	i = 0;
6861 	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
6862 		printf("%d ", ahc->qoutfifo[qoutpos]);
6863 		qoutpos++;
6864 	}
6865 	printf("\n");
6866 
6867 	printf("Sequencer Free SCB List: ");
6868 	scb_index = ahc_inb(ahc, FREE_SCBH);
6869 	i = 0;
6870 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6871 		ahc_outb(ahc, SCBPTR, scb_index);
6872 		printf("%d ", scb_index);
6873 		scb_index = ahc_inb(ahc, SCB_NEXT);
6874 	}
6875 	printf("\n");
6876 
6877 	printf("Sequencer SCB Info: ");
6878 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
6879 		ahc_outb(ahc, SCBPTR, i);
6880 		cur_col = printf("\n%3d ", i);
6881 
6882 		ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60);
6883 		ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60);
6884 		ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60);
6885 		ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
6886 	}
6887 	printf("\n");
6888 
6889 	printf("Pending list: ");
6890 	i = 0;
6891 	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6892 		if (i++ > 256)
6893 			break;
6894 		cur_col = printf("\n%3d ", scb->hscb->tag);
6895 		ahc_scb_control_print(scb->hscb->control, &cur_col, 60);
6896 		ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60);
6897 		ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60);
6898 		if ((ahc->flags & AHC_PAGESCBS) == 0) {
6899 			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
6900 			printf("(");
6901 			ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL),
6902 					      &cur_col, 60);
6903 			ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
6904 			printf(")");
6905 		}
6906 	}
6907 	printf("\n");
6908 
6909 	printf("Kernel Free SCB list: ");
6910 	i = 0;
6911 	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
6912 		if (i++ > 256)
6913 			break;
6914 		printf("%d ", scb->hscb->tag);
6915 	}
6916 	printf("\n");
6917 
6918 	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
6919 	for (target = 0; target <= maxtarget; target++) {
6920 		untagged_q = &ahc->untagged_queues[target];
6921 		if (TAILQ_FIRST(untagged_q) == NULL)
6922 			continue;
6923 		printf("Untagged Q(%d): ", target);
6924 		i = 0;
6925 		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
6926 			if (i++ > 256)
6927 				break;
6928 			printf("%d ", scb->hscb->tag);
6929 		}
6930 		printf("\n");
6931 	}
6932 
6933 	ahc_platform_dump_card_state(ahc);
6934 	printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
6935 	ahc_outb(ahc, SCBPTR, saved_scbptr);
6936 	if (paused == 0)
6937 		ahc_unpause(ahc);
6938 }
6939 
6940 /*************************** Timeout Handling *********************************/
6941 void
6942 ahc_timeout(struct scb *scb)
6943 {
6944 	struct ahc_softc *ahc;
6945 
6946 	ahc = scb->ahc_softc;
6947 	if ((scb->flags & SCB_ACTIVE) != 0) {
6948 		if ((scb->flags & SCB_TIMEDOUT) == 0) {
6949 			LIST_INSERT_HEAD(&ahc->timedout_scbs, scb,
6950 					 timedout_links);
6951 			scb->flags |= SCB_TIMEDOUT;
6952 		}
6953 		ahc_wakeup_recovery_thread(ahc);
6954 	}
6955 }
6956 
6957 /*
6958  * Re-schedule a timeout for the passed in SCB if we determine that some
6959  * other SCB is in the process of recovery or an SCB with a longer
6960  * timeout is still pending.  Limit our search to just "other_scb"
6961  * if it is non-NULL.
6962  */
6963 static int
6964 ahc_other_scb_timeout(struct ahc_softc *ahc, struct scb *scb,
6965 		      struct scb *other_scb)
6966 {
6967 	u_int	newtimeout;
6968 	int	found;
6969 
6970 	ahc_print_path(ahc, scb);
6971 	printf("Other SCB Timeout%s",
6972  	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
6973 	       ? " again\n" : "\n");
6974 
6975 	newtimeout = aic_get_timeout(scb);
6976 	scb->flags |= SCB_OTHERTCL_TIMEOUT;
6977 	found = 0;
6978 	if (other_scb != NULL) {
6979 		if ((other_scb->flags
6980 		   & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0
6981 		 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) {
6982 			found++;
6983 			newtimeout = MAX(aic_get_timeout(other_scb),
6984 					 newtimeout);
6985 		}
6986 	} else {
6987 		LIST_FOREACH(other_scb, &ahc->pending_scbs, pending_links) {
6988 			if ((other_scb->flags
6989 			   & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0
6990 			 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) {
6991 				found++;
6992 				newtimeout =
6993 				    MAX(aic_get_timeout(other_scb),
6994 					newtimeout);
6995 			}
6996 		}
6997 	}
6998 
6999 	if (found != 0)
7000 		aic_scb_timer_reset(scb, newtimeout);
7001 	else {
7002 		ahc_print_path(ahc, scb);
7003 		printf("No other SCB worth waiting for...\n");
7004 	}
7005 
7006 	return (found != 0);
7007 }
7008 
7009 /*
7010  * ahc_recover_commands determines if any of the commands that have currently
7011  * timedout are the root cause for this timeout.  Innocent commands are given
7012  * a new timeout while we wait for the command executing on the bus to timeout.
7013  * This routine is invoked from a thread context so we are allowed to sleep.
7014  * Our lock is not held on entry.
7015  */
7016 void
7017 ahc_recover_commands(struct ahc_softc *ahc)
7018 {
7019 	struct	scb *scb;
7020 	long	s;
7021 	int	found;
7022 	int	restart_needed;
7023 	u_int	last_phase;
7024 
7025 	ahc_lock(ahc, &s);
7026 
7027 	/*
7028 	 * Pause the controller and manually flush any
7029 	 * commands that have just completed but that our
7030 	 * interrupt handler has yet to see.
7031 	 */
7032 	ahc_pause_and_flushwork(ahc);
7033 
7034 	if (LIST_EMPTY(&ahc->timedout_scbs) != 0) {
7035 		/*
7036 		 * The timedout commands have already
7037 		 * completed.  This typically means
7038 		 * that either the timeout value was on
7039 		 * the hairy edge of what the device
7040 		 * requires or - more likely - interrupts
7041 		 * are not happening.
7042 		 */
7043 		printf("%s: Timedout SCBs already complete. "
7044 		       "Interrupts may not be functioning.\n", ahc_name(ahc));
7045 		ahc_unpause(ahc);
7046 		ahc_unlock(ahc, &s);
7047 		return;
7048 	}
7049 
7050 	restart_needed = 0;
7051 	printf("%s: Recovery Initiated\n", ahc_name(ahc));
7052 	ahc_dump_card_state(ahc);
7053 
7054 	last_phase = ahc_inb(ahc, LASTPHASE);
7055 	while ((scb = LIST_FIRST(&ahc->timedout_scbs)) != NULL) {
7056 		u_int	active_scb_index;
7057 		u_int	saved_scbptr;
7058 		int	target;
7059 		int	lun;
7060 		int	i;
7061 		char	channel;
7062 
7063 		target = SCB_GET_TARGET(ahc, scb);
7064 		channel = SCB_GET_CHANNEL(ahc, scb);
7065 		lun = SCB_GET_LUN(scb);
7066 
7067 		ahc_print_path(ahc, scb);
7068 		printf("SCB 0x%x - timed out\n", scb->hscb->tag);
7069 		if (scb->sg_count > 0) {
7070 			for (i = 0; i < scb->sg_count; i++) {
7071 				printf("sg[%d] - Addr 0x%x : Length %d\n",
7072 				       i,
7073 				       scb->sg_list[i].addr,
7074 				       scb->sg_list[i].len & AHC_SG_LEN_MASK);
7075 			}
7076 		}
7077 		if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
7078 			/*
7079 			 * Been down this road before.
7080 			 * Do a full bus reset.
7081 			 */
7082 			aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);
7083 bus_reset:
7084 			found = ahc_reset_channel(ahc, channel,
7085 						  /*Initiate Reset*/TRUE);
7086 			printf("%s: Issued Channel %c Bus Reset. "
7087 			       "%d SCBs aborted\n", ahc_name(ahc), channel,
7088 			       found);
7089 			continue;
7090 		}
7091 
7092 		/*
7093 		 * Remove the command from the timedout list in
7094 		 * preparation for requeing it.
7095 		 */
7096 		LIST_REMOVE(scb, timedout_links);
7097 		scb->flags &= ~SCB_TIMEDOUT;
7098 
7099 		/*
7100 		 * If we are a target, transition to bus free and report
7101 		 * the timeout.
7102 		 *
7103 		 * The target/initiator that is holding up the bus may not
7104 		 * be the same as the one that triggered this timeout
7105 		 * (different commands have different timeout lengths).
7106 		 * If the bus is idle and we are actiing as the initiator
7107 		 * for this request, queue a BDR message to the timed out
7108 		 * target.  Otherwise, if the timed out transaction is
7109 		 * active:
7110 		 *   Initiator transaction:
7111 		 *	Stuff the message buffer with a BDR message and assert
7112 		 *	ATN in the hopes that the target will let go of the bus
7113 		 *	and go to the mesgout phase.  If this fails, we'll
7114 		 *	get another timeout 2 seconds later which will attempt
7115 		 *	a bus reset.
7116 		 *
7117 		 *   Target transaction:
7118 		 *	Transition to BUS FREE and report the error.
7119 		 *	It's good to be the target!
7120 		 */
7121 		saved_scbptr = ahc_inb(ahc, SCBPTR);
7122 		active_scb_index = ahc_inb(ahc, SCB_TAG);
7123 
7124 		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
7125 		  && (active_scb_index < ahc->scb_data->numscbs)) {
7126 			struct scb *active_scb;
7127 
7128 			/*
7129 			 * If the active SCB is not us, assume that
7130 			 * the active SCB has a longer timeout than
7131 			 * the timedout SCB, and wait for the active
7132 			 * SCB to timeout.
7133 			 */
7134 			active_scb = ahc_lookup_scb(ahc, active_scb_index);
7135 			if (active_scb != scb) {
7136 				if (ahc_other_scb_timeout(ahc, scb,
7137 							  active_scb) == 0)
7138 					goto bus_reset;
7139 				continue;
7140 			}
7141 
7142 			/* It's us */
7143 			if ((scb->flags & SCB_TARGET_SCB) != 0) {
7144 
7145 				/*
7146 				 * Send back any queued up transactions
7147 				 * and properly record the error condition.
7148 				 */
7149 				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
7150 					       SCB_GET_CHANNEL(ahc, scb),
7151 					       SCB_GET_LUN(scb),
7152 					       scb->hscb->tag,
7153 					       ROLE_TARGET,
7154 					       CAM_CMD_TIMEOUT);
7155 
7156 				/* Will clear us from the bus */
7157 				restart_needed = 1;
7158 				break;
7159 			}
7160 
7161 			ahc_set_recoveryscb(ahc, active_scb);
7162 			ahc_outb(ahc, MSG_OUT, HOST_MSG);
7163 			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
7164 			ahc_print_path(ahc, active_scb);
7165 			printf("BDR message in message buffer\n");
7166 			active_scb->flags |= SCB_DEVICE_RESET;
7167 			aic_scb_timer_reset(scb, 2 * 1000);
7168 		} else if (last_phase != P_BUSFREE
7169 			&& (ahc_inb(ahc, SSTAT1) & REQINIT) == 0) {
7170 			/*
7171 			 * SCB is not identified, there
7172 			 * is no pending REQ, and the sequencer
7173 			 * has not seen a busfree.  Looks like
7174 			 * a stuck connection waiting to
7175 			 * go busfree.  Reset the bus.
7176 			 */
7177 			printf("%s: Connection stuck awaiting busfree or "
7178 			       "Identify Msg.\n", ahc_name(ahc));
7179 			goto bus_reset;
7180 		} else {
7181 			int	 disconnected;
7182 
7183 			if (last_phase != P_BUSFREE
7184 			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
7185 				/* Hung target selection.  Goto busfree */
7186 				printf("%s: Hung target selection\n",
7187 				       ahc_name(ahc));
7188 				restart_needed = 1;
7189 				break;
7190 			}
7191 
7192 			/* XXX Shouldn't panic.  Just punt instead? */
7193 			if ((scb->flags & SCB_TARGET_SCB) != 0)
7194 				panic("Timed-out target SCB but bus idle");
7195 
7196 			if (ahc_search_qinfifo(ahc, target, channel, lun,
7197 					       scb->hscb->tag, ROLE_INITIATOR,
7198 					       /*status*/0, SEARCH_COUNT) > 0) {
7199 				disconnected = FALSE;
7200 			} else {
7201 				disconnected = TRUE;
7202 			}
7203 
7204 			if (disconnected) {
7205 
7206 				ahc_set_recoveryscb(ahc, scb);
7207 				/*
7208 				 * Actually re-queue this SCB in an attempt
7209 				 * to select the device before it reconnects.
7210 				 * In either case (selection or reselection),
7211 				 * we will now issue a target reset to the
7212 				 * timed-out device.
7213 				 *
7214 				 * Set the MK_MESSAGE control bit indicating
7215 				 * that we desire to send a message.  We
7216 				 * also set the disconnected flag since
7217 				 * in the paging case there is no guarantee
7218 				 * that our SCB control byte matches the
7219 				 * version on the card.  We don't want the
7220 				 * sequencer to abort the command thinking
7221 				 * an unsolicited reselection occurred.
7222 				 */
7223 				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
7224 				scb->flags |= SCB_DEVICE_RESET;
7225 
7226 				/*
7227 				 * Remove any cached copy of this SCB in the
7228 				 * disconnected list in preparation for the
7229 				 * queuing of our abort SCB.  We use the
7230 				 * same element in the SCB, SCB_NEXT, for
7231 				 * both the qinfifo and the disconnected list.
7232 				 */
7233 				ahc_search_disc_list(ahc, target, channel,
7234 						     lun, scb->hscb->tag,
7235 						     /*stop_on_first*/TRUE,
7236 						     /*remove*/TRUE,
7237 						     /*save_state*/FALSE);
7238 
7239 				/*
7240 				 * In the non-paging case, the sequencer will
7241 				 * never re-reference the in-core SCB.
7242 				 * To make sure we are notified during
7243 				 * reslection, set the MK_MESSAGE flag in
7244 				 * the card's copy of the SCB.
7245 				 */
7246 				if ((ahc->flags & AHC_PAGESCBS) == 0) {
7247 					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
7248 					ahc_outb(ahc, SCB_CONTROL,
7249 						 ahc_inb(ahc, SCB_CONTROL)
7250 						| MK_MESSAGE);
7251 				}
7252 
7253 				/*
7254 				 * Clear out any entries in the QINFIFO first
7255 				 * so we are the next SCB for this target
7256 				 * to run.
7257 				 */
7258 				ahc_search_qinfifo(ahc,
7259 						   SCB_GET_TARGET(ahc, scb),
7260 						   channel, SCB_GET_LUN(scb),
7261 						   SCB_LIST_NULL,
7262 						   ROLE_INITIATOR,
7263 						   CAM_REQUEUE_REQ,
7264 						   SEARCH_COMPLETE);
7265 				ahc_print_path(ahc, scb);
7266 				printf("Queuing a BDR SCB\n");
7267 				ahc_qinfifo_requeue_tail(ahc, scb);
7268 				ahc_outb(ahc, SCBPTR, saved_scbptr);
7269 				aic_scb_timer_reset(scb, 2 * 1000);
7270 			} else {
7271 				/* Go "immediatly" to the bus reset */
7272 				/* This shouldn't happen */
7273 				ahc_set_recoveryscb(ahc, scb);
7274 				ahc_print_path(ahc, scb);
7275 				printf("SCB %d: Immediate reset.  "
7276 					"Flags = 0x%x\n", scb->hscb->tag,
7277 					scb->flags);
7278 				goto bus_reset;
7279 			}
7280 		}
7281 		break;
7282 	}
7283 
7284 	/*
7285 	 * Any remaining SCBs were not the "culprit", so remove
7286 	 * them from the timeout list.  The timer for these commands
7287 	 * will be reset once the recovery SCB completes.
7288 	 */
7289 	while ((scb = LIST_FIRST(&ahc->timedout_scbs)) != NULL) {
7290 
7291 		LIST_REMOVE(scb, timedout_links);
7292 		scb->flags &= ~SCB_TIMEDOUT;
7293 	}
7294 
7295 	if (restart_needed)
7296 		ahc_restart(ahc);
7297 	else
7298 		ahc_unpause(ahc);
7299 	ahc_unlock(ahc, &s);
7300 }
7301 
7302 /************************* Target Mode ****************************************/
7303 #ifdef AHC_TARGET_MODE
7304 cam_status
7305 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
7306 		    struct ahc_tmode_tstate **tstate,
7307 		    struct ahc_tmode_lstate **lstate,
7308 		    int notfound_failure)
7309 {
7310 
7311 	if ((ahc->features & AHC_TARGETMODE) == 0)
7312 		return (CAM_REQ_INVALID);
7313 
7314 	/*
7315 	 * Handle the 'black hole' device that sucks up
7316 	 * requests to unattached luns on enabled targets.
7317 	 */
7318 	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
7319 	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
7320 		*tstate = NULL;
7321 		*lstate = ahc->black_hole;
7322 	} else {
7323 		u_int max_id;
7324 
7325 		max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
7326 		if (ccb->ccb_h.target_id > max_id)
7327 			return (CAM_TID_INVALID);
7328 
7329 		if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
7330 			return (CAM_LUN_INVALID);
7331 
7332 		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
7333 		*lstate = NULL;
7334 		if (*tstate != NULL)
7335 			*lstate =
7336 			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
7337 	}
7338 
7339 	if (notfound_failure != 0 && *lstate == NULL)
7340 		return (CAM_PATH_INVALID);
7341 
7342 	return (CAM_REQ_CMP);
7343 }
7344 
7345 void
7346 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
7347 {
7348 	struct	   ahc_tmode_tstate *tstate;
7349 	struct	   ahc_tmode_lstate *lstate;
7350 	struct	   ccb_en_lun *cel;
7351 	cam_status status;
7352 	u_long	   s;
7353 	u_int	   target;
7354 	u_int	   lun;
7355 	u_int	   target_mask;
7356 	u_int	   our_id;
7357 	int	   error;
7358 	char	   channel;
7359 
7360 	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
7361 				     /*notfound_failure*/FALSE);
7362 
7363 	if (status != CAM_REQ_CMP) {
7364 		ccb->ccb_h.status = status;
7365 		return;
7366 	}
7367 
7368 	if (cam_sim_bus(sim) == 0)
7369 		our_id = ahc->our_id;
7370 	else
7371 		our_id = ahc->our_id_b;
7372 
7373 	if (ccb->ccb_h.target_id != our_id) {
7374 		/*
7375 		 * our_id represents our initiator ID, or
7376 		 * the ID of the first target to have an
7377 		 * enabled lun in target mode.  There are
7378 		 * two cases that may preclude enabling a
7379 		 * target id other than our_id.
7380 		 *
7381 		 *   o our_id is for an active initiator role.
7382 		 *     Since the hardware does not support
7383 		 *     reselections to the initiator role at
7384 		 *     anything other than our_id, and our_id
7385 		 *     is used by the hardware to indicate the
7386 		 *     ID to use for both select-out and
7387 		 *     reselect-out operations, the only target
7388 		 *     ID we can support in this mode is our_id.
7389 		 *
7390 		 *   o The MULTARGID feature is not available and
7391 		 *     a previous target mode ID has been enabled.
7392 		 */
7393 		if ((ahc->features & AHC_MULTIROLE) != 0) {
7394 
7395 			if ((ahc->features & AHC_MULTI_TID) != 0
7396 		   	 && (ahc->flags & AHC_INITIATORROLE) != 0) {
7397 				/*
7398 				 * Only allow additional targets if
7399 				 * the initiator role is disabled.
7400 				 * The hardware cannot handle a re-select-in
7401 				 * on the initiator id during a re-select-out
7402 				 * on a different target id.
7403 				 */
7404 				status = CAM_TID_INVALID;
7405 			} else if ((ahc->flags & AHC_INITIATORROLE) != 0
7406 				|| ahc->enabled_luns > 0) {
7407 				/*
7408 				 * Only allow our target id to change
7409 				 * if the initiator role is not configured
7410 				 * and there are no enabled luns which
7411 				 * are attached to the currently registered
7412 				 * scsi id.
7413 				 */
7414 				status = CAM_TID_INVALID;
7415 			}
7416 		} else if ((ahc->features & AHC_MULTI_TID) == 0
7417 			&& ahc->enabled_luns > 0) {
7418 
7419 			status = CAM_TID_INVALID;
7420 		}
7421 	}
7422 
7423 	if (status != CAM_REQ_CMP) {
7424 		ccb->ccb_h.status = status;
7425 		return;
7426 	}
7427 
7428 	/*
7429 	 * We now have an id that is valid.
7430 	 * If we aren't in target mode, switch modes.
7431 	 */
7432 	if ((ahc->flags & AHC_TARGETROLE) == 0
7433 	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
7434 		u_long	 s;
7435 		ahc_flag saved_flags;
7436 
7437 		printf("Configuring Target Mode\n");
7438 		ahc_lock(ahc, &s);
7439 		if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
7440 			ccb->ccb_h.status = CAM_BUSY;
7441 			ahc_unlock(ahc, &s);
7442 			return;
7443 		}
7444 		saved_flags = ahc->flags;
7445 		ahc->flags |= AHC_TARGETROLE;
7446 		if ((ahc->features & AHC_MULTIROLE) == 0)
7447 			ahc->flags &= ~AHC_INITIATORROLE;
7448 		ahc_pause(ahc);
7449 		error = ahc_loadseq(ahc);
7450 		if (error != 0) {
7451 			/*
7452 			 * Restore original configuration and notify
7453 			 * the caller that we cannot support target mode.
7454 			 * Since the adapter started out in this
7455 			 * configuration, the firmware load will succeed,
7456 			 * so there is no point in checking ahc_loadseq's
7457 			 * return value.
7458 			 */
7459 			ahc->flags = saved_flags;
7460 			(void)ahc_loadseq(ahc);
7461 			ahc_restart(ahc);
7462 			ahc_unlock(ahc, &s);
7463 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
7464 			return;
7465 		}
7466 		ahc_restart(ahc);
7467 		ahc_unlock(ahc, &s);
7468 	}
7469 	cel = &ccb->cel;
7470 	target = ccb->ccb_h.target_id;
7471 	lun = ccb->ccb_h.target_lun;
7472 	channel = SIM_CHANNEL(ahc, sim);
7473 	target_mask = 0x01 << target;
7474 	if (channel == 'B')
7475 		target_mask <<= 8;
7476 
7477 	if (cel->enable != 0) {
7478 		u_int scsiseq;
7479 
7480 		/* Are we already enabled?? */
7481 		if (lstate != NULL) {
7482 			xpt_print_path(ccb->ccb_h.path);
7483 			printf("Lun already enabled\n");
7484 			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
7485 			return;
7486 		}
7487 
7488 		if (cel->grp6_len != 0
7489 		 || cel->grp7_len != 0) {
7490 			/*
7491 			 * Don't (yet?) support vendor
7492 			 * specific commands.
7493 			 */
7494 			ccb->ccb_h.status = CAM_REQ_INVALID;
7495 			printf("Non-zero Group Codes\n");
7496 			return;
7497 		}
7498 
7499 		/*
7500 		 * Seems to be okay.
7501 		 * Setup our data structures.
7502 		 */
7503 		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
7504 			tstate = ahc_alloc_tstate(ahc, target, channel);
7505 			if (tstate == NULL) {
7506 				xpt_print_path(ccb->ccb_h.path);
7507 				printf("Couldn't allocate tstate\n");
7508 				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7509 				return;
7510 			}
7511 		}
7512 		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
7513 		if (lstate == NULL) {
7514 			xpt_print_path(ccb->ccb_h.path);
7515 			printf("Couldn't allocate lstate\n");
7516 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7517 			return;
7518 		}
7519 		memset(lstate, 0, sizeof(*lstate));
7520 		status = xpt_create_path(&lstate->path, /*periph*/NULL,
7521 					 xpt_path_path_id(ccb->ccb_h.path),
7522 					 xpt_path_target_id(ccb->ccb_h.path),
7523 					 xpt_path_lun_id(ccb->ccb_h.path));
7524 		if (status != CAM_REQ_CMP) {
7525 			free(lstate, M_DEVBUF);
7526 			xpt_print_path(ccb->ccb_h.path);
7527 			printf("Couldn't allocate path\n");
7528 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7529 			return;
7530 		}
7531 		SLIST_INIT(&lstate->accept_tios);
7532 		SLIST_INIT(&lstate->immed_notifies);
7533 		ahc_lock(ahc, &s);
7534 		ahc_pause(ahc);
7535 		if (target != CAM_TARGET_WILDCARD) {
7536 			tstate->enabled_luns[lun] = lstate;
7537 			ahc->enabled_luns++;
7538 
7539 			if ((ahc->features & AHC_MULTI_TID) != 0) {
7540 				u_int targid_mask;
7541 
7542 				targid_mask = ahc_inb(ahc, TARGID)
7543 					    | (ahc_inb(ahc, TARGID + 1) << 8);
7544 
7545 				targid_mask |= target_mask;
7546 				ahc_outb(ahc, TARGID, targid_mask);
7547 				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
7548 
7549 				ahc_update_scsiid(ahc, targid_mask);
7550 			} else {
7551 				u_int our_id;
7552 				char  channel;
7553 
7554 				channel = SIM_CHANNEL(ahc, sim);
7555 				our_id = SIM_SCSI_ID(ahc, sim);
7556 
7557 				/*
7558 				 * This can only happen if selections
7559 				 * are not enabled
7560 				 */
7561 				if (target != our_id) {
7562 					u_int sblkctl;
7563 					char  cur_channel;
7564 					int   swap;
7565 
7566 					sblkctl = ahc_inb(ahc, SBLKCTL);
7567 					cur_channel = (sblkctl & SELBUSB)
7568 						    ? 'B' : 'A';
7569 					if ((ahc->features & AHC_TWIN) == 0)
7570 						cur_channel = 'A';
7571 					swap = cur_channel != channel;
7572 					if (channel == 'A')
7573 						ahc->our_id = target;
7574 					else
7575 						ahc->our_id_b = target;
7576 
7577 					if (swap)
7578 						ahc_outb(ahc, SBLKCTL,
7579 							 sblkctl ^ SELBUSB);
7580 
7581 					ahc_outb(ahc, SCSIID, target);
7582 
7583 					if (swap)
7584 						ahc_outb(ahc, SBLKCTL, sblkctl);
7585 				}
7586 			}
7587 		} else
7588 			ahc->black_hole = lstate;
7589 		/* Allow select-in operations */
7590 		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
7591 			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
7592 			scsiseq |= ENSELI;
7593 			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
7594 			scsiseq = ahc_inb(ahc, SCSISEQ);
7595 			scsiseq |= ENSELI;
7596 			ahc_outb(ahc, SCSISEQ, scsiseq);
7597 		}
7598 		ahc_unpause(ahc);
7599 		ahc_unlock(ahc, &s);
7600 		ccb->ccb_h.status = CAM_REQ_CMP;
7601 		xpt_print_path(ccb->ccb_h.path);
7602 		printf("Lun now enabled for target mode\n");
7603 	} else {
7604 		struct scb *scb;
7605 		int i, empty;
7606 
7607 		if (lstate == NULL) {
7608 			ccb->ccb_h.status = CAM_LUN_INVALID;
7609 			return;
7610 		}
7611 
7612 		ahc_lock(ahc, &s);
7613 
7614 		ccb->ccb_h.status = CAM_REQ_CMP;
7615 		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
7616 			struct ccb_hdr *ccbh;
7617 
7618 			ccbh = &scb->io_ctx->ccb_h;
7619 			if (ccbh->func_code == XPT_CONT_TARGET_IO
7620 			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
7621 				printf("CTIO pending\n");
7622 				ccb->ccb_h.status = CAM_REQ_INVALID;
7623 				ahc_unlock(ahc, &s);
7624 				return;
7625 			}
7626 		}
7627 
7628 		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
7629 			printf("ATIOs pending\n");
7630 			ccb->ccb_h.status = CAM_REQ_INVALID;
7631 		}
7632 
7633 		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
7634 			printf("INOTs pending\n");
7635 			ccb->ccb_h.status = CAM_REQ_INVALID;
7636 		}
7637 
7638 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
7639 			ahc_unlock(ahc, &s);
7640 			return;
7641 		}
7642 
7643 		xpt_print_path(ccb->ccb_h.path);
7644 		printf("Target mode disabled\n");
7645 		xpt_free_path(lstate->path);
7646 		free(lstate, M_DEVBUF);
7647 
7648 		ahc_pause(ahc);
7649 		/* Can we clean up the target too? */
7650 		if (target != CAM_TARGET_WILDCARD) {
7651 			tstate->enabled_luns[lun] = NULL;
7652 			ahc->enabled_luns--;
7653 			for (empty = 1, i = 0; i < 8; i++)
7654 				if (tstate->enabled_luns[i] != NULL) {
7655 					empty = 0;
7656 					break;
7657 				}
7658 
7659 			if (empty) {
7660 				ahc_free_tstate(ahc, target, channel,
7661 						/*force*/FALSE);
7662 				if (ahc->features & AHC_MULTI_TID) {
7663 					u_int targid_mask;
7664 
7665 					targid_mask = ahc_inb(ahc, TARGID)
7666 						    | (ahc_inb(ahc, TARGID + 1)
7667 						       << 8);
7668 
7669 					targid_mask &= ~target_mask;
7670 					ahc_outb(ahc, TARGID, targid_mask);
7671 					ahc_outb(ahc, TARGID+1,
7672 					 	 (targid_mask >> 8));
7673 					ahc_update_scsiid(ahc, targid_mask);
7674 				}
7675 			}
7676 		} else {
7677 
7678 			ahc->black_hole = NULL;
7679 
7680 			/*
7681 			 * We can't allow selections without
7682 			 * our black hole device.
7683 			 */
7684 			empty = TRUE;
7685 		}
7686 		if (ahc->enabled_luns == 0) {
7687 			/* Disallow select-in */
7688 			u_int scsiseq;
7689 
7690 			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
7691 			scsiseq &= ~ENSELI;
7692 			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
7693 			scsiseq = ahc_inb(ahc, SCSISEQ);
7694 			scsiseq &= ~ENSELI;
7695 			ahc_outb(ahc, SCSISEQ, scsiseq);
7696 
7697 			if ((ahc->features & AHC_MULTIROLE) == 0) {
7698 				printf("Configuring Initiator Mode\n");
7699 				ahc->flags &= ~AHC_TARGETROLE;
7700 				ahc->flags |= AHC_INITIATORROLE;
7701 				/*
7702 				 * Returning to a configuration that
7703 				 * fit previously will always succeed.
7704 				 */
7705 				(void)ahc_loadseq(ahc);
7706 				ahc_restart(ahc);
7707 				/*
7708 				 * Unpaused.  The extra unpause
7709 				 * that follows is harmless.
7710 				 */
7711 			}
7712 		}
7713 		ahc_unpause(ahc);
7714 		ahc_unlock(ahc, &s);
7715 	}
7716 }
7717 
7718 static void
7719 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
7720 {
7721 	u_int scsiid_mask;
7722 	u_int scsiid;
7723 
7724 	if ((ahc->features & AHC_MULTI_TID) == 0)
7725 		panic("ahc_update_scsiid called on non-multitid unit\n");
7726 
7727 	/*
7728 	 * Since we will rely on the TARGID mask
7729 	 * for selection enables, ensure that OID
7730 	 * in SCSIID is not set to some other ID
7731 	 * that we don't want to allow selections on.
7732 	 */
7733 	if ((ahc->features & AHC_ULTRA2) != 0)
7734 		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
7735 	else
7736 		scsiid = ahc_inb(ahc, SCSIID);
7737 	scsiid_mask = 0x1 << (scsiid & OID);
7738 	if ((targid_mask & scsiid_mask) == 0) {
7739 		u_int our_id;
7740 
7741 		/* ffs counts from 1 */
7742 		our_id = ffs(targid_mask);
7743 		if (our_id == 0)
7744 			our_id = ahc->our_id;
7745 		else
7746 			our_id--;
7747 		scsiid &= TID;
7748 		scsiid |= our_id;
7749 	}
7750 	if ((ahc->features & AHC_ULTRA2) != 0)
7751 		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
7752 	else
7753 		ahc_outb(ahc, SCSIID, scsiid);
7754 }
7755 
7756 void
7757 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
7758 {
7759 	struct target_cmd *cmd;
7760 
7761 	/*
7762 	 * If the card supports auto-access pause,
7763 	 * we can access the card directly regardless
7764 	 * of whether it is paused or not.
7765 	 */
7766 	if ((ahc->features & AHC_AUTOPAUSE) != 0)
7767 		paused = TRUE;
7768 
7769 	ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
7770 	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
7771 
7772 		/*
7773 		 * Only advance through the queue if we
7774 		 * have the resources to process the command.
7775 		 */
7776 		if (ahc_handle_target_cmd(ahc, cmd) != 0)
7777 			break;
7778 
7779 		cmd->cmd_valid = 0;
7780 		aic_dmamap_sync(ahc, ahc->shared_data_dmat,
7781 				ahc->shared_data_dmamap,
7782 				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
7783 				sizeof(struct target_cmd),
7784 				BUS_DMASYNC_PREREAD);
7785 		ahc->tqinfifonext++;
7786 
7787 		/*
7788 		 * Lazily update our position in the target mode incoming
7789 		 * command queue as seen by the sequencer.
7790 		 */
7791 		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
7792 			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
7793 				u_int hs_mailbox;
7794 
7795 				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
7796 				hs_mailbox &= ~HOST_TQINPOS;
7797 				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
7798 				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
7799 			} else {
7800 				if (!paused)
7801 					ahc_pause(ahc);
7802 				ahc_outb(ahc, KERNEL_TQINPOS,
7803 					 ahc->tqinfifonext & HOST_TQINPOS);
7804 				if (!paused)
7805 					ahc_unpause(ahc);
7806 			}
7807 		}
7808 	}
7809 }
7810 
7811 static int
7812 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
7813 {
7814 	struct	  ahc_tmode_tstate *tstate;
7815 	struct	  ahc_tmode_lstate *lstate;
7816 	struct	  ccb_accept_tio *atio;
7817 	uint8_t *byte;
7818 	int	  initiator;
7819 	int	  target;
7820 	int	  lun;
7821 
7822 	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
7823 	target = SCSIID_OUR_ID(cmd->scsiid);
7824 	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
7825 
7826 	byte = cmd->bytes;
7827 	tstate = ahc->enabled_targets[target];
7828 	lstate = NULL;
7829 	if (tstate != NULL)
7830 		lstate = tstate->enabled_luns[lun];
7831 
7832 	/*
7833 	 * Commands for disabled luns go to the black hole driver.
7834 	 */
7835 	if (lstate == NULL)
7836 		lstate = ahc->black_hole;
7837 
7838 	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
7839 	if (atio == NULL) {
7840 		ahc->flags |= AHC_TQINFIFO_BLOCKED;
7841 		/*
7842 		 * Wait for more ATIOs from the peripheral driver for this lun.
7843 		 */
7844 		if (bootverbose)
7845 			printf("%s: ATIOs exhausted\n", ahc_name(ahc));
7846 		return (1);
7847 	} else
7848 		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
7849 #ifdef AHC_DEBUG
7850 	if (ahc_debug & AHC_SHOW_TQIN) {
7851 		printf("Incoming command from %d for %d:%d%s\n",
7852 		       initiator, target, lun,
7853 		       lstate == ahc->black_hole ? "(Black Holed)" : "");
7854 	}
7855 #endif
7856 	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
7857 
7858 	if (lstate == ahc->black_hole) {
7859 		/* Fill in the wildcards */
7860 		atio->ccb_h.target_id = target;
7861 		atio->ccb_h.target_lun = lun;
7862 	}
7863 
7864 	/*
7865 	 * Package it up and send it off to
7866 	 * whomever has this lun enabled.
7867 	 */
7868 	atio->sense_len = 0;
7869 	atio->init_id = initiator;
7870 	if (byte[0] != 0xFF) {
7871 		/* Tag was included */
7872 		atio->tag_action = *byte++;
7873 		atio->tag_id = *byte++;
7874 		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
7875 	} else {
7876 		atio->ccb_h.flags = 0;
7877 	}
7878 	byte++;
7879 
7880 	/* Okay.  Now determine the cdb size based on the command code */
7881 	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
7882 	case 0:
7883 		atio->cdb_len = 6;
7884 		break;
7885 	case 1:
7886 	case 2:
7887 		atio->cdb_len = 10;
7888 		break;
7889 	case 4:
7890 		atio->cdb_len = 16;
7891 		break;
7892 	case 5:
7893 		atio->cdb_len = 12;
7894 		break;
7895 	case 3:
7896 	default:
7897 		/* Only copy the opcode. */
7898 		atio->cdb_len = 1;
7899 		printf("Reserved or VU command code type encountered\n");
7900 		break;
7901 	}
7902 
7903 	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
7904 
7905 	atio->ccb_h.status |= CAM_CDB_RECVD;
7906 
7907 	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
7908 		/*
7909 		 * We weren't allowed to disconnect.
7910 		 * We're hanging on the bus until a
7911 		 * continue target I/O comes in response
7912 		 * to this accept tio.
7913 		 */
7914 #ifdef AHC_DEBUG
7915 		if (ahc_debug & AHC_SHOW_TQIN) {
7916 			printf("Received Immediate Command %d:%d:%d - %p\n",
7917 			       initiator, target, lun, ahc->pending_device);
7918 		}
7919 #endif
7920 		ahc->pending_device = lstate;
7921 		aic_freeze_ccb((union ccb *)atio);
7922 		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
7923 	}
7924 	xpt_done((union ccb*)atio);
7925 	return (0);
7926 }
7927 
7928 #endif
7929