xref: /freebsd/sys/dev/aic7xxx/aic7xxx.c (revision 4b2eaea43fec8e8792be611dea204071a10b655a)
1 /*
2  * Core routines and tables shareable across OS platforms.
3  *
4  * Copyright (c) 1994-2002 Justin T. Gibbs.
5  * Copyright (c) 2000-2002 Adaptec Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15  *    substantially similar to the "NO WARRANTY" disclaimer below
16  *    ("Disclaimer") and any redistribution must be conditioned upon
17  *    including a substantially similar Disclaimer requirement for further
18  *    binary redistribution.
19  * 3. Neither the names of the above-listed copyright holders nor the names
20  *    of any contributors may be used to endorse or promote products derived
21  *    from this software without specific prior written permission.
22  *
23  * Alternatively, this software may be distributed under the terms of the
24  * GNU General Public License ("GPL") version 2 as published by the Free
25  * Software Foundation.
26  *
27  * NO WARRANTY
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
36  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
37  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGES.
39  *
40  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#112 $
41  *
42  * $FreeBSD$
43  */
44 
45 #ifdef __linux__
46 #include "aic7xxx_osm.h"
47 #include "aic7xxx_inline.h"
48 #include "aicasm/aicasm_insformat.h"
49 #else
50 #include <dev/aic7xxx/aic7xxx_osm.h>
51 #include <dev/aic7xxx/aic7xxx_inline.h>
52 #include <dev/aic7xxx/aicasm/aicasm_insformat.h>
53 #endif
54 
55 /****************************** Softc Data ************************************/
56 struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);
57 
58 /***************************** Lookup Tables **********************************/
59 char *ahc_chip_names[] =
60 {
61 	"NONE",
62 	"aic7770",
63 	"aic7850",
64 	"aic7855",
65 	"aic7859",
66 	"aic7860",
67 	"aic7870",
68 	"aic7880",
69 	"aic7895",
70 	"aic7895C",
71 	"aic7890/91",
72 	"aic7896/97",
73 	"aic7892",
74 	"aic7899"
75 };
76 static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);
77 
78 /*
79  * Hardware error codes.
80  */
81 struct ahc_hard_error_entry {
82         uint8_t errno;
83 	char *errmesg;
84 };
85 
86 static struct ahc_hard_error_entry ahc_hard_errors[] = {
87 	{ ILLHADDR,	"Illegal Host Access" },
88 	{ ILLSADDR,	"Illegal Sequencer Address referenced" },
89 	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
90 	{ SQPARERR,	"Sequencer Parity Error" },
91 	{ DPARERR,	"Data-path Parity Error" },
92 	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
93 	{ PCIERRSTAT,	"PCI Error detected" },
94 	{ CIOPARERR,	"CIOBUS Parity Error" },
95 };
96 static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);
97 
98 static struct ahc_phase_table_entry ahc_phase_table[] =
99 {
100 	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
101 	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
102 	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
103 	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
104 	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
105 	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
106 	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
107 	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
108 	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
109 	{ 0,		MSG_NOOP,		"in unknown phase"	}
110 };
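
/*
 * Editorial note: the interrupt handlers below consult this table by
 * scanning for an entry whose phase matches LASTPHASE; phasemsg supplies
 * the text used in diagnostics and mesg_out the message to queue when a
 * parity error is detected in that phase (MSG_NOOP meaning "nothing to
 * send").
 */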
111 
112 /*
113  * In most cases we only wish to iterate over real phases, so
114  * exclude the last element from the count.
115  */
116 static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;
117 
118 /*
119  * Valid SCSIRATE values.  (p. 3-17)
120  * Provides a mapping of transfer periods in ns to the proper value to
121  * stick in the scsixfer reg.
122  */
123 static struct ahc_syncrate ahc_syncrates[] =
124 {
125       /* ultra2    fast/ultra  period     rate */
126 	{ 0x42,      0x000,      9,      "80.0" },
127 	{ 0x03,      0x000,     10,      "40.0" },
128 	{ 0x04,      0x000,     11,      "33.0" },
129 	{ 0x05,      0x100,     12,      "20.0" },
130 	{ 0x06,      0x110,     15,      "16.0" },
131 	{ 0x07,      0x120,     18,      "13.4" },
132 	{ 0x08,      0x000,     25,      "10.0" },
133 	{ 0x19,      0x010,     31,      "8.0"  },
134 	{ 0x1a,      0x020,     37,      "6.67" },
135 	{ 0x1b,      0x030,     43,      "5.7"  },
136 	{ 0x1c,      0x040,     50,      "5.0"  },
137 	{ 0x00,      0x050,     56,      "4.4"  },
138 	{ 0x00,      0x060,     62,      "4.0"  },
139 	{ 0x00,      0x070,     68,      "3.6"  },
140 	{ 0x00,      0x000,      0,      NULL   }
141 };
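
/*
 * Worked example (editorial illustration only): a requested period of
 * 12ns matches the { 0x05, 0x100, 12, "20.0" } entry above.  Ultra2
 * class controllers use the first (ultra2) column of encodings, while
 * Fast/Ultra controllers use the second (fast/ultra) column, as seen
 * in ahc_find_period() later in this file.
 */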
142 
143 /* Our Sequencer Program */
144 #include "aic7xxx_seq.h"
145 
146 /**************************** Function Declarations ***************************/
147 static void		ahc_force_renegotiation(struct ahc_softc *ahc);
148 static struct ahc_tmode_tstate*
149 			ahc_alloc_tstate(struct ahc_softc *ahc,
150 					 u_int scsi_id, char channel);
151 #ifdef AHC_TARGET_MODE
152 static void		ahc_free_tstate(struct ahc_softc *ahc,
153 					u_int scsi_id, char channel, int force);
154 #endif
155 static struct ahc_syncrate*
156 			ahc_devlimited_syncrate(struct ahc_softc *ahc,
157 					        struct ahc_initiator_tinfo *,
158 						u_int *period,
159 						u_int *ppr_options,
160 						role_t role);
161 static void		ahc_update_pending_scbs(struct ahc_softc *ahc);
162 static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
163 					  struct ahc_devinfo *devinfo);
164 static void		ahc_scb_devinfo(struct ahc_softc *ahc,
165 					struct ahc_devinfo *devinfo,
166 					struct scb *scb);
167 static void		ahc_assert_atn(struct ahc_softc *ahc);
168 static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
169 						   struct ahc_devinfo *devinfo,
170 						   struct scb *scb);
171 static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
172 					       struct ahc_devinfo *devinfo);
173 static void		ahc_construct_sdtr(struct ahc_softc *ahc,
174 					   struct ahc_devinfo *devinfo,
175 					   u_int period, u_int offset);
176 static void		ahc_construct_wdtr(struct ahc_softc *ahc,
177 					   struct ahc_devinfo *devinfo,
178 					   u_int bus_width);
179 static void		ahc_construct_ppr(struct ahc_softc *ahc,
180 					  struct ahc_devinfo *devinfo,
181 					  u_int period, u_int offset,
182 					  u_int bus_width, u_int ppr_options);
183 static void		ahc_clear_msg_state(struct ahc_softc *ahc);
184 static void		ahc_handle_proto_violation(struct ahc_softc *ahc);
185 static void		ahc_handle_message_phase(struct ahc_softc *ahc);
186 typedef enum {
187 	AHCMSG_1B,
188 	AHCMSG_2B,
189 	AHCMSG_EXT
190 } ahc_msgtype;
191 static int		ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
192 				     u_int msgval, int full);
193 static int		ahc_parse_msg(struct ahc_softc *ahc,
194 				      struct ahc_devinfo *devinfo);
195 static int		ahc_handle_msg_reject(struct ahc_softc *ahc,
196 					      struct ahc_devinfo *devinfo);
197 static void		ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
198 						struct ahc_devinfo *devinfo);
199 static void		ahc_reinitialize_dataptrs(struct ahc_softc *ahc);
200 static void		ahc_handle_devreset(struct ahc_softc *ahc,
201 					    struct ahc_devinfo *devinfo,
202 					    cam_status status, char *message,
203 					    int verbose_level);
204 #ifdef AHC_TARGET_MODE
205 static void		ahc_setup_target_msgin(struct ahc_softc *ahc,
206 					       struct ahc_devinfo *devinfo,
207 					       struct scb *scb);
208 #endif
209 
210 static bus_dmamap_callback_t	ahc_dmamap_cb;
211 static void			ahc_build_free_scb_list(struct ahc_softc *ahc);
212 static int			ahc_init_scbdata(struct ahc_softc *ahc);
213 static void			ahc_fini_scbdata(struct ahc_softc *ahc);
214 static void		ahc_qinfifo_requeue(struct ahc_softc *ahc,
215 					    struct scb *prev_scb,
216 					    struct scb *scb);
217 static int		ahc_qinfifo_count(struct ahc_softc *ahc);
218 static u_int		ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
219 						   u_int prev, u_int scbptr);
220 static void		ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
221 static u_int		ahc_rem_wscb(struct ahc_softc *ahc,
222 				     u_int scbpos, u_int prev);
223 static void		ahc_reset_current_bus(struct ahc_softc *ahc);
224 #ifdef AHC_DUMP_SEQ
225 static void		ahc_dumpseq(struct ahc_softc *ahc);
226 #endif
227 static void		ahc_loadseq(struct ahc_softc *ahc);
228 static int		ahc_check_patch(struct ahc_softc *ahc,
229 					struct patch **start_patch,
230 					u_int start_instr, u_int *skip_addr);
231 static void		ahc_download_instr(struct ahc_softc *ahc,
232 					   u_int instrptr, uint8_t *dconsts);
233 #ifdef AHC_TARGET_MODE
234 static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
235 					       struct ahc_tmode_lstate *lstate,
236 					       u_int initiator_id,
237 					       u_int event_type,
238 					       u_int event_arg);
239 static void		ahc_update_scsiid(struct ahc_softc *ahc,
240 					  u_int targid_mask);
241 static int		ahc_handle_target_cmd(struct ahc_softc *ahc,
242 					      struct target_cmd *cmd);
243 #endif
244 /************************* Sequencer Execution Control ************************/
245 /*
246  * Restart the sequencer program from address zero
247  */
248 void
249 ahc_restart(struct ahc_softc *ahc)
250 {
251 
252 	ahc_pause(ahc);
253 
254 	/* No more pending messages. */
255 	ahc_clear_msg_state(ahc);
256 
257 	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
258 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
259 	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
260 	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
261 	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
262 	ahc_outb(ahc, SAVED_LUN, 0xFF);
263 
264 	/*
265 	 * Ensure that the sequencer's idea of TQINPOS
266 	 * matches our own.  The sequencer increments TQINPOS
267 	 * only after it sees a DMA complete, and a reset could
268 	 * occur before the increment, leaving the kernel believing
269 	 * a command arrived that the sequencer knows nothing about.
270 	 */
271 	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
272 
273 	/* Always allow reselection */
274 	ahc_outb(ahc, SCSISEQ,
275 		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
276 	if ((ahc->features & AHC_CMD_CHAN) != 0) {
277 		/* Ensure that no DMA operations are in progress */
278 		ahc_outb(ahc, CCSCBCNT, 0);
279 		ahc_outb(ahc, CCSGCTL, 0);
280 		ahc_outb(ahc, CCSCBCTL, 0);
281 	}
282 	/*
283 	 * If we were in the process of DMA'ing SCB data into
284 	 * an SCB, replace that SCB on the free list.  This prevents
285 	 * an SCB leak.
286 	 */
287 	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
288 		ahc_add_curscb_to_free_list(ahc);
289 		ahc_outb(ahc, SEQ_FLAGS2,
290 			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
291 	}
292 	ahc_outb(ahc, MWI_RESIDUAL, 0);
293 	ahc_outb(ahc, SEQCTL, FASTMODE);
294 	ahc_outb(ahc, SEQADDR0, 0);
295 	ahc_outb(ahc, SEQADDR1, 0);
296 	ahc_unpause(ahc);
297 }
298 
299 /************************* Input/Output Queues ********************************/
300 void
301 ahc_run_qoutfifo(struct ahc_softc *ahc)
302 {
303 	struct scb *scb;
304 	u_int  scb_index;
305 
306 	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
307 	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
308 
309 		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
310 		if ((ahc->qoutfifonext & 0x03) == 0x03) {
311 			u_int modnext;
312 
313 			/*
314 			/*
315 			 * Clear 32 bits of the QOUTFIFO at a time
316 			 * so that we don't clobber an incoming
317 			 * byte DMA to the array on architectures
318 			 * that only support 32-bit load and store
318 			 * operations.
319 			 */
320 			modnext = ahc->qoutfifonext & ~0x3;
321 			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
322 			ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
323 					ahc->shared_data_dmamap,
324 					/*offset*/modnext, /*len*/4,
325 					BUS_DMASYNC_PREREAD);
326 		}
327 		ahc->qoutfifonext++;
328 
329 		scb = ahc_lookup_scb(ahc, scb_index);
330 		if (scb == NULL) {
331 			printf("%s: WARNING no command for scb %d "
332 			       "(cmdcmplt)\nQOUTPOS = %d\n",
333 			       ahc_name(ahc), scb_index,
334 			       (ahc->qoutfifonext - 1) & 0xFF);
335 			continue;
336 		}
337 
338 		/*
339 		 * Save off the residual
340 		 * if there is one.
341 		 */
342 		ahc_update_residual(ahc, scb);
343 		ahc_done(ahc, scb);
344 	}
345 }
346 
347 void
348 ahc_run_untagged_queues(struct ahc_softc *ahc)
349 {
350 	int i;
351 
352 	for (i = 0; i < 16; i++)
353 		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
354 }
355 
356 void
357 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
358 {
359 	struct scb *scb;
360 
361 	if (ahc->untagged_queue_lock != 0)
362 		return;
363 
364 	if ((scb = TAILQ_FIRST(queue)) != NULL
365 	 && (scb->flags & SCB_ACTIVE) == 0) {
366 		scb->flags |= SCB_ACTIVE;
367 		ahc_queue_scb(ahc, scb);
368 	}
369 }
370 
371 /************************* Interrupt Handling *********************************/
372 void
373 ahc_handle_brkadrint(struct ahc_softc *ahc)
374 {
375 	/*
376 	 * We upset the sequencer :-(
377 	 * Look up the error message
378 	 */
379 	int i;
380 	int error;
381 
382 	error = ahc_inb(ahc, ERROR);
383 	for (i = 0; error != 1 && i < num_errors; i++)
384 		error >>= 1;
385 	printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
386 	       ahc_name(ahc), ahc_hard_errors[i].errmesg,
387 	       ahc_inb(ahc, SEQADDR0) |
388 	       (ahc_inb(ahc, SEQADDR1) << 8));
389 
390 	ahc_dump_card_state(ahc);
391 
392 	/* Tell everyone that this HBA is no longer available */
393 	ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
394 		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
395 		       CAM_NO_HBA);
396 
397 	/* Disable all interrupt sources by resetting the controller */
398 	ahc_shutdown(ahc);
399 }
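
/*
 * Worked example (editorial illustration only): if the ERROR register
 * reads 0x04 after a break address interrupt, the shift loop above
 * terminates with i == 2 and "Illegal Opcode in sequencer program" from
 * ahc_hard_errors[] is reported.  This assumes the hard error codes are
 * single-bit flags (ILLHADDR == 0x01, ILLSADDR == 0x02, ILLOPCODE ==
 * 0x04, ...), which is how the shift-until-one loop interprets them.
 */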
400 
401 void
402 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
403 {
404 	struct scb *scb;
405 	struct ahc_devinfo devinfo;
406 
407 	ahc_fetch_devinfo(ahc, &devinfo);
408 
409 	/*
410 	 * Clear the upper byte that holds SEQINT status
411 	 * codes and clear the SEQINT bit. We will unpause
412 	 * the sequencer, if appropriate, after servicing
413 	 * the request.
414 	 */
415 	ahc_outb(ahc, CLRINT, CLRSEQINT);
416 	switch (intstat & SEQINT_MASK) {
417 	case BAD_STATUS:
418 	{
419 		u_int  scb_index;
420 		struct hardware_scb *hscb;
421 
422 		/*
423 		 * Set the default return value to 0 (don't
424 		 * send sense).  The sense code will change
425 		 * this if needed.
426 		 */
427 		ahc_outb(ahc, RETURN_1, 0);
428 
429 		/*
430 		 * The sequencer will notify us when a command
431 		 * has an error that would be of interest to
432 		 * the kernel.  This allows us to leave the sequencer
433 		 * running in the common case of commands completing
434 		 * without error.  The sequencer will already have
435 		 * dma'd the SCB back up to us, so we can reference
436 		 * the in-kernel copy directly.
437 		 */
438 		scb_index = ahc_inb(ahc, SCB_TAG);
439 		scb = ahc_lookup_scb(ahc, scb_index);
440 		if (scb == NULL) {
441 			ahc_print_devinfo(ahc, &devinfo);
442 			printf("ahc_intr - referenced scb "
443 			       "not valid during seqint 0x%x scb(%d)\n",
444 			       intstat, scb_index);
445 			ahc_dump_card_state(ahc);
446 			panic("for safety");
447 			goto unpause;
448 		}
449 
450 		hscb = scb->hscb;
451 
452 		/* Don't want to clobber the original sense code */
453 		if ((scb->flags & SCB_SENSE) != 0) {
454 			/*
455 			 * Clear the SCB_SENSE Flag and have
456 			 * the sequencer do a normal command
457 			 * complete.
458 			 */
459 			scb->flags &= ~SCB_SENSE;
460 			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
461 			break;
462 		}
463 		ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
464 		/* Freeze the queue until the client sees the error. */
465 		ahc_freeze_devq(ahc, scb);
466 		ahc_freeze_scb(scb);
467 		ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
468 		switch (hscb->shared_data.status.scsi_status) {
469 		case SCSI_STATUS_OK:
470 			printf("%s: Interrupted for status of 0???\n",
471 			       ahc_name(ahc));
472 			break;
473 		case SCSI_STATUS_CMD_TERMINATED:
474 		case SCSI_STATUS_CHECK_COND:
475 		{
476 			struct ahc_dma_seg *sg;
477 			struct scsi_sense *sc;
478 			struct ahc_initiator_tinfo *targ_info;
479 			struct ahc_tmode_tstate *tstate;
480 			struct ahc_transinfo *tinfo;
481 #ifdef AHC_DEBUG
482 			if (ahc_debug & AHC_SHOW_SENSE) {
483 				ahc_print_path(ahc, scb);
484 				printf("SCB %d: requests Check Status\n",
485 				       scb->hscb->tag);
486 			}
487 #endif
488 
489 			if (ahc_perform_autosense(scb) == 0)
490 				break;
491 
492 			targ_info = ahc_fetch_transinfo(ahc,
493 							devinfo.channel,
494 							devinfo.our_scsiid,
495 							devinfo.target,
496 							&tstate);
497 			tinfo = &targ_info->curr;
498 			sg = scb->sg_list;
499 			sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
500 			/*
501 			 * Save off the residual if there is one.
502 			 */
503 			ahc_update_residual(ahc, scb);
504 #ifdef AHC_DEBUG
505 			if (ahc_debug & AHC_SHOW_SENSE) {
506 				ahc_print_path(ahc, scb);
507 				printf("Sending Sense\n");
508 			}
509 #endif
510 			sg->addr = ahc_get_sense_bufaddr(ahc, scb);
511 			sg->len = ahc_get_sense_bufsize(ahc, scb);
512 			sg->len |= AHC_DMA_LAST_SEG;
513 
514 			/* Fixup byte order */
515 			sg->addr = ahc_htole32(sg->addr);
516 			sg->len = ahc_htole32(sg->len);
517 
518 			sc->opcode = REQUEST_SENSE;
519 			sc->byte2 = 0;
520 			if (tinfo->protocol_version <= SCSI_REV_2
521 			 && SCB_GET_LUN(scb) < 8)
522 				sc->byte2 = SCB_GET_LUN(scb) << 5;
523 			sc->unused[0] = 0;
524 			sc->unused[1] = 0;
525 			sc->length = sg->len;
526 			sc->control = 0;
527 
528 			/*
529 			 * We can't allow the target to disconnect.
530 			 * This will be an untagged transaction and
531 			 * having the target disconnect will make this
532 			 * transaction indistinguishable from outstanding
533 			 * tagged transactions.
534 			 */
535 			hscb->control = 0;
536 
537 			/*
538 			 * This request sense could be because the
539 			 * device lost power or in some other
540 			 * way has lost our transfer negotiations.
541 			 * Renegotiate if appropriate.  Unit attention
542 			 * errors will be reported before any data
543 			 * phases occur.
544 			 */
545 			if (ahc_get_residual(scb)
546 			 == ahc_get_transfer_length(scb)) {
547 				ahc_update_neg_request(ahc, &devinfo,
548 						       tstate, targ_info,
549 						       AHC_NEG_IF_NON_ASYNC);
550 			}
551 			if (tstate->auto_negotiate & devinfo.target_mask) {
552 				hscb->control |= MK_MESSAGE;
553 				scb->flags &= ~SCB_NEGOTIATE;
554 				scb->flags |= SCB_AUTO_NEGOTIATE;
555 			}
556 			hscb->cdb_len = sizeof(*sc);
557 			hscb->dataptr = sg->addr;
558 			hscb->datacnt = sg->len;
559 			hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
560 			hscb->sgptr = ahc_htole32(hscb->sgptr);
561 			scb->sg_count = 1;
562 			scb->flags |= SCB_SENSE;
563 			ahc_qinfifo_requeue_tail(ahc, scb);
564 			ahc_outb(ahc, RETURN_1, SEND_SENSE);
565 			/*
566 			 * Ensure we have enough time to actually
567 			 * retrieve the sense.
568 			 */
569 			ahc_scb_timer_reset(scb, 5 * 1000000);
570 			break;
571 		}
572 		default:
573 			break;
574 		}
575 		break;
576 	}
577 	case NO_MATCH:
578 	{
579 		/* Ensure we don't leave the selection hardware on */
580 		ahc_outb(ahc, SCSISEQ,
581 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
582 
583 		printf("%s:%c:%d: no active SCB for reconnecting "
584 		       "target - issuing BUS DEVICE RESET\n",
585 		       ahc_name(ahc), devinfo.channel, devinfo.target);
586 		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
587 		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
588 		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
589 		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
590 		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
591 		       "SINDEX == 0x%x\n",
592 		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
593 		       ahc_index_busy_tcl(ahc,
594 			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
595 				      ahc_inb(ahc, SAVED_LUN))),
596 		       ahc_inb(ahc, SINDEX));
597 		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
598 		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
599 		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
600 		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
601 		       ahc_inb(ahc, SCB_CONTROL));
602 		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
603 		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
604 		printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
605 		printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
606 		ahc_dump_card_state(ahc);
607 		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
608 		ahc->msgout_len = 1;
609 		ahc->msgout_index = 0;
610 		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
611 		ahc_outb(ahc, MSG_OUT, HOST_MSG);
612 		ahc_assert_atn(ahc);
613 		break;
614 	}
615 	case SEND_REJECT:
616 	{
617 		u_int rejbyte = ahc_inb(ahc, ACCUM);
618 		printf("%s:%c:%d: Warning - unknown message received from "
619 		       "target (0x%x).  Rejecting\n",
620 		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
621 		break;
622 	}
623 	case PROTO_VIOLATION:
624 	{
625 		ahc_handle_proto_violation(ahc);
626 		break;
627 	}
628 	case IGN_WIDE_RES:
629 		ahc_handle_ign_wide_residue(ahc, &devinfo);
630 		break;
631 	case PDATA_REINIT:
632 		ahc_reinitialize_dataptrs(ahc);
633 		break;
634 	case BAD_PHASE:
635 	{
636 		u_int lastphase;
637 
638 		lastphase = ahc_inb(ahc, LASTPHASE);
639 		printf("%s:%c:%d: unknown scsi bus phase %x, "
640 		       "lastphase = 0x%x.  Attempting to continue\n",
641 		       ahc_name(ahc), devinfo.channel, devinfo.target,
642 		       lastphase, ahc_inb(ahc, SCSISIGI));
643 		break;
644 	}
645 	case MISSED_BUSFREE:
646 	{
647 		u_int lastphase;
648 
649 		lastphase = ahc_inb(ahc, LASTPHASE);
650 		printf("%s:%c:%d: Missed busfree. "
651 		       "Lastphase = 0x%x, Curphase = 0x%x\n",
652 		       ahc_name(ahc), devinfo.channel, devinfo.target,
653 		       lastphase, ahc_inb(ahc, SCSISIGI));
654 		ahc_restart(ahc);
655 		return;
656 	}
657 	case HOST_MSG_LOOP:
658 	{
659 		/*
660 		 * The sequencer has encountered a message phase
661 		 * that requires host assistance for completion.
662 		 * While handling the message phase(s), we will be
663 		 * notified by the sequencer after each byte is
664 		 * transferred so we can track bus phase changes.
665 		 *
666 		 * If this is the first time we've seen a HOST_MSG_LOOP
667 		 * interrupt, initialize the state of the host message
668 		 * loop.
669 		 */
670 		if (ahc->msg_type == MSG_TYPE_NONE) {
671 			struct scb *scb;
672 			u_int scb_index;
673 			u_int bus_phase;
674 
675 			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
676 			if (bus_phase != P_MESGIN
677 			 && bus_phase != P_MESGOUT) {
678 				printf("ahc_intr: HOST_MSG_LOOP bad "
679 				       "phase 0x%x\n",
680 				      bus_phase);
681 				/*
682 				 * Probably transitioned to bus free before
683 				 * we got here.  Just punt the message.
684 				 */
685 				ahc_clear_intstat(ahc);
686 				ahc_restart(ahc);
687 				return;
688 			}
689 
690 			scb_index = ahc_inb(ahc, SCB_TAG);
691 			scb = ahc_lookup_scb(ahc, scb_index);
692 			if (devinfo.role == ROLE_INITIATOR) {
693 				if (scb == NULL)
694 					panic("HOST_MSG_LOOP with "
695 					      "invalid SCB %x\n", scb_index);
696 
697 				if (bus_phase == P_MESGOUT)
698 					ahc_setup_initiator_msgout(ahc,
699 								   &devinfo,
700 								   scb);
701 				else {
702 					ahc->msg_type =
703 					    MSG_TYPE_INITIATOR_MSGIN;
704 					ahc->msgin_index = 0;
705 				}
706 			}
707 #ifdef AHC_TARGET_MODE
708 			else {
709 				if (bus_phase == P_MESGOUT) {
710 					ahc->msg_type =
711 					    MSG_TYPE_TARGET_MSGOUT;
712 					ahc->msgin_index = 0;
713 				}
714 				else
715 					ahc_setup_target_msgin(ahc,
716 							       &devinfo,
717 							       scb);
718 			}
719 #endif
720 		}
721 
722 		ahc_handle_message_phase(ahc);
723 		break;
724 	}
725 	case PERR_DETECTED:
726 	{
727 		/*
728 		 * If we've cleared the parity error interrupt
729 		 * but the sequencer still believes that SCSIPERR
730 		 * is true, it must be that the parity error is
731 		 * for the currently presented byte on the bus,
732 		 * and we are not in a phase (data-in) where we will
733 		 * eventually ack this byte.  Ack the byte and
734 		 * throw it away in the hope that the target will
735 		 * take us to message out to deliver the appropriate
736 		 * error message.
737 		 */
738 		if ((intstat & SCSIINT) == 0
739 		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
740 
741 			if ((ahc->features & AHC_DT) == 0) {
742 				u_int curphase;
743 
744 				/*
745 				 * The hardware will only let you ack bytes
746 				 * if the expected phase in SCSISIGO matches
747 				 * the current phase.  Make sure this is
748 				 * currently the case.
749 				 */
750 				curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
751 				ahc_outb(ahc, LASTPHASE, curphase);
752 				ahc_outb(ahc, SCSISIGO, curphase);
753 			}
754 			if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) {
755 				int wait;
756 
757 				/*
758 				 * In a data phase.  Faster to bitbucket
759 				 * the data than to individually ack each
760 				 * byte.  This is also the only strategy
761 				 * that will work with AUTOACK enabled.
762 				 */
763 				ahc_outb(ahc, SXFRCTL1,
764 					 ahc_inb(ahc, SXFRCTL1) | BITBUCKET);
765 				wait = 5000;
766 				while (--wait != 0) {
767 					if ((ahc_inb(ahc, SCSISIGI)
768 					  & (CDI|MSGI)) != 0)
769 						break;
770 					ahc_delay(100);
771 				}
772 				ahc_outb(ahc, SXFRCTL1,
773 					 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
774 				if (wait == 0) {
775 					struct	scb *scb;
776 					u_int	scb_index;
777 
778 					ahc_print_devinfo(ahc, &devinfo);
779 					printf("Unable to clear parity error.  "
780 					       "Resetting bus.\n");
781 					scb_index = ahc_inb(ahc, SCB_TAG);
782 					scb = ahc_lookup_scb(ahc, scb_index);
783 					if (scb != NULL)
784 						ahc_set_transaction_status(scb,
785 						    CAM_UNCOR_PARITY);
786 					ahc_reset_channel(ahc, devinfo.channel,
787 							  /*init reset*/TRUE);
788 				}
789 			} else {
790 				ahc_inb(ahc, SCSIDATL);
791 			}
792 		}
793 		break;
794 	}
795 	case DATA_OVERRUN:
796 	{
797 		/*
798 		 * When the sequencer detects an overrun, it
799 		 * places the controller in "BITBUCKET" mode
800 		 * and allows the target to complete its transfer.
801 		 * Unfortunately, none of the counters get updated
802 		 * when the controller is in this mode, so we have
803 		 * no way of knowing how large the overrun was.
804 		 */
805 		u_int scbindex = ahc_inb(ahc, SCB_TAG);
806 		u_int lastphase = ahc_inb(ahc, LASTPHASE);
807 		u_int i;
808 
809 		scb = ahc_lookup_scb(ahc, scbindex);
810 		for (i = 0; i < num_phases; i++) {
811 			if (lastphase == ahc_phase_table[i].phase)
812 				break;
813 		}
814 		ahc_print_path(ahc, scb);
815 		printf("data overrun detected %s."
816 		       "  Tag == 0x%x.\n",
817 		       ahc_phase_table[i].phasemsg,
818   		       scb->hscb->tag);
819 		ahc_print_path(ahc, scb);
820 		printf("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
821 		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
822 		       ahc_get_transfer_length(scb), scb->sg_count);
823 		if (scb->sg_count > 0) {
824 			for (i = 0; i < scb->sg_count; i++) {
825 
826 				printf("sg[%d] - Addr 0x%x%x : Length %d\n",
827 				       i,
828 				       (ahc_le32toh(scb->sg_list[i].len) >> 24
829 				        & SG_HIGH_ADDR_BITS),
830 				       ahc_le32toh(scb->sg_list[i].addr),
831 				       ahc_le32toh(scb->sg_list[i].len)
832 				       & AHC_SG_LEN_MASK);
833 			}
834 		}
835 		/*
836 		 * Set this and it will take effect when the
837 		 * target does a command complete.
838 		 */
839 		ahc_freeze_devq(ahc, scb);
840 		if ((scb->flags & SCB_SENSE) == 0) {
841 			ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
842 		} else {
843 			scb->flags &= ~SCB_SENSE;
844 			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
845 		}
846 		ahc_freeze_scb(scb);
847 
848 		if ((ahc->features & AHC_ULTRA2) != 0) {
849 			/*
850 			 * Clear the channel in case we return
851 			 * to data phase later.
852 			 */
853 			ahc_outb(ahc, SXFRCTL0,
854 				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
855 			ahc_outb(ahc, SXFRCTL0,
856 				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
857 		}
858 		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
859 			u_int dscommand1;
860 
861 			/* Ensure HHADDR is 0 for future DMA operations. */
862 			dscommand1 = ahc_inb(ahc, DSCOMMAND1);
863 			ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
864 			ahc_outb(ahc, HADDR, 0);
865 			ahc_outb(ahc, DSCOMMAND1, dscommand1);
866 		}
867 		break;
868 	}
869 	case MKMSG_FAILED:
870 	{
871 		u_int scbindex;
872 
873 		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
874 		       ahc_name(ahc), devinfo.channel, devinfo.target,
875 		       devinfo.lun);
876 		scbindex = ahc_inb(ahc, SCB_TAG);
877 		scb = ahc_lookup_scb(ahc, scbindex);
878 		if (scb != NULL
879 		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
880 			/*
881 			 * Ensure that we didn't put a second instance of this
882 			 * SCB into the QINFIFO.
883 			 */
884 			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
885 					   SCB_GET_CHANNEL(ahc, scb),
886 					   SCB_GET_LUN(scb), scb->hscb->tag,
887 					   ROLE_INITIATOR, /*status*/0,
888 					   SEARCH_REMOVE);
889 		break;
890 	}
891 	case NO_FREE_SCB:
892 	{
893 		printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
894 		ahc_dump_card_state(ahc);
895 		panic("for safety");
896 		break;
897 	}
898 	case SCB_MISMATCH:
899 	{
900 		u_int scbptr;
901 
902 		scbptr = ahc_inb(ahc, SCBPTR);
903 		printf("Bogus TAG after DMA.  SCBPTR %d, tag %d, our tag %d\n",
904 		       scbptr, ahc_inb(ahc, ARG_1),
905 		       ahc->scb_data->hscbs[scbptr].tag);
906 		ahc_dump_card_state(ahc);
907 		panic("for safety");
908 		break;
909 	}
910 	case OUT_OF_RANGE:
911 	{
912 		printf("%s: BTT calculation out of range\n", ahc_name(ahc));
913 		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
914 		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
915 		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
916 		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
917 		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
918 		       "SINDEX == 0x%x, A == 0x%x\n",
919 		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
920 		       ahc_index_busy_tcl(ahc,
921 			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
922 				      ahc_inb(ahc, SAVED_LUN))),
923 		       ahc_inb(ahc, SINDEX),
924 		       ahc_inb(ahc, ACCUM));
925 		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
926 		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
927 		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
928 		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
929 		       ahc_inb(ahc, SCB_CONTROL));
930 		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
931 		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
932 		ahc_dump_card_state(ahc);
933 		panic("for safety");
934 		break;
935 	}
936 	default:
937 		printf("ahc_intr: seqint, "
938 		       "intstat == 0x%x, scsisigi = 0x%x\n",
939 		       intstat, ahc_inb(ahc, SCSISIGI));
940 		break;
941 	}
942 unpause:
943 	/*
944 	 *  The sequencer is paused immediately on
945 	 *  a SEQINT, so we should restart it when
946 	 *  we're done.
947 	 */
948 	ahc_unpause(ahc);
949 }
950 
951 void
952 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
953 {
954 	u_int	scb_index;
955 	u_int	status0;
956 	u_int	status;
957 	struct	scb *scb;
958 	char	cur_channel;
959 	char	intr_channel;
960 
961 	if ((ahc->features & AHC_TWIN) != 0
962 	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
963 		cur_channel = 'B';
964 	else
965 		cur_channel = 'A';
966 	intr_channel = cur_channel;
967 
968 	if ((ahc->features & AHC_ULTRA2) != 0)
969 		status0 = ahc_inb(ahc, SSTAT0) & IOERR;
970 	else
971 		status0 = 0;
972 	status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
973 	if (status == 0 && status0 == 0) {
974 		if ((ahc->features & AHC_TWIN) != 0) {
975 			/* Try the other channel */
976 		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
977 			status = ahc_inb(ahc, SSTAT1)
978 			       & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
979 			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
980 		}
981 		if (status == 0) {
982 			printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
983 			ahc_outb(ahc, CLRINT, CLRSCSIINT);
984 			ahc_unpause(ahc);
985 			return;
986 		}
987 	}
988 
989 	/* Make sure the sequencer is in a safe location. */
990 	ahc_clear_critical_section(ahc);
991 
992 	scb_index = ahc_inb(ahc, SCB_TAG);
993 	scb = ahc_lookup_scb(ahc, scb_index);
994 	if (scb != NULL
995 	 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
996 		scb = NULL;
997 
998 	if ((ahc->features & AHC_ULTRA2) != 0
999 	 && (status0 & IOERR) != 0) {
1000 		int now_lvd;
1001 
1002 		now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
1003 		printf("%s: Transceiver State Has Changed to %s mode\n",
1004 		       ahc_name(ahc), now_lvd ? "LVD" : "SE");
1005 		ahc_outb(ahc, CLRSINT0, CLRIOERR);
1006 		/*
1007 		 * When transitioning to SE mode, the reset line
1008 		 * glitches, triggering an arbitration bug in some
1009 		 * Ultra2 controllers.  This bug is cleared when we
1010 		 * assert the reset line.  Since a reset glitch has
1011 		 * already occurred with this transition and a
1012 		 * transceiver state change is handled just like
1013 		 * a bus reset anyway, asserting the reset line
1014 		 * ourselves is safe.
1015 		 */
1016 		ahc_reset_channel(ahc, intr_channel,
1017 				 /*Initiate Reset*/now_lvd == 0);
1018 	} else if ((status & SCSIRSTI) != 0) {
1019 		printf("%s: Someone reset channel %c\n",
1020 			ahc_name(ahc), intr_channel);
1021 		if (intr_channel != cur_channel)
1022 		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
1023 		ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
1024 	} else if ((status & SCSIPERR) != 0) {
1025 		/*
1026 		 * Determine the bus phase and queue an appropriate message.
1027 		 * SCSIPERR is latched true as soon as a parity error
1028 		 * occurs.  If the sequencer acked the transfer that
1029 		 * caused the parity error and the currently presented
1030 		 * transfer on the bus has correct parity, SCSIPERR will
1031 		 * be cleared by CLRSCSIPERR.  Use this to determine if
1032 		 * we should look at the last phase the sequencer recorded,
1033 		 * or the current phase presented on the bus.
1034 		 */
1035 		u_int mesg_out;
1036 		u_int curphase;
1037 		u_int errorphase;
1038 		u_int lastphase;
1039 		u_int scsirate;
1040 		u_int i;
1041 		u_int sstat2;
1042 		int   silent;
1043 
1044 		lastphase = ahc_inb(ahc, LASTPHASE);
1045 		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
1046 		sstat2 = ahc_inb(ahc, SSTAT2);
1047 		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
1048 		/*
1049 		 * For all phases save DATA, the sequencer won't
1050 		 * automatically ack a byte that has a parity error
1051 		 * in it.  So the only way that the current phase
1052 		 * could be 'data-in' is if the parity error is for
1053 		 * an already acked byte in the data phase.  During
1054 		 * synchronous data-in transfers, we may actually
1055 		 * ack bytes before latching the current phase in
1056 		 * LASTPHASE, leading to the discrepancy between
1057 		 * curphase and lastphase.
1058 		 */
1059 		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
1060 		 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
1061 			errorphase = curphase;
1062 		else
1063 			errorphase = lastphase;
1064 
1065 		for (i = 0; i < num_phases; i++) {
1066 			if (errorphase == ahc_phase_table[i].phase)
1067 				break;
1068 		}
1069 		mesg_out = ahc_phase_table[i].mesg_out;
1070 		silent = FALSE;
1071 		if (scb != NULL) {
1072 			if (SCB_IS_SILENT(scb))
1073 				silent = TRUE;
1074 			else
1075 				ahc_print_path(ahc, scb);
1076 			scb->flags |= SCB_TRANSMISSION_ERROR;
1077 		} else
1078 			printf("%s:%c:%d: ", ahc_name(ahc), intr_channel,
1079 			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
1080 		scsirate = ahc_inb(ahc, SCSIRATE);
1081 		if (silent == FALSE) {
1082 			printf("parity error detected %s. "
1083 			       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
1084 			       ahc_phase_table[i].phasemsg,
1085 			       ahc_inw(ahc, SEQADDR0),
1086 			       scsirate);
1087 			if ((ahc->features & AHC_DT) != 0) {
1088 				if ((sstat2 & CRCVALERR) != 0)
1089 					printf("\tCRC Value Mismatch\n");
1090 				if ((sstat2 & CRCENDERR) != 0)
1091 					printf("\tNo terminal CRC packet "
1092 					       "received\n");
1093 				if ((sstat2 & CRCREQERR) != 0)
1094 					printf("\tIllegal CRC packet "
1095 					       "request\n");
1096 				if ((sstat2 & DUAL_EDGE_ERR) != 0)
1097 					printf("\tUnexpected %sDT Data Phase\n",
1098 					       (scsirate & SINGLE_EDGE)
1099 					     ? "" : "non-");
1100 			}
1101 		}
1102 
1103 		if ((ahc->features & AHC_DT) != 0
1104 		 && (sstat2 & DUAL_EDGE_ERR) != 0) {
1105 			/*
1106 			 * This error applies regardless of
1107 			 * data direction, so ignore the value
1108 			 * in the phase table.
1109 			 */
1110 			mesg_out = MSG_INITIATOR_DET_ERR;
1111 		}
1112 
1113 		/*
1114 		 * We've set the hardware to assert ATN if we
1115 		 * get a parity error on "in" phases, so all we
1116 		 * need to do is stuff the message buffer with
1117 		 * the appropriate message.  "In" phases have set
1118 		 * mesg_out to something other than MSG_NOOP.
1119 		 */
1120 		if (mesg_out != MSG_NOOP) {
1121 			if (ahc->msg_type != MSG_TYPE_NONE)
1122 				ahc->send_msg_perror = TRUE;
1123 			else
1124 				ahc_outb(ahc, MSG_OUT, mesg_out);
1125 		}
1126 		/*
1127 		 * Force a renegotiation with this target just in
1128 		 * case we are out of sync for some external reason
1129 		 * unknown (or unreported) by the target.
1130 		 */
1131 		ahc_force_renegotiation(ahc);
1132 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1133 		ahc_unpause(ahc);
1134 	} else if ((status & SELTO) != 0) {
1135 		u_int	scbptr;
1136 
1137 		/* Stop the selection */
1138 		ahc_outb(ahc, SCSISEQ, 0);
1139 
1140 		/* No more pending messages */
1141 		ahc_clear_msg_state(ahc);
1142 
1143 		/* Clear interrupt state */
1144 		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1145 		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1146 
1147 		/*
1148 		 * Although the driver does not care about the
1149 		 * 'Selection in Progress' status bit, the busy
1150 		 * LED does.  SELINGO is only cleared by a successful
1151 		 * selection, so we must manually clear it to ensure
1152 		 * the LED turns off just in case no future successful
1153 		 * selections occur (e.g. no devices on the bus).
1154 		 */
1155 		ahc_outb(ahc, CLRSINT0, CLRSELINGO);
1156 
1157 		scbptr = ahc_inb(ahc, WAITING_SCBH);
1158 		ahc_outb(ahc, SCBPTR, scbptr);
1159 		scb_index = ahc_inb(ahc, SCB_TAG);
1160 
1161 		scb = ahc_lookup_scb(ahc, scb_index);
1162 		if (scb == NULL) {
1163 			printf("%s: ahc_intr - referenced scb not "
1164 			       "valid during SELTO scb(%d, %d)\n",
1165 			       ahc_name(ahc), scbptr, scb_index);
1166 			ahc_dump_card_state(ahc);
1167 		} else {
1168 #ifdef AHC_DEBUG
1169 			if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
1170 				ahc_print_path(ahc, scb);
1171 				printf("Saw Selection Timeout for SCB 0x%x\n",
1172 				       scb_index);
1173 			}
1174 #endif
1175 			/*
1176 			 * Force a renegotiation with this target just in
1177 			 * case the cable was pulled and will later be
1178 			 * re-attached.  The target may forget its negotiation
1179 			 * settings with us should it attempt to reselect
1180 			 * during the interruption.  The target will not issue
1181 			 * a unit attention in this case, so we must always
1182 			 * renegotiate.
1183 			 */
1184 			ahc_force_renegotiation(ahc);
1185 			ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1186 			ahc_freeze_devq(ahc, scb);
1187 		}
1188 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1189 		ahc_restart(ahc);
1190 	} else if ((status & BUSFREE) != 0
1191 		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
1192 		u_int lastphase;
1193 		u_int saved_scsiid;
1194 		u_int saved_lun;
1195 		u_int target;
1196 		u_int initiator_role_id;
1197 		char channel;
1198 		int printerror;
1199 
1200 		/*
1201 		 * Clear our selection hardware as soon as possible.
1202 		 * We may have an entry in the waiting Q for this target
1203 		 * that is affected by this busfree, and we don't want to
1204 		 * go about selecting the target while we handle the event.
1205 		 */
1206 		ahc_outb(ahc, SCSISEQ,
1207 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
1208 
1209 		/*
1210 		 * Disable busfree interrupts and clear the busfree
1211 		 * interrupt status.  We do this here so that several
1212 		 * bus transactions occur prior to clearing the SCSIINT
1213 		 * latch.  It can take a bit for the clearing to take effect.
1214 		 */
1215 		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1216 		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
1217 
1218 		/*
1219 		 * Look at what phase we were last in.
1220 		 * If it's message out, chances are pretty good
1221 		 * that the busfree was in response to one of
1222 		 * our abort requests.
1223 		 */
1224 		lastphase = ahc_inb(ahc, LASTPHASE);
1225 		saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1226 		saved_lun = ahc_inb(ahc, SAVED_LUN);
1227 		target = SCSIID_TARGET(ahc, saved_scsiid);
1228 		initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
1229 		channel = SCSIID_CHANNEL(ahc, saved_scsiid);
1230 		printerror = 1;
1231 
1232 		if (lastphase == P_MESGOUT) {
1233 			struct ahc_devinfo devinfo;
1234 			u_int tag;
1235 
1236 			ahc_fetch_devinfo(ahc, &devinfo);
1237 			tag = SCB_LIST_NULL;
1238 			if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
1239 			 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
1240 				if (ahc->msgout_buf[ahc->msgout_index - 1]
1241 				 == MSG_ABORT_TAG)
1242 					tag = scb->hscb->tag;
1243 				ahc_print_path(ahc, scb);
1244 				printf("SCB %d - Abort%s Completed.\n",
1245 				       scb->hscb->tag, tag == SCB_LIST_NULL ?
1246 				       "" : " Tag");
1247 				ahc_abort_scbs(ahc, target, channel,
1248 					       saved_lun, tag,
1249 					       ROLE_INITIATOR,
1250 					       CAM_REQ_ABORTED);
1251 				printerror = 0;
1252 			} else if (ahc_sent_msg(ahc, AHCMSG_1B,
1253 						MSG_BUS_DEV_RESET, TRUE)) {
1254 #ifdef __FreeBSD__
1255 				/*
1256 				 * Don't mark the user's request for this BDR
1257 				 * as completing with CAM_BDR_SENT.  CAM3
1258 				 * specifies CAM_REQ_CMP.
1259 				 */
1260 				if (scb != NULL
1261 				 && scb->io_ctx->ccb_h.func_code == XPT_RESET_DEV
1262 				 && ahc_match_scb(ahc, scb, target, channel,
1263 						  CAM_LUN_WILDCARD,
1264 						  SCB_LIST_NULL,
1265 						  ROLE_INITIATOR)) {
1266 					ahc_set_transaction_status(scb, CAM_REQ_CMP);
1267 				}
1268 #endif
1269 				ahc_compile_devinfo(&devinfo,
1270 						    initiator_role_id,
1271 						    target,
1272 						    CAM_LUN_WILDCARD,
1273 						    channel,
1274 						    ROLE_INITIATOR);
1275 				ahc_handle_devreset(ahc, &devinfo,
1276 						    CAM_BDR_SENT,
1277 						    "Bus Device Reset",
1278 						    /*verbose_level*/0);
1279 				printerror = 0;
1280 			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1281 						MSG_EXT_PPR, FALSE)) {
1282 				struct ahc_initiator_tinfo *tinfo;
1283 				struct ahc_tmode_tstate *tstate;
1284 
1285 				/*
1286 				 * PPR Rejected.  Try non-ppr negotiation
1287 				 * and retry command.
1288 				 */
1289 				tinfo = ahc_fetch_transinfo(ahc,
1290 							    devinfo.channel,
1291 							    devinfo.our_scsiid,
1292 							    devinfo.target,
1293 							    &tstate);
1294 				tinfo->curr.transport_version = 2;
1295 				tinfo->goal.transport_version = 2;
1296 				tinfo->goal.ppr_options = 0;
1297 				ahc_qinfifo_requeue_tail(ahc, scb);
1298 				printerror = 0;
1299 			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1300 						MSG_EXT_WDTR, FALSE)
1301 				|| ahc_sent_msg(ahc, AHCMSG_EXT,
1302 						MSG_EXT_SDTR, FALSE)) {
1303 				/*
1304 				 * Negotiation Rejected.  Go-async and
1305 				 * retry command.
1306 				 */
1307 				ahc_set_width(ahc, &devinfo,
1308 					      MSG_EXT_WDTR_BUS_8_BIT,
1309 					      AHC_TRANS_CUR|AHC_TRANS_GOAL,
1310 					      /*paused*/TRUE);
1311 				ahc_set_syncrate(ahc, &devinfo,
1312 						/*syncrate*/NULL,
1313 						/*period*/0, /*offset*/0,
1314 						/*ppr_options*/0,
1315 						AHC_TRANS_CUR|AHC_TRANS_GOAL,
1316 						/*paused*/TRUE);
1317 				ahc_qinfifo_requeue_tail(ahc, scb);
1318 				printerror = 0;
1319 			}
1320 		}
1321 		if (printerror != 0) {
1322 			u_int i;
1323 
1324 			if (scb != NULL) {
1325 				u_int tag;
1326 
1327 				if ((scb->hscb->control & TAG_ENB) != 0)
1328 					tag = scb->hscb->tag;
1329 				else
1330 					tag = SCB_LIST_NULL;
1331 				ahc_print_path(ahc, scb);
1332 				ahc_abort_scbs(ahc, target, channel,
1333 					       SCB_GET_LUN(scb), tag,
1334 					       ROLE_INITIATOR,
1335 					       CAM_UNEXP_BUSFREE);
1336 			} else {
1337 				/*
1338 				 * We had not fully identified this connection,
1339 				 * so we cannot abort anything.
1340 				 */
1341 				printf("%s: ", ahc_name(ahc));
1342 			}
1343 			for (i = 0; i < num_phases; i++) {
1344 				if (lastphase == ahc_phase_table[i].phase)
1345 					break;
1346 			}
1347 			/*
1348 			 * Renegotiate with this device at the
1349 			 * next opportunity just in case this busfree
1350 			 * is due to a negotiation mismatch with the
1351 			 * device.
1352 			 */
1353 			ahc_force_renegotiation(ahc);
1354 			printf("Unexpected busfree %s\n"
1355 			       "SEQADDR == 0x%x\n",
1356 			       ahc_phase_table[i].phasemsg,
1357 			       ahc_inb(ahc, SEQADDR0)
1358 				| (ahc_inb(ahc, SEQADDR1) << 8));
1359 		}
1360 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1361 		ahc_restart(ahc);
1362 	} else {
1363 		printf("%s: Missing case in ahc_handle_scsiint. status = %x\n",
1364 		       ahc_name(ahc), status);
1365 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1366 	}
1367 }
1368 
1369 /*
1370  * Force renegotiation to occur the next time we initiate
1371  * a command to the current device.
1372  */
1373 static void
1374 ahc_force_renegotiation(struct ahc_softc *ahc)
1375 {
1376 	struct	ahc_devinfo devinfo;
1377 	struct	ahc_initiator_tinfo *targ_info;
1378 	struct	ahc_tmode_tstate *tstate;
1379 
1380 	ahc_fetch_devinfo(ahc, &devinfo);
1381 	targ_info = ahc_fetch_transinfo(ahc,
1382 					devinfo.channel,
1383 					devinfo.our_scsiid,
1384 					devinfo.target,
1385 					&tstate);
1386 	ahc_update_neg_request(ahc, &devinfo, tstate,
1387 			       targ_info, AHC_NEG_IF_NON_ASYNC);
1388 }
1389 
1390 #define AHC_MAX_STEPS 2000
1391 void
1392 ahc_clear_critical_section(struct ahc_softc *ahc)
1393 {
1394 	int	stepping;
1395 	int	steps;
1396 	u_int	simode0;
1397 	u_int	simode1;
1398 
1399 	if (ahc->num_critical_sections == 0)
1400 		return;
1401 
1402 	stepping = FALSE;
1403 	steps = 0;
1404 	simode0 = 0;
1405 	simode1 = 0;
1406 	for (;;) {
1407 		struct	cs *cs;
1408 		u_int	seqaddr;
1409 		u_int	i;
1410 
1411 		seqaddr = ahc_inb(ahc, SEQADDR0)
1412 			| (ahc_inb(ahc, SEQADDR1) << 8);
1413 
1414 		/*
1415 		 * Seqaddr represents the next instruction to execute,
1416 		 * so we are really executing the instruction just
1417 		 * before it.
1418 		 */
1419 		if (seqaddr != 0)
1420 			seqaddr -= 1;
1421 		cs = ahc->critical_sections;
1422 		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
1423 
1424 			if (cs->begin < seqaddr && cs->end >= seqaddr)
1425 				break;
1426 		}
1427 
1428 		if (i == ahc->num_critical_sections)
1429 			break;
1430 
1431 		if (steps > AHC_MAX_STEPS) {
1432 			printf("%s: Infinite loop in critical section\n",
1433 			       ahc_name(ahc));
1434 			ahc_dump_card_state(ahc);
1435 			panic("critical section loop");
1436 		}
1437 
1438 		steps++;
1439 		if (stepping == FALSE) {
1440 
1441 			/*
1442 			 * Disable all interrupt sources so that the
1443 			 * sequencer will not be stuck by a pausing
1444 			 * interrupt condition while we attempt to
1445 			 * leave a critical section.
1446 			 */
1447 			simode0 = ahc_inb(ahc, SIMODE0);
1448 			ahc_outb(ahc, SIMODE0, 0);
1449 			simode1 = ahc_inb(ahc, SIMODE1);
1450 			if ((ahc->features & AHC_DT) != 0)
1451 				/*
1452 				 * On DT class controllers, we
1453 				 * use the enhanced busfree logic.
1454 				 * Unfortunately we cannot re-enable
1455 				 * busfree detection within the
1456 				 * current connection, so we must
1457 				 * leave it on while single stepping.
1458 				 */
1459 				ahc_outb(ahc, SIMODE1, ENBUSFREE);
1460 			else
1461 				ahc_outb(ahc, SIMODE1, 0);
1462 			ahc_outb(ahc, CLRINT, CLRSCSIINT);
1463 			ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP);
1464 			stepping = TRUE;
1465 		}
1466 		if ((ahc->features & AHC_DT) != 0) {
1467 			ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
1468 			ahc_outb(ahc, CLRINT, CLRSCSIINT);
1469 		}
1470 		ahc_outb(ahc, HCNTRL, ahc->unpause);
1471 		while (!ahc_is_paused(ahc))
1472 			ahc_delay(200);
1473 	}
1474 	if (stepping) {
1475 		ahc_outb(ahc, SIMODE0, simode0);
1476 		ahc_outb(ahc, SIMODE1, simode1);
1477 		ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP);
1478 	}
1479 }
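
/*
 * Worked example (editorial illustration only): if a critical section
 * entry has begin == 0x10 and end == 0x18 and the sequencer paused with
 * SEQADDR == 0x15, the loop above treats 0x14 (the instruction just
 * before the reported address) as the executing instruction.  Since
 * 0x10 < 0x14 and 0x18 >= 0x14, that address lies inside the section,
 * so the sequencer is single-stepped until it leaves the window or
 * AHC_MAX_STEPS is exceeded.
 */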
1480 
1481 /*
1482  * Clear any pending interrupt status.
1483  */
1484 void
1485 ahc_clear_intstat(struct ahc_softc *ahc)
1486 {
1487 	/* Clear any interrupt conditions this may have caused */
1488 	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
1489 				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
1490 				CLRREQINIT);
1491 	ahc_flush_device_writes(ahc);
1492 	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
1493 	ahc_flush_device_writes(ahc);
1494 	ahc_outb(ahc, CLRINT, CLRSCSIINT);
1495 	ahc_flush_device_writes(ahc);
1496 }
1497 
1498 /**************************** Debugging Routines ******************************/
1499 #ifdef AHC_DEBUG
1500 uint32_t ahc_debug = AHC_DEBUG_OPTS;
1501 #endif
1502 
1503 void
1504 ahc_print_scb(struct scb *scb)
1505 {
1506 	int i;
1507 
1508 	struct hardware_scb *hscb = scb->hscb;
1509 
1510 	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
1511 	       (void *)scb,
1512 	       hscb->control,
1513 	       hscb->scsiid,
1514 	       hscb->lun,
1515 	       hscb->cdb_len);
1516 	printf("Shared Data: ");
1517 	for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
1518 		printf("%#02x", hscb->shared_data.cdb[i]);
1519 	printf("        dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
1520 		ahc_le32toh(hscb->dataptr),
1521 		ahc_le32toh(hscb->datacnt),
1522 		ahc_le32toh(hscb->sgptr),
1523 		hscb->tag);
1524 	if (scb->sg_count > 0) {
1525 		for (i = 0; i < scb->sg_count; i++) {
1526 			printf("sg[%d] - Addr 0x%x%x : Length %d\n",
1527 			       i,
1528 			       (ahc_le32toh(scb->sg_list[i].len) >> 24
1529 			        & SG_HIGH_ADDR_BITS),
1530 			       ahc_le32toh(scb->sg_list[i].addr),
1531 			       ahc_le32toh(scb->sg_list[i].len));
1532 		}
1533 	}
1534 }
1535 
1536 /************************* Transfer Negotiation *******************************/
1537 /*
1538  * Allocate per target mode instance (ID we respond to as a target)
1539  * transfer negotiation data structures.
1540  */
1541 static struct ahc_tmode_tstate *
1542 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1543 {
1544 	struct ahc_tmode_tstate *master_tstate;
1545 	struct ahc_tmode_tstate *tstate;
1546 	int i;
1547 
1548 	master_tstate = ahc->enabled_targets[ahc->our_id];
1549 	if (channel == 'B') {
1550 		scsi_id += 8;
1551 		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
1552 	}
1553 	if (ahc->enabled_targets[scsi_id] != NULL
1554 	 && ahc->enabled_targets[scsi_id] != master_tstate)
1555 		panic("%s: ahc_alloc_tstate - Target already allocated",
1556 		      ahc_name(ahc));
1557 	tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate),
1558 						   M_DEVBUF, M_NOWAIT);
1559 	if (tstate == NULL)
1560 		return (NULL);
1561 
1562 	/*
1563 	 * If we have allocated a master tstate, copy user settings from
1564 	 * the master tstate (taken from SRAM or the EEPROM) for this
1565 	 * channel, but reset our current and goal settings to async/narrow
1566 	 * until an initiator talks to us.
1567 	 */
1568 	if (master_tstate != NULL) {
1569 		memcpy(tstate, master_tstate, sizeof(*tstate));
1570 		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
1571 		tstate->ultraenb = 0;
1572 		for (i = 0; i < AHC_NUM_TARGETS; i++) {
1573 			memset(&tstate->transinfo[i].curr, 0,
1574 			      sizeof(tstate->transinfo[i].curr));
1575 			memset(&tstate->transinfo[i].goal, 0,
1576 			      sizeof(tstate->transinfo[i].goal));
1577 		}
1578 	} else
1579 		memset(tstate, 0, sizeof(*tstate));
1580 	ahc->enabled_targets[scsi_id] = tstate;
1581 	return (tstate);
1582 }
1583 
1584 #ifdef AHC_TARGET_MODE
1585 /*
1586  * Free per target mode instance (ID we respond to as a target)
1587  * transfer negotiation data structures.
1588  */
1589 static void
1590 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
1591 {
1592 	struct ahc_tmode_tstate *tstate;
1593 
1594 	/*
1595 	 * Don't clean up our "master" tstate.
1596 	 * It has our default user settings.
1597 	 */
1598 	if (((channel == 'B' && scsi_id == ahc->our_id_b)
1599 	  || (channel == 'A' && scsi_id == ahc->our_id))
1600 	 && force == FALSE)
1601 		return;
1602 
1603 	if (channel == 'B')
1604 		scsi_id += 8;
1605 	tstate = ahc->enabled_targets[scsi_id];
1606 	if (tstate != NULL)
1607 		free(tstate, M_DEVBUF);
1608 	ahc->enabled_targets[scsi_id] = NULL;
1609 }
1610 #endif
1611 
1612 /*
1613  * Called when we have an active connection to a target on the bus,
1614  * this function finds the nearest syncrate to the input period,
1615  * limited by the capabilities of the bus, the connectivity of the
1616  * target, and the sync settings for the target.
1617  */
1618 struct ahc_syncrate *
1619 ahc_devlimited_syncrate(struct ahc_softc *ahc,
1620 			struct ahc_initiator_tinfo *tinfo,
1621 			u_int *period, u_int *ppr_options, role_t role)
1622 {
1623 	struct	ahc_transinfo *transinfo;
1624 	u_int	maxsync;
1625 
1626 	if ((ahc->features & AHC_ULTRA2) != 0) {
1627 		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1628 		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1629 			maxsync = AHC_SYNCRATE_DT;
1630 		} else {
1631 			maxsync = AHC_SYNCRATE_ULTRA;
1632 			/* Can't do DT on an SE bus */
1633 			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1634 		}
1635 	} else if ((ahc->features & AHC_ULTRA) != 0) {
1636 		maxsync = AHC_SYNCRATE_ULTRA;
1637 	} else {
1638 		maxsync = AHC_SYNCRATE_FAST;
1639 	}
1640 	/*
1641 	 * Never allow a value higher than our current goal
1642 	 * period otherwise we may allow a target initiated
1643 	 * negotiation to go above the limit as set by the
1644 	 * user.  In the case of an initiator initiated
1645 	 * sync negotiation, we limit based on the user
1646 	 * setting.  This allows the system to still accept
1647 	 * incoming negotiations even if target initiated
1648 	 * negotiation is not performed.
1649 	 */
1650 	if (role == ROLE_TARGET)
1651 		transinfo = &tinfo->user;
1652 	else
1653 		transinfo = &tinfo->goal;
1654 	*ppr_options &= transinfo->ppr_options;
1655 	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
1656 		maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2);
1657 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1658 	}
1659 	if (transinfo->period == 0) {
1660 		*period = 0;
1661 		*ppr_options = 0;
1662 		return (NULL);
1663 	}
1664 	*period = MAX(*period, transinfo->period);
1665 	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1666 }
1667 
1668 /*
1669  * Look up the valid period to SCSIRATE conversion in our table.
1670  * Return the period and offset that should be sent to the target
1671  * if this was the beginning of an SDTR.
1672  */
1673 struct ahc_syncrate *
1674 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1675 		  u_int *ppr_options, u_int maxsync)
1676 {
1677 	struct ahc_syncrate *syncrate;
1678 
1679 	if ((ahc->features & AHC_DT) == 0)
1680 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1681 
1682 	/* Skip all DT only entries if DT is not available */
1683 	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1684 	 && maxsync < AHC_SYNCRATE_ULTRA2)
1685 		maxsync = AHC_SYNCRATE_ULTRA2;
1686 
1687 	for (syncrate = &ahc_syncrates[maxsync];
1688 	     syncrate->rate != NULL;
1689 	     syncrate++) {
1690 
1691 		/*
1692 		 * The Ultra2 table doesn't go as low
1693 		 * as for the Fast/Ultra cards.
1694 		 */
1695 		if ((ahc->features & AHC_ULTRA2) != 0
1696 		 && (syncrate->sxfr_u2 == 0))
1697 			break;
1698 
1699 		if (*period <= syncrate->period) {
1700 			/*
1701 			 * When responding to a target that requests
1702 			 * sync, the requested rate may fall between
1703 			 * two rates that we can output, but still be
1704 			 * a rate that we can receive.  Because of this,
1705 			 * we want to respond to the target with
1706 			 * the same rate that it sent to us even
1707 			 * if the period we use to send data to it
1708 			 * is lower.  Only lower the response period
1709 			 * if we must.
1710 			 */
1711 			if (syncrate == &ahc_syncrates[maxsync])
1712 				*period = syncrate->period;
1713 
1714 			/*
1715 			 * At some speeds, we only support
1716 			 * ST transfers.
1717 			 */
1718 		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1719 				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1720 			break;
1721 		}
1722 	}
1723 
1724 	if ((*period == 0)
1725 	 || (syncrate->rate == NULL)
1726 	 || ((ahc->features & AHC_ULTRA2) != 0
1727 	  && (syncrate->sxfr_u2 == 0))) {
1728 		/* Use asynchronous transfers. */
1729 		*period = 0;
1730 		syncrate = NULL;
1731 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1732 	}
1733 	return (syncrate);
1734 }
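
/*
 * Illustrative sketch (not compiled): a period -> table entry -> period
 * round trip using the lookup above together with ahc_find_period()
 * below.  The literal values are hypothetical and the sxfr_u2 usage
 * assumes an Ultra2-class controller.
 */
#if 0
static void
example_rate_round_trip(struct ahc_softc *ahc)
{
	struct ahc_syncrate *syncrate;
	u_int period;
	u_int ppr_options;

	period = 10;				/* hypothetical request */
	ppr_options = MSG_EXT_PPR_DT_REQ;
	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
				     AHC_SYNCRATE_DT);
	if (syncrate != NULL) {
		/* Recover the period factor from the rate encoding. */
		period = ahc_find_period(ahc, syncrate->sxfr_u2,
					 AHC_SYNCRATE_DT);
	}
}
#endif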
1735 
1736 /*
1737  * Convert from an entry in our syncrate table to the SCSI equivalent
1738  * sync "period" factor.
1739  */
1740 u_int
1741 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1742 {
1743 	struct ahc_syncrate *syncrate;
1744 
1745 	if ((ahc->features & AHC_ULTRA2) != 0)
1746 		scsirate &= SXFR_ULTRA2;
1747 	else
1748 		scsirate &= SXFR;
1749 
1750 	syncrate = &ahc_syncrates[maxsync];
1751 	while (syncrate->rate != NULL) {
1752 
1753 		if ((ahc->features & AHC_ULTRA2) != 0) {
1754 			if (syncrate->sxfr_u2 == 0)
1755 				break;
1756 			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1757 				return (syncrate->period);
1758 		} else if (scsirate == (syncrate->sxfr & SXFR)) {
1759 			return (syncrate->period);
1760 		}
1761 		syncrate++;
1762 	}
1763 	return (0); /* async */
1764 }
1765 
1766 /*
1767  * Truncate the given synchronous offset to a value the
1768  * current adapter type and syncrate are capable of.
1769  */
1770 void
1771 ahc_validate_offset(struct ahc_softc *ahc,
1772 		    struct ahc_initiator_tinfo *tinfo,
1773 		    struct ahc_syncrate *syncrate,
1774 		    u_int *offset, int wide, role_t role)
1775 {
1776 	u_int maxoffset;
1777 
1778 	/* Limit offset to what we can do */
1779 	if (syncrate == NULL) {
1780 		maxoffset = 0;
1781 	} else if ((ahc->features & AHC_ULTRA2) != 0) {
1782 		maxoffset = MAX_OFFSET_ULTRA2;
1783 	} else {
1784 		if (wide)
1785 			maxoffset = MAX_OFFSET_16BIT;
1786 		else
1787 			maxoffset = MAX_OFFSET_8BIT;
1788 	}
1789 	*offset = MIN(*offset, maxoffset);
1790 	if (tinfo != NULL) {
1791 		if (role == ROLE_TARGET)
1792 			*offset = MIN(*offset, tinfo->user.offset);
1793 		else
1794 			*offset = MIN(*offset, tinfo->goal.offset);
1795 	}
1796 }
1797 
1798 /*
1799  * Truncate the given transfer width parameter to a value the
1800  * current adapter type is capable of.
1801  */
1802 void
1803 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1804 		   u_int *bus_width, role_t role)
1805 {
1806 	switch (*bus_width) {
1807 	default:
1808 		if (ahc->features & AHC_WIDE) {
1809 			/* Respond Wide */
1810 			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1811 			break;
1812 		}
1813 		/* FALLTHROUGH */
1814 	case MSG_EXT_WDTR_BUS_8_BIT:
1815 		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1816 		break;
1817 	}
1818 	if (tinfo != NULL) {
1819 		if (role == ROLE_TARGET)
1820 			*bus_width = MIN(tinfo->user.width, *bus_width);
1821 		else
1822 			*bus_width = MIN(tinfo->goal.width, *bus_width);
1823 	}
1824 }
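
/*
 * Illustrative sketch (not compiled): clamping a width/offset pair a
 * target asked for with the two helpers above.  The starting values
 * and the NULL syncrate are hypothetical.
 */
#if 0
static void
example_clamp_request(struct ahc_softc *ahc,
		      struct ahc_initiator_tinfo *tinfo)
{
	u_int bus_width;
	u_int offset;

	bus_width = MSG_EXT_WDTR_BUS_16_BIT;	/* as requested */
	offset = 0xff;				/* deliberately too large */
	ahc_validate_width(ahc, tinfo, &bus_width, ROLE_INITIATOR);
	ahc_validate_offset(ahc, tinfo, /*syncrate*/NULL, &offset,
			    bus_width == MSG_EXT_WDTR_BUS_16_BIT,
			    ROLE_INITIATOR);
	/* With a NULL syncrate, the offset is forced to 0 (async). */
}
#endif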
1825 
1826 /*
1827  * Update the bitmask of targets with which the controller should
1828  * negotiate at the next convenient opportunity.  This currently
1829  * means the next time we send the initial identify messages for
1830  * a new transaction.
1831  */
1832 int
1833 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1834 		       struct ahc_tmode_tstate *tstate,
1835 		       struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type)
1836 {
1837 	u_int auto_negotiate_orig;
1838 
1839 	auto_negotiate_orig = tstate->auto_negotiate;
1840 	if (neg_type == AHC_NEG_ALWAYS) {
1841 		/*
1842 		 * Force our "current" settings to be
1843 		 * unknown so that unless a bus reset
1844 		 * occurs the need to renegotiate is
1845 		 * recorded persistently.
1846 		 */
1847 		if ((ahc->features & AHC_WIDE) != 0)
1848 			tinfo->curr.width = AHC_WIDTH_UNKNOWN;
1849 		tinfo->curr.period = AHC_PERIOD_UNKNOWN;
1850 		tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
1851 	}
1852 	if (tinfo->curr.period != tinfo->goal.period
1853 	 || tinfo->curr.width != tinfo->goal.width
1854 	 || tinfo->curr.offset != tinfo->goal.offset
1855 	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
1856 	 || (neg_type == AHC_NEG_IF_NON_ASYNC
1857 	  && (tinfo->goal.offset != 0
1858 	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
1859 	   || tinfo->goal.ppr_options != 0)))
1860 		tstate->auto_negotiate |= devinfo->target_mask;
1861 	else
1862 		tstate->auto_negotiate &= ~devinfo->target_mask;
1863 
1864 	return (auto_negotiate_orig != tstate->auto_negotiate);
1865 }
1866 
1867 /*
1868  * Update the user/goal/curr tables of synchronous negotiation
1869  * parameters as well as, in the case of a current or active update,
1870  * any data structures on the host controller.  In the case of an
1871  * active update, the specified target is currently talking to us on
1872  * the bus, so the transfer parameter update must take effect
1873  * immediately.
1874  */
1875 void
1876 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1877 		 struct ahc_syncrate *syncrate, u_int period,
1878 		 u_int offset, u_int ppr_options, u_int type, int paused)
1879 {
1880 	struct	ahc_initiator_tinfo *tinfo;
1881 	struct	ahc_tmode_tstate *tstate;
1882 	u_int	old_period;
1883 	u_int	old_offset;
1884 	u_int	old_ppr;
1885 	int	active;
1886 	int	update_needed;
1887 
1888 	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1889 	update_needed = 0;
1890 
1891 	if (syncrate == NULL) {
1892 		period = 0;
1893 		offset = 0;
1894 	}
1895 
1896 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1897 				    devinfo->target, &tstate);
1898 
1899 	if ((type & AHC_TRANS_USER) != 0) {
1900 		tinfo->user.period = period;
1901 		tinfo->user.offset = offset;
1902 		tinfo->user.ppr_options = ppr_options;
1903 	}
1904 
1905 	if ((type & AHC_TRANS_GOAL) != 0) {
1906 		tinfo->goal.period = period;
1907 		tinfo->goal.offset = offset;
1908 		tinfo->goal.ppr_options = ppr_options;
1909 	}
1910 
1911 	old_period = tinfo->curr.period;
1912 	old_offset = tinfo->curr.offset;
1913 	old_ppr	   = tinfo->curr.ppr_options;
1914 
1915 	if ((type & AHC_TRANS_CUR) != 0
1916 	 && (old_period != period
1917 	  || old_offset != offset
1918 	  || old_ppr != ppr_options)) {
1919 		u_int	scsirate;
1920 
1921 		update_needed++;
1922 		scsirate = tinfo->scsirate;
1923 		if ((ahc->features & AHC_ULTRA2) != 0) {
1924 
1925 			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
1926 			if (syncrate != NULL) {
1927 				scsirate |= syncrate->sxfr_u2;
1928 				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
1929 					scsirate |= ENABLE_CRC;
1930 				else
1931 					scsirate |= SINGLE_EDGE;
1932 			}
1933 		} else {
1934 
1935 			scsirate &= ~(SXFR|SOFS);
1936 			/*
1937 			 * Ensure Ultra mode is set properly for
1938 			 * this target.
1939 			 */
1940 			tstate->ultraenb &= ~devinfo->target_mask;
1941 			if (syncrate != NULL) {
1942 				if (syncrate->sxfr & ULTRA_SXFR) {
1943 					tstate->ultraenb |=
1944 						devinfo->target_mask;
1945 				}
1946 				scsirate |= syncrate->sxfr & SXFR;
1947 				scsirate |= offset & SOFS;
1948 			}
1949 			if (active) {
1950 				u_int sxfrctl0;
1951 
1952 				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
1953 				sxfrctl0 &= ~FAST20;
1954 				if (tstate->ultraenb & devinfo->target_mask)
1955 					sxfrctl0 |= FAST20;
1956 				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
1957 			}
1958 		}
1959 		if (active) {
1960 			ahc_outb(ahc, SCSIRATE, scsirate);
1961 			if ((ahc->features & AHC_ULTRA2) != 0)
1962 				ahc_outb(ahc, SCSIOFFSET, offset);
1963 		}
1964 
1965 		tinfo->scsirate = scsirate;
1966 		tinfo->curr.period = period;
1967 		tinfo->curr.offset = offset;
1968 		tinfo->curr.ppr_options = ppr_options;
1969 
1970 		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1971 			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
1972 		if (bootverbose) {
1973 			if (offset != 0) {
1974 				printf("%s: target %d synchronous at %sMHz%s, "
1975 				       "offset = 0x%x\n", ahc_name(ahc),
1976 				       devinfo->target, syncrate->rate,
1977 				       (ppr_options & MSG_EXT_PPR_DT_REQ)
1978 				       ? " DT" : "", offset);
1979 			} else {
1980 				printf("%s: target %d using "
1981 				       "asynchronous transfers\n",
1982 				       ahc_name(ahc), devinfo->target);
1983 			}
1984 		}
1985 	}
1986 
1987 	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
1988 						tinfo, AHC_NEG_TO_GOAL);
1989 
1990 	if (update_needed)
1991 		ahc_update_pending_scbs(ahc);
1992 }
1993 
1994 /*
1995  * Update the user/goal/curr tables of wide negotiation
1996  * parameters as well as, in the case of a current or active update,
1997  * any data structures on the host controller.  In the case of an
1998  * active update, the specified target is currently talking to us on
1999  * the bus, so the transfer parameter update must take effect
2000  * immediately.
2001  */
2002 void
2003 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2004 	      u_int width, u_int type, int paused)
2005 {
2006 	struct	ahc_initiator_tinfo *tinfo;
2007 	struct	ahc_tmode_tstate *tstate;
2008 	u_int	oldwidth;
2009 	int	active;
2010 	int	update_needed;
2011 
2012 	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
2013 	update_needed = 0;
2014 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2015 				    devinfo->target, &tstate);
2016 
2017 	if ((type & AHC_TRANS_USER) != 0)
2018 		tinfo->user.width = width;
2019 
2020 	if ((type & AHC_TRANS_GOAL) != 0)
2021 		tinfo->goal.width = width;
2022 
2023 	oldwidth = tinfo->curr.width;
2024 	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
2025 		u_int	scsirate;
2026 
2027 		update_needed++;
2028 		scsirate =  tinfo->scsirate;
2029 		scsirate &= ~WIDEXFER;
2030 		if (width == MSG_EXT_WDTR_BUS_16_BIT)
2031 			scsirate |= WIDEXFER;
2032 
2033 		tinfo->scsirate = scsirate;
2034 
2035 		if (active)
2036 			ahc_outb(ahc, SCSIRATE, scsirate);
2037 
2038 		tinfo->curr.width = width;
2039 
2040 		ahc_send_async(ahc, devinfo->channel, devinfo->target,
2041 			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
2042 		if (bootverbose) {
2043 			printf("%s: target %d using %dbit transfers\n",
2044 			       ahc_name(ahc), devinfo->target,
2045 			       8 * (0x01 << width));
2046 		}
2047 	}
2048 
2049 	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
2050 						tinfo, AHC_NEG_TO_GOAL);
2051 	if (update_needed)
2052 		ahc_update_pending_scbs(ahc);
2053 }
2054 
2055 /*
2056  * Update the current state of tagged queuing for a given target.
2057  */
2058 void
2059 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2060 	     ahc_queue_alg alg)
2061 {
2062  	ahc_platform_set_tags(ahc, devinfo, alg);
2063  	ahc_send_async(ahc, devinfo->channel, devinfo->target,
2064  		       devinfo->lun, AC_TRANSFER_NEG, &alg);
2065 }
2066 
2067 /*
2068  * When the transfer settings for a connection change, update any
2069  * in-transit SCBs to contain the new data so the hardware will
2070  * be set correctly during future (re)selections.
2071  */
2072 static void
2073 ahc_update_pending_scbs(struct ahc_softc *ahc)
2074 {
2075 	struct	scb *pending_scb;
2076 	int	pending_scb_count;
2077 	int	i;
2078 	int	paused;
2079 	u_int	saved_scbptr;
2080 
2081 	/*
2082 	 * Traverse the pending SCB list and ensure that all of the
2083 	 * SCBs there have the proper settings.
2084 	 */
2085 	pending_scb_count = 0;
2086 	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
2087 		struct ahc_devinfo devinfo;
2088 		struct hardware_scb *pending_hscb;
2089 		struct ahc_initiator_tinfo *tinfo;
2090 		struct ahc_tmode_tstate *tstate;
2091 
2092 		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
2093 		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
2094 					    devinfo.our_scsiid,
2095 					    devinfo.target, &tstate);
2096 		pending_hscb = pending_scb->hscb;
2097 		pending_hscb->control &= ~ULTRAENB;
2098 		if ((tstate->ultraenb & devinfo.target_mask) != 0)
2099 			pending_hscb->control |= ULTRAENB;
2100 		pending_hscb->scsirate = tinfo->scsirate;
2101 		pending_hscb->scsioffset = tinfo->curr.offset;
2102 		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
2103 		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
2104 			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
2105 			pending_hscb->control &= ~MK_MESSAGE;
2106 		}
2107 		ahc_sync_scb(ahc, pending_scb,
2108 			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2109 		pending_scb_count++;
2110 	}
2111 
2112 	if (pending_scb_count == 0)
2113 		return;
2114 
2115 	if (ahc_is_paused(ahc)) {
2116 		paused = 1;
2117 	} else {
2118 		paused = 0;
2119 		ahc_pause(ahc);
2120 	}
2121 
2122 	saved_scbptr = ahc_inb(ahc, SCBPTR);
2123 	/* Ensure that the hscbs down on the card match the new information */
2124 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
2125 		struct	hardware_scb *pending_hscb;
2126 		u_int	control;
2127 		u_int	scb_tag;
2128 
2129 		ahc_outb(ahc, SCBPTR, i);
2130 		scb_tag = ahc_inb(ahc, SCB_TAG);
2131 		pending_scb = ahc_lookup_scb(ahc, scb_tag);
2132 		if (pending_scb == NULL)
2133 			continue;
2134 
2135 		pending_hscb = pending_scb->hscb;
2136 		control = ahc_inb(ahc, SCB_CONTROL);
2137 		control &= ~(ULTRAENB|MK_MESSAGE);
2138 		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
2139 		ahc_outb(ahc, SCB_CONTROL, control);
2140 		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
2141 		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
2142 	}
2143 	ahc_outb(ahc, SCBPTR, saved_scbptr);
2144 
2145 	if (paused == 0)
2146 		ahc_unpause(ahc);
2147 }
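
/*
 * Illustrative sketch (not compiled): the pause bracket used above.
 * SCBPTR-relative registers may only be touched while the sequencer is
 * paused, and we only unpause if we were the ones who paused it.
 */
#if 0
static void
example_pause_bracket(struct ahc_softc *ahc)
{
	int already_paused;

	already_paused = ahc_is_paused(ahc);
	if (!already_paused)
		ahc_pause(ahc);
	/* ... access SCBPTR-relative registers here ... */
	if (!already_paused)
		ahc_unpause(ahc);
}
#endif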
2148 
2149 /**************************** Pathing Information *****************************/
2150 static void
2151 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2152 {
2153 	u_int	saved_scsiid;
2154 	role_t	role;
2155 	int	our_id;
2156 
2157 	if (ahc_inb(ahc, SSTAT0) & TARGET)
2158 		role = ROLE_TARGET;
2159 	else
2160 		role = ROLE_INITIATOR;
2161 
2162 	if (role == ROLE_TARGET
2163 	 && (ahc->features & AHC_MULTI_TID) != 0
2164 	 && (ahc_inb(ahc, SEQ_FLAGS)
2165  	   & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
2166 		/* We were selected, so pull our id from TARGIDIN */
2167 		our_id = ahc_inb(ahc, TARGIDIN) & OID;
2168 	} else if ((ahc->features & AHC_ULTRA2) != 0)
2169 		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
2170 	else
2171 		our_id = ahc_inb(ahc, SCSIID) & OID;
2172 
2173 	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
2174 	ahc_compile_devinfo(devinfo,
2175 			    our_id,
2176 			    SCSIID_TARGET(ahc, saved_scsiid),
2177 			    ahc_inb(ahc, SAVED_LUN),
2178 			    SCSIID_CHANNEL(ahc, saved_scsiid),
2179 			    role);
2180 }
2181 
2182 struct ahc_phase_table_entry*
2183 ahc_lookup_phase_entry(int phase)
2184 {
2185 	struct ahc_phase_table_entry *entry;
2186 	struct ahc_phase_table_entry *last_entry;
2187 
2188 	/*
2189 	 * num_phases doesn't include the default entry which
2190 	 * will be returned if the phase doesn't match.
2191 	 */
2192 	last_entry = &ahc_phase_table[num_phases];
2193 	for (entry = ahc_phase_table; entry < last_entry; entry++) {
2194 		if (phase == entry->phase)
2195 			break;
2196 	}
2197 	return (entry);
2198 }
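
/*
 * Illustrative sketch (not compiled): mapping the raw bus phase to a
 * printable name with the lookup above, much as the message loop below
 * does for its debug output.
 */
#if 0
static void
example_print_bus_phase(struct ahc_softc *ahc)
{
	u_int bus_phase;

	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
	printf("%s: bus is in the %s phase\n", ahc_name(ahc),
	       ahc_lookup_phase_entry(bus_phase)->phasemsg);
}
#endif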
2199 
2200 void
2201 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
2202 		    u_int lun, char channel, role_t role)
2203 {
2204 	devinfo->our_scsiid = our_id;
2205 	devinfo->target = target;
2206 	devinfo->lun = lun;
2207 	devinfo->target_offset = target;
2208 	devinfo->channel = channel;
2209 	devinfo->role = role;
2210 	if (channel == 'B')
2211 		devinfo->target_offset += 8;
2212 	devinfo->target_mask = (0x01 << devinfo->target_offset);
2213 }
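
/*
 * Illustrative sketch (not compiled): for a hypothetical device at
 * target 3 on channel 'B', the routine above yields
 * target_offset = 3 + 8 = 11 and target_mask = 1 << 11 = 0x0800.
 */
#if 0
static void
example_compile_devinfo(void)
{
	struct ahc_devinfo devinfo;

	ahc_compile_devinfo(&devinfo, /*our_id*/7, /*target*/3, /*lun*/0,
			    'B', ROLE_INITIATOR);
	/* devinfo.target_mask == 0x0800 at this point. */
}
#endif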
2214 
2215 void
2216 ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2217 {
2218 	printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
2219 	       devinfo->target, devinfo->lun);
2220 }
2221 
2222 static void
2223 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2224 		struct scb *scb)
2225 {
2226 	role_t	role;
2227 	int	our_id;
2228 
2229 	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
2230 	role = ROLE_INITIATOR;
2231 	if ((scb->flags & SCB_TARGET_SCB) != 0)
2232 		role = ROLE_TARGET;
2233 	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
2234 			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
2235 }
2236 
2237 
2238 /************************ Message Phase Processing ****************************/
2239 static void
2240 ahc_assert_atn(struct ahc_softc *ahc)
2241 {
2242 	u_int scsisigo;
2243 
2244 	scsisigo = ATNO;
2245 	if ((ahc->features & AHC_DT) == 0)
2246 		scsisigo |= ahc_inb(ahc, SCSISIGI);
2247 	ahc_outb(ahc, SCSISIGO, scsisigo);
2248 }
2249 
2250 /*
2251  * When an initiator transaction with the MK_MESSAGE flag either reconnects
2252  * or enters the initial message out phase, we are interrupted.  Fill our
2253  * outgoing message buffer with the appropriate message and begin handling
2254  * the message phase(s) manually.
2255  */
2256 static void
2257 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2258 			   struct scb *scb)
2259 {
2260 	/*
2261 	 * To facilitate adding multiple messages together,
2262 	 * each routine should increment the index and len
2263 	 * variables instead of setting them explicitly.
2264 	 */
2265 	ahc->msgout_index = 0;
2266 	ahc->msgout_len = 0;
2267 
2268 	if ((scb->flags & SCB_DEVICE_RESET) == 0
2269 	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
2270 		u_int identify_msg;
2271 
2272 		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
2273 		if ((scb->hscb->control & DISCENB) != 0)
2274 			identify_msg |= MSG_IDENTIFY_DISCFLAG;
2275 		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
2276 		ahc->msgout_len++;
2277 
2278 		if ((scb->hscb->control & TAG_ENB) != 0) {
2279 			ahc->msgout_buf[ahc->msgout_index++] =
2280 			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
2281 			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
2282 			ahc->msgout_len += 2;
2283 		}
2284 	}
2285 
2286 	if (scb->flags & SCB_DEVICE_RESET) {
2287 		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
2288 		ahc->msgout_len++;
2289 		ahc_print_path(ahc, scb);
2290 		printf("Bus Device Reset Message Sent\n");
2291 		/*
2292 		 * Clear our selection hardware in advance of
2293 		 * the busfree.  We may have an entry in the waiting
2294 		 * Q for this target, and we don't want to go about
2295 		 * selecting while we handle the busfree and blow it
2296 		 * away.
2297 		 */
2298 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2299 	} else if ((scb->flags & SCB_ABORT) != 0) {
2300 		if ((scb->hscb->control & TAG_ENB) != 0)
2301 			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
2302 		else
2303 			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
2304 		ahc->msgout_len++;
2305 		ahc_print_path(ahc, scb);
2306 		printf("Abort%s Message Sent\n",
2307 		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
2308 		/*
2309 		 * Clear our selection hardware in advance of
2310 		 * the busfree.  We may have an entry in the waiting
2311 		 * Q for this target, and we don't want to go about
2312 		 * selecting while we handle the busfree and blow it
2313 		 * away.
2314 		 */
2315 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2316 	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
2317 		ahc_build_transfer_msg(ahc, devinfo);
2318 	} else {
2319 		printf("ahc_intr: AWAITING_MSG for an SCB that "
2320 		       "does not have a waiting message\n");
2321 		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
2322 		       devinfo->target_mask);
2323 		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
2324 		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
2325 		      ahc_inb(ahc, MSG_OUT), scb->flags);
2326 	}
2327 
2328 	/*
2329 	 * Clear the MK_MESSAGE flag from the SCB so we aren't
2330 	 * asked to send this message again.
2331 	 */
2332 	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
2333 	scb->hscb->control &= ~MK_MESSAGE;
2334 	ahc->msgout_index = 0;
2335 	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2336 }
2337 
2338 /*
2339  * Build an appropriate transfer negotiation message for the
2340  * currently active target.
2341  */
2342 static void
2343 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2344 {
2345 	/*
2346 	 * We need to initiate transfer negotiations.
2347 	 * If our current and goal settings are identical,
2348 	 * we want to renegotiate due to a check condition.
2349 	 */
2350 	struct	ahc_initiator_tinfo *tinfo;
2351 	struct	ahc_tmode_tstate *tstate;
2352 	struct	ahc_syncrate *rate;
2353 	int	dowide;
2354 	int	dosync;
2355 	int	doppr;
2356 	u_int	period;
2357 	u_int	ppr_options;
2358 	u_int	offset;
2359 
2360 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2361 				    devinfo->target, &tstate);
2362 	/*
2363 	 * Filter our period based on the current connection.
2364 	 * If we can't perform DT transfers on this segment (not in LVD
2365 	 * mode for instance), then our decision to issue a PPR message
2366 	 * may change.
2367 	 */
2368 	period = tinfo->goal.period;
2369 	ppr_options = tinfo->goal.ppr_options;
2370 	/* Target initiated PPR is not allowed in the SCSI spec */
2371 	if (devinfo->role == ROLE_TARGET)
2372 		ppr_options = 0;
2373 	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2374 				       &ppr_options, devinfo->role);
2375 	dowide = tinfo->curr.width != tinfo->goal.width;
2376 	dosync = tinfo->curr.period != period;
2377 	/*
2378 	 * Only use PPR if we have options that need it, even if the device
2379 	 * claims to support it.  There might be an expander in the way
2380 	 * that doesn't.
2381 	 */
2382 	doppr = ppr_options != 0;
2383 
2384 	if (!dowide && !dosync && !doppr) {
2385 		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
2386 		dosync = tinfo->goal.offset != 0;
2387 	}
2388 
2389 	if (!dowide && !dosync && !doppr) {
2390 		/*
2391 		 * Force async with a WDTR message if we have a wide bus,
2392 		 * or just issue an SDTR with a 0 offset.
2393 		 */
2394 		if ((ahc->features & AHC_WIDE) != 0)
2395 			dowide = 1;
2396 		else
2397 			dosync = 1;
2398 
2399 		if (bootverbose) {
2400 			ahc_print_devinfo(ahc, devinfo);
2401 			printf("Ensuring async\n");
2402 		}
2403 	}
2404 
2405 	/* Target initiated PPR is not allowed in the SCSI spec */
2406 	if (devinfo->role == ROLE_TARGET)
2407 		doppr = 0;
2408 
2409 	/*
2410 	 * Both the PPR message and SDTR message require the
2411 	 * goal syncrate to be limited to what the target device
2412 	 * is capable of handling (based on whether an LVD->SE
2413 	 * expander is on the bus), so combine these two cases.
2414 	 * Regardless, guarantee that if we are using both WDTR and SDTR
2415 	 * messages, WDTR comes first.
2416 	 */
2417 	if (doppr || (dosync && !dowide)) {
2418 
2419 		offset = tinfo->goal.offset;
2420 		ahc_validate_offset(ahc, tinfo, rate, &offset,
2421 				    doppr ? tinfo->goal.width
2422 					  : tinfo->curr.width,
2423 				    devinfo->role);
2424 		if (doppr) {
2425 			ahc_construct_ppr(ahc, devinfo, period, offset,
2426 					  tinfo->goal.width, ppr_options);
2427 		} else {
2428 			ahc_construct_sdtr(ahc, devinfo, period, offset);
2429 		}
2430 	} else {
2431 		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
2432 	}
2433 }
2434 
2435 /*
2436  * Build a synchronous negotiation message in our message
2437  * buffer based on the input parameters.
2438  */
2439 static void
2440 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2441 		   u_int period, u_int offset)
2442 {
2443 	if (offset == 0)
2444 		period = AHC_ASYNC_XFER_PERIOD;
2445 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2446 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
2447 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
2448 	ahc->msgout_buf[ahc->msgout_index++] = period;
2449 	ahc->msgout_buf[ahc->msgout_index++] = offset;
2450 	ahc->msgout_len += 5;
2451 	if (bootverbose) {
2452 		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
2453 		       ahc_name(ahc), devinfo->channel, devinfo->target,
2454 		       devinfo->lun, period, offset);
2455 	}
2456 }
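
/*
 * Illustrative sketch (not compiled): the five bytes the routine above
 * appends to msgout_buf for a hypothetical SDTR with period factor 0x0c
 * and offset 0x08.
 */
#if 0
static const uint8_t example_sdtr_bytes[] = {
	MSG_EXTENDED,		/* extended message header */
	MSG_EXT_SDTR_LEN,	/* payload length */
	MSG_EXT_SDTR,		/* extended message code */
	0x0c,			/* transfer period factor */
	0x08			/* REQ/ACK offset */
};
#endif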
2457 
2458 /*
2459  * Build a wide negotiation message in our message
2460  * buffer based on the input parameters.
2461  */
2462 static void
2463 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2464 		   u_int bus_width)
2465 {
2466 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2467 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
2468 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
2469 	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2470 	ahc->msgout_len += 4;
2471 	if (bootverbose) {
2472 		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
2473 		       ahc_name(ahc), devinfo->channel, devinfo->target,
2474 		       devinfo->lun, bus_width);
2475 	}
2476 }
2477 
2478 /*
2479  * Build a parallel protocol request message in our message
2480  * buffer based on the input parameters.
2481  */
2482 static void
2483 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2484 		  u_int period, u_int offset, u_int bus_width,
2485 		  u_int ppr_options)
2486 {
2487 	if (offset == 0)
2488 		period = AHC_ASYNC_XFER_PERIOD;
2489 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2490 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
2491 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
2492 	ahc->msgout_buf[ahc->msgout_index++] = period;
2493 	ahc->msgout_buf[ahc->msgout_index++] = 0;
2494 	ahc->msgout_buf[ahc->msgout_index++] = offset;
2495 	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2496 	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
2497 	ahc->msgout_len += 8;
2498 	if (bootverbose) {
2499 		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
2500 		       "offset %x, ppr_options %x\n", ahc_name(ahc),
2501 		       devinfo->channel, devinfo->target, devinfo->lun,
2502 		       bus_width, period, offset, ppr_options);
2503 	}
2504 }
2505 
2506 /*
2507  * Clear any active message state.
2508  */
2509 static void
2510 ahc_clear_msg_state(struct ahc_softc *ahc)
2511 {
2512 	ahc->msgout_len = 0;
2513 	ahc->msgin_index = 0;
2514 	ahc->msg_type = MSG_TYPE_NONE;
2515 	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
2516 		/*
2517 		 * The target didn't care to respond to our
2518 		 * message request, so clear ATN.
2519 		 */
2520 		ahc_outb(ahc, CLRSINT1, CLRATNO);
2521 	}
2522 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
2523 	ahc_outb(ahc, SEQ_FLAGS2,
2524 		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
2525 }
2526 
2527 static void
2528 ahc_handle_proto_violation(struct ahc_softc *ahc)
2529 {
2530 	struct	ahc_devinfo devinfo;
2531 	struct	scb *scb;
2532 	u_int	scbid;
2533 	u_int	seq_flags;
2534 	u_int	curphase;
2535 	u_int	lastphase;
2536 	int	found;
2537 
2538 	ahc_fetch_devinfo(ahc, &devinfo);
2539 	scbid = ahc_inb(ahc, SCB_TAG);
2540 	scb = ahc_lookup_scb(ahc, scbid);
2541 	seq_flags = ahc_inb(ahc, SEQ_FLAGS);
2542 	curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2543 	lastphase = ahc_inb(ahc, LASTPHASE);
2544 	if ((seq_flags & NOT_IDENTIFIED) != 0) {
2545 
2546 		/*
2547 		 * The reconnecting target either did not send an
2548 		 * identify message, or did, but we didn't find an SCB
2549 		 * to match.
2550 		 */
2551 		ahc_print_devinfo(ahc, &devinfo);
2552 		printf("Target did not send an IDENTIFY message. "
2553 		       "LASTPHASE = 0x%x.\n", lastphase);
2554 		scb = NULL;
2555 	} else if (scb == NULL) {
2556 		/*
2557 		 * We don't seem to have an SCB active for this
2558 		 * transaction.  Print an error and reset the bus.
2559 		 */
2560 		ahc_print_devinfo(ahc, &devinfo);
2561 		printf("No SCB found during protocol violation\n");
2562 		goto proto_violation_reset;
2563 	} else {
2564 		ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
2565 		if ((seq_flags & NO_CDB_SENT) != 0) {
2566 			ahc_print_path(ahc, scb);
2567 			printf("No or incomplete CDB sent to device.\n");
2568 		} else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
2569 			/*
2570 			 * The target never bothered to provide status to
2571 			 * us prior to completing the command.  Since we don't
2572 			 * know the disposition of this command, we must attempt
2573 			 * to abort it.  Assert ATN and prepare to send an abort
2574 			 * message.
2575 			 */
2576 			ahc_print_path(ahc, scb);
2577 			printf("Completed command without status.\n");
2578 		} else {
2579 			ahc_print_path(ahc, scb);
2580 			printf("Unknown protocol violation.\n");
2581 			ahc_dump_card_state(ahc);
2582 		}
2583 	}
2584 	if ((lastphase & ~P_DATAIN_DT) == 0
2585 	 || lastphase == P_COMMAND) {
2586 proto_violation_reset:
2587 		/*
2588 		 * Target either went directly to data/command
2589 		 * phase or didn't respond to our ATN.
2590 		 * The only safe thing to do is to blow
2591 		 * it away with a bus reset.
2592 		 */
2593 		found = ahc_reset_channel(ahc, 'A', TRUE);
2594 		printf("%s: Issued Channel %c Bus Reset. "
2595 		       "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
2596 	} else {
2597 		/*
2598 		 * Leave the selection hardware off in case
2599 		 * this abort attempt will affect yet to
2600 		 * be sent commands.
2601 		 */
2602 		ahc_outb(ahc, SCSISEQ,
2603 			 ahc_inb(ahc, SCSISEQ) & ~ENSELO);
2604 		ahc_assert_atn(ahc);
2605 		ahc_outb(ahc, MSG_OUT, HOST_MSG);
2606 		if (scb == NULL) {
2607 			ahc_print_devinfo(ahc, &devinfo);
2608 			ahc->msgout_buf[0] = MSG_ABORT_TASK;
2609 			ahc->msgout_len = 1;
2610 			ahc->msgout_index = 0;
2611 			ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2612 		} else {
2613 			ahc_print_path(ahc, scb);
2614 			scb->flags |= SCB_ABORT;
2615 		}
2616 		printf("Protocol violation %s.  Attempting to abort.\n",
2617 		       ahc_lookup_phase_entry(curphase)->phasemsg);
2618 	}
2619 }
2620 
2621 /*
2622  * Manual message loop handler.
2623  */
2624 static void
2625 ahc_handle_message_phase(struct ahc_softc *ahc)
2626 {
2627 	struct	ahc_devinfo devinfo;
2628 	u_int	bus_phase;
2629 	int	end_session;
2630 
2631 	ahc_fetch_devinfo(ahc, &devinfo);
2632 	end_session = FALSE;
2633 	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2634 
2635 reswitch:
2636 	switch (ahc->msg_type) {
2637 	case MSG_TYPE_INITIATOR_MSGOUT:
2638 	{
2639 		int lastbyte;
2640 		int phasemis;
2641 		int msgdone;
2642 
2643 		if (ahc->msgout_len == 0)
2644 			panic("HOST_MSG_LOOP interrupt with no active message");
2645 
2646 #ifdef AHC_DEBUG
2647 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2648 			ahc_print_devinfo(ahc, &devinfo);
2649 			printf("INITIATOR_MSG_OUT");
2650 		}
2651 #endif
2652 		phasemis = bus_phase != P_MESGOUT;
2653 		if (phasemis) {
2654 #ifdef AHC_DEBUG
2655 			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2656 				printf(" PHASEMIS %s\n",
2657 				       ahc_lookup_phase_entry(bus_phase)
2658 							     ->phasemsg);
2659 			}
2660 #endif
2661 			if (bus_phase == P_MESGIN) {
2662 				/*
2663 				 * Change gears and see if
2664 				 * this message is of interest to
2665 				 * us or should be passed back to
2666 				 * the sequencer.
2667 				 */
2668 				ahc_outb(ahc, CLRSINT1, CLRATNO);
2669 				ahc->send_msg_perror = FALSE;
2670 				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
2671 				ahc->msgin_index = 0;
2672 				goto reswitch;
2673 			}
2674 			end_session = TRUE;
2675 			break;
2676 		}
2677 
2678 		if (ahc->send_msg_perror) {
2679 			ahc_outb(ahc, CLRSINT1, CLRATNO);
2680 			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2681 #ifdef AHC_DEBUG
2682 			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2683 				printf(" byte 0x%x\n", ahc->send_msg_perror);
2684 #endif
2685 			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
2686 			break;
2687 		}
2688 
2689 		msgdone	= ahc->msgout_index == ahc->msgout_len;
2690 		if (msgdone) {
2691 			/*
2692 			 * The target has requested a retry.
2693 			 * Re-assert ATN, reset our message index to
2694 			 * 0, and try again.
2695 			 */
2696 			ahc->msgout_index = 0;
2697 			ahc_assert_atn(ahc);
2698 		}
2699 
2700 		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
2701 		if (lastbyte) {
2702 			/* Last byte is signified by dropping ATN */
2703 			ahc_outb(ahc, CLRSINT1, CLRATNO);
2704 		}
2705 
2706 		/*
2707 		 * Clear our interrupt status and present
2708 		 * the next byte on the bus.
2709 		 */
2710 		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2711 #ifdef AHC_DEBUG
2712 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2713 			printf(" byte 0x%x\n",
2714 			       ahc->msgout_buf[ahc->msgout_index]);
2715 #endif
2716 		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2717 		break;
2718 	}
2719 	case MSG_TYPE_INITIATOR_MSGIN:
2720 	{
2721 		int phasemis;
2722 		int message_done;
2723 
2724 #ifdef AHC_DEBUG
2725 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2726 			ahc_print_devinfo(ahc, &devinfo);
2727 			printf("INITIATOR_MSG_IN");
2728 		}
2729 #endif
2730 		phasemis = bus_phase != P_MESGIN;
2731 		if (phasemis) {
2732 #ifdef AHC_DEBUG
2733 			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2734 				printf(" PHASEMIS %s\n",
2735 				       ahc_lookup_phase_entry(bus_phase)
2736 							     ->phasemsg);
2737 			}
2738 #endif
2739 			ahc->msgin_index = 0;
2740 			if (bus_phase == P_MESGOUT
2741 			 && (ahc->send_msg_perror == TRUE
2742 			  || (ahc->msgout_len != 0
2743 			   && ahc->msgout_index == 0))) {
2744 				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2745 				goto reswitch;
2746 			}
2747 			end_session = TRUE;
2748 			break;
2749 		}
2750 
2751 		/* Pull the byte in without acking it */
2752 		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
2753 #ifdef AHC_DEBUG
2754 		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2755 			printf(" byte 0x%x\n",
2756 			       ahc->msgin_buf[ahc->msgin_index]);
2757 #endif
2758 
2759 		message_done = ahc_parse_msg(ahc, &devinfo);
2760 
2761 		if (message_done) {
2762 			/*
2763 			 * Clear our incoming message buffer in case there
2764 			 * is another message following this one.
2765 			 */
2766 			ahc->msgin_index = 0;
2767 
2768 			/*
2769 			 * If this message elicited a response,
2770 			 * assert ATN so the target takes us to the
2771 			 * message out phase.
2772 			 */
2773 			if (ahc->msgout_len != 0) {
2774 #ifdef AHC_DEBUG
2775 				if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2776 					ahc_print_devinfo(ahc, &devinfo);
2777 					printf("Asserting ATN for response\n");
2778 				}
2779 #endif
2780 				ahc_assert_atn(ahc);
2781 			}
2782 		} else
2783 			ahc->msgin_index++;
2784 
2785 		if (message_done == MSGLOOP_TERMINATED) {
2786 			end_session = TRUE;
2787 		} else {
2788 			/* Ack the byte */
2789 			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2790 			ahc_inb(ahc, SCSIDATL);
2791 		}
2792 		break;
2793 	}
2794 	case MSG_TYPE_TARGET_MSGIN:
2795 	{
2796 		int msgdone;
2797 		int msgout_request;
2798 
2799 		if (ahc->msgout_len == 0)
2800 			panic("Target MSGIN with no active message");
2801 
2802 		/*
2803 		 * If we interrupted a mesgout session, the initiator
2804 		 * will not know this until our first REQ.  So, we
2805 		 * only honor mesgout requests after we've sent our
2806 		 * first byte.
2807 		 */
2808 		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
2809 		 && ahc->msgout_index > 0)
2810 			msgout_request = TRUE;
2811 		else
2812 			msgout_request = FALSE;
2813 
2814 		if (msgout_request) {
2815 
2816 			/*
2817 			 * Change gears and see if
2818 			 * this message is of interest to
2819 			 * us or should be passed back to
2820 			 * the sequencer.
2821 			 */
2822 			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
2823 			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
2824 			ahc->msgin_index = 0;
2825 			/* Dummy read to REQ for first byte */
2826 			ahc_inb(ahc, SCSIDATL);
2827 			ahc_outb(ahc, SXFRCTL0,
2828 				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2829 			break;
2830 		}
2831 
2832 		msgdone = ahc->msgout_index == ahc->msgout_len;
2833 		if (msgdone) {
2834 			ahc_outb(ahc, SXFRCTL0,
2835 				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2836 			end_session = TRUE;
2837 			break;
2838 		}
2839 
2840 		/*
2841 		 * Present the next byte on the bus.
2842 		 */
2843 		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2844 		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2845 		break;
2846 	}
2847 	case MSG_TYPE_TARGET_MSGOUT:
2848 	{
2849 		int lastbyte;
2850 		int msgdone;
2851 
2852 		/*
2853 		 * The initiator signals that this is
2854 		 * the last byte by dropping ATN.
2855 		 */
2856 		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
2857 
2858 		/*
2859 		 * Read the latched byte, but turn off SPIOEN first
2860 		 * so that we don't inadvertently cause a REQ for the
2861 		 * next byte.
2862 		 */
2863 		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2864 		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
2865 		msgdone = ahc_parse_msg(ahc, &devinfo);
2866 		if (msgdone == MSGLOOP_TERMINATED) {
2867 			/*
2868 			 * The message is *really* done in that it caused
2869 			 * us to go to bus free.  The sequencer has already
2870 			 * been reset at this point, so pull the ejection
2871 			 * handle.
2872 			 */
2873 			return;
2874 		}
2875 
2876 		ahc->msgin_index++;
2877 
2878 		/*
2879 		 * XXX Read spec about initiator dropping ATN too soon
2880 		 *     and use msgdone to detect it.
2881 		 */
2882 		if (msgdone == MSGLOOP_MSGCOMPLETE) {
2883 			ahc->msgin_index = 0;
2884 
2885 			/*
2886 			 * If this message elicited a response, transition
2887 			 * to the Message in phase and send it.
2888 			 */
2889 			if (ahc->msgout_len != 0) {
2890 				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
2891 				ahc_outb(ahc, SXFRCTL0,
2892 					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2893 				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
2894 				ahc->msgin_index = 0;
2895 				break;
2896 			}
2897 		}
2898 
2899 		if (lastbyte)
2900 			end_session = TRUE;
2901 		else {
2902 			/* Ask for the next byte. */
2903 			ahc_outb(ahc, SXFRCTL0,
2904 				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2905 		}
2906 
2907 		break;
2908 	}
2909 	default:
2910 		panic("Unknown REQINIT message type");
2911 	}
2912 
2913 	if (end_session) {
2914 		ahc_clear_msg_state(ahc);
2915 		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
2916 	} else
2917 		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
2918 }
2919 
2920 /*
2921  * See if we sent a particular extended message to the target.
2922  * If "full" is true, return true only if the target saw the full
2923  * message.  If "full" is false, return true if the target saw at
2924  * least the first byte of the message.
2925  */
2926 static int
2927 ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
2928 {
2929 	int found;
2930 	u_int index;
2931 
2932 	found = FALSE;
2933 	index = 0;
2934 
2935 	while (index < ahc->msgout_len) {
2936 		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
2937 			u_int end_index;
2938 
2939 			end_index = index + 1 + ahc->msgout_buf[index + 1];
2940 			if (ahc->msgout_buf[index+2] == msgval
2941 			 && type == AHCMSG_EXT) {
2942 
2943 				if (full) {
2944 					if (ahc->msgout_index > end_index)
2945 						found = TRUE;
2946 				} else if (ahc->msgout_index > index)
2947 					found = TRUE;
2948 			}
2949 			index = end_index;
2950 		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
2951 			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
2952 
2953 			/* Skip tag type and tag id or residue param */
2954 			index += 2;
2955 		} else {
2956 			/* Single byte message */
2957 			if (type == AHCMSG_1B
2958 			 && ahc->msgout_buf[index] == msgval
2959 			 && ahc->msgout_index > index)
2960 				found = TRUE;
2961 			index++;
2962 		}
2963 
2964 		if (found)
2965 			break;
2966 	}
2967 	return (found);
2968 }
2969 
2970 /*
2971  * Wait for a complete incoming message, parse it, and respond accordingly.
2972  */
2973 static int
2974 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2975 {
2976 	struct	ahc_initiator_tinfo *tinfo;
2977 	struct	ahc_tmode_tstate *tstate;
2978 	int	reject;
2979 	int	done;
2980 	int	response;
2981 	u_int	targ_scsirate;
2982 
2983 	done = MSGLOOP_IN_PROG;
2984 	response = FALSE;
2985 	reject = FALSE;
2986 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2987 				    devinfo->target, &tstate);
2988 	targ_scsirate = tinfo->scsirate;
2989 
2990 	/*
2991 	 * Parse as much of the message as is available,
2992 	 * rejecting it if we don't support it.  When
2993 	 * the entire message is available and has been
2994 	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
2995 	 * that we have parsed an entire message.
2996 	 *
2997 	 * In the case of extended messages, we accept the length
2998 	 * byte outright and perform more checking once we know the
2999 	 * extended message type.
3000 	 */
3001 	switch (ahc->msgin_buf[0]) {
3002 	case MSG_DISCONNECT:
3003 	case MSG_SAVEDATAPOINTER:
3004 	case MSG_CMDCOMPLETE:
3005 	case MSG_RESTOREPOINTERS:
3006 	case MSG_IGN_WIDE_RESIDUE:
3007 		/*
3008 		 * End our message loop as these are messages
3009 		 * the sequencer handles on its own.
3010 		 */
3011 		done = MSGLOOP_TERMINATED;
3012 		break;
3013 	case MSG_MESSAGE_REJECT:
3014 		response = ahc_handle_msg_reject(ahc, devinfo);
3015 		/* FALLTHROUGH */
3016 	case MSG_NOOP:
3017 		done = MSGLOOP_MSGCOMPLETE;
3018 		break;
3019 	case MSG_EXTENDED:
3020 	{
3021 		/* Wait for enough of the message to begin validation */
3022 		if (ahc->msgin_index < 2)
3023 			break;
3024 		switch (ahc->msgin_buf[2]) {
3025 		case MSG_EXT_SDTR:
3026 		{
3027 			struct	 ahc_syncrate *syncrate;
3028 			u_int	 period;
3029 			u_int	 ppr_options;
3030 			u_int	 offset;
3031 			u_int	 saved_offset;
3032 
3033 			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
3034 				reject = TRUE;
3035 				break;
3036 			}
3037 
3038 			/*
3039 			 * Wait until we have both args before validating
3040 			 * and acting on this message.
3041 			 *
3042 			 * Add one to MSG_EXT_SDTR_LEN to account for
3043 			 * the extended message preamble.
3044 			 */
3045 			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
3046 				break;
3047 
3048 			period = ahc->msgin_buf[3];
3049 			ppr_options = 0;
3050 			saved_offset = offset = ahc->msgin_buf[4];
3051 			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
3052 							   &ppr_options,
3053 							   devinfo->role);
3054 			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
3055 					    targ_scsirate & WIDEXFER,
3056 					    devinfo->role);
3057 			if (bootverbose) {
3058 				printf("(%s:%c:%d:%d): Received "
3059 				       "SDTR period %x, offset %x\n\t"
3060 				       "Filtered to period %x, offset %x\n",
3061 				       ahc_name(ahc), devinfo->channel,
3062 				       devinfo->target, devinfo->lun,
3063 				       ahc->msgin_buf[3], saved_offset,
3064 				       period, offset);
3065 			}
3066 			ahc_set_syncrate(ahc, devinfo,
3067 					 syncrate, period,
3068 					 offset, ppr_options,
3069 					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3070 					 /*paused*/TRUE);
3071 
3072 			/*
3073 			 * See if we initiated Sync Negotiation
3074 			 * and didn't have to fall down to async
3075 			 * transfers.
3076 			 */
3077 			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
3078 				/* We started it */
3079 				if (saved_offset != offset) {
3080 					/* Went too low - force async */
3081 					reject = TRUE;
3082 				}
3083 			} else {
3084 				/*
3085 				 * Send our own SDTR in reply
3086 				 */
3087 				if (bootverbose
3088 				 && devinfo->role == ROLE_INITIATOR) {
3089 					printf("(%s:%c:%d:%d): Target "
3090 					       "Initiated SDTR\n",
3091 					       ahc_name(ahc), devinfo->channel,
3092 					       devinfo->target, devinfo->lun);
3093 				}
3094 				ahc->msgout_index = 0;
3095 				ahc->msgout_len = 0;
3096 				ahc_construct_sdtr(ahc, devinfo,
3097 						   period, offset);
3098 				ahc->msgout_index = 0;
3099 				response = TRUE;
3100 			}
3101 			done = MSGLOOP_MSGCOMPLETE;
3102 			break;
3103 		}
3104 		case MSG_EXT_WDTR:
3105 		{
3106 			u_int bus_width;
3107 			u_int saved_width;
3108 			u_int sending_reply;
3109 
3110 			sending_reply = FALSE;
3111 			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
3112 				reject = TRUE;
3113 				break;
3114 			}
3115 
3116 			/*
3117 			 * Wait until we have our arg before validating
3118 			 * and acting on this message.
3119 			 *
3120 			 * Add one to MSG_EXT_WDTR_LEN to account for
3121 			 * the extended message preamble.
3122 			 */
3123 			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
3124 				break;
3125 
3126 			bus_width = ahc->msgin_buf[3];
3127 			saved_width = bus_width;
3128 			ahc_validate_width(ahc, tinfo, &bus_width,
3129 					   devinfo->role);
3130 			if (bootverbose) {
3131 				printf("(%s:%c:%d:%d): Received WDTR "
3132 				       "%x filtered to %x\n",
3133 				       ahc_name(ahc), devinfo->channel,
3134 				       devinfo->target, devinfo->lun,
3135 				       saved_width, bus_width);
3136 			}
3137 
3138 			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
3139 				/*
3140 				 * Don't send a WDTR back to the
3141 				 * target, since we asked first.
3142 				 * If the width went higher than our
3143 				 * request, reject it.
3144 				 */
3145 				if (saved_width > bus_width) {
3146 					reject = TRUE;
3147 					printf("(%s:%c:%d:%d): requested %dBit "
3148 					       "transfers.  Rejecting...\n",
3149 					       ahc_name(ahc), devinfo->channel,
3150 					       devinfo->target, devinfo->lun,
3151 					       8 * (0x01 << bus_width));
3152 					bus_width = 0;
3153 				}
3154 			} else {
3155 				/*
3156 				 * Send our own WDTR in reply
3157 				 */
3158 				if (bootverbose
3159 				 && devinfo->role == ROLE_INITIATOR) {
3160 					printf("(%s:%c:%d:%d): Target "
3161 					       "Initiated WDTR\n",
3162 					       ahc_name(ahc), devinfo->channel,
3163 					       devinfo->target, devinfo->lun);
3164 				}
3165 				ahc->msgout_index = 0;
3166 				ahc->msgout_len = 0;
3167 				ahc_construct_wdtr(ahc, devinfo, bus_width);
3168 				ahc->msgout_index = 0;
3169 				response = TRUE;
3170 				sending_reply = TRUE;
3171 			}
3172 			ahc_set_width(ahc, devinfo, bus_width,
3173 				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3174 				      /*paused*/TRUE);
3175 			/* After a wide message, we are async */
3176 			ahc_set_syncrate(ahc, devinfo,
3177 					 /*syncrate*/NULL, /*period*/0,
3178 					 /*offset*/0, /*ppr_options*/0,
3179 					 AHC_TRANS_ACTIVE, /*paused*/TRUE);
3180 			if (sending_reply == FALSE && reject == FALSE) {
3181 
3182 				if (tinfo->goal.offset) {
3183 					ahc->msgout_index = 0;
3184 					ahc->msgout_len = 0;
3185 					ahc_build_transfer_msg(ahc, devinfo);
3186 					ahc->msgout_index = 0;
3187 					response = TRUE;
3188 				}
3189 			}
3190 			done = MSGLOOP_MSGCOMPLETE;
3191 			break;
3192 		}
3193 		case MSG_EXT_PPR:
3194 		{
3195 			struct	ahc_syncrate *syncrate;
3196 			u_int	period;
3197 			u_int	offset;
3198 			u_int	bus_width;
3199 			u_int	ppr_options;
3200 			u_int	saved_width;
3201 			u_int	saved_offset;
3202 			u_int	saved_ppr_options;
3203 
3204 			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
3205 				reject = TRUE;
3206 				break;
3207 			}
3208 
3209 			/*
3210 			 * Wait until we have all args before validating
3211 			 * and acting on this message.
3212 			 *
3213 			 * Add one to MSG_EXT_PPR_LEN to account for
3214 			 * the extended message preamble.
3215 			 */
3216 			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
3217 				break;
3218 
3219 			period = ahc->msgin_buf[3];
3220 			offset = ahc->msgin_buf[5];
3221 			bus_width = ahc->msgin_buf[6];
3222 			saved_width = bus_width;
3223 			ppr_options = ahc->msgin_buf[7];
3224 			/*
3225 			 * According to the spec, a DT only
3226 			 * period factor with no DT option
3227 			 * set implies async.
3228 			 */
3229 			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
3230 			 && period == 9)
3231 				offset = 0;
3232 			saved_ppr_options = ppr_options;
3233 			saved_offset = offset;
3234 
3235 			/*
3236 			 * Mask out any options we don't support
3237 			 * on any controller.  Transfer options are
3238 			 * only available if we are negotiating wide.
3239 			 */
3240 			ppr_options &= MSG_EXT_PPR_DT_REQ;
3241 			if (bus_width == 0)
3242 				ppr_options = 0;
3243 
3244 			ahc_validate_width(ahc, tinfo, &bus_width,
3245 					   devinfo->role);
3246 			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
3247 							   &ppr_options,
3248 							   devinfo->role);
3249 			ahc_validate_offset(ahc, tinfo, syncrate,
3250 					    &offset, bus_width,
3251 					    devinfo->role);
3252 
3253 			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
3254 				/*
3255 				 * If we are unable to do any of the
3256 				 * requested options (we went too low),
3257 				 * then we'll have to reject the message.
3258 				 */
3259 				if (saved_width > bus_width
3260 				 || saved_offset != offset
3261 				 || saved_ppr_options != ppr_options) {
3262 					reject = TRUE;
3263 					period = 0;
3264 					offset = 0;
3265 					bus_width = 0;
3266 					ppr_options = 0;
3267 					syncrate = NULL;
3268 				}
3269 			} else {
3270 				if (devinfo->role != ROLE_TARGET)
3271 					printf("(%s:%c:%d:%d): Target "
3272 					       "Initiated PPR\n",
3273 					       ahc_name(ahc), devinfo->channel,
3274 					       devinfo->target, devinfo->lun);
3275 				else
3276 					printf("(%s:%c:%d:%d): Initiator "
3277 					       "Initiated PPR\n",
3278 					       ahc_name(ahc), devinfo->channel,
3279 					       devinfo->target, devinfo->lun);
3280 				ahc->msgout_index = 0;
3281 				ahc->msgout_len = 0;
3282 				ahc_construct_ppr(ahc, devinfo, period, offset,
3283 						  bus_width, ppr_options);
3284 				ahc->msgout_index = 0;
3285 				response = TRUE;
3286 			}
3287 			if (bootverbose) {
3288 				printf("(%s:%c:%d:%d): Received PPR width %x, "
3289 				       "period %x, offset %x, options %x\n"
3290 				       "\tFiltered to width %x, period %x, "
3291 				       "offset %x, options %x\n",
3292 				       ahc_name(ahc), devinfo->channel,
3293 				       devinfo->target, devinfo->lun,
3294 				       saved_width, ahc->msgin_buf[3],
3295 				       saved_offset, saved_ppr_options,
3296 				       bus_width, period, offset, ppr_options);
3297 			}
3298 			ahc_set_width(ahc, devinfo, bus_width,
3299 				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3300 				      /*paused*/TRUE);
3301 			ahc_set_syncrate(ahc, devinfo,
3302 					 syncrate, period,
3303 					 offset, ppr_options,
3304 					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3305 					 /*paused*/TRUE);
3306 			done = MSGLOOP_MSGCOMPLETE;
3307 			break;
3308 		}
3309 		default:
3310 			/* Unknown extended message.  Reject it. */
3311 			reject = TRUE;
3312 			break;
3313 		}
3314 		break;
3315 	}
3316 #ifdef AHC_TARGET_MODE
3317 	case MSG_BUS_DEV_RESET:
3318 		ahc_handle_devreset(ahc, devinfo,
3319 				    CAM_BDR_SENT,
3320 				    "Bus Device Reset Received",
3321 				    /*verbose_level*/0);
3322 		ahc_restart(ahc);
3323 		done = MSGLOOP_TERMINATED;
3324 		break;
3325 	case MSG_ABORT_TAG:
3326 	case MSG_ABORT:
3327 	case MSG_CLEAR_QUEUE:
3328 	{
3329 		int tag;
3330 
3331 		/* Target mode messages */
3332 		if (devinfo->role != ROLE_TARGET) {
3333 			reject = TRUE;
3334 			break;
3335 		}
3336 		tag = SCB_LIST_NULL;
3337 		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
3338 			tag = ahc_inb(ahc, INITIATOR_TAG);
3339 		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3340 			       devinfo->lun, tag, ROLE_TARGET,
3341 			       CAM_REQ_ABORTED);
3342 
3343 		tstate = ahc->enabled_targets[devinfo->our_scsiid];
3344 		if (tstate != NULL) {
3345 			struct ahc_tmode_lstate* lstate;
3346 
3347 			lstate = tstate->enabled_luns[devinfo->lun];
3348 			if (lstate != NULL) {
3349 				ahc_queue_lstate_event(ahc, lstate,
3350 						       devinfo->our_scsiid,
3351 						       ahc->msgin_buf[0],
3352 						       /*arg*/tag);
3353 				ahc_send_lstate_events(ahc, lstate);
3354 			}
3355 		}
3356 		ahc_restart(ahc);
3357 		done = MSGLOOP_TERMINATED;
3358 		break;
3359 	}
3360 #endif
3361 	case MSG_TERM_IO_PROC:
3362 	default:
3363 		reject = TRUE;
3364 		break;
3365 	}
3366 
3367 	if (reject) {
3368 		/*
3369 		 * Setup to reject the message.
3370 		 */
3371 		ahc->msgout_index = 0;
3372 		ahc->msgout_len = 1;
3373 		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
3374 		done = MSGLOOP_MSGCOMPLETE;
3375 		response = TRUE;
3376 	}
3377 
3378 	if (done != MSGLOOP_IN_PROG && !response)
3379 		/* Clear the outgoing message buffer */
3380 		ahc->msgout_len = 0;
3381 
3382 	return (done);
3383 }
3384 
3385 /*
3386  * Process a message reject message.
3387  */
3388 static int
3389 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3390 {
3391 	/*
3392 	 * What we care about here is if we had an
3393 	 * outstanding SDTR or WDTR message for this
3394 	 * target.  If we did, this is a signal that
3395 	 * the target is refusing negotiation.
3396 	 */
3397 	struct scb *scb;
3398 	struct ahc_initiator_tinfo *tinfo;
3399 	struct ahc_tmode_tstate *tstate;
3400 	u_int scb_index;
3401 	u_int last_msg;
3402 	int   response = 0;
3403 
3404 	scb_index = ahc_inb(ahc, SCB_TAG);
3405 	scb = ahc_lookup_scb(ahc, scb_index);
3406 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
3407 				    devinfo->our_scsiid,
3408 				    devinfo->target, &tstate);
3409 	/* Might be necessary */
3410 	last_msg = ahc_inb(ahc, LAST_MSG);
3411 
3412 	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
3413 		/*
3414 		 * Target does not support the PPR message.
3415 		 * Attempt to negotiate SPI-2 style.
3416 		 */
3417 		if (bootverbose) {
3418 			printf("(%s:%c:%d:%d): PPR Rejected. "
3419 			       "Trying WDTR/SDTR\n",
3420 			       ahc_name(ahc), devinfo->channel,
3421 			       devinfo->target, devinfo->lun);
3422 		}
3423 		tinfo->goal.ppr_options = 0;
3424 		tinfo->curr.transport_version = 2;
3425 		tinfo->goal.transport_version = 2;
3426 		ahc->msgout_index = 0;
3427 		ahc->msgout_len = 0;
3428 		ahc_build_transfer_msg(ahc, devinfo);
3429 		ahc->msgout_index = 0;
3430 		response = 1;
3431 	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
3432 
3433 		/* note 8bit xfers */
3434 		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
3435 		       "8bit transfers\n", ahc_name(ahc),
3436 		       devinfo->channel, devinfo->target, devinfo->lun);
3437 		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3438 			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3439 			      /*paused*/TRUE);
3440 		/*
3441 		 * No need to clear the sync rate.  If the target
3442 		 * did not accept the command, our syncrate is
3443 		 * unaffected.  If the target started the negotiation,
3444 		 * but rejected our response, we already cleared the
3445 		 * sync rate before sending our WDTR.
3446 		 */
3447 		if (tinfo->goal.offset != tinfo->curr.offset) {
3448 
3449 			/* Start the sync negotiation */
3450 			ahc->msgout_index = 0;
3451 			ahc->msgout_len = 0;
3452 			ahc_build_transfer_msg(ahc, devinfo);
3453 			ahc->msgout_index = 0;
3454 			response = 1;
3455 		}
3456 	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
3457 		/* note asynch xfers and clear flag */
3458 		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
3459 				 /*offset*/0, /*ppr_options*/0,
3460 				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3461 				 /*paused*/TRUE);
3462 		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
3463 		       "Using asynchronous transfers\n",
3464 		       ahc_name(ahc), devinfo->channel,
3465 		       devinfo->target, devinfo->lun);
3466 	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
3467 		int tag_type;
3468 		int mask;
3469 
3470 		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
3471 
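		/*
		 * An informal reading of the masks below: ~0x23 clears the
		 * tag-enable bit together with the two tag-type bits, so
		 * tagged queuing is dropped entirely, while ~0x03 clears
		 * only the tag-type bits so that simple tags remain in use.
		 */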
3472 		if (tag_type == MSG_SIMPLE_TASK) {
3473 			printf("(%s:%c:%d:%d): refuses tagged commands.  "
3474 			       "Performing non-tagged I/O\n", ahc_name(ahc),
3475 			       devinfo->channel, devinfo->target, devinfo->lun);
3476 			ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE);
3477 			mask = ~0x23;
3478 		} else {
3479 			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
3480 			       "Performing simple queue tagged I/O only\n",
3481 			       ahc_name(ahc), devinfo->channel, devinfo->target,
3482 			       devinfo->lun, tag_type == MSG_ORDERED_TASK
3483 			       ? "ordered" : "head of queue");
3484 			ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC);
3485 			mask = ~0x03;
3486 		}
3487 
3488 		/*
3489 		 * Resend the identify for this CCB as the target
3490 		 * may believe that the selection is invalid otherwise.
3491 		 */
3492 		ahc_outb(ahc, SCB_CONTROL,
3493 			 ahc_inb(ahc, SCB_CONTROL) & mask);
3494 	 	scb->hscb->control &= mask;
3495 		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
3496 					/*type*/MSG_SIMPLE_TASK);
3497 		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3498 		ahc_assert_atn(ahc);
3499 
3500 		/*
3501 		 * This transaction is now at the head of
3502 		 * the untagged queue for this target.
3503 		 */
3504 		if ((ahc->flags & AHC_SCB_BTT) == 0) {
3505 			struct scb_tailq *untagged_q;
3506 
3507 			untagged_q =
3508 			    &(ahc->untagged_queues[devinfo->target_offset]);
3509 			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
3510 			scb->flags |= SCB_UNTAGGEDQ;
3511 		}
3512 		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
3513 			     scb->hscb->tag);
3514 
3515 		/*
3516 		 * Requeue all tagged commands for this target
3517 		 * currently in our possession so they can be
3518 		 * converted to untagged commands.
3519 		 */
3520 		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3521 				   SCB_GET_CHANNEL(ahc, scb),
3522 				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3523 				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
3524 				   SEARCH_COMPLETE);
3525 	} else {
3526 		/*
3527 		 * Otherwise, we ignore it.
3528 		 */
3529 		printf("%s:%c:%d: Message reject for %x -- ignored\n",
3530 		       ahc_name(ahc), devinfo->channel, devinfo->target,
3531 		       last_msg);
3532 	}
3533 	return (response);
3534 }
3535 
3536 /*
3537  * Process an ignore wide residue message.
3538  */
3539 static void
3540 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3541 {
3542 	u_int scb_index;
3543 	struct scb *scb;
3544 
3545 	scb_index = ahc_inb(ahc, SCB_TAG);
3546 	scb = ahc_lookup_scb(ahc, scb_index);
3547 	/*
3548 	 * XXX Actually check data direction in the sequencer?
3549 	 * Perhaps add datadir to some spare bits in the hscb?
3550 	 */
3551 	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
3552 	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
3553 		/*
3554 		 * Ignore the message if we haven't
3555 		 * seen an appropriate data phase yet.
3556 		 */
3557 	} else {
3558 		/*
3559 		 * If the residual occurred on the last
3560 		 * transfer and the transfer request was
3561 		 * expected to end on an odd count, do
3562 		 * nothing.  Otherwise, subtract a byte
3563 		 * and update the residual count accordingly.
3564 		 */
3565 		uint32_t sgptr;
3566 
3567 		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3568 		if ((sgptr & SG_LIST_NULL) != 0
3569 		 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
3570 			/*
3571 			 * If the residual occurred on the last
3572 			 * transfer and the transfer request was
3573 			 * expected to end on an odd count, do
3574 			 * nothing.
3575 			 */
3576 		} else {
3577 			struct ahc_dma_seg *sg;
3578 			uint32_t data_cnt;
3579 			uint32_t data_addr;
3580 			uint32_t sglen;
3581 
3582 			/* Pull in the rest of the sgptr */
3583 			sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3584 			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3585 			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
3586 			sgptr &= SG_PTR_MASK;
3587 			data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+3) << 24)
3588 				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
3589 				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
3590 				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
3591 
3592 			data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
3593 				  | (ahc_inb(ahc, SHADDR + 2) << 16)
3594 				  | (ahc_inb(ahc, SHADDR + 1) << 8)
3595 				  | (ahc_inb(ahc, SHADDR));
3596 
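			/*
			 * IGNORE WIDE RESIDUE tells us the final byte of the
			 * last wide transfer was a pad byte, so one byte less
			 * was consumed than the hardware counted: credit the
			 * residual with one extra byte and back the shadow
			 * address up accordingly before finding the S/G
			 * element that byte belongs to.
			 */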
3597 			data_cnt += 1;
3598 			data_addr -= 1;
3599 
3600 			sg = ahc_sg_bus_to_virt(scb, sgptr);
3601 			/*
3602 			 * The residual sg ptr points to the next S/G
3603 			 * to load so we must go back one.
3604 			 */
3605 			sg--;
3606 			sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
3607 			if (sg != scb->sg_list
3608 			 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
3609 
3610 				sg--;
3611 				sglen = ahc_le32toh(sg->len);
3612 				/*
3613 				 * Preserve High Address and SG_LIST bits
3614 				 * while setting the count to 1.
3615 				 */
3616 				data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
3617 				data_addr = ahc_le32toh(sg->addr)
3618 					  + (sglen & AHC_SG_LEN_MASK) - 1;
3619 
3620 				/*
3621 				 * Increment sg so it points to the
3622 				 * "next" sg.
3623 				 */
3624 				sg++;
3625 				sgptr = ahc_sg_virt_to_bus(scb, sg);
3626 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
3627 					 sgptr >> 24);
3628 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
3629 					 sgptr >> 16);
3630 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
3631 					 sgptr >> 8);
3632 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
3633 			}
3634 
3635 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
3636 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
3637 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
3638 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
3639 		}
3640 	}
3641 }
3642 
3643 
3644 /*
3645  * Reinitialize the data pointers for the active transfer
3646  * based on its current residual.
3647  */
3648 static void
3649 ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
3650 {
3651 	struct	 scb *scb;
3652 	struct	 ahc_dma_seg *sg;
3653 	u_int	 scb_index;
3654 	uint32_t sgptr;
3655 	uint32_t resid;
3656 	uint32_t dataptr;
3657 
3658 	scb_index = ahc_inb(ahc, SCB_TAG);
3659 	scb = ahc_lookup_scb(ahc, scb_index);
3660 	sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3661 	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3662 	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
3663 	      |	ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3664 
3665 	sgptr &= SG_PTR_MASK;
3666 	sg = ahc_sg_bus_to_virt(scb, sgptr);
3667 
3668 	/* The residual sg_ptr always points to the next sg */
3669 	sg--;
3670 
3671 	resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
3672 	      | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
3673 	      | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);
3674 
3675 	dataptr = ahc_le32toh(sg->addr)
3676 		+ (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
3677 		- resid;
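	/*
	 * dataptr is the host address of the next byte to move: the
	 * segment's base plus its length, minus the bytes still owed.
	 * On 39-bit capable chips the address bits above bit 31 live in
	 * the upper byte of the S/G length and are loaded separately
	 * below via HADDR with HADDLDSEL0 selected.
	 */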
3678 	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
3679 		u_int dscommand1;
3680 
3681 		dscommand1 = ahc_inb(ahc, DSCOMMAND1);
3682 		ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
3683 		ahc_outb(ahc, HADDR,
3684 			 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
3685 		ahc_outb(ahc, DSCOMMAND1, dscommand1);
3686 	}
3687 	ahc_outb(ahc, HADDR + 3, dataptr >> 24);
3688 	ahc_outb(ahc, HADDR + 2, dataptr >> 16);
3689 	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
3690 	ahc_outb(ahc, HADDR, dataptr);
3691 	ahc_outb(ahc, HCNT + 2, resid >> 16);
3692 	ahc_outb(ahc, HCNT + 1, resid >> 8);
3693 	ahc_outb(ahc, HCNT, resid);
3694 	if ((ahc->features & AHC_ULTRA2) == 0) {
3695 		ahc_outb(ahc, STCNT + 2, resid >> 16);
3696 		ahc_outb(ahc, STCNT + 1, resid >> 8);
3697 		ahc_outb(ahc, STCNT, resid);
3698 	}
3699 }
3700 
3701 /*
3702  * Handle the effects of issuing a bus device reset message.
3703  */
3704 static void
3705 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3706 		    cam_status status, char *message, int verbose_level)
3707 {
3708 #ifdef AHC_TARGET_MODE
3709 	struct ahc_tmode_tstate* tstate;
3710 	u_int lun;
3711 #endif
3712 	int found;
3713 
3714 	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3715 			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
3716 			       status);
3717 
3718 #ifdef AHC_TARGET_MODE
3719 	/*
3720 	 * Send an immediate notify ccb to all target mode peripheral
3721 	 * drivers affected by this action.
3722 	 */
3723 	tstate = ahc->enabled_targets[devinfo->our_scsiid];
3724 	if (tstate != NULL) {
3725 		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
3726 			struct ahc_tmode_lstate* lstate;
3727 
3728 			lstate = tstate->enabled_luns[lun];
3729 			if (lstate == NULL)
3730 				continue;
3731 
3732 			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
3733 					       MSG_BUS_DEV_RESET, /*arg*/0);
3734 			ahc_send_lstate_events(ahc, lstate);
3735 		}
3736 	}
3737 #endif
3738 
3739 	/*
3740 	 * Go back to async/narrow transfers and renegotiate.
3741 	 */
3742 	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3743 		      AHC_TRANS_CUR, /*paused*/TRUE);
3744 	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
3745 			 /*period*/0, /*offset*/0, /*ppr_options*/0,
3746 			 AHC_TRANS_CUR, /*paused*/TRUE);
3747 
3748 	ahc_send_async(ahc, devinfo->channel, devinfo->target,
3749 		       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
3750 
3751 	if (message != NULL
3752 	 && (verbose_level <= bootverbose))
3753 		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
3754 		       message, devinfo->channel, devinfo->target, found);
3755 }
3756 
3757 #ifdef AHC_TARGET_MODE
3758 static void
3759 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3760 		       struct scb *scb)
3761 {
3762 
3763 	/*
3764 	 * To facilitate adding multiple messages together,
3765 	 * each routine should increment the index and len
3766 	 * variables instead of setting them explicitly.
3767 	 */
3768 	ahc->msgout_index = 0;
3769 	ahc->msgout_len = 0;
3770 
3771 	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
3772 		ahc_build_transfer_msg(ahc, devinfo);
3773 	else
3774 		panic("ahc_intr: AWAITING target message with no message");
3775 
3776 	ahc->msgout_index = 0;
3777 	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3778 }
3779 #endif
3780 /**************************** Initialization **********************************/
3781 /*
3782  * Allocate a controller structure for a new device
3783  * and perform initial initialization.
3784  */
3785 struct ahc_softc *
3786 ahc_alloc(void *platform_arg, char *name)
3787 {
3788 	struct  ahc_softc *ahc;
3789 	int	i;
3790 
3791 #ifndef	__FreeBSD__
3792 	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
3793 	if (!ahc) {
3794 		printf("aic7xxx: cannot malloc softc!\n");
3795 		free(name, M_DEVBUF);
3796 		return NULL;
3797 	}
3798 #else
3799 	ahc = device_get_softc((device_t)platform_arg);
3800 #endif
3801 	memset(ahc, 0, sizeof(*ahc));
3802 	ahc->seep_config = malloc(sizeof(*ahc->seep_config),
3803 				  M_DEVBUF, M_NOWAIT);
3804 	if (ahc->seep_config == NULL) {
3805 #ifndef	__FreeBSD__
3806 		free(ahc, M_DEVBUF);
3807 #endif
3808 		free(name, M_DEVBUF);
3809 		return (NULL);
3810 	}
3811 	LIST_INIT(&ahc->pending_scbs);
3812 	/* We don't know our unit number until the OSM sets it */
3813 	ahc->name = name;
3814 	ahc->unit = -1;
3815 	ahc->description = NULL;
3816 	ahc->channel = 'A';
3817 	ahc->channel_b = 'B';
3818 	ahc->chip = AHC_NONE;
3819 	ahc->features = AHC_FENONE;
3820 	ahc->bugs = AHC_BUGNONE;
3821 	ahc->flags = AHC_FNONE;
3822 
3823 	for (i = 0; i < AHC_NUM_TARGETS; i++)
3824 		TAILQ_INIT(&ahc->untagged_queues[i]);
3825 	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
3826 		ahc_free(ahc);
3827 		ahc = NULL;
3828 	}
3829 	return (ahc);
3830 }
3831 
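/*
 * Rough bring-up order, as driven by the OSM front ends (a sketch of the
 * usual flow, not a contract): ahc_alloc(), bus-specific probing and
 * configuration, ahc_softc_init(), ahc_reset(), ahc_init(), and finally
 * ahc_softc_insert()/ahc_intr_enable() from the attach path.
 */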
3832 int
3833 ahc_softc_init(struct ahc_softc *ahc)
3834 {
3835 
3836 	/* The IRQMS bit is only valid on VL and EISA chips */
3837 	if ((ahc->chip & AHC_PCI) == 0)
3838 		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
3839 	else
3840 		ahc->unpause = 0;
3841 	ahc->pause = ahc->unpause | PAUSE;
3842 	/* XXX The shared scb data stuff should be deprecated */
3843 	if (ahc->scb_data == NULL) {
3844 		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
3845 				       M_DEVBUF, M_NOWAIT);
3846 		if (ahc->scb_data == NULL)
3847 			return (ENOMEM);
3848 		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
3849 	}
3850 
3851 	return (0);
3852 }
3853 
3854 void
3855 ahc_softc_insert(struct ahc_softc *ahc)
3856 {
3857 	struct ahc_softc *list_ahc;
3858 
3859 #if AHC_PCI_CONFIG > 0
3860 	/*
3861 	 * Second Function PCI devices need to inherit some
3862 	 * settings from function 0.
3863 	 */
3864 	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
3865 	 && (ahc->features & AHC_MULTI_FUNC) != 0) {
3866 		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
3867 			ahc_dev_softc_t list_pci;
3868 			ahc_dev_softc_t pci;
3869 
3870 			list_pci = list_ahc->dev_softc;
3871 			pci = ahc->dev_softc;
3872 			if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
3873 			 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) {
3874 				struct ahc_softc *master;
3875 				struct ahc_softc *slave;
3876 
3877 				if (ahc_get_pci_function(list_pci) == 0) {
3878 					master = list_ahc;
3879 					slave = ahc;
3880 				} else {
3881 					master = ahc;
3882 					slave = list_ahc;
3883 				}
3884 				slave->flags &= ~AHC_BIOS_ENABLED;
3885 				slave->flags |=
3886 				    master->flags & AHC_BIOS_ENABLED;
3887 				slave->flags &= ~AHC_PRIMARY_CHANNEL;
3888 				slave->flags |=
3889 				    master->flags & AHC_PRIMARY_CHANNEL;
3890 				break;
3891 			}
3892 		}
3893 	}
3894 #endif
3895 
3896 	/*
3897 	 * Insertion sort into our list of softcs.
3898 	 */
3899 	list_ahc = TAILQ_FIRST(&ahc_tailq);
3900 	while (list_ahc != NULL
3901 	    && ahc_softc_comp(list_ahc, ahc) <= 0)
3902 		list_ahc = TAILQ_NEXT(list_ahc, links);
3903 	if (list_ahc != NULL)
3904 		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
3905 	else
3906 		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
3907 	ahc->init_level++;
3908 }
3909 
3910 /*
3911  * Verify that the passed in softc pointer is for a
3912  * controller that is still configured.
3913  */
3914 struct ahc_softc *
3915 ahc_find_softc(struct ahc_softc *ahc)
3916 {
3917 	struct ahc_softc *list_ahc;
3918 
3919 	TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
3920 		if (list_ahc == ahc)
3921 			return (ahc);
3922 	}
3923 	return (NULL);
3924 }
3925 
3926 void
3927 ahc_set_unit(struct ahc_softc *ahc, int unit)
3928 {
3929 	ahc->unit = unit;
3930 }
3931 
3932 void
3933 ahc_set_name(struct ahc_softc *ahc, char *name)
3934 {
3935 	if (ahc->name != NULL)
3936 		free(ahc->name, M_DEVBUF);
3937 	ahc->name = name;
3938 }
3939 
3940 void
3941 ahc_free(struct ahc_softc *ahc)
3942 {
3943 	int i;
3944 
3945 	ahc_fini_scbdata(ahc);
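	/*
	 * Tear down in the reverse order of the init_level increments
	 * made during setup; each case intentionally falls through to
	 * undo the earlier stages as well.
	 */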
3946 	switch (ahc->init_level) {
3947 	default:
3948 	case 5:
3949 		ahc_shutdown(ahc);
3950 		TAILQ_REMOVE(&ahc_tailq, ahc, links);
3951 		/* FALLTHROUGH */
3952 	case 4:
3953 		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
3954 				  ahc->shared_data_dmamap);
3955 		/* FALLTHROUGH */
3956 	case 3:
3957 		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
3958 				ahc->shared_data_dmamap);
3959 		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
3960 				   ahc->shared_data_dmamap);
3961 		/* FALLTHROUGH */
3962 	case 2:
3963 		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
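		/* FALLTHROUGH */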
3964 	case 1:
3965 #ifndef __linux__
3966 		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
3967 #endif
3968 		break;
3969 	case 0:
3970 		break;
3971 	}
3972 
3973 #ifndef __linux__
3974 	ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
3975 #endif
3976 	ahc_platform_free(ahc);
3977 	for (i = 0; i < AHC_NUM_TARGETS; i++) {
3978 		struct ahc_tmode_tstate *tstate;
3979 
3980 		tstate = ahc->enabled_targets[i];
3981 		if (tstate != NULL) {
3982 #if AHC_TARGET_MODE
3983 			int j;
3984 
3985 			for (j = 0; j < AHC_NUM_LUNS; j++) {
3986 				struct ahc_tmode_lstate *lstate;
3987 
3988 				lstate = tstate->enabled_luns[j];
3989 				if (lstate != NULL) {
3990 					xpt_free_path(lstate->path);
3991 					free(lstate, M_DEVBUF);
3992 				}
3993 			}
3994 #endif
3995 			free(tstate, M_DEVBUF);
3996 		}
3997 	}
3998 #if AHC_TARGET_MODE
3999 	if (ahc->black_hole != NULL) {
4000 		xpt_free_path(ahc->black_hole->path);
4001 		free(ahc->black_hole, M_DEVBUF);
4002 	}
4003 #endif
4004 	if (ahc->name != NULL)
4005 		free(ahc->name, M_DEVBUF);
4006 	if (ahc->seep_config != NULL)
4007 		free(ahc->seep_config, M_DEVBUF);
4008 #ifndef __FreeBSD__
4009 	free(ahc, M_DEVBUF);
4010 #endif
4011 	return;
4012 }
4013 
4014 void
4015 ahc_shutdown(void *arg)
4016 {
4017 	struct	ahc_softc *ahc;
4018 	int	i;
4019 
4020 	ahc = (struct ahc_softc *)arg;
4021 
4022 	/* This will reset most registers to 0, but not all */
4023 	ahc_reset(ahc);
4024 	ahc_outb(ahc, SCSISEQ, 0);
4025 	ahc_outb(ahc, SXFRCTL0, 0);
4026 	ahc_outb(ahc, DSPCISTATUS, 0);
4027 
4028 	for (i = TARG_SCSIRATE; i < SCSICONF; i++)
4029 		ahc_outb(ahc, i, 0);
4030 }
4031 
4032 /*
4033  * Reset the controller and record some information about it
4034  * that is only available just after a reset.
4035  */
4036 int
4037 ahc_reset(struct ahc_softc *ahc)
4038 {
4039 	u_int	sblkctl;
4040 	u_int	sxfrctl1_a, sxfrctl1_b;
4041 	int	wait;
4042 
4043 	/*
4044 	 * Preserve the value of the SXFRCTL1 register for all channels.
4045 	 * It contains settings that affect termination and we don't want
4046 	 * to disturb the integrity of the bus.
4047 	 */
4048 	ahc_pause(ahc);
4049 	if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) {
4050 		/*
4051 		 * The chip has not been initialized since
4052 		 * PCI/EISA/VLB bus reset.  Don't trust
4053 		 * "left over BIOS data".
4054 		 */
4055 		ahc->flags |= AHC_NO_BIOS_INIT;
4056 	}
4057 	sxfrctl1_b = 0;
4058 	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
4059 		u_int sblkctl;
4060 
4061 		/*
4062 		 * Save channel B's settings in case this chip
4063 		 * is setup for TWIN channel operation.
4064 		 */
4065 		sblkctl = ahc_inb(ahc, SBLKCTL);
4066 		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
4067 		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
4068 		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
4069 	}
4070 	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
4071 
4072 	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
4073 
4074 	/*
4075 	 * Ensure that the reset has finished.  We poll CHIPRSTACK for
4076 	 * up to a second, delaying 1000us before each read to make
4077 	 * sure the chip has sufficiently completed its reset to handle
4078 	 * register accesses.
4079 	 */
4080 	wait = 1000;
4081 	do {
4082 		ahc_delay(1000);
4083 	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
4084 
4085 	if (wait == 0) {
4086 		printf("%s: WARNING - Failed chip reset!  "
4087 		       "Trying to initialize anyway.\n", ahc_name(ahc));
4088 	}
4089 	ahc_outb(ahc, HCNTRL, ahc->pause);
4090 
4091 	/* Determine channel configuration */
4092 	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
4093 	/* No Twin Channel PCI cards */
4094 	if ((ahc->chip & AHC_PCI) != 0)
4095 		sblkctl &= ~SELBUSB;
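	/*
	 * What remains of SBLKCTL identifies the board wiring: SELWIDE
	 * set means a wide channel and SELBUSB a twin-channel board,
	 * hence the 2 and 8 cases decoded below.
	 */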
4096 	switch (sblkctl) {
4097 	case 0:
4098 		/* Single Narrow Channel */
4099 		break;
4100 	case 2:
4101 		/* Wide Channel */
4102 		ahc->features |= AHC_WIDE;
4103 		break;
4104 	case 8:
4105 		/* Twin Channel */
4106 		ahc->features |= AHC_TWIN;
4107 		break;
4108 	default:
4109 		printf(" Unsupported adapter type.  Ignoring\n");
4110 		return(-1);
4111 	}
4112 
4113 	/*
4114 	 * Reload sxfrctl1.
4115 	 *
4116 	 * We must always initialize STPWEN to 1 before we
4117 	 * restore the saved values.  STPWEN is initialized
4118 	 * to a tri-state condition which can only be cleared
4119 	 * by turning it on.
4120 	 */
4121 	if ((ahc->features & AHC_TWIN) != 0) {
4122 		u_int sblkctl;
4123 
4124 		sblkctl = ahc_inb(ahc, SBLKCTL);
4125 		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
4126 		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
4127 		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
4128 	}
4129 	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
4130 
4131 #ifdef AHC_DUMP_SEQ
4132 	if (ahc->init_level == 0)
4133 		ahc_dumpseq(ahc);
4134 #endif
4135 
4136 	return (0);
4137 }
4138 
4139 /*
4140  * Determine the number of SCBs available on the controller
4141  */
4142 int
4143 ahc_probe_scbs(struct ahc_softc *ahc)
{
4144 	int i;
4145 
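	/*
	 * Probe by writing each candidate index into its own SCB's first
	 * byte and reading it back.  Rechecking that SCB 0 still holds
	 * zero catches chips whose SCB RAM wraps (aliases) before
	 * AHC_SCB_MAX entries.
	 */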
4146 	for (i = 0; i < AHC_SCB_MAX; i++) {
4147 
4148 		ahc_outb(ahc, SCBPTR, i);
4149 		ahc_outb(ahc, SCB_BASE, i);
4150 		if (ahc_inb(ahc, SCB_BASE) != i)
4151 			break;
4152 		ahc_outb(ahc, SCBPTR, 0);
4153 		if (ahc_inb(ahc, SCB_BASE) != 0)
4154 			break;
4155 	}
4156 	return (i);
4157 }
4158 
4159 static void
4160 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
4161 {
4162 	bus_addr_t *baddr;
4163 
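	/*
	 * Callback for ahc_dmamap_load().  All of the tags loaded through
	 * this callback are created with nsegments == 1, so recording the
	 * first segment's bus address is sufficient.
	 */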
4164 	baddr = (bus_addr_t *)arg;
4165 	*baddr = segs->ds_addr;
4166 }
4167 
4168 static void
4169 ahc_build_free_scb_list(struct ahc_softc *ahc)
4170 {
4171 	int scbsize;
4172 	int i;
4173 
4174 	scbsize = 32;
4175 	if ((ahc->flags & AHC_LSCBS_ENABLED) != 0)
4176 		scbsize = 64;
4177 
4178 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
4179 		int j;
4180 
4181 		ahc_outb(ahc, SCBPTR, i);
4182 
4183 		/*
4184 		 * Touch all SCB bytes to avoid parity errors
4185 		 * should one of our debugging routines read
4186 		 * an otherwise uninitialized byte.
4187 		 */
4188 		for (j = 0; j < scbsize; j++)
4189 			ahc_outb(ahc, SCB_BASE+j, 0xFF);
4190 
4191 		/* Clear the control byte. */
4192 		ahc_outb(ahc, SCB_CONTROL, 0);
4193 
4194 		/* Set the next pointer */
4195 		if ((ahc->flags & AHC_PAGESCBS) != 0)
4196 			ahc_outb(ahc, SCB_NEXT, i+1);
4197 		else
4198 			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
4199 
4200 		/* Make the tag number, SCSIID, and lun invalid */
4201 		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
4202 		ahc_outb(ahc, SCB_SCSIID, 0xFF);
4203 		ahc_outb(ahc, SCB_LUN, 0xFF);
4204 	}
4205 
4206 	/* Make sure that the last SCB terminates the free list */
4207 	ahc_outb(ahc, SCBPTR, i-1);
4208 	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
4209 }
4210 
4211 static int
4212 ahc_init_scbdata(struct ahc_softc *ahc)
4213 {
4214 	struct scb_data *scb_data;
4215 
4216 	scb_data = ahc->scb_data;
4217 	SLIST_INIT(&scb_data->free_scbs);
4218 	SLIST_INIT(&scb_data->sg_maps);
4219 
4220 	/* Allocate SCB resources */
4221 	scb_data->scbarray =
4222 	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
4223 				 M_DEVBUF, M_NOWAIT);
4224 	if (scb_data->scbarray == NULL)
4225 		return (ENOMEM);
4226 	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
4227 
4228 	/* Determine the number of hardware SCBs and initialize them */
4229 
4230 	scb_data->maxhscbs = ahc_probe_scbs(ahc);
4231 	if ((ahc->flags & AHC_PAGESCBS) != 0) {
4232 		/* SCB 0 heads the free list */
4233 		ahc_outb(ahc, FREE_SCBH, 0);
4234 	} else {
4235 		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
4236 	}
4237 
4238 	if (ahc->scb_data->maxhscbs == 0) {
4239 		printf("%s: No SCB space found\n", ahc_name(ahc));
4240 		return (ENXIO);
4241 	}
4242 
4243 	ahc_build_free_scb_list(ahc);
4244 
4245 	/*
4246 	 * Create our DMA tags.  These tags define the kinds of device
4247 	 * accessible memory allocations and memory mappings we will
4248 	 * need to perform during normal operation.
4249 	 *
4250 	 * Unless we need to further restrict the allocation, we rely
4251 	 * on the restrictions of the parent dmat, hence the common
4252 	 * use of MAXADDR and MAXSIZE.
4253 	 */
4254 
4255 	/* DMA tag for our hardware scb structures */
4256 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4257 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4258 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4259 			       /*highaddr*/BUS_SPACE_MAXADDR,
4260 			       /*filter*/NULL, /*filterarg*/NULL,
4261 			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
4262 			       /*nsegments*/1,
4263 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4264 			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
4265 		goto error_exit;
4266 	}
4267 
4268 	scb_data->init_level++;
4269 
4270 	/* Allocation for our hscbs */
4271 	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
4272 			     (void **)&scb_data->hscbs,
4273 			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
4274 		goto error_exit;
4275 	}
4276 
4277 	scb_data->init_level++;
4278 
4279 	/* And permanently map them */
4280 	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
4281 			scb_data->hscbs,
4282 			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
4283 			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);
4284 
4285 	scb_data->init_level++;
4286 
4287 	/* DMA tag for our sense buffers */
4288 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4289 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4290 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4291 			       /*highaddr*/BUS_SPACE_MAXADDR,
4292 			       /*filter*/NULL, /*filterarg*/NULL,
4293 			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
4294 			       /*nsegments*/1,
4295 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4296 			       /*flags*/0, &scb_data->sense_dmat) != 0) {
4297 		goto error_exit;
4298 	}
4299 
4300 	scb_data->init_level++;
4301 
4302 	/* Allocate them */
4303 	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
4304 			     (void **)&scb_data->sense,
4305 			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
4306 		goto error_exit;
4307 	}
4308 
4309 	scb_data->init_level++;
4310 
4311 	/* And permanently map them */
4312 	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
4313 			scb_data->sense,
4314 			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
4315 			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);
4316 
4317 	scb_data->init_level++;
4318 
4319 	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
4320 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
4321 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4322 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4323 			       /*highaddr*/BUS_SPACE_MAXADDR,
4324 			       /*filter*/NULL, /*filterarg*/NULL,
4325 			       PAGE_SIZE, /*nsegments*/1,
4326 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4327 			       /*flags*/0, &scb_data->sg_dmat) != 0) {
4328 		goto error_exit;
4329 	}
4330 
4331 	scb_data->init_level++;
4332 
4333 	/* Perform initial CCB allocation */
4334 	memset(scb_data->hscbs, 0,
4335 	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
4336 	ahc_alloc_scbs(ahc);
4337 
4338 	if (scb_data->numscbs == 0) {
4339 		printf("%s: ahc_init_scbdata - "
4340 		       "Unable to allocate initial scbs\n",
4341 		       ahc_name(ahc));
4342 		goto error_exit;
4343 	}
4344 
4345 	/*
4346 	 * Tell the sequencer which SCB will be the next one it receives.
4347 	 */
4348 	ahc->next_queued_scb = ahc_get_scb(ahc);
4349 	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
4350 
4351 	/*
4352 	 * Note that we were successful.
4353 	 */
4354 	return (0);
4355 
4356 error_exit:
4357 
4358 	return (ENOMEM);
4359 }
4360 
4361 static void
4362 ahc_fini_scbdata(struct ahc_softc *ahc)
4363 {
4364 	struct scb_data *scb_data;
4365 
4366 	scb_data = ahc->scb_data;
4367 	if (scb_data == NULL)
4368 		return;
4369 
4370 	switch (scb_data->init_level) {
4371 	default:
4372 	case 7:
4373 	{
4374 		struct sg_map_node *sg_map;
4375 
4376 		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
4377 			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
4378 			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
4379 					  sg_map->sg_dmamap);
4380 			ahc_dmamem_free(ahc, scb_data->sg_dmat,
4381 					sg_map->sg_vaddr,
4382 					sg_map->sg_dmamap);
4383 			free(sg_map, M_DEVBUF);
4384 		}
4385 		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
4386 	}
4387 	case 6:
4388 		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
4389 				  scb_data->sense_dmamap);
4390 	case 5:
4391 		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
4392 				scb_data->sense_dmamap);
4393 		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
4394 				   scb_data->sense_dmamap);
4395 	case 4:
4396 		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
4397 	case 3:
4398 		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
4399 				  scb_data->hscb_dmamap);
4400 	case 2:
4401 		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
4402 				scb_data->hscb_dmamap);
4403 		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
4404 				   scb_data->hscb_dmamap);
4405 	case 1:
4406 		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
4407 		break;
4408 	case 0:
4409 		break;
4410 	}
4411 	if (scb_data->scbarray != NULL)
4412 		free(scb_data->scbarray, M_DEVBUF);
4413 }
4414 
4415 void
4416 ahc_alloc_scbs(struct ahc_softc *ahc)
4417 {
4418 	struct scb_data *scb_data;
4419 	struct scb *next_scb;
4420 	struct sg_map_node *sg_map;
4421 	bus_addr_t physaddr;
4422 	struct ahc_dma_seg *segs;
4423 	int newcount;
4424 	int i;
4425 
4426 	scb_data = ahc->scb_data;
4427 	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
4428 		/* Can't allocate any more */
4429 		return;
4430 
4431 	next_scb = &scb_data->scbarray[scb_data->numscbs];
4432 
4433 	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
4434 
4435 	if (sg_map == NULL)
4436 		return;
4437 
4438 	/* Allocate S/G space for the next batch of SCBs */
4439 	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
4440 			     (void **)&sg_map->sg_vaddr,
4441 			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
4442 		free(sg_map, M_DEVBUF);
4443 		return;
4444 	}
4445 
4446 	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
4447 
4448 	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
4449 			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
4450 			&sg_map->sg_physaddr, /*flags*/0);
4451 
4452 	segs = sg_map->sg_vaddr;
4453 	physaddr = sg_map->sg_physaddr;
4454 
4455 	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
4456 	newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
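	/*
	 * Each SCB is handed AHC_NSEG S/G descriptors carved out of the
	 * page allocated above, so this page can service at most
	 * PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)) new SCBs,
	 * capped by the number we are still allowed to allocate.
	 */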
4457 	for (i = 0; i < newcount; i++) {
4458 		struct scb_platform_data *pdata;
4459 #ifndef __linux__
4460 		int error;
4461 #endif
4462 		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
4463 							   M_DEVBUF, M_NOWAIT);
4464 		if (pdata == NULL)
4465 			break;
4466 		next_scb->platform_data = pdata;
4467 		next_scb->sg_map = sg_map;
4468 		next_scb->sg_list = segs;
4469 		/*
4470 		 * The sequencer always starts with the second entry.
4471 		 * The first entry is embedded in the scb.
4472 		 */
4473 		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
4474 		next_scb->ahc_softc = ahc;
4475 		next_scb->flags = SCB_FREE;
4476 #ifndef __linux__
4477 		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
4478 					  &next_scb->dmamap);
4479 		if (error != 0)
4480 			break;
4481 #endif
4482 		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
4483 		next_scb->hscb->tag = ahc->scb_data->numscbs;
4484 		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
4485 				  next_scb, links.sle);
4486 		segs += AHC_NSEG;
4487 		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
4488 		next_scb++;
4489 		ahc->scb_data->numscbs++;
4490 	}
4491 }
4492 
4493 void
4494 ahc_controller_info(struct ahc_softc *ahc, char *buf)
4495 {
4496 	int len;
4497 
4498 	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
4499 	buf += len;
4500 	if ((ahc->features & AHC_TWIN) != 0)
4501  		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
4502 			      "B SCSI Id=%d, primary %c, ",
4503 			      ahc->our_id, ahc->our_id_b,
4504 			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
4505 	else {
4506 		const char *speed;
4507 		const char *type;
4508 
4509 		speed = "";
4510 		if ((ahc->features & AHC_ULTRA) != 0) {
4511 			speed = "Ultra ";
4512 		} else if ((ahc->features & AHC_DT) != 0) {
4513 			speed = "Ultra160 ";
4514 		} else if ((ahc->features & AHC_ULTRA2) != 0) {
4515 			speed = "Ultra2 ";
4516 		}
4517 		if ((ahc->features & AHC_WIDE) != 0) {
4518 			type = "Wide";
4519 		} else {
4520 			type = "Single";
4521 		}
4522 		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
4523 			      speed, type, ahc->channel, ahc->our_id);
4524 	}
4525 	buf += len;
4526 
4527 	if ((ahc->flags & AHC_PAGESCBS) != 0)
4528 		sprintf(buf, "%d/%d SCBs",
4529 			ahc->scb_data->maxhscbs, AHC_MAX_QUEUE);
4530 	else
4531 		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
4532 }
4533 
4534 /*
4535  * Start the board, ready for normal operation
4536  */
4537 int
4538 ahc_init(struct ahc_softc *ahc)
4539 {
4540 	int	 max_targ;
4541 	int	 i;
4542 	int	 term;
4543 	u_int	 scsi_conf;
4544 	u_int	 scsiseq_template;
4545 	u_int	 ultraenb;
4546 	u_int	 discenable;
4547 	u_int	 tagenable;
4548 	size_t	 driver_data_size;
4549 	uint32_t physaddr;
4550 
4551 #ifdef AHC_DEBUG
4552 	if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0)
4553 		ahc->flags |= AHC_SEQUENCER_DEBUG;
4554 #endif
4555 
4556 #ifdef AHC_PRINT_SRAM
4557 	printf("Scratch Ram:");
4558 	for (i = 0x20; i < 0x5f; i++) {
4559 		if (((i % 8) == 0) && (i != 0)) {
4560 			printf ("\n              ");
4561 		}
4562 		printf (" 0x%x", ahc_inb(ahc, i));
4563 	}
4564 	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4565 		for (i = 0x70; i < 0x7f; i++) {
4566 			if (((i % 8) == 0) && (i != 0)) {
4567 				printf ("\n              ");
4568 			}
4569 			printf (" 0x%x", ahc_inb(ahc, i));
4570 		}
4571 	}
4572 	printf ("\n");
4573 	/*
4574 	 * Reading uninitialized scratch ram may
4575 	 * generate parity errors.
4576 	 */
4577 	ahc_outb(ahc, CLRINT, CLRPARERR);
4578 	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
4579 #endif
4580 	max_targ = 15;
4581 
4582 	/*
4583 	 * Assume we have a board at this stage and it has been reset.
4584 	 */
4585 	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
4586 		ahc->our_id = ahc->our_id_b = 7;
4587 
4588 	/*
4589 	 * Default to allowing initiator operations.
4590 	 */
4591 	ahc->flags |= AHC_INITIATORROLE;
4592 
4593 	/*
4594 	 * Only allow target mode features if this unit has them enabled.
4595 	 */
4596 	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
4597 		ahc->features &= ~AHC_TARGETMODE;
4598 
4599 #ifndef __linux__
4600 	/* DMA tag for mapping buffers into device visible space. */
4601 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4602 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4603 			       /*lowaddr*/BUS_SPACE_MAXADDR,
4604 			       /*highaddr*/BUS_SPACE_MAXADDR,
4605 			       /*filter*/NULL, /*filterarg*/NULL,
4606 			       /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
4607 			       /*nsegments*/AHC_NSEG,
4608 			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
4609 			       /*flags*/BUS_DMA_ALLOCNOW,
4610 			       &ahc->buffer_dmat) != 0) {
4611 		return (ENOMEM);
4612 	}
4613 #endif
4614 
4615 	ahc->init_level++;
4616 
4617 	/*
4618 	 * DMA tag for our command fifos and other data in system memory
4619 	 * the card's sequencer must be able to access.  For initiator
4620 	 * roles, we need to allocate space for the qinfifo and qoutfifo.
4621 	 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
4622 	 * When providing for the target mode role, we must additionally
4623 	 * provide space for the incoming target command fifo and an extra
4624 	 * byte to deal with a dma bug in some chip versions.
4625 	 */
4626 	driver_data_size = 2 * 256 * sizeof(uint8_t);
4627 	if ((ahc->features & AHC_TARGETMODE) != 0)
4628 		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
4629 				 + /*DMA WideOdd Bug Buffer*/1;
4630 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4631 			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4632 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4633 			       /*highaddr*/BUS_SPACE_MAXADDR,
4634 			       /*filter*/NULL, /*filterarg*/NULL,
4635 			       driver_data_size,
4636 			       /*nsegments*/1,
4637 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4638 			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
4639 		return (ENOMEM);
4640 	}
4641 
4642 	ahc->init_level++;
4643 
4644 	/* Allocation of driver data */
4645 	if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
4646 			     (void **)&ahc->qoutfifo,
4647 			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
4648 		return (ENOMEM);
4649 	}
4650 
4651 	ahc->init_level++;
4652 
4653 	/* And permanently map it in */
4654 	ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
4655 			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
4656 			&ahc->shared_data_busaddr, /*flags*/0);
4657 
4658 	if ((ahc->features & AHC_TARGETMODE) != 0) {
4659 		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
4660 		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
4661 		ahc->dma_bug_buf = ahc->shared_data_busaddr
4662 				 + driver_data_size - 1;
4663 		/* All target command blocks start out invalid. */
4664 		for (i = 0; i < AHC_TMODE_CMDS; i++)
4665 			ahc->targetcmds[i].cmd_valid = 0;
4666 		ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
4667 		ahc->tqinfifonext = 1;
4668 		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
4669 		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
4670 		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4671 	}
4672 	ahc->qinfifo = &ahc->qoutfifo[256];
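	/*
	 * Shared data layout, as set up above: an optional target command
	 * fifo of AHC_TMODE_CMDS entries, followed by the 256 byte
	 * qoutfifo, the 256 byte qinfifo, and, in target mode, one final
	 * pad byte reserved for the DMA WideOdd bug workaround.
	 */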
4673 
4674 	ahc->init_level++;
4675 
4676 	/* Allocate SCB data now that buffer_dmat is initialized */
4677 	if (ahc->scb_data->maxhscbs == 0)
4678 		if (ahc_init_scbdata(ahc) != 0)
4679 			return (ENOMEM);
4680 
4681 	/*
4682 	 * Allocate a tstate to house information for our
4683 	 * initiator presence on the bus as well as the user
4684 	 * data for any target mode initiator.
4685 	 */
4686 	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
4687 		printf("%s: unable to allocate ahc_tmode_tstate.  "
4688 		       "Failing attach\n", ahc_name(ahc));
4689 		return (ENOMEM);
4690 	}
4691 
4692 	if ((ahc->features & AHC_TWIN) != 0) {
4693 		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
4694 			printf("%s: unable to allocate ahc_tmode_tstate.  "
4695 			       "Failing attach\n", ahc_name(ahc));
4696 			return (ENOMEM);
4697 		}
4698 	}
4699 
4700 	ahc_outb(ahc, SEQ_FLAGS, 0);
4701 	ahc_outb(ahc, SEQ_FLAGS2, 0);
4702 
4703 	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) {
4704 		ahc->flags |= AHC_PAGESCBS;
4705 	} else {
4706 		ahc->flags &= ~AHC_PAGESCBS;
4707 	}
4708 
4709 #ifdef AHC_DEBUG
4710 	if (ahc_debug & AHC_SHOW_MISC) {
4711 		printf("%s: hardware scb %d bytes; kernel scb %d bytes; "
4712 		       "ahc_dma %d bytes\n",
4713 			ahc_name(ahc),
4714 			sizeof(struct hardware_scb),
4715 			sizeof(struct scb),
4716 			sizeof(struct ahc_dma_seg));
4717 	}
4718 #endif /* AHC_DEBUG */
4719 
4720 	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4721 	if (ahc->features & AHC_TWIN) {
4722 
4723 		/*
4724 		 * The device is gated to channel B after a chip reset,
4725 		 * so set those values first
4726 		 */
4727 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4728 		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4729 		ahc_outb(ahc, SCSIID, ahc->our_id_b);
4730 		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4731 		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4732 					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
4733 		if ((ahc->features & AHC_ULTRA2) != 0)
4734 			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4735 		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4736 		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4737 
4738 		if ((scsi_conf & RESET_SCSI) != 0
4739 		 && (ahc->flags & AHC_INITIATORROLE) != 0)
4740 			ahc->flags |= AHC_RESET_BUS_B;
4741 
4742 		/* Select Channel A */
4743 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4744 	}
4745 	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4746 	if ((ahc->features & AHC_ULTRA2) != 0)
4747 		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4748 	else
4749 		ahc_outb(ahc, SCSIID, ahc->our_id);
4750 	scsi_conf = ahc_inb(ahc, SCSICONF);
4751 	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4752 				|term|ahc->seltime
4753 				|ENSTIMER|ACTNEGEN);
4754 	if ((ahc->features & AHC_ULTRA2) != 0)
4755 		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4756 	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4757 	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4758 
4759 	if ((scsi_conf & RESET_SCSI) != 0
4760 	 && (ahc->flags & AHC_INITIATORROLE) != 0)
4761 		ahc->flags |= AHC_RESET_BUS_A;
4762 
4763 	/*
4764 	 * Look at the information that board initialization or
4765 	 * the board bios has left us.
4766 	 */
4767 	ultraenb = 0;
4768 	tagenable = ALL_TARGETS_MASK;
4769 
4770 	/* Grab the disconnection disable table and invert it for our needs */
4771 	if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
4772 		printf("%s: Host Adapter Bios disabled.  Using default SCSI "
4773 			"device parameters\n", ahc_name(ahc));
4774 		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
4775 			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
4776 		discenable = ALL_TARGETS_MASK;
4777 		if ((ahc->features & AHC_ULTRA) != 0)
4778 			ultraenb = ALL_TARGETS_MASK;
4779 	} else {
4780 		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
4781 			   | ahc_inb(ahc, DISC_DSB));
4782 		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
4783 			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
4784 				      | ahc_inb(ahc, ULTRA_ENB);
4785 	}
4786 
4787 	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
4788 		max_targ = 7;
4789 
4790 	for (i = 0; i <= max_targ; i++) {
4791 		struct ahc_initiator_tinfo *tinfo;
4792 		struct ahc_tmode_tstate *tstate;
4793 		u_int our_id;
4794 		u_int target_id;
4795 		char channel;
4796 
4797 		channel = 'A';
4798 		our_id = ahc->our_id;
4799 		target_id = i;
4800 		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
4801 			channel = 'B';
4802 			our_id = ahc->our_id_b;
4803 			target_id = i % 8;
4804 		}
4805 		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
4806 					    target_id, &tstate);
4807 		/* Default to async narrow across the board */
4808 		memset(tinfo, 0, sizeof(*tinfo));
4809 		if (ahc->flags & AHC_USEDEFAULTS) {
4810 			if ((ahc->features & AHC_WIDE) != 0)
4811 				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4812 
4813 			/*
4814 			 * These will be truncated when we determine the
4815 			 * connection type we have with the target.
4816 			 */
4817 			tinfo->user.period = ahc_syncrates->period;
4818 			tinfo->user.offset = ~0;
4819 		} else {
4820 			u_int scsirate;
4821 			uint16_t mask;
4822 
4823 			/* Take the settings leftover in scratch RAM. */
4824 			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
4825 			mask = (0x01 << i);
4826 			if ((ahc->features & AHC_ULTRA2) != 0) {
4827 				u_int offset;
4828 				u_int maxsync;
4829 
4830 				if ((scsirate & SOFS) == 0x0F) {
4831 					/*
4832 					 * Haven't negotiated yet,
4833 					 * so the format is different.
4834 					 */
4835 					scsirate = (scsirate & SXFR) >> 4
4836 						 | ((ultraenb & mask)
4837 						  ? 0x08 : 0x0)
4838 						 | (scsirate & WIDEXFER);
4839 					offset = MAX_OFFSET_ULTRA2;
4840 				} else
4841 					offset = ahc_inb(ahc, TARG_OFFSET + i);
4842 				if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
4843 					/* Set to the lowest sync rate, 5MHz */
4844 					scsirate |= 0x1c;
4845 				maxsync = AHC_SYNCRATE_ULTRA2;
4846 				if ((ahc->features & AHC_DT) != 0)
4847 					maxsync = AHC_SYNCRATE_DT;
4848 				tinfo->user.period =
4849 				    ahc_find_period(ahc, scsirate, maxsync);
4850 				if (offset == 0)
4851 					tinfo->user.period = 0;
4852 				else
4853 					tinfo->user.offset = ~0;
4854 				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
4855 				 && (ahc->features & AHC_DT) != 0)
4856 					tinfo->user.ppr_options =
4857 					    MSG_EXT_PPR_DT_REQ;
4858 			} else if ((scsirate & SOFS) != 0) {
4859 				if ((scsirate & SXFR) == 0x40
4860 				 && (ultraenb & mask) != 0) {
4861 					/* Treat 10MHz as a non-ultra speed */
4862 					scsirate &= ~SXFR;
4863 				 	ultraenb &= ~mask;
4864 				}
4865 				tinfo->user.period =
4866 				    ahc_find_period(ahc, scsirate,
4867 						    (ultraenb & mask)
4868 						   ? AHC_SYNCRATE_ULTRA
4869 						   : AHC_SYNCRATE_FAST);
4870 				if (tinfo->user.period != 0)
4871 					tinfo->user.offset = ~0;
4872 			}
4873 			if (tinfo->user.period == 0)
4874 				tinfo->user.offset = 0;
4875 			if ((scsirate & WIDEXFER) != 0
4876 			 && (ahc->features & AHC_WIDE) != 0)
4877 				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4878 			tinfo->user.protocol_version = 4;
4879 			if ((ahc->features & AHC_DT) != 0)
4880 				tinfo->user.transport_version = 3;
4881 			else
4882 				tinfo->user.transport_version = 2;
4883 			tinfo->goal.protocol_version = 2;
4884 			tinfo->goal.transport_version = 2;
4885 			tinfo->curr.protocol_version = 2;
4886 			tinfo->curr.transport_version = 2;
4887 		}
4888 		tstate->ultraenb = 0;
4889 	}
4890 	ahc->user_discenable = discenable;
4891 	ahc->user_tagenable = tagenable;
4892 
4893 	/* There are no untagged SCBs active yet. */
4894 	for (i = 0; i < 16; i++) {
4895 		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
4896 		if ((ahc->flags & AHC_SCB_BTT) != 0) {
4897 			int lun;
4898 
4899 			/*
4900 			 * The SCB based BTT allows an entry per
4901 			 * target and lun pair.
4902 			 */
4903 			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
4904 				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
4905 		}
4906 	}
4907 
4908 	/* All of our queues are empty */
4909 	for (i = 0; i < 256; i++)
4910 		ahc->qoutfifo[i] = SCB_LIST_NULL;
4911 	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);
4912 
4913 	for (i = 0; i < 256; i++)
4914 		ahc->qinfifo[i] = SCB_LIST_NULL;
4915 
4916 	if ((ahc->features & AHC_MULTI_TID) != 0) {
4917 		ahc_outb(ahc, TARGID, 0);
4918 		ahc_outb(ahc, TARGID + 1, 0);
4919 	}
4920 
4921 	/*
4922 	 * Tell the sequencer where it can find our arrays in memory.
4923 	 */
4924 	physaddr = ahc->scb_data->hscb_busaddr;
4925 	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4926 	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4927 	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4928 	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4929 
4930 	physaddr = ahc->shared_data_busaddr;
4931 	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4932 	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4933 	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4934 	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4935 
4936 	/*
4937 	 * Initialize the group code to command length table.
4938 	 * This overrides the values in TARG_SCSIRATE, so only
4939 	 * setup the table after we have processed that information.
4940 	 */
4941 	ahc_outb(ahc, CMDSIZE_TABLE, 5);
4942 	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4943 	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4944 	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4945 	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4946 	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4947 	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4948 	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
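	/*
	 * The entries above track the standard CDB sizes for SCSI command
	 * groups 0-7 (6/10/10/reserved/16/12/vendor/vendor bytes), less
	 * the opcode byte the sequencer has presumably already consumed;
	 * hence 5, 9, 9, 0, 15, 11, 0, 0.
	 */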
4949 
4950 	/* Tell the sequencer of our initial queue positions */
4951 	ahc_outb(ahc, KERNEL_QINPOS, 0);
4952 	ahc_outb(ahc, QINPOS, 0);
4953 	ahc_outb(ahc, QOUTPOS, 0);
4954 
4955 	/*
4956 	 * Use the built in queue management registers
4957 	 * if they are available.
4958 	 */
4959 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4960 		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4961 		ahc_outb(ahc, SDSCB_QOFF, 0);
4962 		ahc_outb(ahc, SNSCB_QOFF, 0);
4963 		ahc_outb(ahc, HNSCB_QOFF, 0);
4964 	}
4965 
4966 
4967 	/* We don't have any waiting selections */
4968 	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4969 
4970 	/* Our disconnection list is empty too */
4971 	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4972 
4973 	/* Message out buffer starts empty */
4974 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4975 
4976 	/*
4977 	 * Setup the allowed SCSI Sequences based on operational mode.
4978 	 * If we are a target, we'll enable select-in operations once
4979 	 * we've had a lun enabled.
4980 	 */
4981 	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4982 	if ((ahc->flags & AHC_INITIATORROLE) != 0)
4983 		scsiseq_template |= ENRSELI;
4984 	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4985 
4986 	/*
4987 	 * Load the Sequencer program and Enable the adapter
4988 	 * in "fast" mode.
4989 	 */
4990 	if (bootverbose)
4991 		printf("%s: Downloading Sequencer Program...",
4992 		       ahc_name(ahc));
4993 
4994 	ahc_loadseq(ahc);
4995 
4996 	if ((ahc->features & AHC_ULTRA2) != 0) {
4997 		int wait;
4998 
4999 		/*
5000 		 * Wait for up to 500ms for our transceivers
5001 		 * to settle.  If the adapter does not have
5002 		 * a cable attached, the transceivers may
5003 		 * never settle, so don't complain if we
5004 		 * fail here.
5005 		 */
5006 		ahc_pause(ahc);
5007 		for (wait = 5000;
5008 		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
5009 		     wait--)
5010 			ahc_delay(100);
5011 		ahc_unpause(ahc);
5012 	}
5013 	return (0);
5014 }
5015 
5016 void
5017 ahc_intr_enable(struct ahc_softc *ahc, int enable)
5018 {
5019 	u_int hcntrl;
5020 
5021 	hcntrl = ahc_inb(ahc, HCNTRL);
5022 	hcntrl &= ~INTEN;
5023 	ahc->pause &= ~INTEN;
5024 	ahc->unpause &= ~INTEN;
5025 	if (enable) {
5026 		hcntrl |= INTEN;
5027 		ahc->pause |= INTEN;
5028 		ahc->unpause |= INTEN;
5029 	}
5030 	ahc_outb(ahc, HCNTRL, hcntrl);
5031 }
5032 
5033 /*
5034  * Ensure that the card is paused in a location
5035  * outside of all critical sections and that all
5036  * pending work is completed prior to returning.
5037  * This routine should only be called from outside
5038  * an interrupt context.
5039  */
5040 void
5041 ahc_pause_and_flushwork(struct ahc_softc *ahc)
5042 {
5043 	int intstat;
5044 	int maxloops;
5045 	int paused;
5046 
5047 	maxloops = 1000;
5048 	ahc->flags |= AHC_ALL_INTERRUPTS;
5049 	intstat = 0;
5050 	paused = FALSE;
5051 	do {
5052 		if (paused)
5053 			ahc_unpause(ahc);
5054 		ahc_intr(ahc);
5055 		ahc_pause(ahc);
5056 		paused = TRUE;
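		/*
		 * Disable outgoing selections while we drain pending work
		 * so the sequencer cannot start new commands behind our
		 * back.
		 */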
5057 		ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
5058 		ahc_clear_critical_section(ahc);
5059 		if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0)
5060 			break;
5061 	} while (--maxloops
5062 	      && (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) != 0
5063 	       || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO))));
5064 	if (maxloops == 0) {
5065 		printf("Infinite interrupt loop, INTSTAT = %x\n",
5066 		       ahc_inb(ahc, INTSTAT));
5067 	}
5068 	ahc_platform_flushwork(ahc);
5069 	ahc->flags &= ~AHC_ALL_INTERRUPTS;
5070 }
5071 
5072 int
5073 ahc_suspend(struct ahc_softc *ahc)
5074 {
5075 	uint8_t *ptr;
5076 	int	 i;
5077 
5078 	ahc_pause_and_flushwork(ahc);
5079 
5080 	if (LIST_FIRST(&ahc->pending_scbs) != NULL)
5081 		return (EBUSY);
5082 
5083 #if AHC_TARGET_MODE
5084 	/*
5085 	 * XXX What about ATIOs that have not yet been serviced?
5086 	 * Perhaps we should just refuse to be suspended if we
5087 	 * are acting in a target role.
5088 	 */
5089 	if (ahc->pending_device != NULL)
5090 		return (EBUSY);
5091 #endif
5092 
5093 	/* Save volatile registers */
5094 	if ((ahc->features & AHC_TWIN) != 0) {
5095 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
5096 		ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ);
5097 		ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
5098 		ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
5099 		ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0);
5100 		ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1);
5101 		ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER);
5102 		ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL);
5103 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
5104 	}
5105 	ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ);
5106 	ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
5107 	ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
5108 	ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0);
5109 	ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1);
5110 	ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER);
5111 	ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL);
5112 
5113 	if ((ahc->chip & AHC_PCI) != 0) {
5114 		ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
5115 		ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
5116 	}
5117 
5118 	if ((ahc->features & AHC_DT) != 0) {
5119 		u_int sfunct;
5120 
5121 		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
5122 		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
5123 		ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE);
5124 		ahc_outb(ahc, SFUNCT, sfunct);
5125 		ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1);
5126 	}
5127 
5128 	if ((ahc->features & AHC_MULTI_FUNC) != 0)
5129 		ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR);
5130 
5131 	if ((ahc->features & AHC_ULTRA2) != 0)
5132 		ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH);
5133 
5134 	ptr = ahc->suspend_state.scratch_ram;
5135 	for (i = 0; i < 64; i++)
5136 		*ptr++ = ahc_inb(ahc, SRAM_BASE + i);
5137 
5138 	if ((ahc->features & AHC_MORE_SRAM) != 0) {
5139 		for (i = 0; i < 16; i++)
5140 			*ptr++ = ahc_inb(ahc, TARG_OFFSET + i);
5141 	}
5142 
5143 	ptr = ahc->suspend_state.btt;
5144 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5145 		for (i = 0; i < AHC_NUM_TARGETS; i++) {
5146 			int j;
5147 
5148 			for (j = 0; j < AHC_NUM_LUNS; j++) {
5149 				u_int tcl;
5150 
5151 				tcl = BUILD_TCL(i << 4, j);
5152 				*ptr++ = ahc_index_busy_tcl(ahc, tcl);
5153 			}
5154 		}
5155 	}
5156 	ahc_shutdown(ahc);
5157 	return (0);
5158 }
5159 
5160 int
5161 ahc_resume(struct ahc_softc *ahc)
5162 {
5163 	uint8_t *ptr;
5164 	int	 i;
5165 
5166 	ahc_reset(ahc);
5167 
5168 	ahc_build_free_scb_list(ahc);
5169 
5170 	/* Restore volatile registers */
5171 	if ((ahc->features & AHC_TWIN) != 0) {
5172 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
5173 		ahc_outb(ahc, SCSIID, ahc->our_id);
5174 		ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq);
5175 		ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0);
5176 		ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1);
5177 		ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0);
5178 		ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1);
5179 		ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer);
5180 		ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl);
5181 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
5182 	}
5183 	ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq);
5184 	ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0);
5185 	ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1);
5186 	ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0);
5187 	ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1);
5188 	ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer);
5189 	ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl);
5190 	if ((ahc->features & AHC_ULTRA2) != 0)
5191 		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
5192 	else
5193 		ahc_outb(ahc, SCSIID, ahc->our_id);
5194 
5195 	if ((ahc->chip & AHC_PCI) != 0) {
5196 		ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0);
5197 		ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus);
5198 	}
5199 
5200 	if ((ahc->features & AHC_DT) != 0) {
5201 		u_int sfunct;
5202 
5203 		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
5204 		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
5205 		ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode);
5206 		ahc_outb(ahc, SFUNCT, sfunct);
5207 		ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1);
5208 	}
5209 
5210 	if ((ahc->features & AHC_MULTI_FUNC) != 0)
5211 		ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr);
5212 
5213 	if ((ahc->features & AHC_ULTRA2) != 0)
5214 		ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh);
5215 
5216 	ptr = ahc->suspend_state.scratch_ram;
5217 	for (i = 0; i < 64; i++)
5218 		ahc_outb(ahc, SRAM_BASE + i, *ptr++);
5219 
5220 	if ((ahc->features & AHC_MORE_SRAM) != 0) {
5221 		for (i = 0; i < 16; i++)
5222 			ahc_outb(ahc, TARG_OFFSET + i, *ptr++);
5223 	}
5224 
5225 	ptr = ahc->suspend_state.btt;
5226 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5227 		for (i = 0; i < AHC_NUM_TARGETS; i++) {
5228 			int j;
5229 
5230 			for (j = 0; j < AHC_NUM_LUNS; j++) {
5231 				u_int tcl;
5232 
5233 				tcl = BUILD_TCL(i << 4, j);
5234 				ahc_busy_tcl(ahc, tcl, *ptr++);
5235 			}
5236 		}
5237 	}
5238 	return (0);
5239 }
5240 
5241 /************************** Busy Target Table *********************************/
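/*
 * The busy target table tracks, per target (or per target/lun when the
 * AHC_SCB_BTT flag is set and SCB space holds the table), the id of the
 * untagged transaction currently outstanding for that device, or
 * SCB_LIST_NULL if there is none.  A sketch of typical use, mirroring
 * ahc_abort_scbs() below:
 *
 *	tcl = BUILD_TCL(target << 4, lun);
 *	scbid = ahc_index_busy_tcl(ahc, tcl);
 *	scb = ahc_lookup_scb(ahc, scbid);
 */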
5242 /*
5243  * Return the untagged transaction id for a given target/channel lun.
5244  * Use ahc_unbusy_tcl() to clear an entry.
5245  */
5246 u_int
5247 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
5248 {
5249 	u_int scbid;
5250 	u_int target_offset;
5251 
5252 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5253 		u_int saved_scbptr;
5254 
5255 		saved_scbptr = ahc_inb(ahc, SCBPTR);
5256 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5257 		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
5258 		ahc_outb(ahc, SCBPTR, saved_scbptr);
5259 	} else {
5260 		target_offset = TCL_TARGET_OFFSET(tcl);
5261 		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
5262 	}
5263 
5264 	return (scbid);
5265 }
5266 
5267 void
5268 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
5269 {
5270 	u_int target_offset;
5271 
5272 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5273 		u_int saved_scbptr;
5274 
5275 		saved_scbptr = ahc_inb(ahc, SCBPTR);
5276 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5277 		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
5278 		ahc_outb(ahc, SCBPTR, saved_scbptr);
5279 	} else {
5280 		target_offset = TCL_TARGET_OFFSET(tcl);
5281 		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
5282 	}
5283 }
5284 
5285 void
5286 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
5287 {
5288 	u_int target_offset;
5289 
5290 	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5291 		u_int saved_scbptr;
5292 
5293 		saved_scbptr = ahc_inb(ahc, SCBPTR);
5294 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5295 		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
5296 		ahc_outb(ahc, SCBPTR, saved_scbptr);
5297 	} else {
5298 		target_offset = TCL_TARGET_OFFSET(tcl);
5299 		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
5300 	}
5301 }
5302 
5303 /************************** SCB and SCB queue management **********************/
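/*
 * Test whether an SCB matches the given target/channel/lun/tag criteria.
 * ALL_CHANNELS, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, and SCB_LIST_NULL
 * (for the tag) act as wildcards.  When target mode is compiled in, the
 * role argument selects whether initiator or target mode tags are
 * compared.  Returns non-zero on a match.
 */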
5304 int
5305 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
5306 	      char channel, int lun, u_int tag, role_t role)
5307 {
5308 	int targ = SCB_GET_TARGET(ahc, scb);
5309 	char chan = SCB_GET_CHANNEL(ahc, scb);
5310 	int slun = SCB_GET_LUN(scb);
5311 	int match;
5312 
5313 	match = ((chan == channel) || (channel == ALL_CHANNELS));
5314 	if (match != 0)
5315 		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
5316 	if (match != 0)
5317 		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
5318 	if (match != 0) {
5319 #ifdef AHC_TARGET_MODE
5320 		int group;
5321 
5322 		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
5323 		if (role == ROLE_INITIATOR) {
5324 			match = (group != XPT_FC_GROUP_TMODE)
5325 			      && ((tag == scb->hscb->tag)
5326 			       || (tag == SCB_LIST_NULL));
5327 		} else if (role == ROLE_TARGET) {
5328 			match = (group == XPT_FC_GROUP_TMODE)
5329 			      && ((tag == scb->io_ctx->csio.tag_id)
5330 			       || (tag == SCB_LIST_NULL));
5331 		}
5332 #else /* !AHC_TARGET_MODE */
5333 		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
5334 #endif /* AHC_TARGET_MODE */
5335 	}
5336 
5337 	return match;
5338 }
5339 
5340 void
5341 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
5342 {
5343 	int	target;
5344 	char	channel;
5345 	int	lun;
5346 
5347 	target = SCB_GET_TARGET(ahc, scb);
5348 	lun = SCB_GET_LUN(scb);
5349 	channel = SCB_GET_CHANNEL(ahc, scb);
5350 
5351 	ahc_search_qinfifo(ahc, target, channel, lun,
5352 			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
5353 			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5354 
5355 	ahc_platform_freeze_devq(ahc, scb);
5356 }
5357 
5358 void
5359 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
5360 {
5361 	struct scb *prev_scb;
5362 
5363 	prev_scb = NULL;
5364 	if (ahc_qinfifo_count(ahc) != 0) {
5365 		u_int prev_tag;
5366 		uint8_t prev_pos;
5367 
5368 		prev_pos = ahc->qinfifonext - 1;
5369 		prev_tag = ahc->qinfifo[prev_pos];
5370 		prev_scb = ahc_lookup_scb(ahc, prev_tag);
5371 	}
5372 	ahc_qinfifo_requeue(ahc, prev_scb, scb);
5373 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5374 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
5375 	} else {
5376 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
5377 	}
5378 }
5379 
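/*
 * Link an SCB into the qinfifo after prev_scb (or at the head when
 * prev_scb is NULL, in which case the hardware's NEXT_QUEUED_SCB is
 * updated instead).  The hardware SCB "next" fields are chained so the
 * sequencer can locate the following entry, and the affected SCBs are
 * synced for the DMA engine.
 */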
5380 static void
5381 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
5382 		    struct scb *scb)
5383 {
5384 	if (prev_scb == NULL) {
5385 		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
5386 	} else {
5387 		prev_scb->hscb->next = scb->hscb->tag;
5388 		ahc_sync_scb(ahc, prev_scb,
5389 			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5390 	}
5391 	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
5392 	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
5393 	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5394 }
5395 
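/*
 * Return the number of entries currently in the qinfifo: the distance,
 * modulo 256, between the kernel's producer index (qinfifonext) and the
 * sequencer's consumer index (SNSCB_QOFF on queue-register chips, QINPOS
 * otherwise).
 */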
5396 static int
5397 ahc_qinfifo_count(struct ahc_softc *ahc)
5398 {
5399 	uint8_t qinpos;
5400 	uint8_t diff;
5401 
5402 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5403 		qinpos = ahc_inb(ahc, SNSCB_QOFF);
5404 		ahc_outb(ahc, SNSCB_QOFF, qinpos);
5405 	} else
5406 		qinpos = ahc_inb(ahc, QINPOS);
5407 	diff = ahc->qinfifonext - qinpos;
5408 	return (diff);
5409 }
5410 
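/*
 * Scan the qinfifo, and then the waiting-for-selection and untagged
 * queues, for SCBs matching the given criteria.  The action determines
 * what happens to each match: SEARCH_COMPLETE finishes the command with
 * the supplied status, SEARCH_REMOVE drops it from the queue, and
 * SEARCH_COUNT leaves the queues untouched.  Returns the number of
 * matching SCBs found.
 */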
5411 int
5412 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
5413 		   int lun, u_int tag, role_t role, uint32_t status,
5414 		   ahc_search_action action)
5415 {
5416 	struct	scb *scb;
5417 	struct	scb *prev_scb;
5418 	uint8_t qinstart;
5419 	uint8_t qinpos;
5420 	uint8_t qintail;
5421 	uint8_t next;
5422 	uint8_t prev;
5423 	uint8_t curscbptr;
5424 	int	found;
5425 	int	have_qregs;
5426 
5427 	qintail = ahc->qinfifonext;
5428 	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
5429 	if (have_qregs) {
5430 		qinstart = ahc_inb(ahc, SNSCB_QOFF);
5431 		ahc_outb(ahc, SNSCB_QOFF, qinstart);
5432 	} else
5433 		qinstart = ahc_inb(ahc, QINPOS);
5434 	qinpos = qinstart;
5435 	found = 0;
5436 	prev_scb = NULL;
5437 
5438 	if (action == SEARCH_COMPLETE) {
5439 		/*
5440 		 * Don't attempt to run any queued untagged transactions
5441 		 * until we are done with the abort process.
5442 		 */
5443 		ahc_freeze_untagged_queues(ahc);
5444 	}
5445 
5446 	/*
5447 	 * Start with an empty queue.  Entries that are not chosen
5448 	 * for removal will be re-added to the queue as we go.
5449 	 */
5450 	ahc->qinfifonext = qinpos;
5451 	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
5452 
5453 	while (qinpos != qintail) {
5454 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
5455 		if (scb == NULL) {
5456 			printf("qinpos = %d, SCB index = %d\n",
5457 				qinpos, ahc->qinfifo[qinpos]);
5458 			panic("Loop 1\n");
5459 		}
5460 
5461 		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
5462 			/*
5463 			 * We found an scb that needs to be acted on.
5464 			 */
5465 			found++;
5466 			switch (action) {
5467 			case SEARCH_COMPLETE:
5468 			{
5469 				cam_status ostat;
5470 				cam_status cstat;
5471 
5472 				ostat = ahc_get_transaction_status(scb);
5473 				if (ostat == CAM_REQ_INPROG)
5474 					ahc_set_transaction_status(scb, status);
5475 				cstat = ahc_get_transaction_status(scb);
5476 				if (cstat != CAM_REQ_CMP)
5477 					ahc_freeze_scb(scb);
5478 				if ((scb->flags & SCB_ACTIVE) == 0)
5479 					printf("Inactive SCB in qinfifo\n");
5480 				ahc_done(ahc, scb);
5481 
5482 				/* FALLTHROUGH */
5483 			}
5484 			case SEARCH_REMOVE:
5485 				break;
5486 			case SEARCH_COUNT:
5487 				ahc_qinfifo_requeue(ahc, prev_scb, scb);
5488 				prev_scb = scb;
5489 				break;
5490 			}
5491 		} else {
5492 			ahc_qinfifo_requeue(ahc, prev_scb, scb);
5493 			prev_scb = scb;
5494 		}
5495 		qinpos++;
5496 	}
5497 
5498 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5499 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
5500 	} else {
5501 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
5502 	}
5503 
5504 	if (action != SEARCH_COUNT
5505 	 && (found != 0)
5506 	 && (qinstart != ahc->qinfifonext)) {
5507 		/*
5508 		 * The sequencer may be in the process of dmaing
5509 		 * down the SCB at the beginning of the queue.
5510 		 * This could be problematic if either the first
5511 		 * or the second SCB is removed from the queue
5512 		 * (the first SCB includes a pointer to the "next"
5513 		 * SCB to dma). If we have removed any entries, swap
5514 		 * the first element in the queue with the next HSCB
5515 		 * so the sequencer will notice that NEXT_QUEUED_SCB
5516 		 * has changed during its dma attempt and will retry
5517 		 * the DMA.
5518 		 */
5519 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
5520 
5521 		if (scb == NULL) {
5522 			printf("found = %d, qinstart = %d, qinfifonext = %d\n",
5523 				found, qinstart, ahc->qinfifonext);
5524 			panic("First/Second Qinfifo fixup\n");
5525 		}
5526 		/*
5527 		 * ahc_swap_with_next_hscb forces our next pointer to
5528 		 * point to the reserved SCB for future commands.  Save
5529 		 * and restore our original next pointer to maintain
5530 		 * queue integrity.
5531 		 */
5532 		next = scb->hscb->next;
5533 		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
5534 		ahc_swap_with_next_hscb(ahc, scb);
5535 		scb->hscb->next = next;
5536 		ahc->qinfifo[qinstart] = scb->hscb->tag;
5537 
5538 		/* Tell the card about the new head of the qinfifo. */
5539 		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
5540 
5541 		/* Fixup the tail "next" pointer. */
5542 		qintail = ahc->qinfifonext - 1;
5543 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
5544 		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
5545 	}
5546 
5547 	/*
5548 	 * Search the waiting-for-selection list.
5549 	 */
5550 	curscbptr = ahc_inb(ahc, SCBPTR);
5551 	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
5552 	prev = SCB_LIST_NULL;
5553 
5554 	while (next != SCB_LIST_NULL) {
5555 		uint8_t scb_index;
5556 
5557 		ahc_outb(ahc, SCBPTR, next);
5558 		scb_index = ahc_inb(ahc, SCB_TAG);
5559 		if (scb_index >= ahc->scb_data->numscbs) {
5560 			printf("Waiting List inconsistency. "
5561 			       "SCB index == %d, yet numscbs == %d.",
5562 			       scb_index, ahc->scb_data->numscbs);
5563 			ahc_dump_card_state(ahc);
5564 			panic("for safety");
5565 		}
5566 		scb = ahc_lookup_scb(ahc, scb_index);
5567 		if (scb == NULL) {
5568 			printf("scb_index = %d, next = %d\n",
5569 				scb_index, next);
5570 			panic("Waiting List traversal\n");
5571 		}
5572 		if (ahc_match_scb(ahc, scb, target, channel,
5573 				  lun, SCB_LIST_NULL, role)) {
5574 			/*
5575 			 * We found an scb that needs to be acted on.
5576 			 */
5577 			found++;
5578 			switch (action) {
5579 			case SEARCH_COMPLETE:
5580 			{
5581 				cam_status ostat;
5582 				cam_status cstat;
5583 
5584 				ostat = ahc_get_transaction_status(scb);
5585 				if (ostat == CAM_REQ_INPROG)
5586 					ahc_set_transaction_status(scb,
5587 								   status);
5588 				cstat = ahc_get_transaction_status(scb);
5589 				if (cstat != CAM_REQ_CMP)
5590 					ahc_freeze_scb(scb);
5591 				if ((scb->flags & SCB_ACTIVE) == 0)
5592 					printf("Inactive SCB in Waiting List\n");
5593 				ahc_done(ahc, scb);
5594 				/* FALLTHROUGH */
5595 			}
5596 			case SEARCH_REMOVE:
5597 				next = ahc_rem_wscb(ahc, next, prev);
5598 				break;
5599 			case SEARCH_COUNT:
5600 				prev = next;
5601 				next = ahc_inb(ahc, SCB_NEXT);
5602 				break;
5603 			}
5604 		} else {
5605 
5606 			prev = next;
5607 			next = ahc_inb(ahc, SCB_NEXT);
5608 		}
5609 	}
5610 	ahc_outb(ahc, SCBPTR, curscbptr);
5611 
5612 	found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target,
5613 					    channel, lun, status, action);
5614 
5615 	if (action == SEARCH_COMPLETE)
5616 		ahc_release_untagged_queues(ahc);
5617 	return (found);
5618 }
5619 
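/*
 * Scan the kernel's per-target untagged queues for commands that have not
 * yet been started and that match the given target/channel/lun (and,
 * optionally, a specific io_ctx), applying the same complete/remove/count
 * actions as ahc_search_qinfifo().  Returns the number of matches.
 */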
5620 int
5621 ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx,
5622 			   int target, char channel, int lun, uint32_t status,
5623 			   ahc_search_action action)
5624 {
5625 	struct	scb *scb;
5626 	int	maxtarget;
5627 	int	found;
5628 	int	i;
5629 
5630 	if (action == SEARCH_COMPLETE) {
5631 		/*
5632 		 * Don't attempt to run any queued untagged transactions
5633 		 * until we are done with the abort process.
5634 		 */
5635 		ahc_freeze_untagged_queues(ahc);
5636 	}
5637 
5638 	found = 0;
5639 	i = 0;
5640 	if ((ahc->flags & AHC_SCB_BTT) == 0) {
5641 
5642 		maxtarget = 16;
5643 		if (target != CAM_TARGET_WILDCARD) {
5644 
5645 			i = target;
5646 			if (channel == 'B')
5647 				i += 8;
5648 			maxtarget = i + 1;
5649 		}
5650 	} else {
5651 		maxtarget = 0;
5652 	}
5653 
5654 	for (; i < maxtarget; i++) {
5655 		struct scb_tailq *untagged_q;
5656 		struct scb *next_scb;
5657 
5658 		untagged_q = &(ahc->untagged_queues[i]);
5659 		next_scb = TAILQ_FIRST(untagged_q);
5660 		while (next_scb != NULL) {
5661 
5662 			scb = next_scb;
5663 			next_scb = TAILQ_NEXT(scb, links.tqe);
5664 
5665 			/*
5666 			 * The head of the list may be the currently
5667 			 * active untagged command for a device.
5668 			 * We're only searching for commands that
5669 			 * have not been started.  A transaction
5670 			 * marked active but still in the qinfifo
5671 			 * is removed by the qinfifo scanning code
5672 			 * above.
5673 			 */
5674 			if ((scb->flags & SCB_ACTIVE) != 0)
5675 				continue;
5676 
5677 			if (ahc_match_scb(ahc, scb, target, channel, lun,
5678 					  SCB_LIST_NULL, ROLE_INITIATOR) == 0
5679 			 || (ctx != NULL && ctx != scb->io_ctx))
5680 				continue;
5681 
5682 			/*
5683 			 * We found an scb that needs to be acted on.
5684 			 */
5685 			found++;
5686 			switch (action) {
5687 			case SEARCH_COMPLETE:
5688 			{
5689 				cam_status ostat;
5690 				cam_status cstat;
5691 
5692 				ostat = ahc_get_transaction_status(scb);
5693 				if (ostat == CAM_REQ_INPROG)
5694 					ahc_set_transaction_status(scb, status);
5695 				cstat = ahc_get_transaction_status(scb);
5696 				if (cstat != CAM_REQ_CMP)
5697 					ahc_freeze_scb(scb);
5698 				if ((scb->flags & SCB_ACTIVE) == 0)
5699 					printf("Inactive SCB in untaggedQ\n");
5700 				ahc_done(ahc, scb);
5701 				break;
5702 			}
5703 			case SEARCH_REMOVE:
5704 				scb->flags &= ~SCB_UNTAGGEDQ;
5705 				TAILQ_REMOVE(untagged_q, scb, links.tqe);
5706 				break;
5707 			case SEARCH_COUNT:
5708 				break;
5709 			}
5710 		}
5711 	}
5712 
5713 	if (action == SEARCH_COMPLETE)
5714 		ahc_release_untagged_queues(ahc);
5715 	return (found);
5716 }
5717 
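/*
 * Walk the sequencer's disconnected SCB list looking for entries that
 * match the given target/channel/lun/tag.  Matches may optionally be
 * removed from the list (returning the hardware SCB to the free list),
 * and the walk may stop at the first match.  Returns the number of
 * matches found.
 */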
5718 int
5719 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
5720 		     int lun, u_int tag, int stop_on_first, int remove,
5721 		     int save_state)
5722 {
5723 	struct	scb *scbp;
5724 	u_int	next;
5725 	u_int	prev;
5726 	u_int	count;
5727 	u_int	active_scb;
5728 
5729 	count = 0;
5730 	next = ahc_inb(ahc, DISCONNECTED_SCBH);
5731 	prev = SCB_LIST_NULL;
5732 
5733 	if (save_state) {
5734 		/* restore this when we're done */
5735 		active_scb = ahc_inb(ahc, SCBPTR);
5736 	} else
5737 		/* Silence compiler */
5738 		active_scb = SCB_LIST_NULL;
5739 
5740 	while (next != SCB_LIST_NULL) {
5741 		u_int scb_index;
5742 
5743 		ahc_outb(ahc, SCBPTR, next);
5744 		scb_index = ahc_inb(ahc, SCB_TAG);
5745 		if (scb_index >= ahc->scb_data->numscbs) {
5746 			printf("Disconnected List inconsistency. "
5747 			       "SCB index == %d, yet numscbs == %d.",
5748 			       scb_index, ahc->scb_data->numscbs);
5749 			ahc_dump_card_state(ahc);
5750 			panic("for safety");
5751 		}
5752 
5753 		if (next == prev) {
5754 			panic("Disconnected List Loop. "
5755 			      "cur SCBPTR == %x, prev SCBPTR == %x.",
5756 			      next, prev);
5757 		}
5758 		scbp = ahc_lookup_scb(ahc, scb_index);
5759 		if (ahc_match_scb(ahc, scbp, target, channel, lun,
5760 				  tag, ROLE_INITIATOR)) {
5761 			count++;
5762 			if (remove) {
5763 				next =
5764 				    ahc_rem_scb_from_disc_list(ahc, prev, next);
5765 			} else {
5766 				prev = next;
5767 				next = ahc_inb(ahc, SCB_NEXT);
5768 			}
5769 			if (stop_on_first)
5770 				break;
5771 		} else {
5772 			prev = next;
5773 			next = ahc_inb(ahc, SCB_NEXT);
5774 		}
5775 	}
5776 	if (save_state)
5777 		ahc_outb(ahc, SCBPTR, active_scb);
5778 	return (count);
5779 }
5780 
5781 /*
5782  * Remove an SCB from the on chip list of disconnected transactions.
5783  * This list is empty/unused if we are not performing SCB paging.
5784  */
5785 static u_int
5786 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
5787 {
5788 	u_int next;
5789 
5790 	ahc_outb(ahc, SCBPTR, scbptr);
5791 	next = ahc_inb(ahc, SCB_NEXT);
5792 
5793 	ahc_outb(ahc, SCB_CONTROL, 0);
5794 
5795 	ahc_add_curscb_to_free_list(ahc);
5796 
5797 	if (prev != SCB_LIST_NULL) {
5798 		ahc_outb(ahc, SCBPTR, prev);
5799 		ahc_outb(ahc, SCB_NEXT, next);
5800 	} else
5801 		ahc_outb(ahc, DISCONNECTED_SCBH, next);
5802 
5803 	return (next);
5804 }
5805 
5806 /*
5807  * Add the SCB as selected by SCBPTR onto the on chip list of
5808  * free hardware SCBs.  This list is empty/unused if we are not
5809  * performing SCB paging.
5810  */
5811 static void
5812 ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5813 {
5814 	/*
5815 	 * Invalidate the tag so that our abort
5816 	 * routines don't think it's active.
5817 	 */
5818 	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
5819 
5820 	if ((ahc->flags & AHC_PAGESCBS) != 0) {
5821 		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
5822 		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
5823 	}
5824 }
5825 
5826 /*
5827  * Manipulate the waiting for selection list and return the
5828  * scb that follows the one that we remove.
5829  */
5830 static u_int
5831 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5832 {
5833 	u_int curscb, next;
5834 
5835 	/*
5836 	 * Select the SCB we want to abort and
5837 	 * pull the next pointer out of it.
5838 	 */
5839 	curscb = ahc_inb(ahc, SCBPTR);
5840 	ahc_outb(ahc, SCBPTR, scbpos);
5841 	next = ahc_inb(ahc, SCB_NEXT);
5842 
5843 	/* Clear the necessary fields */
5844 	ahc_outb(ahc, SCB_CONTROL, 0);
5845 
5846 	ahc_add_curscb_to_free_list(ahc);
5847 
5848 	/* update the waiting list */
5849 	if (prev == SCB_LIST_NULL) {
5850 		/* First in the list */
5851 		ahc_outb(ahc, WAITING_SCBH, next);
5852 
5853 		/*
5854 		 * Ensure we aren't attempting to perform
5855 		 * selection for this entry.
5856 		 */
5857 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
5858 	} else {
5859 		/*
5860 		 * Select the scb that pointed to us
5861 		 * and update its next pointer.
5862 		 */
5863 		ahc_outb(ahc, SCBPTR, prev);
5864 		ahc_outb(ahc, SCB_NEXT, next);
5865 	}
5866 
5867 	/*
5868 	 * Point us back at the original scb position.
5869 	 */
5870 	ahc_outb(ahc, SCBPTR, curscb);
5871 	return next;
5872 }
5873 
5874 /******************************** Error Handling ******************************/
5875 /*
5876  * Abort all SCBs that match the given description (target/channel/lun/tag),
5877  * setting their status to the passed in status if the status has not already
5878  * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
5879  * is paused before it is called.
5880  */
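/*
 * In outline, the cleanup proceeds queue by queue: the qinfifo and
 * waiting/untagged queues (via ahc_search_qinfifo()), the busy target
 * table, the disconnected list, any hardware SCBs left active but not on
 * a list, and finally the kernel's pending SCB list.
 */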
5881 int
5882 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
5883 	       int lun, u_int tag, role_t role, uint32_t status)
5884 {
5885 	struct	scb *scbp;
5886 	struct	scb *scbp_next;
5887 	u_int	active_scb;
5888 	int	i, j;
5889 	int	maxtarget;
5890 	int	minlun;
5891 	int	maxlun;
5892 
5893 	int	found;
5894 
5895 	/*
5896 	 * Don't attempt to run any queued untagged transactions
5897 	 * until we are done with the abort process.
5898 	 */
5899 	ahc_freeze_untagged_queues(ahc);
5900 
5901 	/* restore this when we're done */
5902 	active_scb = ahc_inb(ahc, SCBPTR);
5903 
5904 	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
5905 				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5906 
5907 	/*
5908 	 * Clean out the busy target table for any untagged commands.
5909 	 */
5910 	i = 0;
5911 	maxtarget = 16;
5912 	if (target != CAM_TARGET_WILDCARD) {
5913 		i = target;
5914 		if (channel == 'B')
5915 			i += 8;
5916 		maxtarget = i + 1;
5917 	}
5918 
5919 	if (lun == CAM_LUN_WILDCARD) {
5920 
5921 		/*
5922 		 * Unless we are using an SCB based
5923 		 * busy targets table, there is only
5924 		 * one table entry for all luns of
5925 		 * a target.
5926 		 */
5927 		minlun = 0;
5928 		maxlun = 1;
5929 		if ((ahc->flags & AHC_SCB_BTT) != 0)
5930 			maxlun = AHC_NUM_LUNS;
5931 	} else {
5932 		minlun = lun;
5933 		maxlun = lun + 1;
5934 	}
5935 
5936 	if (role != ROLE_TARGET) {
5937 		for (;i < maxtarget; i++) {
5938 			for (j = minlun;j < maxlun; j++) {
5939 				u_int scbid;
5940 				u_int tcl;
5941 
5942 				tcl = BUILD_TCL(i << 4, j);
5943 				scbid = ahc_index_busy_tcl(ahc, tcl);
5944 				scbp = ahc_lookup_scb(ahc, scbid);
5945 				if (scbp == NULL
5946 				 || ahc_match_scb(ahc, scbp, target, channel,
5947 						  lun, tag, role) == 0)
5948 					continue;
5949 				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
5950 			}
5951 		}
5952 
5953 		/*
5954 		 * Go through the disconnected list and remove any entries we
5955 		 * have queued for completion, 0'ing their control byte too.
5956 		 * We save the active SCB and restore it ourselves, so there
5957 		 * is no reason for this search to restore it too.
5958 		 */
5959 		ahc_search_disc_list(ahc, target, channel, lun, tag,
5960 				     /*stop_on_first*/FALSE, /*remove*/TRUE,
5961 				     /*save_state*/FALSE);
5962 	}
5963 
5964 	/*
5965 	 * Go through the hardware SCB array looking for commands that
5966 	 * were active but not on any list.  In some cases, these remnants
5967 	 * might not still have mappings in the scbindex array (e.g. unexpected
5968 	 * bus free with the same scb queued for an abort).  Don't hold this
5969 	 * against them.
5970 	 */
5971 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
5972 		u_int scbid;
5973 
5974 		ahc_outb(ahc, SCBPTR, i);
5975 		scbid = ahc_inb(ahc, SCB_TAG);
5976 		scbp = ahc_lookup_scb(ahc, scbid);
5977 		if ((scbp == NULL && scbid != SCB_LIST_NULL)
5978 		 || (scbp != NULL
5979 		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
5980 			ahc_add_curscb_to_free_list(ahc);
5981 	}
5982 
5983 	/*
5984 	 * Go through the pending CCB list and look for
5985 	 * commands for this target that are still active.
5986 	 * These are other tagged commands that were
5987 	 * disconnected when the reset occurred.
5988 	 */
5989 	scbp_next = LIST_FIRST(&ahc->pending_scbs);
5990 	while (scbp_next != NULL) {
5991 		scbp = scbp_next;
5992 		scbp_next = LIST_NEXT(scbp, pending_links);
5993 		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
5994 			cam_status ostat;
5995 
5996 			ostat = ahc_get_transaction_status(scbp);
5997 			if (ostat == CAM_REQ_INPROG)
5998 				ahc_set_transaction_status(scbp, status);
5999 			if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
6000 				ahc_freeze_scb(scbp);
6001 			if ((scbp->flags & SCB_ACTIVE) == 0)
6002 				printf("Inactive SCB on pending list\n");
6003 			ahc_done(ahc, scbp);
6004 			found++;
6005 		}
6006 	}
6007 	ahc_outb(ahc, SCBPTR, active_scb);
6008 	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
6009 	ahc_release_untagged_queues(ahc);
6010 	return found;
6011 }
6012 
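/*
 * Pulse a reset onto the currently selected SCSI bus: bus reset
 * interrupts are masked, SCSIRSTO is asserted for AHC_BUSRESET_DELAY and
 * then deasserted, any latched interrupt status is cleared, and the
 * reset interrupt is re-enabled.
 */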
6013 static void
6014 ahc_reset_current_bus(struct ahc_softc *ahc)
6015 {
6016 	uint8_t scsiseq;
6017 
6018 	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
6019 	scsiseq = ahc_inb(ahc, SCSISEQ);
6020 	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
6021 	ahc_flush_device_writes(ahc);
6022 	ahc_delay(AHC_BUSRESET_DELAY);
6023 	/* Turn off the bus reset */
6024 	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
6025 
6026 	ahc_clear_intstat(ahc);
6027 
6028 	/* Re-enable reset interrupts */
6029 	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
6030 }
6031 
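/*
 * Reset the named channel: pause the sequencer, drain the completion
 * fifo, optionally drive a bus reset, abort every pending SCB on that
 * channel with CAM_SCSI_BUS_RESET, revert all transfer negotiations to
 * async/narrow, and notify the XPT of the reset.  Returns the number of
 * SCBs aborted.
 */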
6032 int
6033 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
6034 {
6035 	struct	ahc_devinfo devinfo;
6036 	u_int	initiator, target, max_scsiid;
6037 	u_int	sblkctl;
6038 	u_int	scsiseq;
6039 	u_int	simode1;
6040 	int	found;
6041 	int	restart_needed;
6042 	char	cur_channel;
6043 
6044 	ahc->pending_device = NULL;
6045 
6046 	ahc_compile_devinfo(&devinfo,
6047 			    CAM_TARGET_WILDCARD,
6048 			    CAM_TARGET_WILDCARD,
6049 			    CAM_LUN_WILDCARD,
6050 			    channel, ROLE_UNKNOWN);
6051 	ahc_pause(ahc);
6052 
6053 	/* Make sure the sequencer is in a safe location. */
6054 	ahc_clear_critical_section(ahc);
6055 
6056 	/*
6057 	 * Run our command complete fifos to ensure that we perform
6058 	 * completion processing on any commands that 'completed'
6059 	 * before the reset occurred.
6060 	 */
6061 	ahc_run_qoutfifo(ahc);
6062 #ifdef AHC_TARGET_MODE
6063 	/*
6064 	 * XXX - In Twin mode, the tqinfifo may have commands
6065 	 *	 for an unaffected channel in it.  However, if
6066 	 *	 we have run out of ATIO resources to drain that
6067 	 *	 queue, we may not get them all out here.  Further,
6068 	 *	 the blocked transactions for the reset channel
6069 	 *	 should just be killed off, irrespective of whether
6070 	 *	 we are blocked on ATIO resources.  Write a routine
6071 	 *	 to compact the tqinfifo appropriately.
6072 	 */
6073 	if ((ahc->flags & AHC_TARGETROLE) != 0) {
6074 		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
6075 	}
6076 #endif
6077 
6078 	/*
6079 	 * Reset the bus if we are initiating this reset
6080 	 */
6081 	sblkctl = ahc_inb(ahc, SBLKCTL);
6082 	cur_channel = 'A';
6083 	if ((ahc->features & AHC_TWIN) != 0
6084 	 && ((sblkctl & SELBUSB) != 0))
6085 	    cur_channel = 'B';
6086 	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6087 	if (cur_channel != channel) {
6088 		/* Case 1: A command for another bus is active.
6089 		 * Stealthily reset the other bus without
6090 		 * upsetting the current bus.
6091 		 */
6092 		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
6093 		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
6094 #ifdef AHC_TARGET_MODE
6095 		/*
6096 		 * Bus resets clear ENSELI, so we cannot
6097 		 * defer re-enabling bus reset interrupts
6098 		 * if we are in target mode.
6099 		 */
6100 		if ((ahc->flags & AHC_TARGETROLE) != 0)
6101 			simode1 |= ENSCSIRST;
6102 #endif
6103 		ahc_outb(ahc, SIMODE1, simode1);
6104 		if (initiate_reset)
6105 			ahc_reset_current_bus(ahc);
6106 		ahc_clear_intstat(ahc);
6107 		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
6108 		ahc_outb(ahc, SBLKCTL, sblkctl);
6109 		restart_needed = FALSE;
6110 	} else {
6111 		/* Case 2: A command from this bus is active or we're idle */
6112 		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
6113 #ifdef AHC_TARGET_MODE
6114 		/*
6115 		 * Bus resets clear ENSELI, so we cannot
6116 		 * defer re-enabling bus reset interrupts
6117 		 * if we are in target mode.
6118 		 */
6119 		if ((ahc->flags & AHC_TARGETROLE) != 0)
6120 			simode1 |= ENSCSIRST;
6121 #endif
6122 		ahc_outb(ahc, SIMODE1, simode1);
6123 		if (initiate_reset)
6124 			ahc_reset_current_bus(ahc);
6125 		ahc_clear_intstat(ahc);
6126 		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
6127 		restart_needed = TRUE;
6128 	}
6129 
6130 	/*
6131 	 * Clean up all the state information for the
6132 	 * pending transactions on this bus.
6133 	 */
6134 	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
6135 			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
6136 			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
6137 
6138 	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
6139 
6140 #ifdef AHC_TARGET_MODE
6141 	/*
6142 	 * Send an immediate notify ccb to all target mode peripheral
6143 	 * drivers affected by this action.
6144 	 */
6145 	for (target = 0; target <= max_scsiid; target++) {
6146 		struct ahc_tmode_tstate* tstate;
6147 		u_int lun;
6148 
6149 		tstate = ahc->enabled_targets[target];
6150 		if (tstate == NULL)
6151 			continue;
6152 		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
6153 			struct ahc_tmode_lstate* lstate;
6154 
6155 			lstate = tstate->enabled_luns[lun];
6156 			if (lstate == NULL)
6157 				continue;
6158 
6159 			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
6160 					       EVENT_TYPE_BUS_RESET, /*arg*/0);
6161 			ahc_send_lstate_events(ahc, lstate);
6162 		}
6163 	}
6164 #endif
6165 	/* Notify the XPT that a bus reset occurred */
6166 	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
6167 		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
6168 
6169 	/*
6170 	 * Revert to async/narrow transfers until we renegotiate.
6171 	 */
6172 	for (target = 0; target <= max_scsiid; target++) {
6173 
6174 		if (ahc->enabled_targets[target] == NULL)
6175 			continue;
6176 		for (initiator = 0; initiator <= max_scsiid; initiator++) {
6177 			struct ahc_devinfo devinfo;
6178 
6179 			ahc_compile_devinfo(&devinfo, target, initiator,
6180 					    CAM_LUN_WILDCARD,
6181 					    channel, ROLE_UNKNOWN);
6182 			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6183 				      AHC_TRANS_CUR, /*paused*/TRUE);
6184 			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
6185 					 /*period*/0, /*offset*/0,
6186 					 /*ppr_options*/0, AHC_TRANS_CUR,
6187 					 /*paused*/TRUE);
6188 		}
6189 	}
6190 
6191 	if (restart_needed)
6192 		ahc_restart(ahc);
6193 	else
6194 		ahc_unpause(ahc);
6195 	return found;
6196 }
6197 
6198 
6199 /***************************** Residual Processing ****************************/
6200 /*
6201  * Calculate the residual for a just completed SCB.
6202  */
6203 void
6204 ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
6205 {
6206 	struct hardware_scb *hscb;
6207 	struct status_pkt *spkt;
6208 	uint32_t sgptr;
6209 	uint32_t resid_sgptr;
6210 	uint32_t resid;
6211 
6212 	/*
6213 	 * 5 cases.
6214 	 * 1) No residual.
6215 	 *    SG_RESID_VALID clear in sgptr.
6216 	 * 2) Transferless command
6217 	 * 3) Never performed any transfers.
6218 	 *    sgptr has SG_FULL_RESID set.
6219 	 * 4) No residual but target did not
6220 	 *    save data pointers after the
6221 	 *    last transfer, so sgptr was
6222 	 *    never updated.
6223 	 * 5) We have a partial residual.
6224 	 *    Use residual_sgptr to determine
6225 	 *    where we are.
6226 	 */
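	/*
	 * Case 5 example (illustrative): with three 4KB S/G segments and
	 * the transfer stopping 1KB into the second segment, the residual
	 * is the 3KB left in that segment plus the untouched 4KB third
	 * segment, i.e. 7KB.
	 */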
6227 
6228 	hscb = scb->hscb;
6229 	sgptr = ahc_le32toh(hscb->sgptr);
6230 	if ((sgptr & SG_RESID_VALID) == 0)
6231 		/* Case 1 */
6232 		return;
6233 	sgptr &= ~SG_RESID_VALID;
6234 
6235 	if ((sgptr & SG_LIST_NULL) != 0)
6236 		/* Case 2 */
6237 		return;
6238 
6239 	spkt = &hscb->shared_data.status;
6240 	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
6241 	if ((sgptr & SG_FULL_RESID) != 0) {
6242 		/* Case 3 */
6243 		resid = ahc_get_transfer_length(scb);
6244 	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
6245 		/* Case 4 */
6246 		return;
6247 	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
6248 		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
6249 	} else {
6250 		struct ahc_dma_seg *sg;
6251 
6252 		/*
6253 		 * Remainder of the SG where the transfer
6254 		 * stopped.
6255 		 */
6256 		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
6257 		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
6258 
6259 		/* The residual sg_ptr always points to the next sg */
6260 		sg--;
6261 
6262 		/*
6263 		 * Add up the contents of all residual
6264 		 * SG segments that are after the SG where
6265 		 * the transfer stopped.
6266 		 */
6267 		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
6268 			sg++;
6269 			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
6270 		}
6271 	}
6272 	if ((scb->flags & SCB_SENSE) == 0)
6273 		ahc_set_residual(scb, resid);
6274 	else
6275 		ahc_set_sense_residual(scb, resid);
6276 
6277 #ifdef AHC_DEBUG
6278 	if ((ahc_debug & AHC_SHOW_MISC) != 0) {
6279 		ahc_print_path(ahc, scb);
6280 		printf("Handled %sResidual of %d bytes\n",
6281 		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
6282 	}
6283 #endif
6284 }
6285 
6286 /******************************* Target Mode **********************************/
6287 #ifdef AHC_TARGET_MODE
6288 /*
6289  * Add a target mode event to this lun's queue
6290  */
6291 static void
6292 ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
6293 		       u_int initiator_id, u_int event_type, u_int event_arg)
6294 {
6295 	struct ahc_tmode_event *event;
6296 	int pending;
6297 
6298 	xpt_freeze_devq(lstate->path, /*count*/1);
6299 	if (lstate->event_w_idx >= lstate->event_r_idx)
6300 		pending = lstate->event_w_idx - lstate->event_r_idx;
6301 	else
6302 		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
6303 			- (lstate->event_r_idx - lstate->event_w_idx);
6304 
6305 	if (event_type == EVENT_TYPE_BUS_RESET
6306 	 || event_type == MSG_BUS_DEV_RESET) {
6307 		/*
6308 		 * Any earlier events are irrelevant, so reset our buffer.
6309 		 * This has the effect of allowing us to deal with reset
6310 		 * floods (an external device holding down the reset line)
6311 		 * without losing the event that is really interesting.
6312 		 */
6313 		lstate->event_r_idx = 0;
6314 		lstate->event_w_idx = 0;
6315 		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
6316 	}
6317 
6318 	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
6319 		xpt_print_path(lstate->path);
6320 		printf("immediate event %x:%x lost\n",
6321 		       lstate->event_buffer[lstate->event_r_idx].event_type,
6322 		       lstate->event_buffer[lstate->event_r_idx].event_arg);
6323 		lstate->event_r_idx++;
6324 		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6325 			lstate->event_r_idx = 0;
6326 		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
6327 	}
6328 
6329 	event = &lstate->event_buffer[lstate->event_w_idx];
6330 	event->initiator_id = initiator_id;
6331 	event->event_type = event_type;
6332 	event->event_arg = event_arg;
6333 	lstate->event_w_idx++;
6334 	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6335 		lstate->event_w_idx = 0;
6336 }
6337 
6338 /*
6339  * Send any target mode events queued up waiting
6340  * for immediate notify resources.
6341  */
6342 void
6343 ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
6344 {
6345 	struct ccb_hdr *ccbh;
6346 	struct ccb_immed_notify *inot;
6347 
6348 	while (lstate->event_r_idx != lstate->event_w_idx
6349 	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
6350 		struct ahc_tmode_event *event;
6351 
6352 		event = &lstate->event_buffer[lstate->event_r_idx];
6353 		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
6354 		inot = (struct ccb_immed_notify *)ccbh;
6355 		switch (event->event_type) {
6356 		case EVENT_TYPE_BUS_RESET:
6357 			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
6358 			break;
6359 		default:
6360 			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
6361 			inot->message_args[0] = event->event_type;
6362 			inot->message_args[1] = event->event_arg;
6363 			break;
6364 		}
6365 		inot->initiator_id = event->initiator_id;
6366 		inot->sense_len = 0;
6367 		xpt_done((union ccb *)inot);
6368 		lstate->event_r_idx++;
6369 		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6370 			lstate->event_r_idx = 0;
6371 	}
6372 }
6373 #endif
6374 
6375 /******************** Sequencer Program Patching/Download *********************/
6376 
6377 #ifdef AHC_DUMP_SEQ
6378 void
6379 ahc_dumpseq(struct ahc_softc* ahc)
6380 {
6381 	int i;
6382 	int max_prog;
6383 
6384 	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
6385 		max_prog = 448;
6386 	else if ((ahc->features & AHC_ULTRA2) != 0)
6387 		max_prog = 768;
6388 	else
6389 		max_prog = 512;
6390 
6391 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
6392 	ahc_outb(ahc, SEQADDR0, 0);
6393 	ahc_outb(ahc, SEQADDR1, 0);
6394 	for (i = 0; i < max_prog; i++) {
6395 		uint8_t ins_bytes[4];
6396 
6397 		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
6398 		printf("0x%08x\n", ins_bytes[0] << 24
6399 				 | ins_bytes[1] << 16
6400 				 | ins_bytes[2] << 8
6401 				 | ins_bytes[3]);
6402 	}
6403 }
6404 #endif
6405 
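/*
 * Download the sequencer program.  Instructions excluded by the patch
 * table are skipped, downloadable constants (queue offsets, cache line
 * masks, S/G prefetch parameters) are computed for this controller, and
 * the critical section table is recorded in terms of the addresses the
 * instructions actually landed at after patching.
 */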
6406 static void
6407 ahc_loadseq(struct ahc_softc *ahc)
6408 {
6409 	struct	cs cs_table[num_critical_sections];
6410 	u_int	begin_set[num_critical_sections];
6411 	u_int	end_set[num_critical_sections];
6412 	struct	patch *cur_patch;
6413 	u_int	cs_count;
6414 	u_int	cur_cs;
6415 	u_int	i;
6416 	int	downloaded;
6417 	u_int	skip_addr;
6418 	u_int	sg_prefetch_cnt;
6419 	uint8_t	download_consts[7];
6420 
6421 	/*
6422 	 * Start out with 0 critical sections
6423 	 * that apply to this firmware load.
6424 	 */
6425 	cs_count = 0;
6426 	cur_cs = 0;
6427 	memset(begin_set, 0, sizeof(begin_set));
6428 	memset(end_set, 0, sizeof(end_set));
6429 
6430 	/* Setup downloadable constant table */
6431 	download_consts[QOUTFIFO_OFFSET] = 0;
6432 	if (ahc->targetcmds != NULL)
6433 		download_consts[QOUTFIFO_OFFSET] += 32;
6434 	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
6435 	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
6436 	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
6437 	sg_prefetch_cnt = ahc->pci_cachesize;
6438 	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
6439 		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
6440 	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
6441 	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
6442 	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
6443 
6444 	cur_patch = patches;
6445 	downloaded = 0;
6446 	skip_addr = 0;
6447 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
6448 	ahc_outb(ahc, SEQADDR0, 0);
6449 	ahc_outb(ahc, SEQADDR1, 0);
6450 
6451 	for (i = 0; i < sizeof(seqprog)/4; i++) {
6452 		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
6453 			/*
6454 			 * Don't download this instruction as it
6455 			 * is in a patch that was removed.
6456 			 */
6457 			continue;
6458 		}
6459 		/*
6460 		 * Move through the CS table until we find a CS
6461 		 * that might apply to this instruction.
6462 		 */
6463 		for (; cur_cs < num_critical_sections; cur_cs++) {
6464 			if (critical_sections[cur_cs].end <= i) {
6465 				if (begin_set[cs_count] == TRUE
6466 				 && end_set[cs_count] == FALSE) {
6467 					cs_table[cs_count].end = downloaded;
6468 				 	end_set[cs_count] = TRUE;
6469 					cs_count++;
6470 				}
6471 				continue;
6472 			}
6473 			if (critical_sections[cur_cs].begin <= i
6474 			 && begin_set[cs_count] == FALSE) {
6475 				cs_table[cs_count].begin = downloaded;
6476 				begin_set[cs_count] = TRUE;
6477 			}
6478 			break;
6479 		}
6480 		ahc_download_instr(ahc, i, download_consts);
6481 		downloaded++;
6482 	}
6483 
6484 	ahc->num_critical_sections = cs_count;
6485 	if (cs_count != 0) {
6486 
6487 		cs_count *= sizeof(struct cs);
6488 		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
6489 		if (ahc->critical_sections == NULL)
6490 			panic("ahc_loadseq: Could not malloc");
6491 		memcpy(ahc->critical_sections, cs_table, cs_count);
6492 	}
6493 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
6494 	ahc_restart(ahc);
6495 
6496 	if (bootverbose) {
6497 		printf(" %d instructions downloaded\n", downloaded);
6498 		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
6499 		       ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
6500 	}
6501 }
6502 
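/*
 * Determine whether the instruction at start_instr should be downloaded.
 * Patches whose predicate function returns zero cause their span of
 * instructions to be skipped; *skip_addr tracks the end of the current
 * skipped region and *start_patch is advanced past patches already
 * considered.  Returns 0 if the instruction is being skipped, 1 if it
 * should be downloaded.
 */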
6503 static int
6504 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
6505 		u_int start_instr, u_int *skip_addr)
6506 {
6507 	struct	patch *cur_patch;
6508 	struct	patch *last_patch;
6509 	u_int	num_patches;
6510 
6511 	num_patches = sizeof(patches)/sizeof(struct patch);
6512 	last_patch = &patches[num_patches];
6513 	cur_patch = *start_patch;
6514 
6515 	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
6516 
6517 		if (cur_patch->patch_func(ahc) == 0) {
6518 
6519 			/* Start rejecting code */
6520 			*skip_addr = start_instr + cur_patch->skip_instr;
6521 			cur_patch += cur_patch->skip_patch;
6522 		} else {
6523 			/* Accepted this patch.  Advance to the next
6524 			 * one and wait for our instruction pointer to
6525 			 * hit this point.
6526 			 */
6527 			cur_patch++;
6528 		}
6529 	}
6530 
6531 	*start_patch = cur_patch;
6532 	if (start_instr < *skip_addr)
6533 		/* Still skipping */
6534 		return (0);
6535 
6536 	return (1);
6537 }
6538 
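/*
 * Download a single sequencer instruction.  Jump and call targets are
 * relocated to account for instructions removed by patches, immediates
 * flagged by the parity bit are replaced with entries from the download
 * constant table, and the final image is either given odd parity (Ultra2
 * parts) or compressed into the older instruction format before being
 * written to SEQRAM.
 */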
6539 static void
6540 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
6541 {
6542 	union	ins_formats instr;
6543 	struct	ins_format1 *fmt1_ins;
6544 	struct	ins_format3 *fmt3_ins;
6545 	u_int	opcode;
6546 
6547 	/*
6548 	 * The firmware is always compiled into a little endian format.
6549 	 */
6550 	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
6551 
6552 	fmt1_ins = &instr.format1;
6553 	fmt3_ins = NULL;
6554 
6555 	/* Pull the opcode */
6556 	opcode = instr.format1.opcode;
6557 	switch (opcode) {
6558 	case AIC_OP_JMP:
6559 	case AIC_OP_JC:
6560 	case AIC_OP_JNC:
6561 	case AIC_OP_CALL:
6562 	case AIC_OP_JNE:
6563 	case AIC_OP_JNZ:
6564 	case AIC_OP_JE:
6565 	case AIC_OP_JZ:
6566 	{
6567 		struct patch *cur_patch;
6568 		int address_offset;
6569 		u_int address;
6570 		u_int skip_addr;
6571 		u_int i;
6572 
6573 		fmt3_ins = &instr.format3;
6574 		address_offset = 0;
6575 		address = fmt3_ins->address;
6576 		cur_patch = patches;
6577 		skip_addr = 0;
6578 
6579 		for (i = 0; i < address;) {
6580 
6581 			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
6582 
6583 			if (skip_addr > i) {
6584 				int end_addr;
6585 
6586 				end_addr = MIN(address, skip_addr);
6587 				address_offset += end_addr - i;
6588 				i = skip_addr;
6589 			} else {
6590 				i++;
6591 			}
6592 		}
6593 		address -= address_offset;
6594 		fmt3_ins->address = address;
6595 		/* FALLTHROUGH */
6596 	}
6597 	case AIC_OP_OR:
6598 	case AIC_OP_AND:
6599 	case AIC_OP_XOR:
6600 	case AIC_OP_ADD:
6601 	case AIC_OP_ADC:
6602 	case AIC_OP_BMOV:
6603 		if (fmt1_ins->parity != 0) {
6604 			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
6605 		}
6606 		fmt1_ins->parity = 0;
6607 		if ((ahc->features & AHC_CMD_CHAN) == 0
6608 		 && opcode == AIC_OP_BMOV) {
6609 			/*
6610 			 * Block move was added at the same time
6611 			 * as the command channel.  Verify that
6612 			 * this is only a move of a single element
6613 			 * and convert the BMOV to a MOV
6614 			 * (AND with an immediate of FF).
6615 			 */
6616 			if (fmt1_ins->immediate != 1)
6617 				panic("%s: BMOV not supported\n",
6618 				      ahc_name(ahc));
6619 			fmt1_ins->opcode = AIC_OP_AND;
6620 			fmt1_ins->immediate = 0xff;
6621 		}
6622 		/* FALLTHROUGH */
6623 	case AIC_OP_ROL:
6624 		if ((ahc->features & AHC_ULTRA2) != 0) {
6625 			int i, count;
6626 
6627 			/* Calculate odd parity for the instruction */
6628 			for (i = 0, count = 0; i < 31; i++) {
6629 				uint32_t mask;
6630 
6631 				mask = 0x01 << i;
6632 				if ((instr.integer & mask) != 0)
6633 					count++;
6634 			}
6635 			if ((count & 0x01) == 0)
6636 				instr.format1.parity = 1;
6637 		} else {
6638 			/* Compress the instruction for older sequencers */
6639 			if (fmt3_ins != NULL) {
6640 				instr.integer =
6641 					fmt3_ins->immediate
6642 				      | (fmt3_ins->source << 8)
6643 				      | (fmt3_ins->address << 16)
6644 				      |	(fmt3_ins->opcode << 25);
6645 			} else {
6646 				instr.integer =
6647 					fmt1_ins->immediate
6648 				      | (fmt1_ins->source << 8)
6649 				      | (fmt1_ins->destination << 16)
6650 				      |	(fmt1_ins->ret << 24)
6651 				      |	(fmt1_ins->opcode << 25);
6652 			}
6653 		}
6654 		/* The sequencer is a little endian cpu */
6655 		instr.integer = ahc_htole32(instr.integer);
6656 		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
6657 		break;
6658 	default:
6659 		panic("Unknown opcode encountered in seq program");
6660 		break;
6661 	}
6662 }
6663 
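/*
 * Pretty-print a register value: the register name and hex value are
 * printed, followed by the names of any bit fields from the parse table
 * that are set, with output wrapped once *cur_column passes wrap_point.
 * Returns the number of characters printed.
 */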
6664 int
6665 ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries,
6666 		   const char *name, u_int address, u_int value,
6667 		   u_int *cur_column, u_int wrap_point)
6668 {
6669 	int	printed;
6670 	u_int	printed_mask;
6671 
6672 	if (cur_column != NULL && *cur_column >= wrap_point) {
6673 		printf("\n");
6674 		*cur_column = 0;
6675 	}
6676 	printed = printf("%s[0x%x]", name, value);
6677 	if (table == NULL) {
6678 		printed += printf(" ");
6679 		*cur_column += printed;
6680 		return (printed);
6681 	}
6682 	printed_mask = 0;
6683 	while (printed_mask != 0xFF) {
6684 		int entry;
6685 
6686 		for (entry = 0; entry < num_entries; entry++) {
6687 			if (((value & table[entry].mask)
6688 			  != table[entry].value)
6689 			 || ((printed_mask & table[entry].mask)
6690 			  == table[entry].mask))
6691 				continue;
6692 
6693 			printed += printf("%s%s",
6694 					  printed_mask == 0 ? ":(" : "|",
6695 					  table[entry].name);
6696 			printed_mask |= table[entry].mask;
6697 
6698 			break;
6699 		}
6700 		if (entry >= num_entries)
6701 			break;
6702 	}
6703 	if (printed_mask != 0)
6704 		printed += printf(") ");
6705 	else
6706 		printed += printf(" ");
6707 	if (cur_column != NULL)
6708 		*cur_column += printed;
6709 	return (printed);
6710 }
6711 
6712 void
6713 ahc_dump_card_state(struct ahc_softc *ahc)
6714 {
6715 	struct	scb *scb;
6716 	struct	scb_tailq *untagged_q;
6717 	u_int	cur_col;
6718 	int	paused;
6719 	int	target;
6720 	int	maxtarget;
6721 	int	i;
6722 	uint8_t last_phase;
6723 	uint8_t qinpos;
6724 	uint8_t qintail;
6725 	uint8_t qoutpos;
6726 	uint8_t scb_index;
6727 	uint8_t saved_scbptr;
6728 
6729 	if (ahc_is_paused(ahc)) {
6730 		paused = 1;
6731 	} else {
6732 		paused = 0;
6733 		ahc_pause(ahc);
6734 	}
6735 
6736 	saved_scbptr = ahc_inb(ahc, SCBPTR);
6737 	last_phase = ahc_inb(ahc, LASTPHASE);
6738 	printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
6739 	       "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
6740 	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
6741 	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
6742 	if (paused)
6743 		printf("Card was paused\n");
6744 	printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
6745 	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
6746 	       ahc_inb(ahc, ARG_2));
6747 	printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
6748 	       ahc_inb(ahc, SCBPTR));
6749 	cur_col = 0;
6750 	if ((ahc->features & AHC_DT) != 0)
6751 		ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50);
6752 	ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50);
6753 	ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50);
6754 	ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50);
6755 	ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50);
6756 	ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50);
6757 	ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50);
6758 	ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50);
6759 	ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50);
6760 	ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50);
6761 	ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50);
6762 	ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50);
6763 	ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50);
6764 	ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50);
6765 	ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50);
6766 	ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50);
6767 	ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50);
6768 	ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50);
6769 	ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50);
6770 	if (cur_col != 0)
6771 		printf("\n");
6772 	printf("STACK:");
6773 	for (i = 0; i < STACK_SIZE; i++)
6774 	       printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8));
6775 	printf("\nSCB count = %d\n", ahc->scb_data->numscbs);
6776 	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
6777 	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
6778 	/* QINFIFO */
6779 	printf("QINFIFO entries: ");
6780 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6781 		qinpos = ahc_inb(ahc, SNSCB_QOFF);
6782 		ahc_outb(ahc, SNSCB_QOFF, qinpos);
6783 	} else
6784 		qinpos = ahc_inb(ahc, QINPOS);
6785 	qintail = ahc->qinfifonext;
6786 	while (qinpos != qintail) {
6787 		printf("%d ", ahc->qinfifo[qinpos]);
6788 		qinpos++;
6789 	}
6790 	printf("\n");
6791 
6792 	printf("Waiting Queue entries: ");
6793 	scb_index = ahc_inb(ahc, WAITING_SCBH);
6794 	i = 0;
6795 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6796 		ahc_outb(ahc, SCBPTR, scb_index);
6797 		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6798 		scb_index = ahc_inb(ahc, SCB_NEXT);
6799 	}
6800 	printf("\n");
6801 
6802 	printf("Disconnected Queue entries: ");
6803 	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
6804 	i = 0;
6805 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6806 		ahc_outb(ahc, SCBPTR, scb_index);
6807 		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6808 		scb_index = ahc_inb(ahc, SCB_NEXT);
6809 	}
6810 	printf("\n");
6811 
6812 	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
6813 	printf("QOUTFIFO entries: ");
6814 	qoutpos = ahc->qoutfifonext;
6815 	i = 0;
6816 	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
6817 		printf("%d ", ahc->qoutfifo[qoutpos]);
6818 		qoutpos++;
6819 	}
6820 	printf("\n");
6821 
6822 	printf("Sequencer Free SCB List: ");
6823 	scb_index = ahc_inb(ahc, FREE_SCBH);
6824 	i = 0;
6825 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6826 		ahc_outb(ahc, SCBPTR, scb_index);
6827 		printf("%d ", scb_index);
6828 		scb_index = ahc_inb(ahc, SCB_NEXT);
6829 	}
6830 	printf("\n");
6831 
6832 	printf("Sequencer SCB Info: ");
6833 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
6834 		ahc_outb(ahc, SCBPTR, i);
6835 		cur_col = printf("\n%3d ", i);
6836 
6837 		ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60);
6838 		ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60);
6839 		ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60);
6840 		ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
6841 	}
6842 	printf("\n");
6843 
6844 	printf("Pending list: ");
6845 	i = 0;
6846 	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6847 		if (i++ > 256)
6848 			break;
6849 		cur_col = printf("\n%3d ", scb->hscb->tag);
6850 		ahc_scb_control_print(scb->hscb->control, &cur_col, 60);
6851 		ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60);
6852 		ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60);
6853 		if ((ahc->flags & AHC_PAGESCBS) == 0) {
6854 			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
6855 			printf("(");
6856 			ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL),
6857 					      &cur_col, 60);
6858 			ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
6859 			printf(")");
6860 		}
6861 	}
6862 	printf("\n");
6863 
6864 	printf("Kernel Free SCB list: ");
6865 	i = 0;
6866 	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
6867 		if (i++ > 256)
6868 			break;
6869 		printf("%d ", scb->hscb->tag);
6870 	}
6871 	printf("\n");
6872 
6873 	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
6874 	for (target = 0; target <= maxtarget; target++) {
6875 		untagged_q = &ahc->untagged_queues[target];
6876 		if (TAILQ_FIRST(untagged_q) == NULL)
6877 			continue;
6878 		printf("Untagged Q(%d): ", target);
6879 		i = 0;
6880 		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
6881 			if (i++ > 256)
6882 				break;
6883 			printf("%d ", scb->hscb->tag);
6884 		}
6885 		printf("\n");
6886 	}
6887 
6888 	ahc_platform_dump_card_state(ahc);
6889 	printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
6890 	ahc_outb(ahc, SCBPTR, saved_scbptr);
6891 	if (paused == 0)
6892 		ahc_unpause(ahc);
6893 }
6894 
6895 /************************* Target Mode ****************************************/
6896 #ifdef AHC_TARGET_MODE
6897 cam_status
6898 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
6899 		    struct ahc_tmode_tstate **tstate,
6900 		    struct ahc_tmode_lstate **lstate,
6901 		    int notfound_failure)
6902 {
6903 
6904 	if ((ahc->features & AHC_TARGETMODE) == 0)
6905 		return (CAM_REQ_INVALID);
6906 
6907 	/*
6908 	 * Handle the 'black hole' device that sucks up
6909 	 * requests to unattached luns on enabled targets.
6910 	 */
6911 	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
6912 	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
6913 		*tstate = NULL;
6914 		*lstate = ahc->black_hole;
6915 	} else {
6916 		u_int max_id;
6917 
6918 		max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
6919 		if (ccb->ccb_h.target_id > max_id)
6920 			return (CAM_TID_INVALID);
6921 
6922 		if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
6923 			return (CAM_LUN_INVALID);
6924 
6925 		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
6926 		*lstate = NULL;
6927 		if (*tstate != NULL)
6928 			*lstate =
6929 			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
6930 	}
6931 
6932 	if (notfound_failure != 0 && *lstate == NULL)
6933 		return (CAM_PATH_INVALID);
6934 
6935 	return (CAM_REQ_CMP);
6936 }
6937 
6938 void
6939 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
6940 {
6941 	struct	   ahc_tmode_tstate *tstate;
6942 	struct	   ahc_tmode_lstate *lstate;
6943 	struct	   ccb_en_lun *cel;
6944 	cam_status status;
6945 	u_int	   target;
6946 	u_int	   lun;
6947 	u_int	   target_mask;
6948 	u_int	   our_id;
6949 	u_long	   s;
6950 	char	   channel;
6951 
6952 	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
6953 				     /*notfound_failure*/FALSE);
6954 
6955 	if (status != CAM_REQ_CMP) {
6956 		ccb->ccb_h.status = status;
6957 		return;
6958 	}
6959 
6960 	if (cam_sim_bus(sim) == 0)
6961 		our_id = ahc->our_id;
6962 	else
6963 		our_id = ahc->our_id_b;
6964 
6965 	if (ccb->ccb_h.target_id != our_id) {
6966 		/*
6967 		 * our_id represents our initiator ID, or
6968 		 * the ID of the first target to have an
6969 		 * enabled lun in target mode.  There are
6970 		 * two cases that may preclude enabling a
6971 		 * target id other than our_id.
6972 		 *
6973 		 *   o our_id is for an active initiator role.
6974 		 *     Since the hardware does not support
6975 		 *     reselections to the initiator role at
6976 		 *     anything other than our_id, and our_id
6977 		 *     is used by the hardware to indicate the
6978 		 *     ID to use for both select-out and
6979 		 *     reselect-out operations, the only target
6980 		 *     ID we can support in this mode is our_id.
6981 		 *
6982 		 *   o The MULTARGID feature is not available and
6983 		 *     a previous target mode ID has been enabled.
6984 		 */
6985 		if ((ahc->features & AHC_MULTIROLE) != 0) {
6986 
6987 			if ((ahc->features & AHC_MULTI_TID) != 0
6988 		   	 && (ahc->flags & AHC_INITIATORROLE) != 0) {
6989 				/*
6990 				 * Only allow additional targets if
6991 				 * the initiator role is disabled.
6992 				 * The hardware cannot handle a re-select-in
6993 				 * on the initiator id during a re-select-out
6994 				 * on a different target id.
6995 				 */
6996 				status = CAM_TID_INVALID;
6997 			} else if ((ahc->flags & AHC_INITIATORROLE) != 0
6998 				|| ahc->enabled_luns > 0) {
6999 				/*
7000 				 * Only allow our target id to change
7001 				 * if the initiator role is not configured
7002 				 * and there are no enabled luns which
7003 				 * are attached to the currently registered
7004 				 * scsi id.
7005 				 */
7006 				status = CAM_TID_INVALID;
7007 			}
7008 		} else if ((ahc->features & AHC_MULTI_TID) == 0
7009 			&& ahc->enabled_luns > 0) {
7010 
7011 			status = CAM_TID_INVALID;
7012 		}
7013 	}
7014 
7015 	if (status != CAM_REQ_CMP) {
7016 		ccb->ccb_h.status = status;
7017 		return;
7018 	}
7019 
7020 	/*
7021 	 * We now have an id that is valid.
7022 	 * If we aren't in target mode, switch modes.
7023 	 */
7024 	if ((ahc->flags & AHC_TARGETROLE) == 0
7025 	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
7026 		u_long	s;
7027 
7028 		printf("Configuring Target Mode\n");
7029 		ahc_lock(ahc, &s);
7030 		if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
7031 			ccb->ccb_h.status = CAM_BUSY;
7032 			ahc_unlock(ahc, &s);
7033 			return;
7034 		}
7035 		ahc->flags |= AHC_TARGETROLE;
7036 		if ((ahc->features & AHC_MULTIROLE) == 0)
7037 			ahc->flags &= ~AHC_INITIATORROLE;
7038 		ahc_pause(ahc);
7039 		ahc_loadseq(ahc);
7040 		ahc_unlock(ahc, &s);
7041 	}
7042 	cel = &ccb->cel;
7043 	target = ccb->ccb_h.target_id;
7044 	lun = ccb->ccb_h.target_lun;
7045 	channel = SIM_CHANNEL(ahc, sim);
7046 	target_mask = 0x01 << target;
7047 	if (channel == 'B')
7048 		target_mask <<= 8;
7049 
7050 	if (cel->enable != 0) {
7051 		u_int scsiseq;
7052 
7053 		/* Are we already enabled?? */
7054 		if (lstate != NULL) {
7055 			xpt_print_path(ccb->ccb_h.path);
7056 			printf("Lun already enabled\n");
7057 			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
7058 			return;
7059 		}
7060 
7061 		if (cel->grp6_len != 0
7062 		 || cel->grp7_len != 0) {
7063 			/*
7064 			 * Don't (yet?) support vendor
7065 			 * specific commands.
7066 			 */
7067 			ccb->ccb_h.status = CAM_REQ_INVALID;
7068 			printf("Non-zero Group Codes\n");
7069 			return;
7070 		}
7071 
7072 		/*
7073 		 * Seems to be okay.
7074 		 * Set up our data structures.
7075 		 */
7076 		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
7077 			tstate = ahc_alloc_tstate(ahc, target, channel);
7078 			if (tstate == NULL) {
7079 				xpt_print_path(ccb->ccb_h.path);
7080 				printf("Couldn't allocate tstate\n");
7081 				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7082 				return;
7083 			}
7084 		}
7085 		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
7086 		if (lstate == NULL) {
7087 			xpt_print_path(ccb->ccb_h.path);
7088 			printf("Couldn't allocate lstate\n");
7089 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7090 			return;
7091 		}
7092 		memset(lstate, 0, sizeof(*lstate));
7093 		status = xpt_create_path(&lstate->path, /*periph*/NULL,
7094 					 xpt_path_path_id(ccb->ccb_h.path),
7095 					 xpt_path_target_id(ccb->ccb_h.path),
7096 					 xpt_path_lun_id(ccb->ccb_h.path));
7097 		if (status != CAM_REQ_CMP) {
7098 			free(lstate, M_DEVBUF);
7099 			xpt_print_path(ccb->ccb_h.path);
7100 			printf("Couldn't allocate path\n");
7101 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7102 			return;
7103 		}
7104 		SLIST_INIT(&lstate->accept_tios);
7105 		SLIST_INIT(&lstate->immed_notifies);
7106 		ahc_lock(ahc, &s);
7107 		ahc_pause(ahc);
7108 		if (target != CAM_TARGET_WILDCARD) {
7109 			tstate->enabled_luns[lun] = lstate;
7110 			ahc->enabled_luns++;
7111 
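			/*
			 * MULTI_TID controllers answer selections for every
			 * ID set in the TARGID mask, so simply add this
			 * target to the mask.  Older controllers respond
			 * only on the single ID programmed into SCSIID, so
			 * that register must be rewritten instead.
			 */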
7112 			if ((ahc->features & AHC_MULTI_TID) != 0) {
7113 				u_int targid_mask;
7114 
7115 				targid_mask = ahc_inb(ahc, TARGID)
7116 					    | (ahc_inb(ahc, TARGID + 1) << 8);
7117 
7118 				targid_mask |= target_mask;
7119 				ahc_outb(ahc, TARGID, targid_mask);
7120 				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
7121 
7122 				ahc_update_scsiid(ahc, targid_mask);
7123 			} else {
7124 				u_int our_id;
7125 				char  channel;
7126 
7127 				channel = SIM_CHANNEL(ahc, sim);
7128 				our_id = SIM_SCSI_ID(ahc, sim);
7129 
7130 				/*
7131 				 * This can only happen if selections
7132 				 * are not enabled
7133 				 */
7134 				if (target != our_id) {
7135 					u_int sblkctl;
7136 					char  cur_channel;
7137 					int   swap;
7138 
7139 					sblkctl = ahc_inb(ahc, SBLKCTL);
7140 					cur_channel = (sblkctl & SELBUSB)
7141 						    ? 'B' : 'A';
7142 					if ((ahc->features & AHC_TWIN) == 0)
7143 						cur_channel = 'A';
7144 					swap = cur_channel != channel;
7145 					if (channel == 'A')
7146 						ahc->our_id = target;
7147 					else
7148 						ahc->our_id_b = target;
7149 
7150 					if (swap)
7151 						ahc_outb(ahc, SBLKCTL,
7152 							 sblkctl ^ SELBUSB);
7153 
7154 					ahc_outb(ahc, SCSIID, target);
7155 
7156 					if (swap)
7157 						ahc_outb(ahc, SBLKCTL, sblkctl);
7158 				}
7159 			}
7160 		} else
7161 			ahc->black_hole = lstate;
7162 		/* Allow select-in operations */
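		/*
		 * Selections are only enabled once a wildcard (black hole)
		 * lun exists to absorb commands for luns that have not been
		 * explicitly enabled and at least one real lun is active.
		 */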
7163 		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
7164 			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
7165 			scsiseq |= ENSELI;
7166 			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
7167 			scsiseq = ahc_inb(ahc, SCSISEQ);
7168 			scsiseq |= ENSELI;
7169 			ahc_outb(ahc, SCSISEQ, scsiseq);
7170 		}
7171 		ahc_unpause(ahc);
7172 		ahc_unlock(ahc, &s);
7173 		ccb->ccb_h.status = CAM_REQ_CMP;
7174 		xpt_print_path(ccb->ccb_h.path);
7175 		printf("Lun now enabled for target mode\n");
7176 	} else {
7177 		struct scb *scb;
7178 		int i, empty;
7179 
7180 		if (lstate == NULL) {
7181 			ccb->ccb_h.status = CAM_LUN_INVALID;
7182 			return;
7183 		}
7184 
7185 		ahc_lock(ahc, &s);
7186 
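		/*
		 * The lun cannot be disabled while CTIOs, ATIOs, or
		 * immediate notifies are still outstanding for it; the
		 * checks below reject the request in that case.
		 */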
7187 		ccb->ccb_h.status = CAM_REQ_CMP;
7188 		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
7189 			struct ccb_hdr *ccbh;
7190 
7191 			ccbh = &scb->io_ctx->ccb_h;
7192 			if (ccbh->func_code == XPT_CONT_TARGET_IO
7193 			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
7194 				printf("CTIO pending\n");
7195 				ccb->ccb_h.status = CAM_REQ_INVALID;
7196 				ahc_unlock(ahc, &s);
7197 				return;
7198 			}
7199 		}
7200 
7201 		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
7202 			printf("ATIOs pending\n");
7203 			ccb->ccb_h.status = CAM_REQ_INVALID;
7204 		}
7205 
7206 		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
7207 			printf("INOTs pending\n");
7208 			ccb->ccb_h.status = CAM_REQ_INVALID;
7209 		}
7210 
7211 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
7212 			ahc_unlock(ahc, &s);
7213 			return;
7214 		}
7215 
7216 		xpt_print_path(ccb->ccb_h.path);
7217 		printf("Target mode disabled\n");
7218 		xpt_free_path(lstate->path);
7219 		free(lstate, M_DEVBUF);
7220 
7221 		ahc_pause(ahc);
7222 		/* Can we clean up the target too? */
7223 		if (target != CAM_TARGET_WILDCARD) {
7224 			tstate->enabled_luns[lun] = NULL;
7225 			ahc->enabled_luns--;
7226 			for (empty = 1, i = 0; i < 8; i++)
7227 				if (tstate->enabled_luns[i] != NULL) {
7228 					empty = 0;
7229 					break;
7230 				}
7231 
7232 			if (empty) {
7233 				ahc_free_tstate(ahc, target, channel,
7234 						/*force*/FALSE);
7235 				if (ahc->features & AHC_MULTI_TID) {
7236 					u_int targid_mask;
7237 
7238 					targid_mask = ahc_inb(ahc, TARGID)
7239 						    | (ahc_inb(ahc, TARGID + 1)
7240 						       << 8);
7241 
7242 					targid_mask &= ~target_mask;
7243 					ahc_outb(ahc, TARGID, targid_mask);
7244 					ahc_outb(ahc, TARGID+1,
7245 					 	 (targid_mask >> 8));
7246 					ahc_update_scsiid(ahc, targid_mask);
7247 				}
7248 			}
7249 		} else {
7250 
7251 			ahc->black_hole = NULL;
7252 
7253 			/*
7254 			 * We can't allow selections without
7255 			 * our black hole device.
7256 			 */
7257 			empty = TRUE;
7258 		}
7259 		if (ahc->enabled_luns == 0) {
7260 			/* Disallow select-in */
7261 			u_int scsiseq;
7262 
7263 			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
7264 			scsiseq &= ~ENSELI;
7265 			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
7266 			scsiseq = ahc_inb(ahc, SCSISEQ);
7267 			scsiseq &= ~ENSELI;
7268 			ahc_outb(ahc, SCSISEQ, scsiseq);
7269 
7270 			if ((ahc->features & AHC_MULTIROLE) == 0) {
7271 				printf("Configuring Initiator Mode\n");
7272 				ahc->flags &= ~AHC_TARGETROLE;
7273 				ahc->flags |= AHC_INITIATORROLE;
7274 				ahc_pause(ahc);
7275 				ahc_loadseq(ahc);
7276 			}
7277 		}
7278 		ahc_unpause(ahc);
7279 		ahc_unlock(ahc, &s);
7280 	}
7281 }
7282 
7283 static void
7284 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
7285 {
7286 	u_int scsiid_mask;
7287 	u_int scsiid;
7288 
7289 	if ((ahc->features & AHC_MULTI_TID) == 0)
7290 		panic("ahc_update_scsiid called on non-multitid unit\n");
7291 
7292 	/*
7293 	 * Since we will rely on the TARGID mask
7294 	 * for selection enables, ensure that OID
7295 	 * in SCSIID is not set to some other ID
7296 	 * that we don't want to allow selections on.
7297 	 */
7298 	if ((ahc->features & AHC_ULTRA2) != 0)
7299 		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
7300 	else
7301 		scsiid = ahc_inb(ahc, SCSIID);
7302 	scsiid_mask = 0x1 << (scsiid & OID);
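	/*
	 * If the ID currently in OID is no longer part of the TARGID
	 * mask, fall back to the lowest enabled target ID, or to our_id
	 * if the mask is empty.
	 */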
7303 	if ((targid_mask & scsiid_mask) == 0) {
7304 		u_int our_id;
7305 
7306 		/* ffs counts from 1 */
7307 		our_id = ffs(targid_mask);
7308 		if (our_id == 0)
7309 			our_id = ahc->our_id;
7310 		else
7311 			our_id--;
7312 		scsiid &= TID;
7313 		scsiid |= our_id;
7314 	}
7315 	if ((ahc->features & AHC_ULTRA2) != 0)
7316 		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
7317 	else
7318 		ahc_outb(ahc, SCSIID, scsiid);
7319 }
7320 
7321 void
7322 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
7323 {
7324 	struct target_cmd *cmd;
7325 
7326 	/*
7327 	 * If the card supports auto-access pause,
7328 	 * we can access the card directly regardless
7329 	 * of whether it is paused or not.
7330 	 */
7331 	if ((ahc->features & AHC_AUTOPAUSE) != 0)
7332 		paused = TRUE;
7333 
7334 	ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
7335 	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
7336 
7337 		/*
7338 		 * Only advance through the queue if we
7339 		 * have the resources to process the command.
7340 		 */
7341 		if (ahc_handle_target_cmd(ahc, cmd) != 0)
7342 			break;
7343 
7344 		cmd->cmd_valid = 0;
7345 		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
7346 				ahc->shared_data_dmamap,
7347 				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
7348 				sizeof(struct target_cmd),
7349 				BUS_DMASYNC_PREREAD);
7350 		ahc->tqinfifonext++;
7351 
7352 		/*
7353 		 * Lazily update our position in the target mode incoming
7354 		 * command queue as seen by the sequencer.
7355 		 */
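		/*
		 * The write-back happens only once every HOST_TQINPOS
		 * entries (the mask arithmetic assumes a power-of-two
		 * value).  Controllers with the HS_MAILBOX feature can be
		 * updated without pausing the sequencer; others require a
		 * pause around the KERNEL_TQINPOS write.
		 */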
7356 		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
7357 			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
7358 				u_int hs_mailbox;
7359 
7360 				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
7361 				hs_mailbox &= ~HOST_TQINPOS;
7362 				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
7363 				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
7364 			} else {
7365 				if (!paused)
7366 					ahc_pause(ahc);
7367 				ahc_outb(ahc, KERNEL_TQINPOS,
7368 					 ahc->tqinfifonext & HOST_TQINPOS);
7369 				if (!paused)
7370 					ahc_unpause(ahc);
7371 			}
7372 		}
7373 	}
7374 }
7375 
7376 static int
7377 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
7378 {
7379 	struct	  ahc_tmode_tstate *tstate;
7380 	struct	  ahc_tmode_lstate *lstate;
7381 	struct	  ccb_accept_tio *atio;
7382 	uint8_t *byte;
7383 	int	  initiator;
7384 	int	  target;
7385 	int	  lun;
7386 
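	/*
	 * For an incoming selection the roles are reversed: the TID
	 * field of the saved SCSIID identifies the selecting initiator,
	 * while OID is the target ID we were selected as.
	 */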
7387 	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
7388 	target = SCSIID_OUR_ID(cmd->scsiid);
7389 	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
7390 
7391 	byte = cmd->bytes;
7392 	tstate = ahc->enabled_targets[target];
7393 	lstate = NULL;
7394 	if (tstate != NULL)
7395 		lstate = tstate->enabled_luns[lun];
7396 
7397 	/*
7398 	 * Commands for disabled luns go to the black hole driver.
7399 	 */
7400 	if (lstate == NULL)
7401 		lstate = ahc->black_hole;
7402 
7403 	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
7404 	if (atio == NULL) {
7405 		ahc->flags |= AHC_TQINFIFO_BLOCKED;
7406 		/*
7407 		 * Wait for more ATIOs from the peripheral driver for this lun.
7408 		 */
7409 		if (bootverbose)
7410 			printf("%s: ATIOs exhausted\n", ahc_name(ahc));
7411 		return (1);
7412 	} else
7413 		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
7414 #if 0
7415 	printf("Incoming command from %d for %d:%d%s\n",
7416 	       initiator, target, lun,
7417 	       lstate == ahc->black_hole ? "(Black Holed)" : "");
7418 #endif
7419 	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
7420 
7421 	if (lstate == ahc->black_hole) {
7422 		/* Fill in the wildcards */
7423 		atio->ccb_h.target_id = target;
7424 		atio->ccb_h.target_lun = lun;
7425 	}
7426 
7427 	/*
7428 	 * Package it up and send it off to
7429 	 * whoever has this lun enabled.
7430 	 */
7431 	atio->sense_len = 0;
7432 	atio->init_id = initiator;
7433 	if (byte[0] != 0xFF) {
7434 		/* Tag was included */
7435 		atio->tag_action = *byte++;
7436 		atio->tag_id = *byte++;
7437 		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
7438 	} else {
7439 		atio->ccb_h.flags = 0;
7440 	}
7441 	byte++;
7442 
7443 	/* Okay.  Now determine the cdb size based on the command code */
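	/*
	 * The length follows from the SCSI group code (top three bits
	 * of the opcode): group 0 is 6 bytes, groups 1 and 2 are 10
	 * bytes, group 5 is 12 bytes, and group 4 is 16 bytes.  Group 3
	 * is reserved and groups 6 and 7 are vendor unique.  For
	 * example, READ(10) (opcode 0x28) falls in group 1.
	 */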
7444 	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
7445 	case 0:
7446 		atio->cdb_len = 6;
7447 		break;
7448 	case 1:
7449 	case 2:
7450 		atio->cdb_len = 10;
7451 		break;
7452 	case 4:
7453 		atio->cdb_len = 16;
7454 		break;
7455 	case 5:
7456 		atio->cdb_len = 12;
7457 		break;
7458 	case 3:
7459 	default:
7460 		/* Only copy the opcode. */
7461 		atio->cdb_len = 1;
7462 		printf("Reserved or VU command code type encountered\n");
7463 		break;
7464 	}
7465 
7466 	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
7467 
7468 	atio->ccb_h.status |= CAM_CDB_RECVD;
7469 
7470 	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
7471 		/*
7472 		 * We weren't allowed to disconnect.
7473 		 * We're hanging on the bus until a
7474 		 * continue target I/O comes in response
7475 		 * to this accept tio.
7476 		 */
7477 #if 0
7478 		printf("Received Immediate Command %d:%d:%d - %p\n",
7479 		       initiator, target, lun, ahc->pending_device);
7480 #endif
7481 		ahc->pending_device = lstate;
7482 		ahc_freeze_ccb((union ccb *)atio);
7483 		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
7484 	}
7485 	xpt_done((union ccb*)atio);
7486 	return (0);
7487 }
7488 
7489 #endif
7490