xref: /freebsd/sys/dev/aic7xxx/aic7xxx.c (revision 77a0943ded95b9e6438f7db70c4a28e4d93946d4)
1 /*
2  * Core routines and tables shareable across OS platforms.
3  *
4  * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * Alternatively, this software may be distributed under the terms of the
17  * GNU Public License ("GPL").
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $Id: //depot/src/aic7xxx/aic7xxx.c#18 $
32  *
33  * $FreeBSD$
34  */
35 
36 #ifdef	__linux__
37 #include "aic7xxx_linux.h"
38 #include "aic7xxx_inline.h"
39 #include "aicasm/aicasm_insformat.h"
40 #endif
41 
42 #ifdef	__FreeBSD__
43 #include <dev/aic7xxx/aic7xxx_freebsd.h>
44 #include <dev/aic7xxx/aic7xxx_inline.h>
45 #include <dev/aic7xxx/aicasm/aicasm_insformat.h>
46 #endif
47 
48 /****************************** Softc Data ************************************/
49 struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);
50 
51 /***************************** Lookup Tables **********************************/
52 char *ahc_chip_names[] =
53 {
54 	"NONE",
55 	"aic7770",
56 	"aic7850",
57 	"aic7855",
58 	"aic7859",
59 	"aic7860",
60 	"aic7870",
61 	"aic7880",
62 	"aic7895",
63 	"aic7895C",
64 	"aic7890/91",
65 	"aic7896/97",
66 	"aic7892",
67 	"aic7899"
68 };
69 const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);
70 
71 struct hard_error_entry hard_error[] = {
72 	{ ILLHADDR,	"Illegal Host Access" },
73 	{ ILLSADDR,	"Illegal Sequencer Address referenced" },
74 	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
75 	{ SQPARERR,	"Sequencer Parity Error" },
76 	{ DPARERR,	"Data-path Parity Error" },
77 	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
78 	{ PCIERRSTAT,	"PCI Error detected" },
79 	{ CIOPARERR,	"CIOBUS Parity Error" },
80 };
81 const u_int num_errors = NUM_ELEMENTS(hard_error);
82 
83 struct phase_table_entry phase_table[] =
84 {
85 	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
86 	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
87 	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
88 	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
89 	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
90 	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
91 	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
92 	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
93 	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
94 	{ 0,		MSG_NOOP,		"in unknown phase"	}
95 };
96 
97 /*
98  * In most cases we only wish to iterate over real phases, so
99  * exclude the last element from the count.
100  */
101 const u_int num_phases = NUM_ELEMENTS(phase_table) - 1;
102 
103 /*
104  * Valid SCSIRATE values.  (p. 3-17)
105  * Provides a mapping of transfer periods in ns to the proper value to
106  * stick in the SCSIRATE register.
107  */
108 struct ahc_syncrate ahc_syncrates[] =
109 {
110       /* ultra2    fast/ultra  period     rate */
111 	{ 0x42,      0x000,      9,      "80.0" },
112 	{ 0x03,      0x000,     10,      "40.0" },
113 	{ 0x04,      0x000,     11,      "33.0" },
114 	{ 0x05,      0x100,     12,      "20.0" },
115 	{ 0x06,      0x110,     15,      "16.0" },
116 	{ 0x07,      0x120,     18,      "13.4" },
117 	{ 0x08,      0x000,     25,      "10.0" },
118 	{ 0x19,      0x010,     31,      "8.0"  },
119 	{ 0x1a,      0x020,     37,      "6.67" },
120 	{ 0x1b,      0x030,     43,      "5.7"  },
121 	{ 0x1c,      0x040,     50,      "5.0"  },
122 	{ 0x00,      0x050,     56,      "4.4"  },
123 	{ 0x00,      0x060,     62,      "4.0"  },
124 	{ 0x00,      0x070,     68,      "3.6"  },
125 	{ 0x00,      0x000,      0,      NULL   }
126 };
127 
128 /* Our Sequencer Program */
129 #include "aic7xxx_seq.h"
130 
131 /**************************** Function Declarations ***************************/
132 static struct tmode_tstate*
133 			ahc_alloc_tstate(struct ahc_softc *ahc,
134 					 u_int scsi_id, char channel);
135 static void		ahc_free_tstate(struct ahc_softc *ahc,
136 					u_int scsi_id, char channel, int force);
137 static struct ahc_syncrate*
138 			ahc_devlimited_syncrate(struct ahc_softc *ahc,
139 					        struct ahc_initiator_tinfo *,
140 						u_int *period,
141 						u_int *ppr_options,
142 						role_t role);
143 static void		ahc_update_target_msg_request(struct ahc_softc *ahc,
144 					      struct ahc_devinfo *devinfo,
145 					      struct ahc_initiator_tinfo *tinfo,
146 					      int force, int paused);
147 static void		ahc_update_pending_syncrates(struct ahc_softc *ahc);
148 static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
149 					  struct ahc_devinfo *devinfo);
150 static void		ahc_scb_devinfo(struct ahc_softc *ahc,
151 					struct ahc_devinfo *devinfo,
152 					struct scb *scb);
153 static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
154 						   struct ahc_devinfo *devinfo,
155 						   struct scb *scb);
156 static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
157 					       struct ahc_devinfo *devinfo);
158 static void		ahc_construct_sdtr(struct ahc_softc *ahc,
159 					   struct ahc_devinfo *devinfo,
160 					   u_int period, u_int offset);
161 static void		ahc_construct_wdtr(struct ahc_softc *ahc,
162 					   struct ahc_devinfo *devinfo,
163 					   u_int bus_width);
164 static void		ahc_construct_ppr(struct ahc_softc *ahc,
165 					  struct ahc_devinfo *devinfo,
166 					  u_int period, u_int offset,
167 					  u_int bus_width, u_int ppr_options);
168 static void		ahc_clear_msg_state(struct ahc_softc *ahc);
169 static void		ahc_handle_message_phase(struct ahc_softc *ahc);
170 static int		ahc_sent_msg(struct ahc_softc *ahc,
171 				     u_int msgtype, int full);
172 static int		ahc_parse_msg(struct ahc_softc *ahc,
173 				      struct ahc_devinfo *devinfo);
174 static int		ahc_handle_msg_reject(struct ahc_softc *ahc,
175 					      struct ahc_devinfo *devinfo);
176 static void		ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
177 						struct ahc_devinfo *devinfo);
178 static void		ahc_handle_devreset(struct ahc_softc *ahc,
179 					    struct ahc_devinfo *devinfo,
180 					    cam_status status, char *message,
181 					    int verbose_level);
182 
183 static bus_dmamap_callback_t	ahc_dmamap_cb;
184 static int			ahc_init_scbdata(struct ahc_softc *ahc);
185 static void			ahc_fini_scbdata(struct ahc_softc *ahc);
186 static void		ahc_busy_tcl(struct ahc_softc *ahc,
187 				     u_int tcl, u_int busyid);
188 static void		ahc_qinfifo_requeue(struct ahc_softc *ahc,
189 					    struct scb *prev_scb,
190 					    struct scb *scb);
191 static int		ahc_qinfifo_count(struct ahc_softc *ahc);
192 static u_int		ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
193 						   u_int prev, u_int scbptr);
194 static void		ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
195 static u_int		ahc_rem_wscb(struct ahc_softc *ahc,
196 				     u_int scbpos, u_int prev);
197 static int		ahc_abort_scbs(struct ahc_softc *ahc, int target,
198 				       char channel, int lun, u_int tag,
199 				       role_t role, uint32_t status);
200 static void		ahc_reset_current_bus(struct ahc_softc *ahc);
201 static void		ahc_calc_residual(struct scb *scb);
202 #ifdef AHC_DUMP_SEQ
203 static void		ahc_dumpseq(struct ahc_softc *ahc);
204 #endif
205 static void		ahc_loadseq(struct ahc_softc *ahc);
206 static int		ahc_check_patch(struct ahc_softc *ahc,
207 					struct patch **start_patch,
208 					u_int start_instr, u_int *skip_addr);
209 static void		ahc_download_instr(struct ahc_softc *ahc,
210 					   u_int instrptr, uint8_t *dconsts);
211 #ifdef AHC_TARGET_MODE
212 static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
213 					       struct tmode_lstate *lstate,
214 					       u_int initiator_id,
215 					       u_int event_type,
216 					       u_int event_arg);
217 static void		ahc_update_scsiid(struct ahc_softc *ahc,
218 					  u_int targid_mask);
219 static int		ahc_handle_target_cmd(struct ahc_softc *ahc,
220 					      struct target_cmd *cmd);
221 #endif
222 /************************* Sequencer Execution Control ************************/
223 /*
224  * Restart the sequencer program from address zero
225  */
226 void
227 restart_sequencer(struct ahc_softc *ahc)
228 {
229 	uint16_t stack[4];
230 
231 	pause_sequencer(ahc);
232 	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
233 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
234 	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
235 
236 	/*
237 	 * Ensure that the sequencer's idea of TQINPOS
238 	 * matches our own.  The sequencer increments TQINPOS
239 	 * only after it sees a DMA complete, so a reset could
240 	 * occur before the increment, leaving the kernel believing
241 	 * that the command arrived while the sequencer does not.
242 	 */
243 	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
244 
245 	/* Always allow reselection */
246 	ahc_outb(ahc, SCSISEQ,
247 		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
248 	if ((ahc->features & AHC_CMD_CHAN) != 0) {
249 		/* Ensure that no DMA operations are in progress */
250 		ahc_outb(ahc, CCSCBCNT, 0);
251 		ahc_outb(ahc, CCSGCTL, 0);
252 		ahc_outb(ahc, CCSCBCTL, 0);
253 	}
254 	ahc_outb(ahc, MWI_RESIDUAL, 0);
255 	/*
256 	 * Avoid stack pointer lockup on aic7895 chips where SEQADDR0
257 	 * cannot be changed without first writing to SEQADDR1.  This
258 	 * seems to only happen if an interrupt or pause occurs mid
259 	 * update of the stack pointer (i.e. during a ret).
260 	 */
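	/*
	 * Snapshot the sequencer's return stack; consecutive reads of
	 * the STACK port yield successive stack bytes, low byte first.
	 */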
261 	stack[0] = ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8);
262 	stack[1] = ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8);
263 	stack[2] = ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8);
264 	stack[3] = ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8);
265 	if (stack[0] == stack[1]
266 	 && stack[1] == stack[2]
267 	 && stack[2] == stack[3]
268 	 && stack[0] != 0)
269 		ahc_outb(ahc, SEQADDR1, 0);
270 	ahc_outb(ahc, SEQCTL, FASTMODE);
271 	ahc_outb(ahc, SEQADDR0, 0);
272 	ahc_outb(ahc, SEQADDR1, 0);
273 	unpause_sequencer(ahc);
274 }
275 
276 /************************* Input/Output Queues ********************************/
277 void
278 ahc_run_qoutfifo(struct ahc_softc *ahc)
279 {
280 	struct scb *scb;
281 	u_int  scb_index;
282 
283 	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
284 
285 		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
286 		if ((ahc->qoutfifonext & 0x03) == 0x03) {
287 			u_int modnext;
288 
289 			/*
290 			 * Clear 32bits of QOUTFIFO at a time
291 			 * so that we don't clobber an incoming
292 			 * byte DMA to the array on architectures
293 			 * that only support 32bit load and store
294 			 * operations.
295 			 */
296 			modnext = ahc->qoutfifonext & ~0x3;
297 			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
298 		}
299 		ahc->qoutfifonext++;
300 
301 		scb = ahc_lookup_scb(ahc, scb_index);
302 		if (scb == NULL) {
303 			printf("%s: WARNING no command for scb %d "
304 			       "(cmdcmplt)\nQOUTPOS = %d\n",
305 			       ahc_name(ahc), scb_index,
306 			       ahc->qoutfifonext - 1);
307 			continue;
308 		}
309 
310 		/*
311 		 * Save off the residual
312 		 * if there is one.
313 		 */
314 		if (ahc_check_residual(scb) != 0)
315 			ahc_calc_residual(scb);
316 		else
317 			ahc_set_residual(scb, 0);
318 		ahc_done(ahc, scb);
319 	}
320 }
321 
322 void
323 ahc_run_untagged_queues(struct ahc_softc *ahc)
324 {
325 	int i;
326 
327 	for (i = 0; i < 16; i++)
328 		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
329 }
330 
331 void
332 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
333 {
334 	struct scb *scb;
335 
336 	if (ahc->untagged_queue_lock != 0)
337 		return;
338 
339 	if ((scb = TAILQ_FIRST(queue)) != NULL
340 	 && (scb->flags & SCB_ACTIVE) == 0) {
341 		scb->flags |= SCB_ACTIVE;
342 		ahc_queue_scb(ahc, scb);
343 	}
344 }
345 
346 /************************* Interrupt Handling *********************************/
347 void
348 ahc_handle_brkadrint(struct ahc_softc *ahc)
349 {
350 	/*
351 	 * We upset the sequencer :-(
352 	 * Lookup the error message
353 	 */
354 	int i, error, num_errors;
355 
356 	error = ahc_inb(ahc, ERROR);
357 	num_errors =  sizeof(hard_error)/sizeof(hard_error[0]);
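	/*
	 * ERROR is a one-hot register; shift right until the set bit
	 * reaches bit 0 to turn it into an index into hard_error[].
	 */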
358 	for (i = 0; error != 1 && i < num_errors; i++)
359 		error >>= 1;
360 	printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
361 	       ahc_name(ahc), hard_error[i].errmesg,
362 	       ahc_inb(ahc, SEQADDR0) |
363 	       (ahc_inb(ahc, SEQADDR1) << 8));
364 
365 	ahc_dump_card_state(ahc);
366 
367 	/* Tell everyone that this HBA is no longer available */
368 	ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
369 		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
370 		       CAM_NO_HBA);
371 
372 	/* Disable all interrupt sources by resetting the controller */
373 	ahc_shutdown(ahc);
374 }
375 
376 void
377 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
378 {
379 	struct scb *scb;
380 	struct ahc_devinfo devinfo;
381 
382 	ahc_fetch_devinfo(ahc, &devinfo);
383 
384 	/*
385 	 * Clear the upper byte that holds SEQINT status
386 	 * codes and clear the SEQINT bit. We will unpause
387 	 * the sequencer, if appropriate, after servicing
388 	 * the request.
389 	 */
390 	ahc_outb(ahc, CLRINT, CLRSEQINT);
391 	switch (intstat & SEQINT_MASK) {
392 	case BAD_STATUS:
393 	{
394 		u_int  scb_index;
395 		struct hardware_scb *hscb;
396 
397 		/*
398 		 * Set the default return value to 0 (don't
399 		 * send sense).  The sense code will change
400 		 * this if needed.
401 		 */
402 		ahc_outb(ahc, RETURN_1, 0);
403 
404 		/*
405 		 * The sequencer will notify us when a command
406 		 * has an error that would be of interest to
407 		 * the kernel.  This allows us to leave the sequencer
408 		 * running in the common case of command completes
409 		 * without error.  The sequencer will already have
410 		 * dma'd the SCB back up to us, so we can reference
411 		 * the in kernel copy directly.
412 		 */
413 		scb_index = ahc_inb(ahc, SCB_TAG);
414 		scb = ahc_lookup_scb(ahc, scb_index);
415 		if (scb == NULL) {
416 			printf("%s:%c:%d: ahc_intr - referenced scb "
417 			       "not valid during seqint 0x%x scb(%d)\n",
418 			       ahc_name(ahc), devinfo.channel,
419 			       devinfo.target, intstat, scb_index);
420 			ahc_dump_card_state(ahc);
421 			panic("for safety");
422 			goto unpause;
423 		}
424 
425 		hscb = scb->hscb;
426 
427 		/* Don't want to clobber the original sense code */
428 		if ((scb->flags & SCB_SENSE) != 0) {
429 			/*
430 			 * Clear the SCB_SENSE Flag and have
431 			 * the sequencer do a normal command
432 			 * complete.
433 			 */
434 			scb->flags &= ~SCB_SENSE;
435 			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
436 			break;
437 		}
438 		ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
439 		/* Freeze the queue until the client sees the error. */
440 		ahc_freeze_devq(ahc, scb);
441 		ahc_freeze_scb(scb);
442 		ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
443 		switch (hscb->shared_data.status.scsi_status) {
444 		case SCSI_STATUS_OK:
445 			printf("%s: Interrupted for status of 0???\n",
446 			       ahc_name(ahc));
447 			break;
448 		case SCSI_STATUS_CMD_TERMINATED:
449 		case SCSI_STATUS_CHECK_COND:
450 #ifdef AHC_DEBUG
451 			if (ahc_debug & AHC_SHOWSENSE) {
452 				ahc_print_path(ahc, scb);
453 				printf("SCB %d: requests Check Status\n",
454 				       scb->hscb->tag);
455 			}
456 #endif
457 
458 			if (ahc_perform_autosense(scb)) {
459 				struct ahc_dma_seg *sg;
460 				struct scsi_sense *sc;
461 				struct ahc_initiator_tinfo *targ_info;
462 				struct tmode_tstate *tstate;
463 				struct ahc_transinfo *tinfo;
464 
465 				targ_info =
466 				    ahc_fetch_transinfo(ahc,
467 							devinfo.channel,
468 							devinfo.our_scsiid,
469 							devinfo.target,
470 							&tstate);
471 				tinfo = &targ_info->current;
472 				sg = scb->sg_list;
473 				sc = (struct scsi_sense *)
474 				     (&hscb->shared_data.cdb);
475 				/*
476 				 * Save off the residual if there is one.
477 				 */
478 				if (ahc_check_residual(scb))
479 					ahc_calc_residual(scb);
480 				else
481 					ahc_set_residual(scb, 0);
482 #ifdef AHC_DEBUG
483 				if (ahc_debug & AHC_SHOWSENSE) {
484 					ahc_print_path(ahc, scb);
485 					printf("Sending Sense\n");
486 				}
487 #endif
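				/*
				 * Point the single SG element at this SCB's
				 * slot in the shared sense buffer and build a
				 * REQUEST SENSE CDB in the hardware SCB.
				 */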
488 				sg->addr = ahc->scb_data->sense_busaddr
489 				   + (hscb->tag*sizeof(struct scsi_sense_data));
490 				sg->len = ahc_get_sense_bufsize(ahc, scb);
491 				sg->len |= AHC_DMA_LAST_SEG;
492 
493 				sc->opcode = REQUEST_SENSE;
494 				sc->byte2 = 0;
495 				if (tinfo->protocol_version <= SCSI_REV_2
496 				 && SCB_GET_LUN(scb) < 8)
497 					sc->byte2 = SCB_GET_LUN(scb) << 5;
498 				sc->unused[0] = 0;
499 				sc->unused[1] = 0;
500 				sc->length = sg->len;
501 				sc->control = 0;
502 
503 				/*
504 				 * Would be nice to preserve DISCENB here,
505 				 * but due to the way we manage busy targets,
506 				 * we can't.
507 				 */
508 				hscb->control = 0;
509 
510 				/*
511 				 * This request sense could be because the
512 				 * device lost power or in some other
513 				 * way has lost our transfer negotiations.
514 				 * Renegotiate if appropriate.  Unit attention
515 				 * errors will be reported before any data
516 				 * phases occur.
517 				 */
518 				if (ahc_get_residual(scb)
519 				 == ahc_get_transfer_length(scb)) {
520 					ahc_update_target_msg_request(ahc,
521 							      &devinfo,
522 							      targ_info,
523 							      /*force*/TRUE,
524 							      /*paused*/TRUE);
525 				}
526 				hscb->cdb_len = sizeof(*sc);
527 				hscb->dataptr = sg->addr;
528 				hscb->datacnt = sg->len;
529 				hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
530 				scb->sg_count = 1;
531 				scb->flags |= SCB_SENSE;
532 				ahc_qinfifo_requeue_tail(ahc, scb);
533 				ahc_outb(ahc, RETURN_1, SEND_SENSE);
534 #ifdef __FreeBSD__
535 				/*
536 				 * Ensure we have enough time to actually
537 				 * retrieve the sense.
538 				 */
539 				untimeout(ahc_timeout, (caddr_t)scb,
540 					  scb->io_ctx->ccb_h.timeout_ch);
541 				scb->io_ctx->ccb_h.timeout_ch =
542 				    timeout(ahc_timeout, (caddr_t)scb, 5 * hz);
543 #endif
544 			}
545 			break;
546 		default:
547 			break;
548 		}
549 		break;
550 	}
551 	case NO_MATCH:
552 	{
553 		/* Ensure we don't leave the selection hardware on */
554 		ahc_outb(ahc, SCSISEQ,
555 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
556 
557 		printf("%s:%c:%d: no active SCB for reconnecting "
558 		       "target - issuing BUS DEVICE RESET\n",
559 		       ahc_name(ahc), devinfo.channel, devinfo.target);
560 		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
561 		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
562 		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
563 		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
564 		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
565 		       "SINDEX == 0x%x\n",
566 		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
567 		       ahc_index_busy_tcl(ahc,
568 			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
569 				      ahc_inb(ahc, SAVED_LUN)),
570 		       /*unbusy*/FALSE), ahc_inb(ahc, SINDEX));
571 		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
572 		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
573 		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
574 		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
575 		       ahc_inb(ahc, SCB_CONTROL));
576 		ahc_dump_card_state(ahc);
577 		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
578 		ahc->msgout_len = 1;
579 		ahc->msgout_index = 0;
580 		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
581 		ahc_outb(ahc, MSG_OUT, HOST_MSG);
582 		ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, LASTPHASE) | ATNO);
583 		break;
584 	}
585 	case SEND_REJECT:
586 	{
587 		u_int rejbyte = ahc_inb(ahc, ACCUM);
588 		printf("%s:%c:%d: Warning - unknown message received from "
589 		       "target (0x%x).  Rejecting\n",
590 		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
591 		break;
592 	}
593 	case NO_IDENT:
594 	{
595 		/*
596 		 * The reconnecting target either did not send an identify
597 		 * message, or did, but we didn't find an SCB to match and
598 		 * before it could respond to our ATN/abort, it hit a data phase.
599 		 * The only safe thing to do is to blow it away with a bus
600 		 * reset.
601 		 */
602 		int found;
603 
604 		printf("%s:%c:%d: Target did not send an IDENTIFY message. "
605 		       "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n",
606 		       ahc_name(ahc), devinfo.channel, devinfo.target,
607 		       ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID));
608 		found = ahc_reset_channel(ahc, devinfo.channel,
609 					  /*initiate reset*/TRUE);
610 		printf("%s: Issued Channel %c Bus Reset. "
611 		       "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel,
612 		       found);
613 		return;
614 	}
615 	case IGN_WIDE_RES:
616 		ahc_handle_ign_wide_residue(ahc, &devinfo);
617 		break;
618 	case BAD_PHASE:
619 	{
620 		u_int lastphase;
621 
622 		lastphase = ahc_inb(ahc, LASTPHASE);
623 		printf("%s:%c:%d: unknown scsi bus phase %x, "
624 		       "lastphase = 0x%x.  Attempting to continue\n",
625 		       ahc_name(ahc), devinfo.channel, devinfo.target,
626 		       lastphase, ahc_inb(ahc, SCSISIGI));
627 		break;
628 	}
629 	case MISSED_BUSFREE:
630 	{
631 		u_int lastphase;
632 
633 		lastphase = ahc_inb(ahc, LASTPHASE);
634 		printf("%s:%c:%d: Missed busfree. "
635 		       "Lastphase = 0x%x, Curphase = 0x%x\n",
636 		       ahc_name(ahc), devinfo.channel, devinfo.target,
637 		       lastphase, ahc_inb(ahc, SCSISIGI));
638 		restart_sequencer(ahc);
639 		return;
640 	}
641 	case HOST_MSG_LOOP:
642 	{
643 		/*
644 		 * The sequencer has encountered a message phase
645 		 * that requires host assistance for completion.
646 		 * While handling the message phase(s), we will be
647 		 * notified by the sequencer after each byte is
648 		 * transfered so we can track bus phase changes.
649 		 * transferred so we can track bus phase changes.
650 		 * If this is the first time we've seen a HOST_MSG_LOOP
651 		 * interrupt, initialize the state of the host message
652 		 * loop.
653 		 */
654 		if (ahc->msg_type == MSG_TYPE_NONE) {
655 			u_int bus_phase;
656 
657 			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
658 			if (bus_phase != P_MESGIN
659 			 && bus_phase != P_MESGOUT) {
660 				printf("ahc_intr: HOST_MSG_LOOP bad "
661 				       "phase 0x%x\n",
662 				      bus_phase);
663 				/*
664 				 * Probably transitioned to bus free before
665 				 * we got here.  Just punt the message.
666 				 */
667 				ahc_clear_intstat(ahc);
668 				restart_sequencer(ahc);
669 				return;
670 			}
671 
672 			if (devinfo.role == ROLE_INITIATOR) {
673 				struct scb *scb;
674 				u_int scb_index;
675 
676 				scb_index = ahc_inb(ahc, SCB_TAG);
677 				scb = ahc_lookup_scb(ahc, scb_index);
678 
679 				if (scb == NULL)
680 					panic("HOST_MSG_LOOP with "
681 					      "invalid SCB %x\n", scb_index);
682 
683 				if (bus_phase == P_MESGOUT)
684 					ahc_setup_initiator_msgout(ahc,
685 								   &devinfo,
686 								   scb);
687 				else {
688 					ahc->msg_type =
689 					    MSG_TYPE_INITIATOR_MSGIN;
690 					ahc->msgin_index = 0;
691 				}
692 			} else {
693 				if (bus_phase == P_MESGOUT) {
694 					ahc->msg_type =
695 					    MSG_TYPE_TARGET_MSGOUT;
696 					ahc->msgin_index = 0;
697 				}
698 #ifdef AHC_TARGET_MODE
699 				else
700 					ahc_setup_target_msgin(ahc, &devinfo);
701 #endif
702 			}
703 		}
704 
705 		ahc_handle_message_phase(ahc);
706 		break;
707 	}
708 	case PERR_DETECTED:
709 	{
710 		/*
711 		 * If we've cleared the parity error interrupt
712 		 * but the sequencer still believes that SCSIPERR
713 		 * is true, it must be that the parity error is
714 		 * for the currently presented byte on the bus,
715 		 * and we are not in a phase (data-in) where we will
716 		 * eventually ack this byte.  Ack the byte and
717 		 * throw it away in the hope that the target will
718 		 * take us to message out to deliver the appropriate
719 		 * error message.
720 		 */
721 		if ((intstat & SCSIINT) == 0
722 		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
723 			u_int curphase;
724 
725 			/*
726 			 * The hardware will only let you ack bytes
727 			 * if the expected phase in SCSISIGO matches
728 			 * the current phase.  Make sure this is
729 			 * currently the case.
730 			 */
731 			curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
732 			ahc_outb(ahc, LASTPHASE, curphase);
733 			ahc_outb(ahc, SCSISIGO, curphase);
734 			ahc_inb(ahc, SCSIDATL);
735 		}
736 		break;
737 	}
738 	case DATA_OVERRUN:
739 	{
740 		/*
741 		 * When the sequencer detects an overrun, it
742 		 * places the controller in "BITBUCKET" mode
743 		 * and allows the target to complete its transfer.
744 		 * Unfortunately, none of the counters get updated
745 		 * when the controller is in this mode, so we have
746 		 * no way of knowing how large the overrun was.
747 		 */
748 		u_int scbindex = ahc_inb(ahc, SCB_TAG);
749 		u_int lastphase = ahc_inb(ahc, LASTPHASE);
750 		u_int i;
751 
752 		scb = ahc_lookup_scb(ahc, scbindex);
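		/*
		 * Translate LASTPHASE into a printable description
		 * using the phase_table.
		 */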
753 		for (i = 0; i < num_phases; i++) {
754 			if (lastphase == phase_table[i].phase)
755 				break;
756 		}
757 		ahc_print_path(ahc, scb);
758 		printf("data overrun detected %s."
759 		       "  Tag == 0x%x.\n",
760 		       phase_table[i].phasemsg,
761   		       scb->hscb->tag);
762 		ahc_print_path(ahc, scb);
763 		printf("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
764 		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
765 		       ahc_get_transfer_length(scb), scb->sg_count);
766 		if (scb->sg_count > 0) {
767 			for (i = 0; i < scb->sg_count; i++) {
768 				printf("sg[%d] - Addr 0x%x : Length %d\n",
769 				       i,
770 				       scb->sg_list[i].addr,
771 				       scb->sg_list[i].len & AHC_SG_LEN_MASK);
772 			}
773 		}
774 		/*
775 		 * Set this and it will take effect when the
776 		 * target does a command complete.
777 		 */
778 		ahc_freeze_devq(ahc, scb);
779 		ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
780 		ahc_freeze_scb(scb);
781 		break;
782 	}
783 	case MKMSG_FAILED:
784 	{
785 		u_int scbindex;
786 
787 		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
788 		       ahc_name(ahc), devinfo.channel, devinfo.target,
789 		       devinfo.lun);
790 		scbindex = ahc_inb(ahc, SCB_TAG);
791 		scb = ahc_lookup_scb(ahc, scbindex);
792 		if (scb != NULL
793 		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
794 			/*
795 			 * Ensure that we didn't put a second instance of this
796 			 * SCB into the QINFIFO.
797 			 */
798 			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
799 					   SCB_GET_CHANNEL(ahc, scb),
800 					   SCB_GET_LUN(scb), scb->hscb->tag,
801 					   ROLE_INITIATOR, /*status*/0,
802 					   SEARCH_REMOVE);
803 		break;
804 	}
805 	case ABORT_QINSCB:
806 	{
807 		printf("%s: Abort QINSCB\n", ahc_name(ahc));
808 		break;
809 	}
810 	case NO_FREE_SCB:
811 	{
812 		printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
813 		ahc_dump_card_state(ahc);
814 		panic("for safety");
815 		break;
816 	}
817 	case SCB_MISMATCH:
818 	{
819 		u_int scbptr;
820 
821 		scbptr = ahc_inb(ahc, SCBPTR);
822 		printf("Bogus TAG after DMA.  SCBPTR %d, tag %d, our tag %d\n",
823 		       scbptr, ahc_inb(ahc, ARG_1),
824 		       ahc->scb_data->hscbs[scbptr].tag);
825 		ahc_dump_card_state(ahc);
826 		panic("for safety");
827 		break;
828 	}
829 	case OUT_OF_RANGE:
830 	{
831 		printf("%s: BTT calculation out of range\n", ahc_name(ahc));
832 		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
833 		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
834 		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
835 		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
836 		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
837 		       "SINDEX == 0x%x, A == 0x%x\n",
838 		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
839 		       ahc_index_busy_tcl(ahc,
840 			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
841 				      ahc_inb(ahc, SAVED_LUN)),
842 		       /*unbusy*/FALSE), ahc_inb(ahc, SINDEX),
843 		       ahc_inb(ahc, ACCUM));
844 		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
845 		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
846 		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
847 		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
848 		       ahc_inb(ahc, SCB_CONTROL));
849 		panic("for safety");
850 		break;
851 	}
852 	default:
853 		printf("ahc_intr: seqint, "
854 		       "intstat == 0x%x, scsisigi = 0x%x\n",
855 		       intstat, ahc_inb(ahc, SCSISIGI));
856 		break;
857 	}
858 unpause:
859 	/*
860 	 *  The sequencer is paused immediately on
861 	 *  a SEQINT, so we should restart it when
862 	 *  we're done.
863 	 */
864 	unpause_sequencer(ahc);
865 }
866 
867 void
868 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
869 {
870 	u_int	scb_index;
871 	u_int	status0;
872 	u_int	status;
873 	struct	scb *scb;
874 	char	cur_channel;
875 	char	intr_channel;
876 
877 	/* Make sure the sequencer is in a safe location. */
878 	ahc_clear_critical_section(ahc);
879 
880 	if ((ahc->features & AHC_TWIN) != 0
881 	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
882 		cur_channel = 'B';
883 	else
884 		cur_channel = 'A';
885 	intr_channel = cur_channel;
886 
887 	if ((ahc->features & AHC_ULTRA2) != 0)
888 		status0 = ahc_inb(ahc, SSTAT0) & IOERR;
889 	else
890 		status0 = 0;
891 	status = ahc_inb(ahc, SSTAT1);
892 	if (status == 0 && status0 == 0) {
893 		if ((ahc->features & AHC_TWIN) != 0) {
894 			/* Try the other channel */
895 		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
896 			status = ahc_inb(ahc, SSTAT1);
897 		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
898 			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
899 		}
900 		if (status == 0) {
901 			printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
902 			ahc_outb(ahc, CLRINT, CLRSCSIINT);
903 			unpause_sequencer(ahc);
904 			return;
905 		}
906 	}
907 
908 	scb_index = ahc_inb(ahc, SCB_TAG);
909 	scb = ahc_lookup_scb(ahc, scb_index);
910 	if (scb != NULL
911 	 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0)
912 		scb = NULL;
913 
914 	if ((ahc->features & AHC_ULTRA2) != 0
915 		&& (status0 & IOERR) != 0) {
916 		int now_lvd;
917 
918 		now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
919 		printf("%s: Transceiver State Has Changed to %s mode\n",
920 		       ahc_name(ahc), now_lvd ? "LVD" : "SE");
921 		ahc_outb(ahc, CLRSINT0, CLRIOERR);
922 		/*
923 		 * When transitioning to SE mode, the reset line
924 		 * glitches, triggering an arbitration bug in some
925 		 * Ultra2 controllers.  This bug is cleared when we
926 		 * assert the reset line.  Since a reset glitch has
927 		 * already occurred with this transition and a
928 		 * transceiver state change is handled just like
929 		 * a bus reset anyway, asserting the reset line
930 		 * ourselves is safe.
931 		 */
932 		ahc_reset_channel(ahc, intr_channel,
933 				 /*Initiate Reset*/now_lvd == 0);
934 	} else if ((status & SCSIRSTI) != 0) {
935 		printf("%s: Someone reset channel %c\n",
936 			ahc_name(ahc), intr_channel);
937 		ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
938 	} else if ((status & SCSIPERR) != 0) {
939 		/*
940 		 * Determine the bus phase and queue an appropriate message.
941 		 * SCSIPERR is latched true as soon as a parity error
942 		 * occurs.  If the sequencer acked the transfer that
943 		 * caused the parity error and the currently presented
944 		 * transfer on the bus has correct parity, SCSIPERR will
945 		 * be cleared by CLRSCSIPERR.  Use this to determine if
946 		 * we should look at the last phase the sequencer recorded,
947 		 * or the current phase presented on the bus.
948 		 */
949 		u_int mesg_out;
950 		u_int curphase;
951 		u_int errorphase;
952 		u_int lastphase;
953 		u_int scsirate;
954 		u_int i;
955 		u_int sstat2;
956 
957 		lastphase = ahc_inb(ahc, LASTPHASE);
958 		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
959 		sstat2 = ahc_inb(ahc, SSTAT2);
960 		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
961 		/*
962 		 * For all phases save DATA, the sequencer won't
963 		 * automatically ack a byte that has a parity error
964 		 * in it.  So the only way that the current phase
965 		 * could be 'data-in' is if the parity error is for
966 		 * an already acked byte in the data phase.  During
967 		 * synchronous data-in transfers, we may actually
968 		 * ack bytes before latching the current phase in
969 		 * LASTPHASE, leading to the discrepancy between
970 		 * curphase and lastphase.
971 		 */
972 		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
973 		 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
974 			errorphase = curphase;
975 		else
976 			errorphase = lastphase;
977 
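		/*
		 * Look up the errant phase in phase_table to obtain both
		 * the message to queue and a printable description.
		 */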
978 		for (i = 0; i < num_phases; i++) {
979 			if (errorphase == phase_table[i].phase)
980 				break;
981 		}
982 		mesg_out = phase_table[i].mesg_out;
983 		if (scb != NULL)
984 			ahc_print_path(ahc, scb);
985 		else
986 			printf("%s:%c:%d: ", ahc_name(ahc),
987 			       intr_channel,
988 			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
989 		scsirate = ahc_inb(ahc, SCSIRATE);
990 		printf("parity error detected %s. "
991 		       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
992 		       phase_table[i].phasemsg,
993 		       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8),
994 		       scsirate);
995 
996 		if ((ahc->features & AHC_DT) != 0) {
997 
998 			if ((sstat2 & CRCVALERR) != 0)
999 				printf("\tCRC Value Mismatch\n");
1000 			if ((sstat2 & CRCENDERR) != 0)
1001 				printf("\tNo terminal CRC packet received\n");
1002 			if ((sstat2 & CRCREQERR) != 0)
1003 				printf("\tIllegal CRC packet request\n");
1004 			if ((sstat2 & DUAL_EDGE_ERR) != 0)
1005 				printf("\tUnexpected %sDT Data Phase\n",
1006 				       (scsirate & SINGLE_EDGE) ? "" : "non-");
1007 		}
1008 
1009 		/*
1010 		 * We've set the hardware to assert ATN if we
1011 		 * get a parity error on "in" phases, so all we
1012 		 * need to do is stuff the message buffer with
1013 		 * the appropriate message.  "In" phases have set
1014 		 * mesg_out to something other than MSG_NOOP.
1015 		 */
1016 		if (mesg_out != MSG_NOOP) {
1017 			if (ahc->msg_type != MSG_TYPE_NONE)
1018 				ahc->send_msg_perror = TRUE;
1019 			else
1020 				ahc_outb(ahc, MSG_OUT, mesg_out);
1021 		}
1022 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1023 		unpause_sequencer(ahc);
1024 	} else if ((status & BUSFREE) != 0
1025 		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
1026 		/*
1027 		 * First look at what phase we were last in.
1028 		 * If it was message out, chances are pretty good
1029 		 * that the busfree was in response to one of
1030 		 * our abort requests.
1031 		 */
1032 		u_int lastphase = ahc_inb(ahc, LASTPHASE);
1033 		u_int saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1034 		u_int saved_lun = ahc_inb(ahc, SAVED_LUN);
1035 		u_int target = SCSIID_TARGET(ahc, saved_scsiid);
1036 		u_int initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
1037 		char channel = SCSIID_CHANNEL(ahc, saved_scsiid);
1038 		int printerror = 1;
1039 
1040 		ahc_outb(ahc, SCSISEQ,
1041 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
1042 		if (lastphase == P_MESGOUT) {
1043 			u_int message;
1044 			u_int tag;
1045 
1046 			message = ahc->msgout_buf[ahc->msgout_index - 1];
1047 			tag = SCB_LIST_NULL;
1048 			switch (message) {
1049 			case MSG_ABORT_TAG:
1050 				tag = scb->hscb->tag;
1051 				/* FALLTHROUGH */
1052 			case MSG_ABORT:
1053 				ahc_print_path(ahc, scb);
1054 				printf("SCB %d - Abort %s Completed.\n",
1055 				       scb->hscb->tag, tag == SCB_LIST_NULL ?
1056 				       "" : "Tag");
1057 				ahc_abort_scbs(ahc, target, channel,
1058 					       saved_lun, tag,
1059 					       ROLE_INITIATOR,
1060 					       CAM_REQ_ABORTED);
1061 				printerror = 0;
1062 				break;
1063 			case MSG_BUS_DEV_RESET:
1064 			{
1065 				struct ahc_devinfo devinfo;
1066 #ifdef __FreeBSD__
1067 				/*
1068 				 * Don't mark the user's request for this BDR
1069 				 * as completing with CAM_BDR_SENT.  CAM3
1070 				 * specifies CAM_REQ_CMP.
1071 				 */
1072 				if (scb != NULL
1073 				 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
1074 				 && ahc_match_scb(ahc, scb, target, channel,
1075 						  CAM_LUN_WILDCARD,
1076 						  SCB_LIST_NULL,
1077 						  ROLE_INITIATOR)) {
1078 					ahc_set_transaction_status(scb, CAM_REQ_CMP);
1079 				}
1080 #endif
1081 				ahc_compile_devinfo(&devinfo,
1082 						    initiator_role_id,
1083 						    target,
1084 						    CAM_LUN_WILDCARD,
1085 						    channel,
1086 						    ROLE_INITIATOR);
1087 				ahc_handle_devreset(ahc, &devinfo,
1088 						    CAM_BDR_SENT,
1089 						    "Bus Device Reset",
1090 						    /*verbose_level*/0);
1091 				printerror = 0;
1092 				break;
1093 			}
1094 			default:
1095 				break;
1096 			}
1097 		}
1098 		if (printerror != 0) {
1099 			u_int i;
1100 
1101 			if (scb != NULL) {
1102 				u_int tag;
1103 
1104 				if ((scb->hscb->control & TAG_ENB) != 0)
1105 					tag = scb->hscb->tag;
1106 				else
1107 					tag = SCB_LIST_NULL;
1108 				ahc_print_path(ahc, scb);
1109 				ahc_abort_scbs(ahc, target, channel,
1110 					       SCB_GET_LUN(scb), tag,
1111 					       ROLE_INITIATOR,
1112 					       CAM_UNEXP_BUSFREE);
1113 			} else {
1114 				/*
1115 				 * We had not fully identified this connection,
1116 				 * so we cannot abort anything.
1117 				 */
1118 				printf("%s: ", ahc_name(ahc));
1119 			}
1120 			for (i = 0; i < num_phases; i++) {
1121 				if (lastphase == phase_table[i].phase)
1122 					break;
1123 			}
1124 			printf("Unexpected busfree %s\n"
1125 			       "SEQADDR == 0x%x\n",
1126 			       phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0)
1127 				| (ahc_inb(ahc, SEQADDR1) << 8));
1128 		}
1129 		ahc_clear_msg_state(ahc);
1130 		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1131 		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
1132 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1133 		restart_sequencer(ahc);
1134 	} else if ((status & SELTO) != 0) {
1135 		u_int scbptr;
1136 
1137 		scbptr = ahc_inb(ahc, WAITING_SCBH);
1138 		ahc_outb(ahc, SCBPTR, scbptr);
1139 		scb_index = ahc_inb(ahc, SCB_TAG);
1140 
1141 		scb = ahc_lookup_scb(ahc, scb_index);
1142 		if (scb == NULL) {
1143 			printf("%s: ahc_intr - referenced scb not "
1144 			       "valid during SELTO scb(%d, %d)\n",
1145 			       ahc_name(ahc), scbptr, scb_index);
1146 		} else {
1147 			ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1148 			ahc_freeze_devq(ahc, scb);
1149 		}
1150 		/* Stop the selection */
1151 		ahc_outb(ahc, SCSISEQ, 0);
1152 
1153 		/* No more pending messages */
1154 		ahc_clear_msg_state(ahc);
1155 
1156 		/*
1157 		 * Although the driver does not care about the
1158 		 * 'Selection in Progress' status bit, the busy
1159 		 * LED does.  SELINGO is only cleared by a successful
1160 		 * selection, so we must manually clear it to ensure
1161 		 * the LED turns off just in case no future successful
1162 		 * selections occur (e.g. no devices on the bus).
1163 		 */
1164 		ahc_outb(ahc, CLRSINT0, CLRSELINGO);
1165 
1166 		/* Clear interrupt state */
1167 		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1168 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1169 		restart_sequencer(ahc);
1170 	} else {
1171 		ahc_print_path(ahc, scb);
1172 		printf("Unknown SCSIINT. Status = 0x%x\n", status);
1173 		ahc_outb(ahc, CLRSINT1, status);
1174 		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1175 		unpause_sequencer(ahc);
1176 	}
1177 }
1178 
1179 #define AHC_MAX_STEPS 2000
1180 void
1181 ahc_clear_critical_section(struct ahc_softc *ahc)
1182 {
1183 	int	stepping;
1184 	int	steps;
1185 	u_int	simode0;
1186 	u_int	simode1;
1187 
1188 	if (ahc->num_critical_sections == 0)
1189 		return;
1190 
1191 	stepping = FALSE;
1192 	steps = 0;
1193 	simode0 = 0;
1194 	simode1 = 0;
1195 	for (;;) {
1196 		struct	cs *cs;
1197 		u_int	seqaddr;
1198 		u_int	i;
1199 
1200 		seqaddr = ahc_inb(ahc, SEQADDR0)
1201 			| (ahc_inb(ahc, SEQADDR1) << 8);
1202 
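		/*
		 * Check whether the sequencer's current program counter
		 * falls within any of the firmware's critical sections.
		 */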
1203 		cs = ahc->critical_sections;
1204 		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
1205 
1206 			if (cs->begin < seqaddr && cs->end >= seqaddr)
1207 				break;
1208 		}
1209 
1210 		if (i == ahc->num_critical_sections)
1211 			break;
1212 
1213 		if (steps > AHC_MAX_STEPS) {
1214 			printf("%s: Infinite loop in critical section\n",
1215 			       ahc_name(ahc));
1216 			ahc_dump_card_state(ahc);
1217 			panic("critical section loop");
1218 		}
1219 
1220 		steps++;
1221 		if (stepping == FALSE) {
1222 
1223 			/*
1224 			 * Disable all interrupt sources so that the
1225 			 * sequencer will not be stuck by a pausing
1226 			 * interrupt condition while we attempt to
1227 			 * leave a critical section.
1228 			 */
1229 			simode0 = ahc_inb(ahc, SIMODE0);
1230 			ahc_outb(ahc, SIMODE0, 0);
1231 			simode1 = ahc_inb(ahc, SIMODE1);
1232 			ahc_outb(ahc, SIMODE1, 0);
1233 			ahc_outb(ahc, CLRINT, CLRSCSIINT);
1234 			ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP);
1235 			stepping = TRUE;
1236 		}
1237 		ahc_outb(ahc, HCNTRL, ahc->unpause);
1238 		do {
1239 			ahc_delay(200);
1240 		} while (!sequencer_paused(ahc));
1241 	}
1242 	if (stepping) {
1243 		ahc_outb(ahc, SIMODE0, simode0);
1244 		ahc_outb(ahc, SIMODE1, simode1);
1245 		ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP);
1246 	}
1247 }
1248 
1249 /*
1250  * Clear any pending interrupt status.
1251  */
1252 void
1253 ahc_clear_intstat(struct ahc_softc *ahc)
1254 {
1255 	/* Clear any interrupt conditions this may have caused */
1256 	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
1257 	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
1258 				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
1259 				CLRREQINIT);
1260 	ahc_outb(ahc, CLRINT, CLRSCSIINT);
1261 }
1262 
1263 /**************************** Debugging Routines ******************************/
1264 void
1265 ahc_print_scb(struct scb *scb)
1266 {
1267 	int i;
1268 
1269 	struct hardware_scb *hscb = scb->hscb;
1270 
1271 	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
1272 	       scb,
1273 	       hscb->control,
1274 	       hscb->scsiid,
1275 	       hscb->lun,
1276 	       hscb->cdb_len);
1277 	i = 0;
1278 	printf("Shared Data: %#02x %#02x %#02x %#02x\n",
1279 	       hscb->shared_data.cdb[i++],
1280 	       hscb->shared_data.cdb[i++],
1281 	       hscb->shared_data.cdb[i++],
1282 	       hscb->shared_data.cdb[i++]);
1283 	printf("             %#02x %#02x %#02x %#02x\n",
1284 	       hscb->shared_data.cdb[i++],
1285 	       hscb->shared_data.cdb[i++],
1286 	       hscb->shared_data.cdb[i++],
1287 	       hscb->shared_data.cdb[i++]);
1288 	printf("             %#02x %#02x %#02x %#02x\n",
1289 	       hscb->shared_data.cdb[i++],
1290 	       hscb->shared_data.cdb[i++],
1291 	       hscb->shared_data.cdb[i++],
1292 	       hscb->shared_data.cdb[i++]);
1293 	printf("        dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
1294 		hscb->dataptr,
1295 		hscb->datacnt,
1296 		hscb->sgptr,
1297 		hscb->tag);
1298 	if (scb->sg_count > 0) {
1299 		for (i = 0; i < scb->sg_count; i++) {
1300 			printf("sg[%d] - Addr 0x%x : Length %d\n",
1301 			       i,
1302 			       scb->sg_list[i].addr,
1303 			       scb->sg_list[i].len);
1304 		}
1305 	}
1306 }
1307 
1308 /************************* Transfer Negotiation *******************************/
1309 /*
1310  * Allocate per target mode instance (ID we respond to as a target)
1311  * transfer negotiation data structures.
1312  */
1313 static struct tmode_tstate *
1314 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1315 {
1316 	struct tmode_tstate *master_tstate;
1317 	struct tmode_tstate *tstate;
1318 	int i;
1319 
1320 	master_tstate = ahc->enabled_targets[ahc->our_id];
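	/* Channel B targets occupy the upper eight slots of enabled_targets[]. */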
1321 	if (channel == 'B') {
1322 		scsi_id += 8;
1323 		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
1324 	}
1325 	if (ahc->enabled_targets[scsi_id] != NULL
1326 	 && ahc->enabled_targets[scsi_id] != master_tstate)
1327 		panic("%s: ahc_alloc_tstate - Target already allocated",
1328 		      ahc_name(ahc));
1329 	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
1330 	if (tstate == NULL)
1331 		return (NULL);
1332 
1333 	/*
1334 	 * If we have allocated a master tstate, copy user settings from
1335 	 * the master tstate (taken from SRAM or the EEPROM) for this
1336 	 * channel, but reset our current and goal settings to async/narrow
1337 	 * until an initiator talks to us.
1338 	 */
1339 	if (master_tstate != NULL) {
1340 		memcpy(tstate, master_tstate, sizeof(*tstate));
1341 		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
1342 		tstate->ultraenb = 0;
1343 		for (i = 0; i < 16; i++) {
1344 			memset(&tstate->transinfo[i].current, 0,
1345 			      sizeof(tstate->transinfo[i].current));
1346 			memset(&tstate->transinfo[i].goal, 0,
1347 			      sizeof(tstate->transinfo[i].goal));
1348 		}
1349 	} else
1350 		memset(tstate, 0, sizeof(*tstate));
1351 	ahc->enabled_targets[scsi_id] = tstate;
1352 	return (tstate);
1353 }
1354 
1355 /*
1356  * Free per target mode instance (ID we respond to as a target)
1357  * transfer negotiation data structures.
1358  */
1359 static void
1360 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
1361 {
1362 	struct tmode_tstate *tstate;
1363 
1364 	/* Don't clean up the entry for our initiator role */
1365 	if ((ahc->flags & AHC_INITIATORROLE) != 0
1366 	 && ((channel == 'B' && scsi_id == ahc->our_id_b)
1367 	  || (channel == 'A' && scsi_id == ahc->our_id))
1368 	 && force == FALSE)
1369 		return;
1370 
1371 	if (channel == 'B')
1372 		scsi_id += 8;
1373 	tstate = ahc->enabled_targets[scsi_id];
1374 	if (tstate != NULL)
1375 		free(tstate, M_DEVBUF);
1376 	ahc->enabled_targets[scsi_id] = NULL;
1377 }
1378 
1379 /*
1380  * Called when we have an active connection to a target on the bus,
1381  * this function finds the nearest syncrate to the input period limited
1382  * by the capabilities of the bus connectivity of and sync settings for
1383 	 * by the capabilities of the bus connectivity and the sync settings for
1384  */
1385 struct ahc_syncrate *
1386 ahc_devlimited_syncrate(struct ahc_softc *ahc,
1387 			struct ahc_initiator_tinfo *tinfo,
1388 			u_int *period, u_int *ppr_options, role_t role) {
1389 	struct	ahc_transinfo *transinfo;
1390 	u_int	maxsync;
1391 
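	/*
	 * Determine the fastest rate class this adapter/bus pairing
	 * supports: DT on an LVD bus for Ultra2 chips, Ultra on a
	 * single-ended bus or an Ultra chip, otherwise Fast.
	 */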
1392 	if ((ahc->features & AHC_ULTRA2) != 0) {
1393 		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1394 		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1395 			maxsync = AHC_SYNCRATE_DT;
1396 		} else {
1397 			maxsync = AHC_SYNCRATE_ULTRA;
1398 			/* Can't do DT on an SE bus */
1399 			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1400 		}
1401 	} else if ((ahc->features & AHC_ULTRA) != 0) {
1402 		maxsync = AHC_SYNCRATE_ULTRA;
1403 	} else {
1404 		maxsync = AHC_SYNCRATE_FAST;
1405 	}
1406 	/*
1407 	 * Never allow a value higher than our current goal
1408 	 * period otherwise we may allow a target initiated
1409 	 * negotiation to go above the limit as set by the
1410 	 * user.  In the case of an initiator initiated
1411 	 * sync negotiation, we limit based on the user
1412 	 * setting.  This allows the system to still accept
1413 	 * incoming negotiations even if target initiated
1414 	 * negotiation is not performed.
1415 	 */
1416 	if (role == ROLE_TARGET)
1417 		transinfo = &tinfo->user;
1418 	else
1419 		transinfo = &tinfo->goal;
1420 	*ppr_options &= transinfo->ppr_options;
1421 	if (transinfo->period == 0) {
1422 		*period = 0;
1423 		*ppr_options = 0;
1424 		return (NULL);
1425 	}
1426 	*period = MAX(*period, transinfo->period);
1427 	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1428 }
1429 
1430 /*
1431  * Look up the valid period to SCSIRATE conversion in our table.
1432  * Return the period and offset that should be sent to the target
1433  * if this was the beginning of an SDTR.
1434  */
1435 struct ahc_syncrate *
1436 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1437 		  u_int *ppr_options, u_int maxsync)
1438 {
1439 	struct ahc_syncrate *syncrate;
1440 
1441 	if ((ahc->features & AHC_DT) == 0)
1442 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1443 
1444 	for (syncrate = &ahc_syncrates[maxsync];
1445 	     syncrate->rate != NULL;
1446 	     syncrate++) {
1447 
1448 		/*
1449 		 * The Ultra2 table doesn't go as low
1450 		 * as the table for the Fast/Ultra cards does.
1451 		 */
1452 		if ((ahc->features & AHC_ULTRA2) != 0
1453 		 && (syncrate->sxfr_u2 == 0))
1454 			break;
1455 
1456 		/* Skip any DT entries if DT is not available */
1457 		if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1458 		 && (syncrate->sxfr_u2 & DT_SXFR) != 0)
1459 			continue;
1460 
1461 		if (*period <= syncrate->period) {
1462 			/*
1463 			 * When responding to a target that requests
1464 			 * sync, the requested rate may fall between
1465 			 * two rates that we can output, but still be
1466 			 * a rate that we can receive.  Because of this,
1467 			 * we want to respond to the target with
1468 			 * the same rate that it sent to us even
1469 			 * if the period we use to send data to it
1470 			 * is lower.  Only lower the response period
1471 			 * if we must.
1472 			 */
1473 			if (syncrate == &ahc_syncrates[maxsync])
1474 				*period = syncrate->period;
1475 
1476 			/*
1477 			 * At some speeds, we only support
1478 			 * ST transfers.
1479 			 */
1480 		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1481 				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1482 			break;
1483 		}
1484 	}
1485 
1486 	if ((*period == 0)
1487 	 || (syncrate->rate == NULL)
1488 	 || ((ahc->features & AHC_ULTRA2) != 0
1489 	  && (syncrate->sxfr_u2 == 0))) {
1490 		/* Use asynchronous transfers. */
1491 		*period = 0;
1492 		syncrate = NULL;
1493 		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1494 	}
1495 	return (syncrate);
1496 }
1497 
1498 /*
1499  * Convert from an entry in our syncrate table to the SCSI equivalent
1500  * sync "period" factor.
1501  */
1502 u_int
1503 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1504 {
1505 	struct ahc_syncrate *syncrate;
1506 
1507 	if ((ahc->features & AHC_ULTRA2) != 0)
1508 		scsirate &= SXFR_ULTRA2;
1509 	else
1510 		scsirate &= SXFR;
1511 
1512 	syncrate = &ahc_syncrates[maxsync];
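	/*
	 * Scan from the maxsync entry down, returning the period of the
	 * first entry whose SCSIRATE bits match the supplied value.
	 */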
1513 	while (syncrate->rate != NULL) {
1514 
1515 		if ((ahc->features & AHC_ULTRA2) != 0) {
1516 			if (syncrate->sxfr_u2 == 0)
1517 				break;
1518 			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1519 				return (syncrate->period);
1520 		} else if (scsirate == (syncrate->sxfr & SXFR)) {
1521 				return (syncrate->period);
1522 		}
1523 		syncrate++;
1524 	}
1525 	return (0); /* async */
1526 }
1527 
1528 /*
1529  * Truncate the given synchronous offset to a value the
1530  * current adapter type and syncrate are capable of.
1531  */
1532 void
1533 ahc_validate_offset(struct ahc_softc *ahc,
1534 		    struct ahc_initiator_tinfo *tinfo,
1535 		    struct ahc_syncrate *syncrate,
1536 		    u_int *offset, int wide, role_t role)
1537 {
1538 	u_int maxoffset;
1539 
1540 	/* Limit offset to what we can do */
1541 	if (syncrate == NULL) {
1542 		maxoffset = 0;
1543 	} else if ((ahc->features & AHC_ULTRA2) != 0) {
1544 		maxoffset = MAX_OFFSET_ULTRA2;
1545 	} else {
1546 		if (wide)
1547 			maxoffset = MAX_OFFSET_16BIT;
1548 		else
1549 			maxoffset = MAX_OFFSET_8BIT;
1550 	}
1551 	*offset = MIN(*offset, maxoffset);
1552 	if (tinfo != NULL) {
1553 		if (role == ROLE_TARGET)
1554 			*offset = MIN(*offset, tinfo->user.offset);
1555 		else
1556 			*offset = MIN(*offset, tinfo->goal.offset);
1557 	}
1558 }
1559 
1560 /*
1561  * Truncate the given transfer width parameter to a value the
1562  * current adapter type is capable of.
1563  */
1564 void
1565 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1566 		   u_int *bus_width, role_t role)
1567 {
1568 	switch (*bus_width) {
1569 	default:
1570 		if (ahc->features & AHC_WIDE) {
1571 			/* Respond Wide */
1572 			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1573 			break;
1574 		}
1575 		/* FALLTHROUGH */
1576 	case MSG_EXT_WDTR_BUS_8_BIT:
1577 		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1578 		break;
1579 	}
1580 	if (tinfo != NULL) {
1581 		if (role == ROLE_TARGET)
1582 			*bus_width = MIN(tinfo->user.width, *bus_width);
1583 		else
1584 			*bus_width = MIN(tinfo->goal.width, *bus_width);
1585 	}
1586 }
1587 
1588 /*
1589  * Update the bitmask of targets with which the controller should
1590  * negotiate at the next convenient opportunity.  This currently
1591  * means the next time we send the initial identify messages for
1592  * a new transaction.
1593  */
1594 static void
1595 ahc_update_target_msg_request(struct ahc_softc *ahc,
1596 			      struct ahc_devinfo *devinfo,
1597 			      struct ahc_initiator_tinfo *tinfo,
1598 			      int force, int paused)
1599 {
1600 	u_int targ_msg_req_orig;
1601 
1602 	targ_msg_req_orig = ahc->targ_msg_req;
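	/*
	 * Flag this target for negotiation if its current parameters
	 * differ from the goal, or if forced and the goal is anything
	 * other than async/narrow.
	 */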
1603 	if (tinfo->current.period != tinfo->goal.period
1604 	 || tinfo->current.width != tinfo->goal.width
1605 	 || tinfo->current.offset != tinfo->goal.offset
1606 	 || tinfo->current.ppr_options != tinfo->goal.ppr_options
1607 	 || (force
1608 	  && (tinfo->goal.period != 0
1609 	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
1610 	   || tinfo->goal.ppr_options != 0)))
1611 		ahc->targ_msg_req |= devinfo->target_mask;
1612 	else
1613 		ahc->targ_msg_req &= ~devinfo->target_mask;
1614 
1615 	if (ahc->targ_msg_req != targ_msg_req_orig) {
1616 		/* Update the message request bit for this target */
1617 		if (!paused)
1618 			pause_sequencer(ahc);
1619 
1620 		ahc_outb(ahc, TARGET_MSG_REQUEST,
1621 			 ahc->targ_msg_req & 0xFF);
1622 		ahc_outb(ahc, TARGET_MSG_REQUEST + 1,
1623 			 (ahc->targ_msg_req >> 8) & 0xFF);
1624 
1625 		if (!paused)
1626 			unpause_sequencer(ahc);
1627 	}
1628 }
1629 
1630 /*
1631  * Update the user/goal/current tables of synchronous negotiation
1632  * parameters as well as, in the case of a current or active update,
1633  * any data structures on the host controller.  In the case of an
1634  * active update, the specified target is currently talking to us on
1635  * the bus, so the transfer parameter update must take effect
1636  * immediately.
1637  */
1638 void
1639 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1640 		 struct ahc_syncrate *syncrate, u_int period,
1641 		 u_int offset, u_int ppr_options, u_int type, int paused)
1642 {
1643 	struct	ahc_initiator_tinfo *tinfo;
1644 	struct	tmode_tstate *tstate;
1645 	u_int	old_period;
1646 	u_int	old_offset;
1647 	u_int	old_ppr;
1648 	int	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1649 
1650 	if (syncrate == NULL) {
1651 		period = 0;
1652 		offset = 0;
1653 	}
1654 
1655 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1656 				    devinfo->target, &tstate);
1657 	old_period = tinfo->current.period;
1658 	old_offset = tinfo->current.offset;
1659 	old_ppr	   = tinfo->current.ppr_options;
1660 
1661 	if ((type & AHC_TRANS_CUR) != 0
1662 	 && (old_period != period
1663 	  || old_offset != offset
1664 	  || old_ppr != ppr_options)) {
1665 		u_int	scsirate;
1666 
1667 		scsirate = tinfo->scsirate;
1668 		if ((ahc->features & AHC_ULTRA2) != 0) {
1669 
1670 			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
1671 			if (syncrate != NULL) {
1672 				scsirate |= syncrate->sxfr_u2;
1673 				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
1674 					scsirate |= ENABLE_CRC;
1675 				else
1676 					scsirate |= SINGLE_EDGE;
1677 			}
1678 		} else {
1679 
1680 			scsirate &= ~(SXFR|SOFS);
1681 			/*
1682 			 * Ensure Ultra mode is set properly for
1683 			 * this target.
1684 			 */
1685 			tstate->ultraenb &= ~devinfo->target_mask;
1686 			if (syncrate != NULL) {
1687 				if (syncrate->sxfr & ULTRA_SXFR) {
1688 					tstate->ultraenb |=
1689 						devinfo->target_mask;
1690 				}
1691 				scsirate |= syncrate->sxfr & SXFR;
1692 				scsirate |= offset & SOFS;
1693 			}
1694 			if (active) {
1695 				u_int sxfrctl0;
1696 
1697 				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
1698 				sxfrctl0 &= ~FAST20;
1699 				if (tstate->ultraenb & devinfo->target_mask)
1700 					sxfrctl0 |= FAST20;
1701 				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
1702 			}
1703 		}
1704 		if (active) {
1705 			ahc_outb(ahc, SCSIRATE, scsirate);
1706 			if ((ahc->features & AHC_ULTRA2) != 0)
1707 				ahc_outb(ahc, SCSIOFFSET, offset);
1708 		}
1709 
1710 		tinfo->scsirate = scsirate;
1711 		tinfo->current.period = period;
1712 		tinfo->current.offset = offset;
1713 		tinfo->current.ppr_options = ppr_options;
1714 
1715 		/* Update the syncrates in any pending scbs */
1716 		ahc_update_pending_syncrates(ahc);
1717 
1718 		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1719 			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
1720 		if (bootverbose) {
1721 			if (offset != 0) {
1722 				printf("%s: target %d synchronous at %sMHz%s, "
1723 				       "offset = 0x%x\n", ahc_name(ahc),
1724 				       devinfo->target, syncrate->rate,
1725 				       (ppr_options & MSG_EXT_PPR_DT_REQ)
1726 				       ? " DT" : "", offset);
1727 			} else {
1728 				printf("%s: target %d using "
1729 				       "asynchronous transfers\n",
1730 				       ahc_name(ahc), devinfo->target);
1731 			}
1732 		}
1733 	}
1734 
1735 	if ((type & AHC_TRANS_GOAL) != 0) {
1736 		tinfo->goal.period = period;
1737 		tinfo->goal.offset = offset;
1738 		tinfo->goal.ppr_options = ppr_options;
1739 	}
1740 
1741 	if ((type & AHC_TRANS_USER) != 0) {
1742 		tinfo->user.period = period;
1743 		tinfo->user.offset = offset;
1744 		tinfo->user.ppr_options = ppr_options;
1745 	}
1746 
1747 	ahc_update_target_msg_request(ahc, devinfo, tinfo,
1748 				      /*force*/FALSE,
1749 				      paused);
1750 }
1751 
1752 /*
1753  * Update the user/goal/current tables of wide negotiation
1754  * parameters as well as, in the case of a current or active update,
1755  * any data structures on the host controller.  In the case of an
1756  * active update, the specified target is currently talking to us on
1757  * the bus, so the transfer parameter update must take effect
1758  * immediately.
1759  */
1760 void
1761 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1762 	      u_int width, u_int type, int paused)
1763 {
1764 	struct ahc_initiator_tinfo *tinfo;
1765 	struct tmode_tstate *tstate;
1766 	u_int  oldwidth;
1767 	int    active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1768 
1769 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1770 				    devinfo->target, &tstate);
1771 	oldwidth = tinfo->current.width;
1772 
1773 	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
1774 		u_int	scsirate;
1775 
1776 		scsirate =  tinfo->scsirate;
1777 		scsirate &= ~WIDEXFER;
1778 		if (width == MSG_EXT_WDTR_BUS_16_BIT)
1779 			scsirate |= WIDEXFER;
1780 
1781 		tinfo->scsirate = scsirate;
1782 
1783 		if (active)
1784 			ahc_outb(ahc, SCSIRATE, scsirate);
1785 
1786 		tinfo->current.width = width;
1787 
1788 		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1789 			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
1790 		if (bootverbose) {
1791 			printf("%s: target %d using %dbit transfers\n",
1792 			       ahc_name(ahc), devinfo->target,
1793 			       8 * (0x01 << width));
1794 		}
1795 	}
1796 	if ((type & AHC_TRANS_GOAL) != 0)
1797 		tinfo->goal.width = width;
1798 	if ((type & AHC_TRANS_USER) != 0)
1799 		tinfo->user.width = width;
1800 
1801 	ahc_update_target_msg_request(ahc, devinfo, tinfo,
1802 				      /*force*/FALSE, paused);
1803 }
1804 
1805 /*
1806  * Update the current state of tagged queuing for a given target.
1807  */
1808 void
1809 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable)
1810 {
1811 	struct ahc_initiator_tinfo *tinfo;
1812 	struct tmode_tstate *tstate;
1813 	uint16_t orig_tagenable;
1814 
1815 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1816 				    devinfo->target, &tstate);
1817 
1818 	orig_tagenable = tstate->tagenable;
1819 	if (enable)
1820 		tstate->tagenable |= devinfo->target_mask;
1821 	else
1822 		tstate->tagenable &= ~devinfo->target_mask;
1823 
1824 	if (orig_tagenable != tstate->tagenable) {
1825 		ahc_platform_set_tags(ahc, devinfo, enable);
1826 		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1827 			       devinfo->lun, AC_TRANSFER_NEG);
1828 	}
1829 
1830 }
1831 
1832 /*
1833  * When the transfer settings for a connection change, update any
1834  * in-transit SCBs to contain the new data so the hardware will
1835  * be set correctly during future (re)selections.
1836  */
1837 static void
1838 ahc_update_pending_syncrates(struct ahc_softc *ahc)
1839 {
1840 	struct	scb *pending_scb;
1841 	int	pending_scb_count;
1842 	int	i;
1843 	u_int	saved_scbptr;
1844 
1845 	/*
1846 	 * Traverse the pending SCB list and ensure that all of the
1847 	 * SCBs there have the proper settings.
1848 	 */
1849 	pending_scb_count = 0;
1850 	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
1851 		struct ahc_devinfo devinfo;
1852 		struct hardware_scb *pending_hscb;
1853 		struct ahc_initiator_tinfo *tinfo;
1854 		struct tmode_tstate *tstate;
1855 
1856 		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
1857 		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
1858 					    devinfo.our_scsiid,
1859 					    devinfo.target, &tstate);
1860 		pending_hscb = pending_scb->hscb;
1861 		pending_hscb->control &= ~ULTRAENB;
1862 		if ((tstate->ultraenb & devinfo.target_mask) != 0)
1863 			pending_hscb->control |= ULTRAENB;
1864 		pending_hscb->scsirate = tinfo->scsirate;
1865 		pending_hscb->scsioffset = tinfo->current.offset;
1866 		pending_scb_count++;
1867 	}
1868 
1869 	if (pending_scb_count == 0)
1870 		return;
1871 
1872 	saved_scbptr = ahc_inb(ahc, SCBPTR);
1873 	/* Ensure that the hscbs down on the card match the new information */
1874 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
1875 		struct	hardware_scb *pending_hscb;
1876 		u_int	control;
1877 		u_int	scb_tag;
1878 
1879 		ahc_outb(ahc, SCBPTR, i);
1880 		scb_tag = ahc_inb(ahc, SCB_TAG);
1881 		pending_scb = ahc_lookup_scb(ahc, scb_tag);
1882 		if (pending_scb == NULL)
1883 			continue;
1884 
1885 		pending_hscb = pending_scb->hscb;
1886 		control = ahc_inb(ahc, SCB_CONTROL);
1887 		control &= ~ULTRAENB;
1888 		if ((pending_hscb->control & ULTRAENB) != 0)
1889 			control |= ULTRAENB;
1890 		ahc_outb(ahc, SCB_CONTROL, control);
1891 		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
1892 		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
1893 	}
1894 	ahc_outb(ahc, SCBPTR, saved_scbptr);
1895 }
1896 
1897 /**************************** Pathing Information *****************************/
1898 static void
1899 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1900 {
1901 	u_int	saved_scsiid;
1902 	role_t	role;
1903 	int	our_id;
1904 
1905 	if (ahc_inb(ahc, SSTAT0) & TARGET)
1906 		role = ROLE_TARGET;
1907 	else
1908 		role = ROLE_INITIATOR;
1909 
1910 	if (role == ROLE_TARGET
1911 	 && (ahc->features & AHC_MULTI_TID) != 0
1912 	 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
1913 		/* We were selected, so pull our id from TARGIDIN */
1914 		our_id = ahc_inb(ahc, TARGIDIN) & OID;
1915 	} else if ((ahc->features & AHC_ULTRA2) != 0)
1916 		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
1917 	else
1918 		our_id = ahc_inb(ahc, SCSIID) & OID;
1919 
1920 	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1921 	ahc_compile_devinfo(devinfo,
1922 			    our_id,
1923 			    SCSIID_TARGET(ahc, saved_scsiid),
1924 			    ahc_inb(ahc, SAVED_LUN),
1925 			    SCSIID_CHANNEL(ahc, saved_scsiid),
1926 			    role);
1927 }
1928 
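/*
 * Worked example (illustrative only): for target 3 on channel 'B',
 * ahc_compile_devinfo() below yields target_offset == 11 and
 * target_mask == (0x01 << 11) == 0x0800, since channel B targets
 * occupy the upper half of the driver's 16-bit per-target bitmasks.
 */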
1929 void
1930 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
1931 		    u_int lun, char channel, role_t role)
1932 {
1933 	devinfo->our_scsiid = our_id;
1934 	devinfo->target = target;
1935 	devinfo->lun = lun;
1936 	devinfo->target_offset = target;
1937 	devinfo->channel = channel;
1938 	devinfo->role = role;
1939 	if (channel == 'B')
1940 		devinfo->target_offset += 8;
1941 	devinfo->target_mask = (0x01 << devinfo->target_offset);
1942 }
1943 
1944 static void
1945 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1946 		struct scb *scb)
1947 {
1948 	role_t	role;
1949 	int	our_id;
1950 
1951 	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
1952 	role = ROLE_INITIATOR;
1953 	if ((scb->hscb->control & TARGET_SCB) != 0)
1954 		role = ROLE_TARGET;
1955 	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
1956 			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
1957 }
1958 
1959 
1960 /************************ Message Phase Processing ****************************/
1961 /*
1962  * When an initiator transaction with the MK_MESSAGE flag either reconnects
1963  * or enters the initial message out phase, we are interrupted.  Fill our
1964  * outgoing message buffer with the appropriate message and begin handling
1965  * the message phase(s) manually.
1966  */
1967 static void
1968 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1969 			   struct scb *scb)
1970 {
1971 	/*
1972 	 * To facilitate adding multiple messages together,
1973 	 * each routine should increment the index and len
1974 	 * variables instead of setting them explicitly.
1975 	 */
1976 	ahc->msgout_index = 0;
1977 	ahc->msgout_len = 0;
1978 
1979 	if ((scb->flags & SCB_DEVICE_RESET) == 0
1980 	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
1981 		u_int identify_msg;
1982 
1983 		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
1984 		if ((scb->hscb->control & DISCENB) != 0)
1985 			identify_msg |= MSG_IDENTIFY_DISCFLAG;
1986 		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
1987 		ahc->msgout_len++;
1988 
1989 		if ((scb->hscb->control & TAG_ENB) != 0) {
1990 			ahc->msgout_buf[ahc->msgout_index++] =
1991 			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
1992 			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
1993 			ahc->msgout_len += 2;
1994 		}
1995 	}
1996 
1997 	if (scb->flags & SCB_DEVICE_RESET) {
1998 		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
1999 		ahc->msgout_len++;
2000 		ahc_print_path(ahc, scb);
2001 		printf("Bus Device Reset Message Sent\n");
2002 	} else if ((scb->flags & SCB_ABORT) != 0) {
2003 		if ((scb->hscb->control & TAG_ENB) != 0)
2004 			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
2005 		else
2006 			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
2007 		ahc->msgout_len++;
2008 		ahc_print_path(ahc, scb);
2009 		printf("Abort Message Sent\n");
2010 	} else if ((ahc->targ_msg_req & devinfo->target_mask) != 0
2011 		|| (scb->flags & SCB_NEGOTIATE) != 0) {
2012 		ahc_build_transfer_msg(ahc, devinfo);
2013 	} else {
2014 		printf("ahc_intr: AWAITING_MSG for an SCB that "
2015 		       "does not have a waiting message\n");
2016 		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
2017 		       devinfo->target_mask);
2018 		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
2019 		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
2020 		      ahc_inb(ahc, MSG_OUT), scb->flags);
2021 	}
2022 
2023 	/*
2024 	 * Clear the MK_MESSAGE flag from the SCB so we aren't
2025 	 * asked to send this message again.
2026 	 */
2027 	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
2028 	ahc->msgout_index = 0;
2029 	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2030 }
2031 /*
2032  * Build an appropriate transfer negotiation message for the
2033  * currently active target.
2034  */
2035 static void
2036 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2037 {
2038 	/*
2039 	 * We need to initiate transfer negotiations.
2040 	 * If our current and goal settings are identical,
2041 	 * we want to renegotiate due to a check condition.
2042 	 */
2043 	struct	ahc_initiator_tinfo *tinfo;
2044 	struct	tmode_tstate *tstate;
2045 	struct	ahc_syncrate *rate;
2046 	int	dowide;
2047 	int	dosync;
2048 	int	doppr;
2049 	int	use_ppr;
2050 	u_int	period;
2051 	u_int	ppr_options;
2052 	u_int	offset;
2053 
2054 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2055 				    devinfo->target, &tstate);
2056 	dowide = tinfo->current.width != tinfo->goal.width;
2057 	dosync = tinfo->current.period != tinfo->goal.period;
2058 	doppr = tinfo->current.ppr_options != tinfo->goal.ppr_options;
2059 
2060 	if (!dowide && !dosync && !doppr) {
2061 		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
2062 		dosync = tinfo->goal.period != 0;
2063 		doppr = tinfo->goal.ppr_options != 0;
2064 	}
2065 
2066 	if (!dowide && !dosync && !doppr) {
2067 		panic("ahc_intr: AWAITING_MSG for negotiation, "
2068 		      "but no negotiation needed\n");
2069 	}
2070 
2071 	use_ppr = (tinfo->current.transport_version >= 3) || doppr;
2072 	/* Target initiated PPR is not allowed in the SCSI spec */
2073 	if (devinfo->role == ROLE_TARGET)
2074 		use_ppr = 0;
2075 
2076 	/*
2077 	 * Both the PPR message and SDTR message require the
2078 	 * goal syncrate to be limited to what the target device
2079 	 * is capable of handling (based on whether an LVD->SE
2080 	 * expander is on the bus), so combine these two cases.
2081 	 * Regardless, guarantee that if we are using WDTR and SDTR
2082 	 * messages that WDTR comes first.
2083 	 * messages, WDTR is sent first.
2084 	if (use_ppr || (dosync && !dowide)) {
2085 
2086 		period = tinfo->goal.period;
2087 		ppr_options = tinfo->goal.ppr_options;
2088 		if (use_ppr == 0)
2089 			ppr_options = 0;
2090 		rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2091 					       &ppr_options, devinfo->role);
2092 		offset = tinfo->goal.offset;
2093 		ahc_validate_offset(ahc, tinfo, rate, &offset,
2094 				    use_ppr ? tinfo->goal.width
2095 					    : tinfo->current.width,
2096 				    devinfo->role);
2097 		if (use_ppr) {
2098 			ahc_construct_ppr(ahc, devinfo, period, offset,
2099 					  tinfo->goal.width, ppr_options);
2100 		} else {
2101 			ahc_construct_sdtr(ahc, devinfo, period, offset);
2102 		}
2103 	} else {
2104 		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
2105 	}
2106 }
2107 
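/*
 * The three constructors below emit standard SCSI extended messages.
 * As a sketch of the resulting wire format: an SDTR request is five
 * bytes { MSG_EXTENDED, MSG_EXT_SDTR_LEN, MSG_EXT_SDTR, period, offset },
 * a WDTR request is four bytes ending in the bus width code, and a PPR
 * request is eight bytes { MSG_EXTENDED, MSG_EXT_PPR_LEN, MSG_EXT_PPR,
 * period, reserved (0), offset, bus width, ppr_options }.
 */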
2108 /*
2109  * Build a synchronous negotiation message in our message
2110  * buffer based on the input parameters.
2111  */
2112 static void
2113 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2114 		   u_int period, u_int offset)
2115 {
2116 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2117 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
2118 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
2119 	ahc->msgout_buf[ahc->msgout_index++] = period;
2120 	ahc->msgout_buf[ahc->msgout_index++] = offset;
2121 	ahc->msgout_len += 5;
2122 	if (bootverbose) {
2123 		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
2124 		       ahc_name(ahc), devinfo->channel, devinfo->target,
2125 		       devinfo->lun, period, offset);
2126 	}
2127 }
2128 
2129 /*
2130  * Build a wide negotiation message in our message
2131  * buffer based on the input parameters.
2132  */
2133 static void
2134 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2135 		   u_int bus_width)
2136 {
2137 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2138 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
2139 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
2140 	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2141 	ahc->msgout_len += 4;
2142 	if (bootverbose) {
2143 		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
2144 		       ahc_name(ahc), devinfo->channel, devinfo->target,
2145 		       devinfo->lun, bus_width);
2146 	}
2147 }
2148 
2149 /*
2150  * Build a parallel protocol request message in our message
2151  * buffer based on the input parameters.
2152  */
2153 static void
2154 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2155 		  u_int period, u_int offset, u_int bus_width,
2156 		  u_int ppr_options)
2157 {
2158 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2159 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
2160 	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
2161 	ahc->msgout_buf[ahc->msgout_index++] = period;
2162 	ahc->msgout_buf[ahc->msgout_index++] = 0;
2163 	ahc->msgout_buf[ahc->msgout_index++] = offset;
2164 	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2165 	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
2166 	ahc->msgout_len += 8;
2167 	if (bootverbose) {
2168 		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
2169 		       "offset %x, ppr_options %x\n", ahc_name(ahc),
2170 		       devinfo->channel, devinfo->target, devinfo->lun,
2171 		       bus_width, period, offset, ppr_options);
2172 	}
2173 }
2174 
2175 /*
2176  * Clear any active message state.
2177  */
2178 static void
2179 ahc_clear_msg_state(struct ahc_softc *ahc)
2180 {
2181 	ahc->msgout_len = 0;
2182 	ahc->msgin_index = 0;
2183 	ahc->msg_type = MSG_TYPE_NONE;
2184 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
2185 }
2186 
2187 /*
2188  * Manual message loop handler.  Drives the message-in and message-out phases one byte at a time for both the initiator and target roles.
2189  */
2190 static void
2191 ahc_handle_message_phase(struct ahc_softc *ahc)
2192 {
2193 	struct	ahc_devinfo devinfo;
2194 	u_int	bus_phase;
2195 	int	end_session;
2196 
2197 	ahc_fetch_devinfo(ahc, &devinfo);
2198 	end_session = FALSE;
2199 	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2200 
2201 reswitch:
2202 	switch (ahc->msg_type) {
2203 	case MSG_TYPE_INITIATOR_MSGOUT:
2204 	{
2205 		int lastbyte;
2206 		int phasemis;
2207 		int msgdone;
2208 
2209 		if (ahc->msgout_len == 0)
2210 			panic("HOST_MSG_LOOP interrupt with no active message");
2211 
2212 		phasemis = bus_phase != P_MESGOUT;
2213 		if (phasemis) {
2214 			if (bus_phase == P_MESGIN) {
2215 				/*
2216 				 * Change gears and see if
2217 				 * this message is of interest to
2218 				 * us or should be passed back to
2219 				 * the sequencer.
2220 				 */
2221 				ahc_outb(ahc, CLRSINT1, CLRATNO);
2222 				ahc->send_msg_perror = FALSE;
2223 				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
2224 				ahc->msgin_index = 0;
2225 				goto reswitch;
2226 			}
2227 			end_session = TRUE;
2228 			break;
2229 		}
2230 
2231 		if (ahc->send_msg_perror) {
2232 			ahc_outb(ahc, CLRSINT1, CLRATNO);
2233 			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2234 			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
2235 			break;
2236 		}
2237 
2238 		msgdone	= ahc->msgout_index == ahc->msgout_len;
2239 		if (msgdone) {
2240 			/*
2241 			 * The target has requested a retry.
2242 			 * Re-assert ATN, reset our message index to
2243 			 * 0, and try again.
2244 			 */
2245 			ahc->msgout_index = 0;
2246 			ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
2247 		}
2248 
2249 		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
2250 		if (lastbyte) {
2251 			/* Last byte is signified by dropping ATN */
2252 			ahc_outb(ahc, CLRSINT1, CLRATNO);
2253 		}
2254 
2255 		/*
2256 		 * Clear our interrupt status and present
2257 		 * the next byte on the bus.
2258 		 */
2259 		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2260 		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2261 		break;
2262 	}
2263 	case MSG_TYPE_INITIATOR_MSGIN:
2264 	{
2265 		int phasemis;
2266 		int message_done;
2267 
2268 		phasemis = bus_phase != P_MESGIN;
2269 
2270 		if (phasemis) {
2271 			ahc->msgin_index = 0;
2272 			if (bus_phase == P_MESGOUT
2273 			 && (ahc->send_msg_perror == TRUE
2274 			  || (ahc->msgout_len != 0
2275 			   && ahc->msgout_index == 0))) {
2276 				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2277 				goto reswitch;
2278 			}
2279 			end_session = TRUE;
2280 			break;
2281 		}
2282 
2283 		/* Pull the byte in without acking it */
2284 		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
2285 
2286 		message_done = ahc_parse_msg(ahc, &devinfo);
2287 
2288 		if (message_done) {
2289 			/*
2290 			 * Clear our incoming message buffer in case there
2291 			 * is another message following this one.
2292 			 */
2293 			ahc->msgin_index = 0;
2294 
2295 			/*
2296 			 * If this message elicited a response,
2297 			 * assert ATN so the target takes us to the
2298 			 * message out phase.
2299 			 */
2300 			if (ahc->msgout_len != 0)
2301 				ahc_outb(ahc, SCSISIGO,
2302 					 ahc_inb(ahc, SCSISIGO) | ATNO);
2303 		} else
2304 			ahc->msgin_index++;
2305 
2306 		/* Ack the byte */
2307 		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2308 		ahc_inb(ahc, SCSIDATL);
2309 		break;
2310 	}
2311 	case MSG_TYPE_TARGET_MSGIN:
2312 	{
2313 		int msgdone;
2314 		int msgout_request;
2315 
2316 		if (ahc->msgout_len == 0)
2317 			panic("Target MSGIN with no active message");
2318 
2319 		/*
2320 		 * If we interrupted a mesgout session, the initiator
2321 		 * will not know this until our first REQ.  So, we
2322 		 * only honor mesgout requests after we've sent our
2323 		 * first byte.
2324 		 */
2325 		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
2326 		 && ahc->msgout_index > 0)
2327 			msgout_request = TRUE;
2328 		else
2329 			msgout_request = FALSE;
2330 
2331 		if (msgout_request) {
2332 
2333 			/*
2334 			 * Change gears and see if
2335 			 * this message is of interest to
2336 			 * us or should be passed back to
2337 			 * the sequencer.
2338 			 */
2339 			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
2340 			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
2341 			ahc->msgin_index = 0;
2342 			/* Dummy read to REQ for first byte */
2343 			ahc_inb(ahc, SCSIDATL);
2344 			ahc_outb(ahc, SXFRCTL0,
2345 				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2346 			break;
2347 		}
2348 
2349 		msgdone = ahc->msgout_index == ahc->msgout_len;
2350 		if (msgdone) {
2351 			ahc_outb(ahc, SXFRCTL0,
2352 				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2353 			end_session = TRUE;
2354 			break;
2355 		}
2356 
2357 		/*
2358 		 * Present the next byte on the bus.
2359 		 */
2360 		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2361 		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2362 		break;
2363 	}
2364 	case MSG_TYPE_TARGET_MSGOUT:
2365 	{
2366 		int lastbyte;
2367 		int msgdone;
2368 
2369 		/*
2370 		 * The initiator signals that this is
2371 		 * the last byte by dropping ATN.
2372 		 */
2373 		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
2374 
2375 		/*
2376 		 * Read the latched byte, but turn off SPIOEN first
2377 		 * so that we don't inadvertently cause a REQ for the
2378 		 * next byte.
2379 		 */
2380 		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2381 		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
2382 		msgdone = ahc_parse_msg(ahc, &devinfo);
2383 		if (msgdone == MSGLOOP_TERMINATED) {
2384 			/*
2385 			 * The message is *really* done in that it caused
2386 			 * us to go to bus free.  The sequencer has already
2387 			 * been reset at this point, so pull the ejection
2388 			 * handle.
2389 			 */
2390 			return;
2391 		}
2392 
2393 		ahc->msgin_index++;
2394 
2395 		/*
2396 		 * XXX Read spec about initiator dropping ATN too soon
2397 		 *     and use msgdone to detect it.
2398 		 */
2399 		if (msgdone == MSGLOOP_MSGCOMPLETE) {
2400 			ahc->msgin_index = 0;
2401 
2402 			/*
2403 			 * If this message elicited a response, transition
2404 			 * to the Message in phase and send it.
2405 			 */
2406 			if (ahc->msgout_len != 0) {
2407 				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
2408 				ahc_outb(ahc, SXFRCTL0,
2409 					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2410 				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
2411 				ahc->msgin_index = 0;
2412 				break;
2413 			}
2414 		}
2415 
2416 		if (lastbyte)
2417 			end_session = TRUE;
2418 		else {
2419 			/* Ask for the next byte. */
2420 			ahc_outb(ahc, SXFRCTL0,
2421 				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2422 		}
2423 
2424 		break;
2425 	}
2426 	default:
2427 		panic("Unknown REQINIT message type");
2428 	}
2429 
2430 	if (end_session) {
2431 		ahc_clear_msg_state(ahc);
2432 		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
2433 	} else
2434 		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
2435 }
2436 
2437 /*
2438  * See if we sent a particular extended message to the target.
2439  * If "full" is true, return true only if the target saw the full
2440  * message.  If "full" is false, return true if the target saw at
2441  * least the first byte of the message.
2442  */
2443 static int
2444 ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full)
2445 {
2446 	int found;
2447 	u_int index;
2448 
2449 	found = FALSE;
2450 	index = 0;
2451 
2452 	while (index < ahc->msgout_len) {
2453 		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
2454 
2455 			/* Found a candidate */
2456 			if (ahc->msgout_buf[index+2] == msgtype) {
2457 				u_int end_index;
2458 
2459 				end_index = index + 1
2460 					  + ahc->msgout_buf[index + 1];
2461 				if (full) {
2462 					if (ahc->msgout_index > end_index)
2463 						found = TRUE;
2464 				} else if (ahc->msgout_index > index)
2465 					found = TRUE;
2466 			}
2467 			break;
2468 		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
2469 			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
2470 
2471 			/* Skip tag type and tag id or residue param */
2472 			index += 2;
2473 		} else {
2474 			/* Single byte message */
2475 			index++;
2476 		}
2477 	}
2478 	return (found);
2479 }
2480 
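/*
 * Usage sketch: ahc_handle_msg_reject() later in this file calls, for
 * example, ahc_sent_msg(ahc, MSG_EXT_WDTR, FALSE) to learn whether the
 * rejected message was at least the start of one of our WDTR requests.
 */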
2481 /*
2482  * Wait for a complete incoming message, parse it, and respond accordingly.
2483  */
2484 static int
2485 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2486 {
2487 	struct	ahc_initiator_tinfo *tinfo;
2488 	struct	tmode_tstate *tstate;
2489 	int	reject;
2490 	int	done;
2491 	int	response;
2492 	u_int	targ_scsirate;
2493 
2494 	done = MSGLOOP_IN_PROG;
2495 	response = FALSE;
2496 	reject = FALSE;
2497 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2498 				    devinfo->target, &tstate);
2499 	targ_scsirate = tinfo->scsirate;
2500 
2501 	/*
2502 	 * Parse as much of the message as is available,
2503 	 * rejecting it if we don't support it.  When
2504 	 * the entire message is available and has been
2505 	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
2506 	 * that we have parsed an entire message.
2507 	 *
2508 	 * In the case of extended messages, we accept the length
2509 	 * byte outright and perform more checking once we know the
2510 	 * extended message type.
2511 	 */
2512 	switch (ahc->msgin_buf[0]) {
2513 	case MSG_MESSAGE_REJECT:
2514 		response = ahc_handle_msg_reject(ahc, devinfo);
2515 		/* FALLTHROUGH */
2516 	case MSG_NOOP:
2517 		done = MSGLOOP_MSGCOMPLETE;
2518 		break;
2519 	case MSG_EXTENDED:
2520 	{
2521 		/* Wait for enough of the message to begin validation */
2522 		if (ahc->msgin_index < 2)
2523 			break;
2524 		switch (ahc->msgin_buf[2]) {
2525 		case MSG_EXT_SDTR:
2526 		{
2527 			struct	 ahc_syncrate *syncrate;
2528 			u_int	 period;
2529 			u_int	 ppr_options;
2530 			u_int	 offset;
2531 			u_int	 saved_offset;
2532 
2533 			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
2534 				reject = TRUE;
2535 				break;
2536 			}
2537 
2538 			/*
2539 			 * Wait until we have both args before validating
2540 			 * and acting on this message.
2541 			 *
2542 			 * Add one to MSG_EXT_SDTR_LEN to account for
2543 			 * the extended message preamble.
2544 			 */
2545 			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
2546 				break;
2547 
2548 			period = ahc->msgin_buf[3];
2549 			ppr_options = 0;
2550 			saved_offset = offset = ahc->msgin_buf[4];
2551 			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2552 							   &ppr_options,
2553 							   devinfo->role);
2554 			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
2555 					    targ_scsirate & WIDEXFER,
2556 					    devinfo->role);
2557 			if (bootverbose) {
2558 				printf("(%s:%c:%d:%d): Received "
2559 				       "SDTR period %x, offset %x\n\t"
2560 				       "Filtered to period %x, offset %x\n",
2561 				       ahc_name(ahc), devinfo->channel,
2562 				       devinfo->target, devinfo->lun,
2563 				       ahc->msgin_buf[3], saved_offset,
2564 				       period, offset);
2565 			}
2566 			ahc_set_syncrate(ahc, devinfo,
2567 					 syncrate, period,
2568 					 offset, ppr_options,
2569 					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2570 					 /*paused*/TRUE);
2571 
2572 			/*
2573 			 * See if we initiated Sync Negotiation
2574 			 * and didn't have to fall down to async
2575 			 * transfers.
2576 			 */
2577 			if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/TRUE)) {
2578 				/* We started it */
2579 				if (saved_offset != offset) {
2580 					/* Went too low - force async */
2581 					reject = TRUE;
2582 				}
2583 			} else {
2584 				/*
2585 				 * Send our own SDTR in reply
2586 				 */
2587 				if (bootverbose) {
2588 					printf("(%s:%c:%d:%d): Target "
2589 					       "Initiated SDTR\n",
2590 					       ahc_name(ahc), devinfo->channel,
2591 					       devinfo->target, devinfo->lun);
2592 				}
2593 				ahc->msgout_index = 0;
2594 				ahc->msgout_len = 0;
2595 				ahc_construct_sdtr(ahc, devinfo,
2596 						   period, offset);
2597 				ahc->msgout_index = 0;
2598 				response = TRUE;
2599 			}
2600 			done = MSGLOOP_MSGCOMPLETE;
2601 			break;
2602 		}
2603 		case MSG_EXT_WDTR:
2604 		{
2605 			u_int bus_width;
2606 			u_int saved_width;
2607 			u_int sending_reply;
2608 
2609 			sending_reply = FALSE;
2610 			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
2611 				reject = TRUE;
2612 				break;
2613 			}
2614 
2615 			/*
2616 			 * Wait until we have our arg before validating
2617 			 * and acting on this message.
2618 			 *
2619 			 * Add one to MSG_EXT_WDTR_LEN to account for
2620 			 * the extended message preamble.
2621 			 */
2622 			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
2623 				break;
2624 
2625 			bus_width = ahc->msgin_buf[3];
2626 			saved_width = bus_width;
2627 			ahc_validate_width(ahc, tinfo, &bus_width,
2628 					   devinfo->role);
2629 			if (bootverbose) {
2630 				printf("(%s:%c:%d:%d): Received WDTR "
2631 				       "%x filtered to %x\n",
2632 				       ahc_name(ahc), devinfo->channel,
2633 				       devinfo->target, devinfo->lun,
2634 				       saved_width, bus_width);
2635 			}
2636 
2637 			if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/TRUE)) {
2638 				/*
2639 				 * Don't send a WDTR back to the
2640 				 * target, since we asked first.
2641 				 * If the width went higher than our
2642 				 * request, reject it.
2643 				 */
2644 				if (saved_width > bus_width) {
2645 					reject = TRUE;
2646 					printf("(%s:%c:%d:%d): requested %dBit "
2647 					       "transfers.  Rejecting...\n",
2648 					       ahc_name(ahc), devinfo->channel,
2649 					       devinfo->target, devinfo->lun,
2650 					       8 * (0x01 << bus_width));
2651 					bus_width = 0;
2652 				}
2653 			} else {
2654 				/*
2655 				 * Send our own WDTR in reply
2656 				 */
2657 				if (bootverbose) {
2658 					printf("(%s:%c:%d:%d): Target "
2659 					       "Initiated WDTR\n",
2660 					       ahc_name(ahc), devinfo->channel,
2661 					       devinfo->target, devinfo->lun);
2662 				}
2663 				ahc->msgout_index = 0;
2664 				ahc->msgout_len = 0;
2665 				ahc_construct_wdtr(ahc, devinfo, bus_width);
2666 				ahc->msgout_index = 0;
2667 				response = TRUE;
2668 				sending_reply = TRUE;
2669 			}
2670 			ahc_set_width(ahc, devinfo, bus_width,
2671 				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2672 				      /*paused*/TRUE);
2673 			/* After a wide message, we are async */
2674 			ahc_set_syncrate(ahc, devinfo,
2675 					 /*syncrate*/NULL, /*period*/0,
2676 					 /*offset*/0, /*ppr_options*/0,
2677 					 AHC_TRANS_ACTIVE, /*paused*/TRUE);
2678 			if (sending_reply == FALSE && reject == FALSE) {
2679 
2680 				if (tinfo->goal.period) {
2681 					ahc->msgout_index = 0;
2682 					ahc->msgout_len = 0;
2683 					ahc_build_transfer_msg(ahc, devinfo);
2684 					ahc->msgout_index = 0;
2685 					response = TRUE;
2686 				}
2687 			}
2688 			done = MSGLOOP_MSGCOMPLETE;
2689 			break;
2690 		}
2691 		case MSG_EXT_PPR:
2692 		{
2693 			struct	ahc_syncrate *syncrate;
2694 			u_int	period;
2695 			u_int	offset;
2696 			u_int	bus_width;
2697 			u_int	ppr_options;
2698 			u_int	saved_width;
2699 			u_int	saved_offset;
2700 			u_int	saved_ppr_options;
2701 
2702 			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
2703 				reject = TRUE;
2704 				break;
2705 			}
2706 
2707 			/*
2708 			 * Wait until we have all args before validating
2709 			 * and acting on this message.
2710 			 *
2711 			 * Add one to MSG_EXT_PPR_LEN to account for
2712 			 * the extended message preamble.
2713 			 */
2714 			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
2715 				break;
2716 
2717 			period = ahc->msgin_buf[3];
2718 			offset = ahc->msgin_buf[5];
2719 			bus_width = ahc->msgin_buf[6];
2720 			saved_width = bus_width;
2721 			ppr_options = ahc->msgin_buf[7];
2722 			/*
2723 			 * According to the spec, a DT only
2724 			 * period factor with no DT option
2725 			 * set implies async.
2726 			 */
2727 			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
2728 			 && period == 9)
2729 				offset = 0;
2730 			saved_ppr_options = ppr_options;
2731 			saved_offset = offset;
2732 
2733 			/*
2734 			 * Mask out any options we don't support
2735 			 * on any controller.  Transfer options are
2736 			 * only available if we are negotiating wide.
2737 			 */
2738 			ppr_options &= MSG_EXT_PPR_DT_REQ;
2739 			if (bus_width == 0)
2740 				ppr_options = 0;
2741 
2742 			ahc_validate_width(ahc, tinfo, &bus_width,
2743 					   devinfo->role);
2744 			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2745 							   &ppr_options,
2746 							   devinfo->role);
2747 			ahc_validate_offset(ahc, tinfo, syncrate,
2748 					    &offset, bus_width,
2749 					    devinfo->role);
2750 
2751 			if (ahc_sent_msg(ahc, MSG_EXT_PPR, /*full*/TRUE)) {
2752 				/*
2753 				 * If we are unable to do any of the
2754 				 * requested options (we went too low),
2755 				 * then we'll have to reject the message.
2756 				 */
2757 				if (saved_width > bus_width
2758 				 || saved_offset != offset
2759 				 || saved_ppr_options != ppr_options) {
2760 					reject = TRUE;
2761 					period = 0;
2762 					offset = 0;
2763 					bus_width = 0;
2764 					ppr_options = 0;
2765 					syncrate = NULL;
2766 				}
2767 			} else {
2768 				if (devinfo->role != ROLE_TARGET)
2769 					printf("(%s:%c:%d:%d): Target "
2770 					       "Initiated PPR\n",
2771 					       ahc_name(ahc), devinfo->channel,
2772 					       devinfo->target, devinfo->lun);
2773 				else
2774 					printf("(%s:%c:%d:%d): Initiator "
2775 					       "Initiated PPR\n",
2776 					       ahc_name(ahc), devinfo->channel,
2777 					       devinfo->target, devinfo->lun);
2778 				ahc->msgout_index = 0;
2779 				ahc->msgout_len = 0;
2780 				ahc_construct_ppr(ahc, devinfo, period, offset,
2781 						  bus_width, ppr_options);
2782 				ahc->msgout_index = 0;
2783 				response = TRUE;
2784 			}
2785 			if (bootverbose) {
2786 				printf("(%s:%c:%d:%d): Received PPR width %x, "
2787 				       "period %x, offset %x, options %x\n"
2788 				       "\tFiltered to width %x, period %x, "
2789 				       "offset %x, options %x\n",
2790 				       ahc_name(ahc), devinfo->channel,
2791 				       devinfo->target, devinfo->lun,
2792 				       ahc->msgin_buf[3], saved_width,
2793 				       saved_offset, saved_ppr_options,
2794 				       bus_width, period, offset, ppr_options);
2795 			}
2796 			ahc_set_width(ahc, devinfo, bus_width,
2797 				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2798 				      /*paused*/TRUE);
2799 			ahc_set_syncrate(ahc, devinfo,
2800 					 syncrate, period,
2801 					 offset, ppr_options,
2802 					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2803 					 /*paused*/TRUE);
2804 			done = MSGLOOP_MSGCOMPLETE;
2805 			break;
2806 		}
2807 		default:
2808 			/* Unknown extended message.  Reject it. */
2809 			reject = TRUE;
2810 			break;
2811 		}
2812 		break;
2813 	}
2814 	case MSG_BUS_DEV_RESET:
2815 		ahc_handle_devreset(ahc, devinfo,
2816 				    CAM_BDR_SENT,
2817 				    "Bus Device Reset Received",
2818 				    /*verbose_level*/0);
2819 		restart_sequencer(ahc);
2820 		done = MSGLOOP_TERMINATED;
2821 		break;
2822 	case MSG_ABORT_TAG:
2823 	case MSG_ABORT:
2824 	case MSG_CLEAR_QUEUE:
2825 #ifdef AHC_TARGET_MODE
2826 		/* Target mode messages */
2827 		if (devinfo->role != ROLE_TARGET) {
2828 			reject = TRUE;
2829 			break;
2830 		}
2831 		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
2832 			       devinfo->lun,
2833 			       ahc->msgin_buf[0] == MSG_ABORT_TAG
2834 						  ? SCB_LIST_NULL
2835 						  : ahc_inb(ahc, INITIATOR_TAG),
2836 			       ROLE_TARGET, CAM_REQ_ABORTED);
2837 
2838 		tstate = ahc->enabled_targets[devinfo->our_scsiid];
2839 		if (tstate != NULL) {
2840 			struct tmode_lstate* lstate;
2841 
2842 			lstate = tstate->enabled_luns[devinfo->lun];
2843 			if (lstate != NULL) {
2844 				ahc_queue_lstate_event(ahc, lstate,
2845 						       devinfo->our_scsiid,
2846 						       ahc->msgin_buf[0],
2847 						       /*arg*/0);
2848 				ahc_send_lstate_events(ahc, lstate);
2849 			}
2850 		}
2851 		done = MSGLOOP_MSGCOMPLETE;
2852 		break;
2853 #endif
2854 	case MSG_TERM_IO_PROC:
2855 	default:
2856 		reject = TRUE;
2857 		break;
2858 	}
2859 
2860 	if (reject) {
2861 		/*
2862 		 * Setup to reject the message.
2863 		 */
2864 		ahc->msgout_index = 0;
2865 		ahc->msgout_len = 1;
2866 		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
2867 		done = MSGLOOP_MSGCOMPLETE;
2868 		response = TRUE;
2869 	}
2870 
2871 	if (done != MSGLOOP_IN_PROG && !response)
2872 		/* Clear the outgoing message buffer */
2873 		ahc->msgout_len = 0;
2874 
2875 	return (done);
2876 }
2877 
2878 /*
2879  * Process a message reject message.
2880  */
2881 static int
2882 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2883 {
2884 	/*
2885 	 * What we care about here is if we had an
2886 	 * outstanding SDTR or WDTR message for this
2887 	 * target.  If we did, this is a signal that
2888 	 * the target is refusing negotiation.
2889 	 */
2890 	struct scb *scb;
2891 	struct ahc_initiator_tinfo *tinfo;
2892 	struct tmode_tstate *tstate;
2893 	u_int scb_index;
2894 	u_int last_msg;
2895 	int   response = 0;
2896 
2897 	scb_index = ahc_inb(ahc, SCB_TAG);
2898 	scb = ahc_lookup_scb(ahc, scb_index);
2899 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
2900 				    devinfo->our_scsiid,
2901 				    devinfo->target, &tstate);
2902 	/* Might be necessary */
2903 	last_msg = ahc_inb(ahc, LAST_MSG);
2904 
2905 	if (ahc_sent_msg(ahc, MSG_EXT_PPR, /*full*/FALSE)) {
2906 		/*
2907 		 * Target does not support the PPR message.
2908 		 * Attempt to negotiate SPI-2 style.
2909 		 */
2910 		if (bootverbose) {
2911 			printf("(%s:%c:%d:%d): PPR Rejected. "
2912 			       "Trying WDTR/SDTR\n",
2913 			       ahc_name(ahc), devinfo->channel,
2914 			       devinfo->target, devinfo->lun);
2915 		}
2916 		tinfo->goal.ppr_options = 0;
2917 		tinfo->current.transport_version = 2;
2918 		tinfo->goal.transport_version = 2;
2919 		ahc->msgout_index = 0;
2920 		ahc->msgout_len = 0;
2921 		ahc_build_transfer_msg(ahc, devinfo);
2922 		ahc->msgout_index = 0;
2923 		response = 1;
2924 	} else if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/FALSE)) {
2925 
2926 		/* note 8bit xfers */
2927 		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
2928 		       "8bit transfers\n", ahc_name(ahc),
2929 		       devinfo->channel, devinfo->target, devinfo->lun);
2930 		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
2931 			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2932 			      /*paused*/TRUE);
2933 		/*
2934 		 * No need to clear the sync rate.  If the target
2935 		 * did not accept the command, our syncrate is
2936 		 * unaffected.  If the target started the negotiation,
2937 		 * but rejected our response, we already cleared the
2938 		 * sync rate before sending our WDTR.
2939 		 */
2940 		if (tinfo->goal.period) {
2941 
2942 			/* Start the sync negotiation */
2943 			ahc->msgout_index = 0;
2944 			ahc->msgout_len = 0;
2945 			ahc_build_transfer_msg(ahc, devinfo);
2946 			ahc->msgout_index = 0;
2947 			response = 1;
2948 		}
2949 	} else if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/FALSE)) {
2950 		/* note asynch xfers and clear flag */
2951 		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
2952 				 /*offset*/0, /*ppr_options*/0,
2953 				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2954 				 /*paused*/TRUE);
2955 		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
2956 		       "Using asynchronous transfers\n",
2957 		       ahc_name(ahc), devinfo->channel,
2958 		       devinfo->target, devinfo->lun);
2959 	} else if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) {
2960 
2961 		printf("(%s:%c:%d:%d): refuses tagged commands.  Performing "
2962 		       "non-tagged I/O\n", ahc_name(ahc),
2963 		       devinfo->channel, devinfo->target, devinfo->lun);
2964 		ahc_set_tags(ahc, devinfo, FALSE);
2965 
2966 		/*
2967 		 * Resend the identify for this CCB as the target
2968 		 * may believe that the selection is invalid otherwise.
2969 		 */
2970 		ahc_outb(ahc, SCB_CONTROL,
2971 			 ahc_inb(ahc, SCB_CONTROL) & ~MSG_SIMPLE_Q_TAG);
2972 	 	scb->hscb->control &= ~MSG_SIMPLE_Q_TAG;
2973 		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
2974 					/*type*/MSG_SIMPLE_Q_TAG);
2975 		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
2976 		ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
2977 
2978 		/*
2979 		 * This transaction is now at the head of
2980 		 * the untagged queue for this target.
2981 		 */
2982 		if ((ahc->features & AHC_SCB_BTT) == 0) {
2983 			struct scb_tailq *untagged_q;
2984 
2985 			untagged_q = &(ahc->untagged_queues[devinfo->target]);
2986 			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
2987 			scb->flags |= SCB_UNTAGGEDQ;
2988 		}
2989 		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
2990 			     scb->hscb->tag);
2991 
2992 		/*
2993 		 * Requeue all tagged commands for this target
2994 		 * currently in our possession so they can be
2995 		 * converted to untagged commands.
2996 		 */
2997 		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
2998 				   SCB_GET_CHANNEL(ahc, scb),
2999 				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3000 				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
3001 				   SEARCH_COMPLETE);
3002 	} else {
3003 		/*
3004 		 * Otherwise, we ignore it.
3005 		 */
3006 		printf("%s:%c:%d: Message reject for %x -- ignored\n",
3007 		       ahc_name(ahc), devinfo->channel, devinfo->target,
3008 		       last_msg);
3009 	}
3010 	return (response);
3011 }
3012 
3013 /*
3014  * Process an ignore wide residue message.
3015  */
3016 static void
3017 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3018 {
3019 	u_int scb_index;
3020 	struct scb *scb;
3021 
3022 	scb_index = ahc_inb(ahc, SCB_TAG);
3023 	scb = ahc_lookup_scb(ahc, scb_index);
3024 	/*
3025 	 * XXX Actually check data direction in the sequencer?
3026 	 * Perhaps add datadir to some spare bits in the hscb?
3027 	 */
3028 	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
3029 	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
3030 		/*
3031 		 * Ignore the message if we haven't
3032 		 * seen an appropriate data phase yet.
3033 		 */
3034 	} else {
3035 		/*
3036 		 * If the residual occurred on the last
3037 		 * transfer and the transfer request was
3038 		 * expected to end on an odd count, do
3039 		 * nothing.  Otherwise, subtract a byte
3040 		 * and update the residual count accordingly.
3041 		 */
3042 		uint32_t sgptr;
3043 
3044 		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3045 		if ((sgptr & SG_LIST_NULL) != 0
3046 		 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
3047 			/*
3048 			 * If the residual occurred on the last
3049 			 * transfer and the transfer request was
3050 			 * expected to end on an odd count, do
3051 			 * nothing.
3052 			 */
3053 		} else {
3054 			struct ahc_dma_seg *sg;
3055 			uint32_t data_cnt;
3056 			uint32_t data_addr;
3057 
3058 			/* Pull in the rest of the sgptr */
3059 			sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3060 			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3061 			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
3062 			sgptr &= SG_PTR_MASK;
3063 			data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
3064 				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
3065 				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
3066 
3067 			data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
3068 				  | (ahc_inb(ahc, SHADDR + 2) << 16)
3069 				  | (ahc_inb(ahc, SHADDR + 1) << 8)
3070 				  | (ahc_inb(ahc, SHADDR));
3071 
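			/*
			 * Undo the byte the target told us to ignore:
			 * grow the residual count by one and back the
			 * shadow host address up by one.
			 */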
3072 			data_cnt += 1;
3073 			data_addr -= 1;
3074 
3075 			sg = ahc_sg_bus_to_virt(scb, sgptr);
3076 			/*
3077 			 * The residual sg ptr points to the next S/G
3078 			 * to load so we must go back one.
3079 			 */
3080 			sg--;
3081 			if (sg != scb->sg_list
3082 			 && (sg->len & AHC_SG_LEN_MASK) < data_cnt) {
3083 
3084 				sg--;
3085 				data_cnt = 1 | (sg->len & AHC_DMA_LAST_SEG);
3086 				data_addr = sg->addr
3087 					  + (sg->len & AHC_SG_LEN_MASK) - 1;
3088 
3089 				/*
3090 				 * Increment sg so it points to the
3091 				 * "next" sg.
3092 				 */
3093 				sg++;
3094 				sgptr = ahc_sg_virt_to_bus(scb, sg);
3095 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
3096 					 sgptr >> 24);
3097 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
3098 					 sgptr >> 16);
3099 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
3100 					 sgptr >> 8);
3101 				ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
3102 			}
3103 
3104 /* XXX What about high address byte??? */
3105 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
3106 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
3107 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
3108 			ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
3109 
3110 /* XXX Perhaps better to just keep the saved address in sram */
3111 			if ((ahc->features & AHC_ULTRA2) != 0) {
3112 				ahc_outb(ahc, HADDR + 3, data_addr >> 24);
3113 				ahc_outb(ahc, HADDR + 2, data_addr >> 16);
3114 				ahc_outb(ahc, HADDR + 1, data_addr >> 8);
3115 				ahc_outb(ahc, HADDR, data_addr);
3116 				ahc_outb(ahc, DFCNTRL, PRELOADEN);
3117 				ahc_outb(ahc, SXFRCTL0,
3118 					 ahc_inb(ahc, SXFRCTL0) | CLRCHN);
3119 			} else {
3120 				ahc_outb(ahc, HADDR + 3, data_addr >> 24);
3121 				ahc_outb(ahc, HADDR + 2, data_addr >> 16);
3122 				ahc_outb(ahc, HADDR + 1, data_addr >> 8);
3123 				ahc_outb(ahc, HADDR, data_addr);
3124 			}
3125 		}
3126 	}
3127 }
3128 
3129 /*
3130  * Handle the effects of issuing a bus device reset message.
3131  */
3132 static void
3133 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3134 		    cam_status status, char *message, int verbose_level)
3135 {
3136 #ifdef AHC_TARGET_MODE
3137 	struct tmode_tstate* tstate;
3138 	u_int lun;
3139 #endif
3140 	int found;
3141 
3142 	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3143 			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
3144 			       status);
3145 
3146 #ifdef AHC_TARGET_MODE
3147 	/*
3148 	 * Send an immediate notify ccb to all target mode peripheral
3149 	 * drivers affected by this action.
3150 	 */
3151 	tstate = ahc->enabled_targets[devinfo->our_scsiid];
3152 	if (tstate != NULL) {
3153 		for (lun = 0; lun <= 7; lun++) {
3154 			struct tmode_lstate* lstate;
3155 
3156 			lstate = tstate->enabled_luns[lun];
3157 			if (lstate == NULL)
3158 				continue;
3159 
3160 			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
3161 					       MSG_BUS_DEV_RESET, /*arg*/0);
3162 			ahc_send_lstate_events(ahc, lstate);
3163 		}
3164 	}
3165 #endif
3166 
3167 	/*
3168 	 * Go back to async/narrow transfers and renegotiate.
3169 	 */
3170 	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3171 		      AHC_TRANS_CUR, /*paused*/TRUE);
3172 	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
3173 			 /*period*/0, /*offset*/0, /*ppr_options*/0,
3174 			 AHC_TRANS_CUR, /*paused*/TRUE);
3175 
3176 	ahc_send_async(ahc, devinfo->channel, devinfo->target,
3177 		       CAM_LUN_WILDCARD, AC_SENT_BDR);
3178 
3179 	if (message != NULL
3180 	 && (verbose_level <= bootverbose))
3181 		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
3182 		       message, devinfo->channel, devinfo->target, found);
3183 }
3184 
3185 #ifdef AHC_TARGET_MODE
3186 void
3187 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3188 {
3189 	/*
3190 	 * To facilitate adding multiple messages together,
3191 	 * each routine should increment the index and len
3192 	 * variables instead of setting them explicitly.
3193 	 */
3194 	ahc->msgout_index = 0;
3195 	ahc->msgout_len = 0;
3196 
3197 	if ((ahc->targ_msg_req & devinfo->target_mask) != 0)
3198 		ahc_build_transfer_msg(ahc, devinfo);
3199 	else
3200 		panic("ahc_intr: AWAITING target message with no message");
3201 
3202 	ahc->msgout_index = 0;
3203 	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3204 }
3205 #endif
3206 /**************************** Initialization **********************************/
3207 /*
3208  * Allocate a controller structure for a new device
3209  * and perform initial initialization.
3210  */
3211 struct ahc_softc *
3212 ahc_alloc(void *platform_arg, char *name)
3213 {
3214 	struct  ahc_softc *ahc;
3215 	int	i;
3216 
3217 	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
3218 	if (!ahc) {
3219 		printf("aic7xxx: cannot malloc softc!\n");
3220 		free(name, M_DEVBUF);
3221 		return NULL;
3222 	}
3223 	memset(ahc, 0, sizeof(*ahc));
3224 	LIST_INIT(&ahc->pending_scbs);
3225 	/* We don't know our unit number until the OSM sets it */
3226 	ahc->name = name;
3227 	for (i = 0; i < 16; i++)
3228 		TAILQ_INIT(&ahc->untagged_queues[i]);
3229 	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
3230 		ahc_free(ahc);
3231 		ahc = NULL;
3232 	}
3233 	return (ahc);
3234 }
3235 
3236 int
3237 ahc_softc_init(struct ahc_softc *ahc, struct ahc_probe_config *config)
3238 {
3239 
3240 	ahc->chip = config->chip;
3241 	ahc->features = config->features;
3242 	ahc->bugs = config->bugs;
3243 	ahc->flags = config->flags;
3244 	ahc->channel = config->channel;
3245 	ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS) | INTEN;
3246 	ahc->description = config->description;
3247 	/* The IRQMS bit is only valid on VL and EISA chips */
3248 	if ((ahc->chip & AHC_PCI) != 0)
3249 		ahc->unpause &= ~IRQMS;
3250 	ahc->pause = ahc->unpause | PAUSE;
3251 	/* XXX The shared scb data stuff should be deprecated */
3252 	if (ahc->scb_data == NULL) {
3253 		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
3254 				       M_DEVBUF, M_NOWAIT);
3255 		if (ahc->scb_data == NULL)
3256 			return (ENOMEM);
3257 		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
3258 	}
3259 
3260 	return (0);
3261 }
3262 
3263 void
3264 ahc_softc_insert(struct ahc_softc *ahc)
3265 {
3266 	struct ahc_softc *list_ahc;
3267 
3268 #ifdef AHC_SUPPORT_PCI
3269 	/*
3270 	 * Second Function PCI devices need to inherit some
3271 	 * settings from function 0.  We assume that function 0
3272 	 * will always be found prior to function 1.
3273 	 */
3274 	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
3275 	 && ahc_get_pci_function(ahc->dev_softc) == 1) {
3276 		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
3277 			ahc_dev_softc_t list_pci;
3278 			ahc_dev_softc_t pci;
3279 
3280 			list_pci = list_ahc->dev_softc;
3281 			pci = ahc->dev_softc;
3282 			if (ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)
3283 			 && ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
3284 			 && ahc_get_pci_function(list_pci) == 0) {
3285 				ahc->flags &= ~AHC_BIOS_ENABLED;
3286 				ahc->flags |=
3287 				    list_ahc->flags & AHC_BIOS_ENABLED;
3288 				ahc->flags &= ~AHC_CHANNEL_B_PRIMARY;
3289 				ahc->flags |=
3290 				    list_ahc->flags & AHC_CHANNEL_B_PRIMARY;
3291 				break;
3292 			}
3293 		}
3294 	}
3295 #endif
3296 
3297 	/*
3298 	 * Insertion sort into our list of softcs.
3299 	 */
3300 	list_ahc = TAILQ_FIRST(&ahc_tailq);
3301 	while (list_ahc != NULL
3302 	    && ahc_softc_comp(list_ahc, ahc) <= 0)
3303 		list_ahc = TAILQ_NEXT(list_ahc, links);
3304 	if (list_ahc != NULL)
3305 		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
3306 	else
3307 		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
3308 	ahc->init_level++;
3309 }
3310 
3311 void
3312 ahc_set_unit(struct ahc_softc *ahc, int unit)
3313 {
3314 	ahc->unit = unit;
3315 }
3316 
3317 void
3318 ahc_set_name(struct ahc_softc *ahc, char *name)
3319 {
3320 	if (ahc->name != NULL)
3321 		free(ahc->name, M_DEVBUF);
3322 	ahc->name = name;
3323 }
3324 
3325 void
3326 ahc_free(struct ahc_softc *ahc)
3327 {
3328 	ahc_fini_scbdata(ahc);
3329 	switch (ahc->init_level) {
3330 	case 4:
3331 		ahc_shutdown(ahc);
3332 		TAILQ_REMOVE(&ahc_tailq, ahc, links);
3333 		/* FALLTHROUGH */
3334 	case 3:
3335 		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
3336 				  ahc->shared_data_dmamap);
3337 		/* FALLTHROUGH */
3338 	case 2:
3339 		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
3340 				ahc->shared_data_dmamap);
3341 		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
3342 				   ahc->shared_data_dmamap);
3343 		/* FALLTHROUGH */
3344 	case 1:
3345 #ifndef __linux__
3346 		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
3347 #endif
3348 		break;
3349 	}
3350 
3351 	ahc_platform_free(ahc);
3352 #if XXX
3353 	for () {
3354 		ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id,
3355 				char channel, int force);
3356 	}
3357 #endif
3358 	if (ahc->name != NULL)
3359 		free(ahc->name, M_DEVBUF);
3360 	free(ahc, M_DEVBUF);
3361 	return;
3362 }
3363 
3364 void
3365 ahc_shutdown(void *arg)
3366 {
3367 	struct	ahc_softc *ahc;
3368 	int	i;
3369 
3370 	ahc = (struct ahc_softc *)arg;
3371 
3372 	/* This will reset most registers to 0, but not all */
3373 	ahc_reset(ahc);
3374 	ahc_outb(ahc, SCSISEQ, 0);
3375 	ahc_outb(ahc, SXFRCTL0, 0);
3376 	ahc_outb(ahc, DSPCISTATUS, 0);
3377 
3378 	for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++)
3379 		ahc_outb(ahc, i, 0);
3380 }
3381 
3382 /*
3383  * Reset the controller and record some information about it
3384  * that is only available just after a reset.
3385  */
3386 int
3387 ahc_reset(struct ahc_softc *ahc)
3388 {
3389 	u_int	sblkctl;
3390 	u_int	sxfrctl1_a, sxfrctl1_b;
3391 	int	wait;
3392 
3393 	/*
3394 	 * Preserve the value of the SXFRCTL1 register for all channels.
3395 	 * It contains settings that affect termination and we don't want
3396 	 * to disturb the integrity of the bus.
3397 	 */
3398 	pause_sequencer(ahc);
3399 	sxfrctl1_b = 0;
3400 	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
3401 		u_int sblkctl;
3402 
3403 		/*
3404 		 * Save channel B's settings in case this chip
3405 		 * is setup for TWIN channel operation.
3406 		 */
3407 		sblkctl = ahc_inb(ahc, SBLKCTL);
3408 		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3409 		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
3410 		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3411 	}
3412 	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
3413 
3414 	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
3415 
3416 	/*
3417 	 * Ensure that the reset has finished by waiting for CHIPRSTACK
3418 	 */
3419 	wait = 1000;
3420 	do {
3421 		ahc_delay(1000);
3422 	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
3423 
3424 	if (wait == 0) {
3425 		printf("%s: WARNING - Failed chip reset!  "
3426 		       "Trying to initialize anyway.\n", ahc_name(ahc));
3427 		ahc_outb(ahc, HCNTRL, ahc->pause);
3428 	}
3429 
3430 	/* Determine channel configuration */
3431 	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
3432 	/* No Twin Channel PCI cards */
3433 	if ((ahc->chip & AHC_PCI) != 0)
3434 		sblkctl &= ~SELBUSB;
3435 	switch (sblkctl) {
3436 	case 0:
3437 		/* Single Narrow Channel */
3438 		break;
3439 	case 2:
3440 		/* Wide Channel */
3441 		ahc->features |= AHC_WIDE;
3442 		break;
3443 	case 8:
3444 		/* Twin Channel */
3445 		ahc->features |= AHC_TWIN;
3446 		break;
3447 	default:
3448 		printf(" Unsupported adapter type.  Ignoring\n");
3449 		return(-1);
3450 	}
3451 
3452 	/*
3453 	 * Reload sxfrctl1.
3454 	 *
3455 	 * We must always initialize STPWEN to 1 before we
3456 	 * restore the saved values.  STPWEN is initialized
3457 	 * to a tri-state condition which can only be cleared
3458 	 * by turning it on.
3459 	 */
3460 	if ((ahc->features & AHC_TWIN) != 0) {
3461 		u_int sblkctl;
3462 
3463 		sblkctl = ahc_inb(ahc, SBLKCTL);
3464 		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3465 		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
3466 		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3467 	}
3468 	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
3469 
3470 #ifdef AHC_DUMP_SEQ
3471 	if (ahc->init_level == 0)
3472 		ahc_dumpseq(ahc);
3473 #endif
3474 
3475 	return (0);
3476 }
3477 
3478 /*
3479  * Determine the number of SCBs available on the controller by probing each candidate SCB and verifying it reads back without aliasing onto SCB 0
3480  */
3481 int
3482 ahc_probe_scbs(struct ahc_softc *ahc) {
3483 	int i;
3484 
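	/*
	 * Write each candidate SCB's index into the first byte of its
	 * storage and read it back.  Also verify that SCB 0 still holds
	 * zero, so mirrored (aliased) SCB RAM is not counted twice.
	 */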
3485 	for (i = 0; i < AHC_SCB_MAX; i++) {
3486 		ahc_outb(ahc, SCBPTR, i);
3487 		ahc_outb(ahc, SCB_BASE, i);
3488 		if (ahc_inb(ahc, SCB_BASE) != i)
3489 			break;
3490 		ahc_outb(ahc, SCBPTR, 0);
3491 		if (ahc_inb(ahc, SCB_BASE) != 0)
3492 			break;
3493 	}
3494 	return (i);
3495 }
3496 
3497 void
3498 ahc_init_probe_config(struct ahc_probe_config *probe_config)
3499 {
3500 	probe_config->description = NULL;
3501 	probe_config->channel = 'A';
3502 	probe_config->channel_b = 'B';
3503 	probe_config->chip = AHC_NONE;
3504 	probe_config->features = AHC_FENONE;
3505 	probe_config->bugs = AHC_BUGNONE;
3506 	probe_config->flags = AHC_FNONE;
3507 }
3508 
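/*
 * Callback used for all single-segment DMA loads in this file: it
 * simply records the bus address of the one and only segment.
 */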
3509 static void
3510 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3511 {
3512 	bus_addr_t *baddr;
3513 
3514 	baddr = (bus_addr_t *)arg;
3515 	*baddr = segs->ds_addr;
3516 }
3517 
3518 static int
3519 ahc_init_scbdata(struct ahc_softc *ahc)
3520 {
3521 	struct scb_data *scb_data;
3522 	int i;
3523 
3524 	scb_data = ahc->scb_data;
3525 	SLIST_INIT(&scb_data->free_scbs);
3526 	SLIST_INIT(&scb_data->sg_maps);
3527 
3528 	/* Allocate SCB resources */
3529 	scb_data->scbarray =
3530 	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX,
3531 				 M_DEVBUF, M_NOWAIT);
3532 	if (scb_data->scbarray == NULL)
3533 		return (ENOMEM);
3534 	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX);
3535 
3536 	/* Determine the number of hardware SCBs and initialize them */
3537 
3538 	scb_data->maxhscbs = ahc_probe_scbs(ahc);
3539 	if ((ahc->flags & AHC_PAGESCBS) != 0) {
3540 		/* SCB 0 heads the free list */
3541 		ahc_outb(ahc, FREE_SCBH, 0);
3542 	} else {
3543 		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
3544 	}
3545 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
3546 		ahc_outb(ahc, SCBPTR, i);
3547 
3548 		/* Clear the control byte. */
3549 		ahc_outb(ahc, SCB_CONTROL, 0);
3550 
3551 		/* Set the next pointer */
3552 		if ((ahc->flags & AHC_PAGESCBS) != 0)
3553 			ahc_outb(ahc, SCB_NEXT, i+1);
3554 		else
3555 			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
3556 
3557 		/* Make the tag number invalid */
3558 		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
3559 	}
3560 
3561 	/* Make sure that the last SCB terminates the free list */
3562 	ahc_outb(ahc, SCBPTR, i-1);
3563 	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
3564 
3565 	/* Ensure we clear the 0 SCB's control byte. */
3566 	ahc_outb(ahc, SCBPTR, 0);
3567 	ahc_outb(ahc, SCB_CONTROL, 0);
3568 
3569 	scb_data->maxhscbs = i;
3570 
3571 	if (ahc->scb_data->maxhscbs == 0)
3572 		panic("%s: No SCB space found", ahc_name(ahc));
3573 
3574 	/*
3575 	 * Create our DMA tags.  These tags define the kinds of device
3576 	 * accessible memory allocations and memory mappings we will
3577 	 * need to perform during normal operation.
3578 	 *
3579 	 * Unless we need to further restrict the allocation, we rely
3580 	 * on the restrictions of the parent dmat, hence the common
3581 	 * use of MAXADDR and MAXSIZE.
3582 	 */
3583 
3584 	/* DMA tag for our hardware scb structures */
3585 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3586 			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3587 			       /*highaddr*/BUS_SPACE_MAXADDR,
3588 			       /*filter*/NULL, /*filterarg*/NULL,
3589 			       AHC_SCB_MAX * sizeof(struct hardware_scb),
3590 			       /*nsegments*/1,
3591 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3592 			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
3593 		goto error_exit;
3594 	}
3595 
3596 	scb_data->init_level++;
3597 
3598 	/* Allocation for our hardware SCBs */
3599 	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
3600 			     (void **)&scb_data->hscbs,
3601 			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
3602 		goto error_exit;
3603 	}
3604 
3605 	scb_data->init_level++;
3606 
3607 	/* And permanently map them */
3608 	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
3609 			scb_data->hscbs,
3610 			AHC_SCB_MAX * sizeof(struct hardware_scb),
3611 			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);
3612 
3613 	scb_data->init_level++;
3614 
3615 	/* DMA tag for our sense buffers */
3616 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3617 			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3618 			       /*highaddr*/BUS_SPACE_MAXADDR,
3619 			       /*filter*/NULL, /*filterarg*/NULL,
3620 			       AHC_SCB_MAX * sizeof(struct scsi_sense_data),
3621 			       /*nsegments*/1,
3622 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3623 			       /*flags*/0, &scb_data->sense_dmat) != 0) {
3624 		goto error_exit;
3625 	}
3626 
3627 	scb_data->init_level++;
3628 
3629 	/* Allocate them */
3630 	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
3631 			     (void **)&scb_data->sense,
3632 			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
3633 		goto error_exit;
3634 	}
3635 
3636 	scb_data->init_level++;
3637 
3638 	/* And permanently map them */
3639 	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
3640 			scb_data->sense,
3641 			AHC_SCB_MAX * sizeof(struct scsi_sense_data),
3642 			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);
3643 
3644 	scb_data->init_level++;
3645 
3646 	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
3647 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3648 			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3649 			       /*highaddr*/BUS_SPACE_MAXADDR,
3650 			       /*filter*/NULL, /*filterarg*/NULL,
3651 			       PAGE_SIZE, /*nsegments*/1,
3652 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3653 			       /*flags*/0, &scb_data->sg_dmat) != 0) {
3654 		goto error_exit;
3655 	}
3656 
3657 	scb_data->init_level++;
3658 
3659 	/* Perform initial SCB allocation */
3660 	memset(scb_data->hscbs, 0, AHC_SCB_MAX * sizeof(struct hardware_scb));
3661 	ahc_alloc_scbs(ahc);
3662 
3663 	if (scb_data->numscbs == 0) {
3664 		printf("%s: ahc_init_scbdata - "
3665 		       "Unable to allocate initial scbs\n",
3666 		       ahc_name(ahc));
3667 		goto error_exit;
3668 	}
3669 
3670 	/*
3671 	 * Tell the sequencer which SCB will be the next one it receives.
3672 	 */
3673 	ahc->next_queued_scb = ahc_get_scb(ahc);
3674 	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
3675 
3676 	/*
3677 	 * Note that we were successful
3678 	 */
3679 	return (0);
3680 
3681 error_exit:
3682 
3683 	return (ENOMEM);
3684 }
3685 
3686 static void
3687 ahc_fini_scbdata(struct ahc_softc *ahc)
3688 {
3689 	struct scb_data *scb_data;
3690 
3691 	scb_data = ahc->scb_data;
3692 	if (scb_data == NULL)
3693 		return;
3694 
3695 	switch (scb_data->init_level) {
3696 	default:
3697 	case 7:
3698 	{
3699 		struct sg_map_node *sg_map;
3700 
3701 		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
3702 			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
3703 			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
3704 					  sg_map->sg_dmamap);
3705 			ahc_dmamem_free(ahc, scb_data->sg_dmat,
3706 					sg_map->sg_vaddr,
3707 					sg_map->sg_dmamap);
3708 			free(sg_map, M_DEVBUF);
3709 		}
3710 		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
3711 	}
3712 	case 6:
3713 		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
3714 				  scb_data->sense_dmamap);
3715 	case 5:
3716 		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
3717 				scb_data->sense_dmamap);
3718 		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
3719 				   scb_data->sense_dmamap);
3720 	case 4:
3721 		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
3722 	case 3:
3723 		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
3724 				  scb_data->hscb_dmamap);
3725 	case 2:
3726 		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
3727 				scb_data->hscb_dmamap);
3728 		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
3729 				   scb_data->hscb_dmamap);
3730 	case 1:
3731 		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
3732 		break;
3733 	}
3734 	if (scb_data->scbarray != NULL)
3735 		free(scb_data->scbarray, M_DEVBUF);
3736 }
3737 
3738 void
3739 ahc_alloc_scbs(struct ahc_softc *ahc)
3740 {
3741 	struct scb_data *scb_data;
3742 	struct scb *next_scb;
3743 	struct sg_map_node *sg_map;
3744 	bus_addr_t physaddr;
3745 	struct ahc_dma_seg *segs;
3746 	int newcount;
3747 	int i;
3748 
3749 	scb_data = ahc->scb_data;
3750 	if (scb_data->numscbs >= AHC_SCB_MAX)
3751 		/* Can't allocate any more */
3752 		return;
3753 
3754 	next_scb = &scb_data->scbarray[scb_data->numscbs];
3755 
3756 	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
3757 
3758 	if (sg_map == NULL)
3759 		return;
3760 
3761 	/* Allocate S/G space for the next batch of SCBS */
3762 	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
3763 			     (void **)&sg_map->sg_vaddr,
3764 			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
3765 		free(sg_map, M_DEVBUF);
3766 		return;
3767 	}
3768 
3769 	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
3770 
3771 	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
3772 			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
3773 			&sg_map->sg_physaddr, /*flags*/0);
3774 
3775 	segs = sg_map->sg_vaddr;
3776 	physaddr = sg_map->sg_physaddr;
3777 
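	/*
	 * Each SCB receives an AHC_NSEG entry S/G list carved from the
	 * page just allocated, so one page backs at most
	 * PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)) new SCBs.
	 */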
3778 	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
3779 	for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) {
3780 		struct scb_platform_data *pdata;
3781 #ifndef __linux__
3782 		int error;
3783 #endif
3784 		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
3785 							   M_DEVBUF, M_NOWAIT);
3786 		if (pdata == NULL)
3787 			break;
3788 		next_scb->platform_data = pdata;
3789 		next_scb->sg_list = segs;
3790 		/*
3791 		 * The sequencer always starts with the second entry.
3792 		 * The first entry is embedded in the scb.
3793 		 */
3794 		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
3795 		next_scb->ahc_softc = ahc;
3796 		next_scb->flags = SCB_FREE;
3797 #ifndef __linux__
3798 		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
3799 					  &next_scb->dmamap);
3800 		if (error != 0)
3801 			break;
3802 #endif
3803 		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
3804 		next_scb->hscb->tag = ahc->scb_data->numscbs;
3805 		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
3806 				  next_scb, links.sle);
3807 		segs += AHC_NSEG;
3808 		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
3809 		next_scb++;
3810 		ahc->scb_data->numscbs++;
3811 	}
3812 }
3813 
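/*
 * Format a one line description of the controller into the caller's
 * buffer.  The result looks something like
 * "aic7880: Wide Channel A, SCSI Id=7, 16/255 SCBs", with the exact
 * text depending on the adapter's features (example text only).
 */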
3814 void
3815 ahc_controller_info(struct ahc_softc *ahc, char *buf)
3816 {
3817 	int len;
3818 
3819 	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
3820 	buf += len;
3821 	if ((ahc->features & AHC_TWIN) != 0)
3822  		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
3823 			      "B SCSI Id=%d, primary %c, ",
3824 			      ahc->our_id, ahc->our_id_b,
3825 			      ahc->flags & AHC_CHANNEL_B_PRIMARY ? 'B': 'A');
3826 	else {
3827 		const char *type;
3828 
3829 		if ((ahc->features & AHC_WIDE) != 0) {
3830 			type = "Wide";
3831 		} else {
3832 			type = "Single";
3833 		}
3834 		len = sprintf(buf, "%s Channel %c, SCSI Id=%d, ",
3835 			      type, ahc->channel, ahc->our_id);
3836 	}
3837 	buf += len;
3838 
3839 	if ((ahc->flags & AHC_PAGESCBS) != 0)
3840 		sprintf(buf, "%d/%d SCBs",
3841 			ahc->scb_data->maxhscbs, AHC_SCB_MAX);
3842 	else
3843 		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
3844 }
3845 
3846 /*
3847  * Start the board, ready for normal operation
3848  */
3849 int
3850 ahc_init(struct ahc_softc *ahc)
3851 {
3852 	int	 max_targ;
3853 	int	 i;
3854 	int	 term;
3855 	u_int	 scsi_conf;
3856 	u_int	 scsiseq_template;
3857 	u_int	 ultraenb;
3858 	u_int	 discenable;
3859 	u_int	 tagenable;
3860 	size_t	 driver_data_size;
3861 	uint32_t physaddr;
3862 
3863 #ifdef AHC_DEBUG_SEQUENCER
3864 	ahc->flags |= AHC_SEQUENCER_DEBUG;
3865 #endif
3866 
3867 #ifdef AHC_PRINT_SRAM
3868 	printf("Scratch Ram:");
3869 	for (i = 0x20; i < 0x5f; i++) {
3870 		if (((i % 8) == 0) && (i != 0)) {
3871 			printf ("\n              ");
3872 		}
3873 		printf (" 0x%x", ahc_inb(ahc, i));
3874 	}
3875 	if ((ahc->features & AHC_MORE_SRAM) != 0) {
3876 		for (i = 0x70; i < 0x7f; i++) {
3877 			if (((i % 8) == 0) && (i != 0)) {
3878 				printf ("\n              ");
3879 			}
3880 			printf (" 0x%x", ahc_inb(ahc, i));
3881 		}
3882 	}
3883 	printf ("\n");
3884 #endif
3885 	max_targ = 15;
3886 
3887 	/*
3888 	 * Assume we have a board at this stage and it has been reset.
3889 	 */
3890 	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
3891 		ahc->our_id = ahc->our_id_b = 7;
3892 
3893 	/*
3894 	 * Default to allowing initiator operations.
3895 	 */
3896 	ahc->flags |= AHC_INITIATORROLE;
3897 
3898 	/*
3899 	 * Only allow target mode features if this unit has them enabled.
3900 	 */
3901 	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
3902 		ahc->features &= ~AHC_TARGETMODE;
3903 
3904 #ifndef __linux__
3905 	/* DMA tag for mapping buffers into device visible space. */
3906 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3907 			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3908 			       /*highaddr*/BUS_SPACE_MAXADDR,
3909 			       /*filter*/NULL, /*filterarg*/NULL,
3910 			       /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG,
3911 			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
3912 			       /*flags*/BUS_DMA_ALLOCNOW,
3913 			       &ahc->buffer_dmat) != 0) {
3914 		return (ENOMEM);
3915 	}
3916 #endif
3917 
3918 	ahc->init_level++;
3919 
3920 	/*
3921 	 * DMA tag for our command fifos and other data in system memory
3922 	 * the card's sequencer must be able to access.  For initiator
3923 	 * roles, we need to allocate space for the qinfifo and qoutfifo.
3924 	 * The qinfifo and qoutfifo are composed of 256 1-byte elements.
3925 	 * When providing for the target mode role, we must additionally
3926 	 * provide space for the incoming target command fifo and an extra
3927 	 * byte to deal with a dma bug in some chip versions.
3928 	 */
3929 	driver_data_size = 2 * 256 * sizeof(uint8_t);
3930 	if ((ahc->features & AHC_TARGETMODE) != 0)
3931 		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
3932 				 + /*DMA WideOdd Bug Buffer*/1;
3933 	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3934 			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3935 			       /*highaddr*/BUS_SPACE_MAXADDR,
3936 			       /*filter*/NULL, /*filterarg*/NULL,
3937 			       driver_data_size,
3938 			       /*nsegments*/1,
3939 			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3940 			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
3941 		return (ENOMEM);
3942 	}
3943 
3944 	ahc->init_level++;
3945 
3946 	/* Allocation of driver data */
3947 	if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
3948 			     (void **)&ahc->qoutfifo,
3949 			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
3950 		return (ENOMEM);
3951 	}
3952 
3953 	ahc->init_level++;
3954 
3955 	/* And permanently map it in */
3956 	ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
3957 			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
3958 			&ahc->shared_data_busaddr, /*flags*/0);
3959 
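	/*
	 * Carve up the shared data area.  When target mode is enabled
	 * the incoming target command array comes first, followed by
	 * the 256 byte qoutfifo and the 256 byte qinfifo; the final
	 * byte of the buffer doubles as the DMA WideOdd bug buffer.
	 */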
3960 	if ((ahc->features & AHC_TARGETMODE) != 0) {
3961 		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
3962 		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
3963 		ahc->dma_bug_buf = ahc->shared_data_busaddr
3964 				 + driver_data_size - 1;
3965 		/* All target command blocks start out invalid. */
3966 		for (i = 0; i < AHC_TMODE_CMDS; i++)
3967 			ahc->targetcmds[i].cmd_valid = 0;
3968 		ahc->tqinfifonext = 1;
3969 		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
3970 		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
3971 		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
3972 	}
3973 	ahc->qinfifo = &ahc->qoutfifo[256];
3974 
3975 	ahc->init_level++;
3976 
3977 	/* Allocate SCB data now that buffer_dmat is initialized */
3978 	if (ahc->scb_data->maxhscbs == 0)
3979 		if (ahc_init_scbdata(ahc) != 0)
3980 			return (ENOMEM);
3981 
3982 	/*
3983 	 * Allocate a tstate to house information for our
3984 	 * initiator presence on the bus as well as the user
3985 	 * data for any target mode initiator.
3986 	 */
3987 	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
3988 		printf("%s: unable to allocate tmode_tstate.  "
3989 		       "Failing attach\n", ahc_name(ahc));
3990 		return (-1);
3991 	}
3992 
3993 	if ((ahc->features & AHC_TWIN) != 0) {
3994 		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
3995 			printf("%s: unable to allocate tmode_tstate.  "
3996 			       "Failing attach\n", ahc_name(ahc));
3997 			return (-1);
3998 		}
3999 	}
4000 
4001 	ahc_outb(ahc, SEQ_FLAGS, 0);
4002 
4003 	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) {
4004 		ahc->flags |= AHC_PAGESCBS;
4005 	} else {
4006 		ahc->flags &= ~AHC_PAGESCBS;
4007 	}
4008 
4009 #ifdef AHC_DEBUG
4010 	if (ahc_debug & AHC_SHOWMISC) {
4011 		printf("%s: hardware scb %u bytes; kernel scb %u bytes; "
4012 		       "ahc_dma %u bytes\n",
4013 			ahc_name(ahc),
4014 			(u_int)sizeof(struct hardware_scb),
4015 			(u_int)sizeof(struct scb),
4016 			(u_int)sizeof(struct ahc_dma_seg));
4017 	}
4018 #endif /* AHC_DEBUG */
4019 
4020 	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4021 	if (ahc->features & AHC_TWIN) {
4022 
4023 		/*
4024 		 * The device is gated to channel B after a chip reset,
4025 		 * so set those values first
4026 		 */
4027 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4028 		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4029 		ahc_outb(ahc, SCSIID, ahc->our_id_b);
4030 		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4031 		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4032 					|term|ENSTIMER|ACTNEGEN);
4033 		if ((ahc->features & AHC_ULTRA2) != 0)
4034 			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4035 		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4036 		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4037 
4038 		if ((scsi_conf & RESET_SCSI) != 0
4039 		 && (ahc->flags & AHC_INITIATORROLE) != 0)
4040 			ahc->flags |= AHC_RESET_BUS_B;
4041 
4042 		/* Select Channel A */
4043 		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4044 	}
4045 	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4046 	if ((ahc->features & AHC_ULTRA2) != 0)
4047 		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4048 	else
4049 		ahc_outb(ahc, SCSIID, ahc->our_id);
4050 	scsi_conf = ahc_inb(ahc, SCSICONF);
4051 	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4052 				|term
4053 				|ENSTIMER|ACTNEGEN);
4054 	if ((ahc->features & AHC_ULTRA2) != 0)
4055 		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4056 	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4057 	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4058 
4059 	if ((scsi_conf & RESET_SCSI) != 0
4060 	 && (ahc->flags & AHC_INITIATORROLE) != 0)
4061 		ahc->flags |= AHC_RESET_BUS_A;
4062 
4063 	/*
4064 	 * Look at the information that board initialization or
4065 	 * the board bios has left us.
4066 	 */
4067 	ultraenb = 0;
4068 	tagenable = ALL_TARGETS_MASK;
4069 
4070 	/* Grab the disconnection disable table and invert it for our needs */
4071 	if (ahc->flags & AHC_USEDEFAULTS) {
4072 		printf("%s: Host Adapter Bios disabled.  Using default SCSI "
4073 			"device parameters\n", ahc_name(ahc));
4074 		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
4075 			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
4076 		discenable = ALL_TARGETS_MASK;
4077 		if ((ahc->features & AHC_ULTRA) != 0)
4078 			ultraenb = ALL_TARGETS_MASK;
4079 	} else {
4080 		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
4081 			   | ahc_inb(ahc, DISC_DSB));
4082 		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
4083 			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
4084 				      | ahc_inb(ahc, ULTRA_ENB);
4085 	}
4086 
4087 	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
4088 		max_targ = 7;
4089 
4090 	for (i = 0; i <= max_targ; i++) {
4091 		struct ahc_initiator_tinfo *tinfo;
4092 		struct tmode_tstate *tstate;
4093 		u_int our_id;
4094 		u_int target_id;
4095 		char channel;
4096 
4097 		channel = 'A';
4098 		our_id = ahc->our_id;
4099 		target_id = i;
4100 		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
4101 			channel = 'B';
4102 			our_id = ahc->our_id_b;
4103 			target_id = i % 8;
4104 		}
4105 		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
4106 					    target_id, &tstate);
4107 		/* Default to async narrow across the board */
4108 		memset(tinfo, 0, sizeof(*tinfo));
4109 		if (ahc->flags & AHC_USEDEFAULTS) {
4110 			if ((ahc->features & AHC_WIDE) != 0)
4111 				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4112 
4113 			/*
4114 			 * These will be truncated when we determine the
4115 			 * connection type we have with the target.
4116 			 */
4117 			tinfo->user.period = ahc_syncrates->period;
4118 			tinfo->user.offset = ~0;
4119 		} else {
4120 			u_int scsirate;
4121 			uint16_t mask;
4122 
4123 			/* Take the settings leftover in scratch RAM. */
4124 			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
4125 			mask = (0x01 << i);
4126 			if ((ahc->features & AHC_ULTRA2) != 0) {
4127 				u_int offset;
4128 				u_int maxsync;
4129 
4130 				if ((scsirate & SOFS) == 0x0F) {
4131 					/*
4132 					 * Haven't negotiated yet,
4133 					 * so the format is different.
4134 					 */
4135 					scsirate = (scsirate & SXFR) >> 4
4136 						 | ((ultraenb & mask)
4137 						    ? 0x08 : 0x0)
4138 						 | (scsirate & WIDEXFER);
4139 					offset = MAX_OFFSET_ULTRA2;
4140 				} else
4141 					offset = ahc_inb(ahc, TARG_OFFSET + i);
4142 				maxsync = AHC_SYNCRATE_ULTRA2;
4143 				if ((ahc->features & AHC_DT) != 0)
4144 					maxsync = AHC_SYNCRATE_DT;
4145 				tinfo->user.period =
4146 				    ahc_find_period(ahc, scsirate, maxsync);
4147 				if (offset == 0)
4148 					tinfo->user.period = 0;
4149 				else
4150 					tinfo->user.offset = ~0;
4151 				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
4152 				 && (ahc->features & AHC_DT) != 0)
4153 					tinfo->user.ppr_options =
4154 					    MSG_EXT_PPR_DT_REQ;
4155 			} else if ((scsirate & SOFS) != 0) {
4156 				if ((scsirate & SXFR) == 0x40
4157 				 && (ultraenb & mask) != 0) {
4158 					/* Treat 10MHz as a non-ultra speed */
4159 					scsirate &= ~SXFR;
4160 				 	ultraenb &= ~mask;
4161 				}
4162 				tinfo->user.period =
4163 				    ahc_find_period(ahc, scsirate,
4164 						    (ultraenb & mask)
4165 						   ? AHC_SYNCRATE_ULTRA
4166 						   : AHC_SYNCRATE_FAST);
4167 				if (tinfo->user.period != 0)
4168 					tinfo->user.offset = ~0;
4169 			}
4170 			if ((scsirate & WIDEXFER) != 0
4171 			 && (ahc->features & AHC_WIDE) != 0)
4172 				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4173 			tinfo->user.protocol_version = 4;
4174 			if ((ahc->features & AHC_DT) != 0)
4175 				tinfo->user.transport_version = 3;
4176 			else
4177 				tinfo->user.transport_version = 2;
4178 			tinfo->goal.protocol_version = 2;
4179 			tinfo->goal.transport_version = 2;
4180 			tinfo->current.protocol_version = 2;
4181 			tinfo->current.transport_version = 2;
4182 		}
4183 		tstate->ultraenb = ultraenb;
4184 		tstate->discenable = discenable;
4185 		tstate->tagenable = 0; /* Wait until the XPT says it's okay */
4186 	}
4187 	ahc->user_discenable = discenable;
4188 	ahc->user_tagenable = tagenable;
4189 
4190 	/* There are no untagged SCBs active yet. */
4191 	for (i = 0; i < 16; i++) {
4192 		ahc_index_busy_tcl(ahc, BUILD_TCL(i << 4, 0), /*unbusy*/TRUE);
4193 		if ((ahc->features & AHC_SCB_BTT) != 0) {
4194 			int lun;
4195 
4196 			/*
4197 			 * The SCB based BTT allows an entry per
4198 			 * target and lun pair.
4199 			 */
4200 			for (lun = 1; lun < AHC_NUM_LUNS; lun++) {
4201 				ahc_index_busy_tcl(ahc,
4202 						   BUILD_TCL(i << 4, lun),
4203 						   /*unbusy*/TRUE);
4204 			}
4205 		}
4206 	}
4207 
4208 	/* All of our queues are empty */
4209 	for (i = 0; i < 256; i++)
4210 		ahc->qoutfifo[i] = SCB_LIST_NULL;
4211 
4212 	for (i = 0; i < 256; i++)
4213 		ahc->qinfifo[i] = SCB_LIST_NULL;
4214 
4215 	if ((ahc->features & AHC_MULTI_TID) != 0) {
4216 		ahc_outb(ahc, TARGID, 0);
4217 		ahc_outb(ahc, TARGID + 1, 0);
4218 	}
4219 
4220 	/*
4221 	 * Tell the sequencer where it can find our arrays in memory.
4222 	 */
4223 	physaddr = ahc->scb_data->hscb_busaddr;
4224 	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4225 	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4226 	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4227 	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4228 
4229 	physaddr = ahc->shared_data_busaddr;
4230 	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4231 	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4232 	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4233 	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4234 
4235 	/*
4236 	 * Initialize the group code to command length table.
4237 	 * This overrides the values in TARG_SCSIRATE, so only
4238 	 * set up the table after we have processed that information.
4239 	 */
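	/*
	 * One entry per SCSI CDB group code (the high three bits of the
	 * opcode).  The values appear to be the count of CDB bytes that
	 * follow the opcode byte (6, 10, 10, 16, and 12 byte CDBs);
	 * reserved and vendor specific groups are left at zero.
	 */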
4240 	ahc_outb(ahc, CMDSIZE_TABLE, 5);
4241 	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4242 	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4243 	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4244 	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4245 	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4246 	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4247 	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
4248 
4249 	/* Tell the sequencer of our initial queue positions */
4250 	ahc_outb(ahc, KERNEL_QINPOS, 0);
4251 	ahc_outb(ahc, QINPOS, 0);
4252 	ahc_outb(ahc, QOUTPOS, 0);
4253 
4254 	/* Don't have any special messages to send to targets */
4255 	ahc_outb(ahc, TARGET_MSG_REQUEST, 0);
4256 	ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 0);
4257 
4258 	/*
4259 	 * Use the built in queue management registers
4260 	 * if they are available.
4261 	 */
4262 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4263 		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4264 		ahc_outb(ahc, SDSCB_QOFF, 0);
4265 		ahc_outb(ahc, SNSCB_QOFF, 0);
4266 		ahc_outb(ahc, HNSCB_QOFF, 0);
4267 	}
4268 
4269 
4270 	/* We don't have any waiting selections */
4271 	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4272 
4273 	/* Our disconnection list is empty too */
4274 	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4275 
4276 	/* Message out buffer starts empty */
4277 	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4278 
4279 	/*
4280 	 * Setup the allowed SCSI Sequences based on operational mode.
4281 	 * If we are a target, we'll enable select-in operations once
4282 	 * we've had a lun enabled.
4283 	 */
4284 	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4285 	if ((ahc->flags & AHC_INITIATORROLE) != 0)
4286 		scsiseq_template |= ENRSELI;
4287 	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4288 
4289 	/*
4290 	 * Load the Sequencer program and Enable the adapter
4291 	 * in "fast" mode.
4292 	 */
4293 	if (bootverbose)
4294 		printf("%s: Downloading Sequencer Program...",
4295 		       ahc_name(ahc));
4296 
4297 	ahc_loadseq(ahc);
4298 
4299 	if ((ahc->features & AHC_ULTRA2) != 0) {
4300 		int wait;
4301 
4302 		/*
4303 		 * Wait for up to 500ms for our transceivers
4304 		 * to settle.  If the adapter does not have
4305 		 * a cable attached, the transceivers may
4306 		 * never settle, so don't complain if we
4307 		 * fail here.
4308 		 */
4309 		pause_sequencer(ahc);
4310 		for (wait = 5000;
4311 		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
4312 		     wait--)
4313 			ahc_delay(100);
4314 		unpause_sequencer(ahc);
4315 	}
4316 	return (0);
4317 }
4318 
4319 /************************** Busy Target Table *********************************/
4320 /*
4321  * Return the untagged transaction id for a given target/channel lun.
4322  * Optionally, clear the entry.
4323  */
4324 u_int
4325 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl, int unbusy)
4326 {
4327 	u_int scbid;
4328 	u_int target_offset;
4329 
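	/*
	 * With SCB-based busy target tables (AHC_SCB_BTT) the entries
	 * live in hardware SCB storage: SCBPTR selects the lun and one
	 * byte per target within that SCB holds the busy tag.
	 * Otherwise a flat 16 entry BUSY_TARGETS array in scratch RAM
	 * is used, shared by all luns of a target.
	 */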
4330 	if ((ahc->features & AHC_SCB_BTT) != 0) {
4331 		u_int saved_scbptr;
4332 
4333 		saved_scbptr = ahc_inb(ahc, SCBPTR);
4334 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4335 		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
4336 		if (unbusy)
4337 			ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl),
4338 				 SCB_LIST_NULL);
4339 		ahc_outb(ahc, SCBPTR, saved_scbptr);
4340 	} else {
4341 		target_offset = TCL_TARGET_OFFSET(tcl);
4342 		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
4343 		if (unbusy)
4344 			ahc_outb(ahc, BUSY_TARGETS + target_offset,
4345 				 SCB_LIST_NULL);
4346 	}
4347 
4348 	return (scbid);
4349 }
4350 
4351 void
4352 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
4353 {
4354 	u_int target_offset;
4355 
4356 	if ((ahc->features & AHC_SCB_BTT) != 0) {
4357 		u_int saved_scbptr;
4358 
4359 		saved_scbptr = ahc_inb(ahc, SCBPTR);
4360 		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4361 		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
4362 		ahc_outb(ahc, SCBPTR, saved_scbptr);
4363 	} else {
4364 		target_offset = TCL_TARGET_OFFSET(tcl);
4365 		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
4366 	}
4367 }
4368 
4369 /************************** SCB and SCB queue management **********************/
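/*
 * Match an SCB against a target/channel/lun/tag tuple.  The wildcard
 * values (ALL_CHANNELS, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD and a
 * tag of SCB_LIST_NULL) match anything, so, for example, passing the
 * wildcards with channel 'A' selects every transaction on that bus.
 */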
4370 int
4371 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
4372 	      char channel, int lun, u_int tag, role_t role)
4373 {
4374 	int targ = SCB_GET_TARGET(ahc, scb);
4375 	char chan = SCB_GET_CHANNEL(ahc, scb);
4376 	int slun = SCB_GET_LUN(scb);
4377 	int match;
4378 
4379 	match = ((chan == channel) || (channel == ALL_CHANNELS));
4380 	if (match != 0)
4381 		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
4382 	if (match != 0)
4383 		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
4384 	if (match != 0) {
4385 #ifdef AHC_TARGET_MODE
4386 		int group;
4387 
4388 		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
4389 		if (role == ROLE_INITIATOR) {
4390 			match = (group == XPT_FC_GROUP_COMMON)
4391 			      && ((tag == scb->hscb->tag)
4392 			       || (tag == SCB_LIST_NULL));
4393 		} else if (role == ROLE_TARGET) {
4394 			match = (group == XPT_FC_GROUP_TMODE)
4395 			      && ((tag == scb->io_ctx->csio.tag_id)
4396 			       || (tag == SCB_LIST_NULL));
4397 		}
4398 #else /* !AHC_TARGET_MODE */
4399 		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
4400 #endif /* AHC_TARGET_MODE */
4401 	}
4402 
4403 	return match;
4404 }
4405 
4406 void
4407 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
4408 {
4409 	int	target;
4410 	char	channel;
4411 	int	lun;
4412 
4413 	target = SCB_GET_TARGET(ahc, scb);
4414 	lun = SCB_GET_LUN(scb);
4415 	channel = SCB_GET_CHANNEL(ahc, scb);
4416 
4417 	ahc_search_qinfifo(ahc, target, channel, lun,
4418 			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
4419 			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
4420 
4421 	ahc_platform_freeze_devq(ahc, scb);
4422 }
4423 
4424 void
4425 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
4426 {
4427 	struct scb *prev_scb;
4428 
4429 	prev_scb = NULL;
4430 	if (ahc_qinfifo_count(ahc) != 0) {
4431 		u_int prev_tag;
4432 		uint8_t prev_pos;
4433 
4434 		prev_pos = ahc->qinfifonext - 1;
4435 		prev_tag = ahc->qinfifo[prev_pos];
4436 		prev_scb = ahc_lookup_scb(ahc, prev_tag);
4437 	}
4438 	ahc_qinfifo_requeue(ahc, prev_scb, scb);
4439 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4440 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4441 	} else {
4442 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4443 	}
4444 }
4445 
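/*
 * Append an SCB to the software qinfifo and keep the hardware next-SCB
 * chain consistent: the previous queue entry (or the NEXT_QUEUED_SCB
 * register when the queue is empty) is pointed at this SCB, and this
 * SCB is pointed at the pre-allocated next_queued_scb.
 */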
4446 static void
4447 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
4448 		    struct scb *scb)
4449 {
4450 	if (prev_scb == NULL)
4451 		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
4452 	else
4453 		prev_scb->hscb->next = scb->hscb->tag;
4454 	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
4455 	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
4456 }
4457 
4458 static int
4459 ahc_qinfifo_count(struct ahc_softc *ahc)
4460 {
4461 	uint8_t qinpos;
4462 	uint8_t diff;
4463 
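	/*
	 * Both indexes are 8 bits wide and the queue holds 256 entries,
	 * so the modulo-256 difference between our producer index and
	 * the sequencer's consumer index is the number of commands that
	 * are still waiting, even after the counters wrap.
	 */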
4464 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4465 		qinpos = ahc_inb(ahc, SNSCB_QOFF);
4466 		ahc_outb(ahc, SNSCB_QOFF, qinpos);
4467 	} else
4468 		qinpos = ahc_inb(ahc, QINPOS);
4469 	diff = ahc->qinfifonext - qinpos;
4470 	return (diff);
4471 }
4472 
4473 int
4474 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
4475 		   int lun, u_int tag, role_t role, uint32_t status,
4476 		   ahc_search_action action)
4477 {
4478 	struct	scb *scb;
4479 	struct	scb *prev_scb;
4480 	uint8_t qinstart;
4481 	uint8_t qinpos;
4482 	uint8_t qintail;
4483 	uint8_t next, prev;
4484 	uint8_t curscbptr;
4485 	int	found;
4486 	int	maxtarget;
4487 	int	i;
4488 	int	have_qregs;
4489 
4490 	qintail = ahc->qinfifonext;
4491 	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
4492 	if (have_qregs) {
4493 		qinstart = ahc_inb(ahc, SNSCB_QOFF);
4494 		ahc_outb(ahc, SNSCB_QOFF, qinstart);
4495 	} else
4496 		qinstart = ahc_inb(ahc, QINPOS);
4497 	qinpos = qinstart;
4498 
4499 	next = ahc_inb(ahc, NEXT_QUEUED_SCB);
4500 	if (qinstart == qintail) {
4501 		if (next != ahc->next_queued_scb->hscb->tag)
4502 			qinpos--;
4503 	} else if (next != ahc->qinfifo[qinstart]) {
4504 		qinpos--;
4505 	}
4506 
4507 	found = 0;
4508 	prev_scb = NULL;
4509 
4510 	if (action == SEARCH_COMPLETE) {
4511 		/*
4512 		 * Don't attempt to run any queued untagged transactions
4513 		 * until we are done with the abort process.
4514 		 */
4515 		ahc_freeze_untagged_queues(ahc);
4516 	}
4517 
4518 	if (action != SEARCH_COUNT && (qinpos != qintail)) {
4519 		/*
4520 		 * The sequencer may be in the process of dmaing
4521 		 * down the SCB at the beginning of the queue.
4522 		 * This could be problematic if either the first
4523 		 * or the second SCB is removed from the queue
4524 		 * (the first SCB includes a pointer to the "next"
4525 		 * SCB to dma).  If we have the prospect of removing
4526 		 * any entries, swap the first element in the queue
4527 		 * with the next HSCB so the sequencer will notice
4528 		 * that NEXT_QUEUED_SCB has changed during its dma
4529 		 * attempt and will retry the DMA.
4530 		 */
4531 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
4532 		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
4533 		ahc_swap_with_next_hscb(ahc, scb);
4534 		ahc->qinfifo[qinpos] = scb->hscb->tag;
4535 	}
4536 
4537 	/*
4538 	 * Start with an empty queue.  Entries that are not chosen
4539 	 * for removal will be re-added to the queue as we go.
4540 	 */
4541 	ahc->qinfifonext = qinpos;
4542 	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
4543 
4544 	while (qinpos != qintail) {
4545 		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
4546 		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
4547 			/*
4548 			 * We found an scb that needs to be acted on.
4549 			 */
4550 			found++;
4551 			switch (action) {
4552 			case SEARCH_COMPLETE:
4553 			{
4554 				cam_status ostat;
4555 
4556 				ostat = ahc_get_transaction_status(scb);
4557 				if (ostat == CAM_REQ_INPROG)
4558 					ahc_set_transaction_status(scb,
4559 								   status);
4560 				ahc_freeze_scb(scb);
4561 				if ((scb->flags & SCB_ACTIVE) == 0)
4562 					printf("Inactive SCB in qinfifo\n");
4563 				ahc_done(ahc, scb);
4564 
4565 				/* FALLTHROUGH */
4566 			case SEARCH_REMOVE:
4567 				break;
4568 			}
4569 			case SEARCH_COUNT:
4570 				ahc_qinfifo_requeue(ahc, prev_scb, scb);
4571 				prev_scb = scb;
4572 				break;
4573 			}
4574 		} else {
4575 			ahc_qinfifo_requeue(ahc, prev_scb, scb);
4576 			prev_scb = scb;
4577 		}
4578 		qinpos++;
4579 	}
4580 
4581 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4582 		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4583 	} else {
4584 		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4585 	}
4586 
4587 	/*
4588 	 * Search waiting for selection list.
4589 	 */
4590 	curscbptr = ahc_inb(ahc, SCBPTR);
4591 	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
4592 	prev = SCB_LIST_NULL;
4593 
4594 	while (next != SCB_LIST_NULL) {
4595 		uint8_t scb_index;
4596 
4597 		ahc_outb(ahc, SCBPTR, next);
4598 		scb_index = ahc_inb(ahc, SCB_TAG);
4599 		if (scb_index >= ahc->scb_data->numscbs) {
4600 			printf("Waiting List inconsistency. "
4601 			       "SCB index == %d, yet numscbs == %d.",
4602 			       scb_index, ahc->scb_data->numscbs);
4603 			ahc_dump_card_state(ahc);
4604 			panic("for safety");
4605 		}
4606 		scb = ahc_lookup_scb(ahc, scb_index);
4607 		if (ahc_match_scb(ahc, scb, target, channel,
4608 				  lun, SCB_LIST_NULL, role)) {
4609 			/*
4610 			 * We found an scb that needs to be acted on.
4611 			 */
4612 			found++;
4613 			switch (action) {
4614 			case SEARCH_COMPLETE:
4615 			{
4616 				cam_status ostat;
4617 
4618 				ostat = ahc_get_transaction_status(scb);
4619 				if (ostat == CAM_REQ_INPROG)
4620 					ahc_set_transaction_status(scb,
4621 								   status);
4622 				ahc_freeze_scb(scb);
4623 				if ((scb->flags & SCB_ACTIVE) == 0)
4624 					printf("Inactive SCB in Waiting List\n");
4625 				ahc_done(ahc, scb);
4626 				/* FALLTHROUGH */
4627 			}
4628 			case SEARCH_REMOVE:
4629 				next = ahc_rem_wscb(ahc, next, prev);
4630 				break;
4631 			case SEARCH_COUNT:
4632 				prev = next;
4633 				next = ahc_inb(ahc, SCB_NEXT);
4634 				break;
4635 			}
4636 		} else {
4637 
4638 			prev = next;
4639 			next = ahc_inb(ahc, SCB_NEXT);
4640 		}
4641 	}
4642 	ahc_outb(ahc, SCBPTR, curscbptr);
4643 
4644 	/*
4645 	 * And lastly, the untagged holding queues.
4646 	 */
4647 	i = 0;
4648 	if ((ahc->flags & AHC_SCB_BTT) == 0) {
4649 
4650 		maxtarget = 16;
4651 		if (target != CAM_TARGET_WILDCARD) {
4652 
4653 			i = target;
4654 			if (channel == 'B')
4655 				i += 8;
4656 			maxtarget = i + 1;
4657 		}
4658 	} else {
4659 		maxtarget = 0;
4660 	}
4661 
4662 	for (; i < maxtarget; i++) {
4663 		struct scb_tailq *untagged_q;
4664 		struct scb *next_scb;
4665 
4666 		untagged_q = &(ahc->untagged_queues[i]);
4667 		next_scb = TAILQ_FIRST(untagged_q);
4668 		while (next_scb != NULL) {
4669 
4670 			scb = next_scb;
4671 			next_scb = TAILQ_NEXT(scb, links.tqe);
4672 
4673 			/*
4674 			 * The head of the list may be the currently
4675 			 * active untagged command for a device.
4676 			 * We're only searching for commands that
4677 			 * have not been started.  A transaction
4678 			 * marked active but still in the qinfifo
4679 			 * is removed by the qinfifo scanning code
4680 			 * above.
4681 			 */
4682 			if ((scb->flags & SCB_ACTIVE) != 0)
4683 				continue;
4684 
4685 			if (ahc_match_scb(ahc, scb, target, channel,
4686 					  lun, SCB_LIST_NULL, role)) {
4687 				/*
4688 				 * We found an scb that needs to be acted on.
4689 				 */
4690 				found++;
4691 				switch (action) {
4692 				case SEARCH_COMPLETE:
4693 				{
4694 					cam_status ostat;
4695 
4696 					ostat = ahc_get_transaction_status(scb);
4697 					if (ostat == CAM_REQ_INPROG)
4698 						ahc_set_transaction_status(scb,
4699 								   status);
4700 					ahc_freeze_scb(scb);
4701 					if ((scb->flags & SCB_ACTIVE) == 0)
4702 						printf("Inactive SCB in untaggedQ\n");
4703 					ahc_done(ahc, scb);
4704 					break;
4705 				}
4706 				case SEARCH_REMOVE:
4707 					TAILQ_REMOVE(untagged_q, scb,
4708 						     links.tqe);
4709 					break;
4710 				case SEARCH_COUNT:
4711 					break;
4712 				}
4713 			}
4714 		}
4715 	}
4716 
4717 	if (action == SEARCH_COMPLETE)
4718 		ahc_release_untagged_queues(ahc);
4719 	return (found);
4720 }
4721 
4722 int
4723 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
4724 		     int lun, u_int tag, int stop_on_first, int remove,
4725 		     int save_state)
4726 {
4727 	struct	scb *scbp;
4728 	u_int	next;
4729 	u_int	prev;
4730 	u_int	count;
4731 	u_int	active_scb;
4732 
4733 	count = 0;
4734 	next = ahc_inb(ahc, DISCONNECTED_SCBH);
4735 	prev = SCB_LIST_NULL;
4736 
4737 	if (save_state) {
4738 		/* restore this when we're done */
4739 		active_scb = ahc_inb(ahc, SCBPTR);
4740 	} else
4741 		/* Silence compiler */
4742 		active_scb = SCB_LIST_NULL;
4743 
4744 	while (next != SCB_LIST_NULL) {
4745 		u_int scb_index;
4746 
4747 		ahc_outb(ahc, SCBPTR, next);
4748 		scb_index = ahc_inb(ahc, SCB_TAG);
4749 		if (scb_index >= ahc->scb_data->numscbs) {
4750 			printf("Disconnected List inconsistency. "
4751 			       "SCB index == %d, yet numscbs == %d.",
4752 			       scb_index, ahc->scb_data->numscbs);
4753 			ahc_dump_card_state(ahc);
4754 			panic("for safety");
4755 		}
4756 
4757 		if (next == prev) {
4758 			panic("Disconnected List Loop. "
4759 			      "cur SCBPTR == %x, prev SCBPTR == %x.",
4760 			      next, prev);
4761 		}
4762 		scbp = ahc_lookup_scb(ahc, scb_index);
4763 		if (ahc_match_scb(ahc, scbp, target, channel, lun,
4764 				  tag, ROLE_INITIATOR)) {
4765 			count++;
4766 			if (remove) {
4767 				next =
4768 				    ahc_rem_scb_from_disc_list(ahc, prev, next);
4769 			} else {
4770 				prev = next;
4771 				next = ahc_inb(ahc, SCB_NEXT);
4772 			}
4773 			if (stop_on_first)
4774 				break;
4775 		} else {
4776 			prev = next;
4777 			next = ahc_inb(ahc, SCB_NEXT);
4778 		}
4779 	}
4780 	if (save_state)
4781 		ahc_outb(ahc, SCBPTR, active_scb);
4782 	return (count);
4783 }
4784 
4785 /*
4786  * Remove an SCB from the on chip list of disconnected transactions.
4787  * This is empty/unused if we are not performing SCB paging.
4788  */
4789 static u_int
4790 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
4791 {
4792 	u_int next;
4793 
4794 	ahc_outb(ahc, SCBPTR, scbptr);
4795 	next = ahc_inb(ahc, SCB_NEXT);
4796 
4797 	ahc_outb(ahc, SCB_CONTROL, 0);
4798 
4799 	ahc_add_curscb_to_free_list(ahc);
4800 
4801 	if (prev != SCB_LIST_NULL) {
4802 		ahc_outb(ahc, SCBPTR, prev);
4803 		ahc_outb(ahc, SCB_NEXT, next);
4804 	} else
4805 		ahc_outb(ahc, DISCONNECTED_SCBH, next);
4806 
4807 	return (next);
4808 }
4809 
4810 /*
4811  * Add the SCB as selected by SCBPTR onto the on chip list of
4812  * free hardware SCBs.  This list is empty/unused if we are not
4813  * performing SCB paging.
4814  */
4815 static void
4816 ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
4817 {
4818 	/*
4819 	 * Invalidate the tag so that our abort
4820 	 * routines don't think it's active.
4821 	 */
4822 	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
4823 
4824 	if ((ahc->flags & AHC_PAGESCBS) != 0) {
4825 		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
4826 		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
4827 	}
4828 }
4829 
4830 /*
4831  * Manipulate the waiting for selection list and return the
4832  * scb that follows the one that we remove.
4833  */
4834 static u_int
4835 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
4836 {
4837 	u_int curscb, next;
4838 
4839 	/*
4840 	 * Select the SCB we want to abort and
4841 	 * pull the next pointer out of it.
4842 	 */
4843 	curscb = ahc_inb(ahc, SCBPTR);
4844 	ahc_outb(ahc, SCBPTR, scbpos);
4845 	next = ahc_inb(ahc, SCB_NEXT);
4846 
4847 	/* Clear the necessary fields */
4848 	ahc_outb(ahc, SCB_CONTROL, 0);
4849 
4850 	ahc_add_curscb_to_free_list(ahc);
4851 
4852 	/* update the waiting list */
4853 	if (prev == SCB_LIST_NULL) {
4854 		/* First in the list */
4855 		ahc_outb(ahc, WAITING_SCBH, next);
4856 
4857 		/*
4858 		 * Ensure we aren't attempting to perform
4859 		 * selection for this entry.
4860 		 */
4861 		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
4862 	} else {
4863 		/*
4864 		 * Select the scb that pointed to us
4865 		 * and update its next pointer.
4866 		 */
4867 		ahc_outb(ahc, SCBPTR, prev);
4868 		ahc_outb(ahc, SCB_NEXT, next);
4869 	}
4870 
4871 	/*
4872 	 * Point us back at the original scb position.
4873 	 */
4874 	ahc_outb(ahc, SCBPTR, curscb);
4875 	return next;
4876 }
4877 
4878 /******************************** Error Handling ******************************/
4879 /*
4880  * Abort all SCBs that match the given description (target/channel/lun/tag),
4881  * setting their status to the passed in status if the status has not already
4882  * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
4883  * is paused before it is called.
4884  */
4885 int
4886 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
4887 	       int lun, u_int tag, role_t role, uint32_t status)
4888 {
4889 	struct	scb *scbp;
4890 	struct	scb *scbp_next;
4891 	u_int	active_scb;
4892 	int	i, j;
4893 	int	maxtarget;
4894 	int	minlun;
4895 	int	maxlun;
4896 
4897 	int	found;
4898 
4899 	/*
4900 	 * Don't attempt to run any queued untagged transactions
4901 	 * until we are done with the abort process.
4902 	 */
4903 	ahc_freeze_untagged_queues(ahc);
4904 
4905 	/* restore this when we're done */
4906 	active_scb = ahc_inb(ahc, SCBPTR);
4907 
4908 	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
4909 				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
4910 
4911 	/*
4912 	 * Clean out the busy target table for any untagged commands.
4913 	 */
4914 	i = 0;
4915 	maxtarget = 16;
4916 	if (target != CAM_TARGET_WILDCARD) {
4917 		i = target;
4918 		if (channel == 'B')
4919 			i += 8;
4920 		maxtarget = i + 1;
4921 	}
4922 
4923 	if (lun == CAM_LUN_WILDCARD) {
4924 
4925 		/*
4926 		 * Unless we are using an SCB based
4927 		 * busy targets table, there is only
4928 		 * one table entry for all luns of
4929 		 * a target.
4930 		 */
4931 		minlun = 0;
4932 		maxlun = 1;
4933 		if ((ahc->flags & AHC_SCB_BTT) != 0)
4934 			maxlun = AHC_NUM_LUNS;
4935 	} else {
4936 		minlun = lun;
4937 		maxlun = lun + 1;
4938 	}
4939 
4940 	for (;i < maxtarget; i++) {
4941 		for (j = minlun;j < maxlun; j++)
4942 			ahc_index_busy_tcl(ahc, BUILD_TCL(i << 4, j),
4943 					   /*unbusy*/TRUE);
4944 	}
4945 
4946 	/*
4947 	 * Go through the disconnected list and remove any entries we
4948 	 * have queued for completion, 0'ing their control byte too.
4949 	 * We save the active SCB and restore it ourselves, so there
4950 	 * is no reason for this search to restore it too.
4951 	 */
4952 	ahc_search_disc_list(ahc, target, channel, lun, tag,
4953 			     /*stop_on_first*/FALSE, /*remove*/TRUE,
4954 			     /*save_state*/FALSE);
4955 
4956 	/*
4957 	 * Go through the hardware SCB array looking for commands that
4958 	 * were active but not on any list.
4959 	 */
4960 	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
4961 		u_int scbid;
4962 
4963 		ahc_outb(ahc, SCBPTR, i);
4964 		scbid = ahc_inb(ahc, SCB_TAG);
4965 		scbp = ahc_lookup_scb(ahc, scbid);
4966 		if (scbp != NULL
4967 		 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))
4968 			ahc_add_curscb_to_free_list(ahc);
4969 	}
4970 
4971 	/*
4972 	 * Go through the pending CCB list and look for
4973 	 * commands for this target that are still active.
4974 	 * These are other tagged commands that were
4975 	 * disconnected when the reset occurred.
4976 	 */
4977 	scbp_next = LIST_FIRST(&ahc->pending_scbs);
4978 	while (scbp_next != NULL) {
4979 		scbp = scbp_next;
4980 		scbp_next = LIST_NEXT(scbp, pending_links);
4981 		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
4982 			cam_status ostat;
4983 
4984 			ostat = ahc_get_transaction_status(scbp);
4985 			if (ostat == CAM_REQ_INPROG)
4986 				ahc_set_transaction_status(scbp, status);
4987 			ahc_freeze_scb(scbp);
4988 			if ((scbp->flags & SCB_ACTIVE) == 0)
4989 				printf("Inactive SCB on pending list\n");
4990 			ahc_done(ahc, scbp);
4991 			found++;
4992 		}
4993 	}
4994 	ahc_outb(ahc, SCBPTR, active_scb);
4995 	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
4996 	ahc_release_untagged_queues(ahc);
4997 	return found;
4998 }
4999 
5000 static void
5001 ahc_reset_current_bus(struct ahc_softc *ahc)
5002 {
5003 	uint8_t scsiseq;
5004 
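	/*
	 * Mask the SCSI reset interrupt while we assert RST ourselves so
	 * that our own reset does not raise a spurious interrupt.
	 */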
5005 	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
5006 	scsiseq = ahc_inb(ahc, SCSISEQ);
5007 	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
5008 	ahc_delay(AHC_BUSRESET_DELAY);
5009 	/* Turn off the bus reset */
5010 	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
5011 
5012 	ahc_clear_intstat(ahc);
5013 
5014 	/* Re-enable reset interrupts */
5015 	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
5016 }
5017 
5018 int
5019 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
5020 {
5021 	struct	ahc_devinfo devinfo;
5022 	u_int	initiator, target, max_scsiid;
5023 	u_int	sblkctl;
5024 	int	found;
5025 	int	restart_needed;
5026 	char	cur_channel;
5027 
5028 	ahc->pending_device = NULL;
5029 
5030 	ahc_compile_devinfo(&devinfo,
5031 			    CAM_TARGET_WILDCARD,
5032 			    CAM_TARGET_WILDCARD,
5033 			    CAM_LUN_WILDCARD,
5034 			    channel, ROLE_UNKNOWN);
5035 	pause_sequencer(ahc);
5036 
5037 	/* Make sure the sequencer is in a safe location. */
5038 	ahc_clear_critical_section(ahc);
5039 
5040 	/*
5041 	 * Run our command complete fifos to ensure that we perform
5042 	 * completion processing on any commands that 'completed'
5043 	 * before the reset occurred.
5044 	 */
5045 	ahc_run_qoutfifo(ahc);
5046 #ifdef AHC_TARGET_MODE
5047 	if ((ahc->flags & AHC_TARGETROLE) != 0) {
5048 		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
5049 	}
5050 #endif
5051 
5052 	/*
5053 	 * Reset the bus if we are initiating this reset
5054 	 */
5055 	sblkctl = ahc_inb(ahc, SBLKCTL);
5056 	cur_channel = 'A';
5057 	if ((ahc->features & AHC_TWIN) != 0
5058 	 && ((sblkctl & SELBUSB) != 0))
5059 	    cur_channel = 'B';
5060 	if (cur_channel != channel) {
5061 		/* Case 1: Command for another bus is active
5062 		 * Stealthily reset the other bus without
5063 		 * upsetting the current bus.
5064 		 */
5065 		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
5066 		ahc_outb(ahc, SIMODE1,
5067 			 ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
5068 		ahc_outb(ahc, SCSISEQ,
5069 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
5070 		if (initiate_reset)
5071 			ahc_reset_current_bus(ahc);
5072 		ahc_clear_intstat(ahc);
5073 		ahc_outb(ahc, SBLKCTL, sblkctl);
5074 		restart_needed = FALSE;
5075 	} else {
5076 		/* Case 2: A command from this bus is active or we're idle */
5077 		ahc_clear_msg_state(ahc);
5078 		ahc_outb(ahc, SIMODE1,
5079 			 ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
5080 		ahc_outb(ahc, SCSISEQ,
5081 			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
5082 		if (initiate_reset)
5083 			ahc_reset_current_bus(ahc);
5084 		ahc_clear_intstat(ahc);
5085 		restart_needed = TRUE;
5086 	}
5087 
5088 	/*
5089 	 * Clean up all the state information for the
5090 	 * pending transactions on this bus.
5091 	 */
5092 	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
5093 			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
5094 			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
5095 
5096 	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
5097 
5098 #ifdef AHC_TARGET_MODE
5099 	/*
5100 	 * Send an immediate notify ccb to all target mode peripheral
5101 	 * drivers affected by this action.
5102 	 */
5103 	for (target = 0; target <= max_scsiid; target++) {
5104 		struct tmode_tstate* tstate;
5105 		u_int lun;
5106 
5107 		tstate = ahc->enabled_targets[target];
5108 		if (tstate == NULL)
5109 			continue;
5110 		for (lun = 0; lun <= 7; lun++) {
5111 			struct tmode_lstate* lstate;
5112 
5113 			lstate = tstate->enabled_luns[lun];
5114 			if (lstate == NULL)
5115 				continue;
5116 
5117 			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
5118 					       EVENT_TYPE_BUS_RESET, /*arg*/0);
5119 			ahc_send_lstate_events(ahc, lstate);
5120 		}
5121 	}
5122 #endif
5123 	/* Notify the XPT that a bus reset occurred */
5124 	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
5125 		       CAM_LUN_WILDCARD, AC_BUS_RESET);
5126 
5127 	/*
5128 	 * Revert to async/narrow transfers until we renegotiate.
5129 	 */
5130 	for (target = 0; target <= max_scsiid; target++) {
5131 
5132 		if (ahc->enabled_targets[target] == NULL)
5133 			continue;
5134 		for (initiator = 0; initiator <= max_scsiid; initiator++) {
5135 			struct ahc_devinfo devinfo;
5136 
5137 			ahc_compile_devinfo(&devinfo, target, initiator,
5138 					    CAM_LUN_WILDCARD,
5139 					    channel, ROLE_UNKNOWN);
5140 			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
5141 				      AHC_TRANS_CUR, /*paused*/TRUE);
5142 			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
5143 					 /*period*/0, /*offset*/0,
5144 					 /*ppr_options*/0, AHC_TRANS_CUR,
5145 					 /*paused*/TRUE);
5146 		}
5147 	}
5148 
5149 	if (restart_needed)
5150 		restart_sequencer(ahc);
5151 	else
5152 		unpause_sequencer(ahc);
5153 	return found;
5154 }
5155 
5156 
5157 /***************************** Residual Processing ****************************/
5158 /*
5159  * Calculate the residual for a just completed SCB.
5160  */
5161 static void
5162 ahc_calc_residual(struct scb *scb)
5163 {
5164 	struct hardware_scb *hscb;
5165 	struct status_pkt *spkt;
5166 	uint32_t resid;
5167 
5168 	/*
5169 	 * 5 cases.
5170 	 * 1) No residual.
5171 	 *    SG_RESID_VALID clear in sgptr.
5172 	 * 2) Transferless command
5173 	 * 3) Never performed any transfers.
5174 	 *    sgptr has SG_FULL_RESID set.
5175 	 * 4) No residual but target did not
5176 	 *    save data pointers after the
5177 	 *    last transfer, so sgptr was
5178 	 *    never updated.
5179 	 * 5) We have a partial residual.
5180 	 *    Use residual_sgptr to determine
5181 	 *    where we are.
5182 	 */
5183 
5184 	hscb = scb->hscb;
5185 	if ((hscb->sgptr & SG_RESID_VALID) == 0)
5186 		/* Case 1 */
5187 		return;
5188 	hscb->sgptr &= ~SG_RESID_VALID;
5189 
5190 	if ((hscb->sgptr & SG_LIST_NULL) != 0)
5191 		/* Case 2 */
5192 		return;
5193 
5194 	spkt = &hscb->shared_data.status;
5195 	if ((hscb->sgptr & SG_FULL_RESID) != 0) {
5196 		/* Case 3 */
5197 		resid = ahc_get_transfer_length(scb);
5198 	} else if ((spkt->residual_sg_ptr & SG_LIST_NULL) != 0) {
5199 		/* Case 4 */
5200 		return;
5201 	} else if ((spkt->residual_sg_ptr & ~SG_PTR_MASK) != 0) {
5202 		panic("Bogus resid sgptr value 0x%x\n", spkt->residual_sg_ptr);
5203 	} else {
5204 		struct ahc_dma_seg *sg;
5205 
5206 		/*
5207 		 * Remainder of the SG where the transfer
5208 		 * stopped.
5209 		 */
5210 		resid = spkt->residual_datacnt & AHC_SG_LEN_MASK;
5211 		sg = ahc_sg_bus_to_virt(scb,
5212 					spkt->residual_sg_ptr & SG_PTR_MASK);
5213 
5214 		/* The residual sg_ptr always points to the next sg */
5215 		sg--;
5216 
5217 		/*
5218 		 * Add up the contents of all residual
5219 		 * SG segments that are after the SG where
5220 		 * the transfer stopped.
5221 		 */
5222 		while ((sg->len & AHC_DMA_LAST_SEG) == 0) {
5223 			sg++;
5224 			resid += sg->len & AHC_SG_LEN_MASK;
5225 		}
5226 	}
5227 	if ((scb->flags & SCB_SENSE) == 0)
5228 		ahc_set_residual(scb, resid);
5229 	else
5230 		ahc_set_sense_residual(scb, resid);
5231 
5232 #ifdef AHC_DEBUG
5233 	if (ahc_debug & AHC_SHOWMISC) {
5234 		ahc_print_path(ahc, scb);
5235 		printf("Handled Residual of %d bytes\n", resid);
5236 	}
5237 #endif
5238 }
5239 
5240 /******************************* Target Mode **********************************/
5241 #ifdef AHC_TARGET_MODE
5242 /*
5243  * Add a target mode event to this lun's queue
5244  */
5245 static void
5246 ahc_queue_lstate_event(struct ahc_softc *ahc, struct tmode_lstate *lstate,
5247 		       u_int initiator_id, u_int event_type, u_int event_arg)
5248 {
5249 	struct ahc_tmode_event *event;
5250 	int pending;
5251 
5252 	xpt_freeze_devq(lstate->path, /*count*/1);
5253 	if (lstate->event_w_idx >= lstate->event_r_idx)
5254 		pending = lstate->event_w_idx - lstate->event_r_idx;
5255 	else
5256 		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
5257 			- (lstate->event_r_idx - lstate->event_w_idx);
5258 
5259 	if (event_type == EVENT_TYPE_BUS_RESET
5260 	 || event_type == MSG_BUS_DEV_RESET) {
5261 		/*
5262 		 * Any earlier events are irrelevant, so reset our buffer.
5263 		 * This has the effect of allowing us to deal with reset
5264 		 * floods (an external device holding down the reset line)
5265 		 * without losing the event that is really interesting.
5266 		 */
5267 		lstate->event_r_idx = 0;
5268 		lstate->event_w_idx = 0;
5269 		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
5270 	}
5271 
5272 	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
5273 		xpt_print_path(lstate->path);
5274 		printf("immediate event %x:%x lost\n",
5275 		       lstate->event_buffer[lstate->event_r_idx].event_type,
5276 		       lstate->event_buffer[lstate->event_r_idx].event_arg);
5277 		lstate->event_r_idx++;
5278 		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5279 			lstate->event_r_idx = 0;
5280 		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
5281 	}
5282 
5283 	event = &lstate->event_buffer[lstate->event_w_idx];
5284 	event->initiator_id = initiator_id;
5285 	event->event_type = event_type;
5286 	event->event_arg = event_arg;
5287 	lstate->event_w_idx++;
5288 	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5289 		lstate->event_w_idx = 0;
5290 }
5291 
5292 /*
5293  * Send any target mode events queued up waiting
5294  * for immediate notify resources.
5295  */
5296 void
5297 ahc_send_lstate_events(struct ahc_softc *ahc, struct tmode_lstate *lstate)
5298 {
5299 	struct ccb_hdr *ccbh;
5300 	struct ccb_immed_notify *inot;
5301 
5302 	while (lstate->event_r_idx != lstate->event_w_idx
5303 	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
5304 		struct ahc_tmode_event *event;
5305 
5306 		event = &lstate->event_buffer[lstate->event_r_idx];
5307 		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
5308 		inot = (struct ccb_immed_notify *)ccbh;
5309 		switch (event->event_type) {
5310 		case EVENT_TYPE_BUS_RESET:
5311 			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
5312 			break;
5313 		default:
5314 			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
5315 			inot->message_args[0] = event->event_type;
5316 			inot->message_args[1] = event->event_arg;
5317 			break;
5318 		}
5319 		inot->initiator_id = event->initiator_id;
5320 		inot->sense_len = 0;
5321 		xpt_done((union ccb *)inot);
5322 		lstate->event_r_idx++;
5323 		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5324 			lstate->event_r_idx = 0;
5325 	}
5326 }
5327 #endif
5328 
5329 /******************** Sequencer Program Patching/Download *********************/
5330 
5331 #ifdef AHC_DUMP_SEQ
5332 void
5333 ahc_dumpseq(struct ahc_softc* ahc)
5334 {
5335 	int i;
5336 	int max_prog;
5337 
5338 	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
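	/*
	 * Sequencer RAM capacity varies: pre-PCI controllers hold
	 * 448 instructions, Ultra2 controllers hold 768, and all
	 * other PCI controllers hold 512.
	 */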
5339 		max_prog = 448;
5340 	else if ((ahc->features & AHC_ULTRA2) != 0)
5341 		max_prog = 768;
5342 	else
5343 		max_prog = 512;
5344 
5345 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5346 	ahc_outb(ahc, SEQADDR0, 0);
5347 	ahc_outb(ahc, SEQADDR1, 0);
5348 	for (i = 0; i < max_prog; i++) {
5349 		uint8_t ins_bytes[4];
5350 
5351 		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
5352 		printf("0x%08x\n", ins_bytes[0] << 24
5353 				 | ins_bytes[1] << 16
5354 				 | ins_bytes[2] << 8
5355 				 | ins_bytes[3]);
5356 	}
5357 }
5358 #endif
5359 
5360 static void
5361 ahc_loadseq(struct ahc_softc *ahc)
5362 {
5363 	struct	cs cs_table[num_critical_sections];
5364 	u_int	begin_set[num_critical_sections];
5365 	u_int	end_set[num_critical_sections];
5366 	struct	patch *cur_patch;
5367 	u_int	cs_count;
5368 	u_int	cur_cs;
5369 	u_int	i;
5370 	int	downloaded;
5371 	u_int	skip_addr;
5372 	u_int	sg_prefetch_cnt;
5373 	uint8_t	download_consts[7];
5374 
5375 	/*
5376 	 * Start out with 0 critical sections
5377 	 * that apply to this firmware load.
5378 	 */
5379 	cs_count = 0;
5380 	cur_cs = 0;
5381 	memset(begin_set, 0, sizeof(begin_set));
5382 	memset(end_set, 0, sizeof(end_set));
5383 
5384 	/* Set up the downloadable constant table */
5385 	download_consts[QOUTFIFO_OFFSET] = 0;
5386 	if (ahc->targetcmds != NULL)
5387 		download_consts[QOUTFIFO_OFFSET] += 32;
5388 	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
5389 	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
5390 	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
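	/*
	 * S/G prefetches are sized to the PCI cache line, but are
	 * never smaller than two S/G elements.
	 */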
5391 	sg_prefetch_cnt = ahc->pci_cachesize;
5392 	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
5393 		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
5394 	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
5395 	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
5396 	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
5397 
5398 	cur_patch = patches;
5399 	downloaded = 0;
5400 	skip_addr = 0;
5401 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5402 	ahc_outb(ahc, SEQADDR0, 0);
5403 	ahc_outb(ahc, SEQADDR1, 0);
5404 
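	/*
	 * Download the program one 32-bit instruction at a time.
	 * Instructions belonging to rejected patches are skipped;
	 * everything else is relocated and written to sequencer RAM
	 * by ahc_download_instr().
	 */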
5405 	for (i = 0; i < sizeof(seqprog)/4; i++) {
5406 		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
5407 			/*
5408 			 * Don't download this instruction as it
5409 			 * is in a patch that was removed.
5410 			 */
5411 			continue;
5412 		}
5413 		/*
5414 		 * Move through the CS table until we find a CS
5415 		 * that might apply to this instruction.
5416 		 */
5417 		for (; cur_cs < num_critical_sections; cur_cs++) {
5418 			if (critical_sections[cur_cs].end <= i) {
5419 				if (begin_set[cs_count] == TRUE
5420 				 && end_set[cs_count] == FALSE) {
5421 					cs_table[cs_count].end = downloaded;
5422 					end_set[cs_count] = TRUE;
5423 					cs_count++;
5424 				}
5425 				continue;
5426 			}
5427 			if (critical_sections[cur_cs].begin <= i
5428 			 && begin_set[cs_count] == FALSE) {
5429 				cs_table[cs_count].begin = downloaded;
5430 				begin_set[cs_count] = TRUE;
5431 			}
5432 			break;
5433 		}
5434 		ahc_download_instr(ahc, i, download_consts);
5435 		downloaded++;
5436 	}
5437 
5438 	ahc->num_critical_sections = cs_count;
5439 	if (cs_count != 0) {
5440 
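		/*
		 * Preserve the critical section table, now expressed in
		 * terms of downloaded instruction addresses, so that code
		 * which pauses the sequencer can recognize and step out
		 * of these regions.
		 */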
5441 		cs_count *= sizeof(struct cs);
5442 		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
5443 		if (ahc->critical_sections == NULL)
5444 			panic("ahc_loadseq: Could not malloc");
5445 		memcpy(ahc->critical_sections, cs_table, cs_count);
5446 	}
5447 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
5448 	restart_sequencer(ahc);
5449 
5450 	if (bootverbose)
5451 		printf(" %d instructions downloaded\n", downloaded);
5452 }
5453 
5454 static int
5455 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
5456 		u_int start_instr, u_int *skip_addr)
5457 {
5458 	struct	patch *cur_patch;
5459 	struct	patch *last_patch;
5460 	u_int	num_patches;
5461 
5462 	num_patches = sizeof(patches)/sizeof(struct patch);
5463 	last_patch = &patches[num_patches];
5464 	cur_patch = *start_patch;
5465 
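	/*
	 * Evaluate every patch that begins at this instruction.  A
	 * patch whose predicate function returns zero is rejected:
	 * its instructions will be skipped and we hop over any
	 * dependent patches.  An accepted patch simply advances us
	 * to the next entry in the table.
	 */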
5466 	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
5467 
5468 		if (cur_patch->patch_func(ahc) == 0) {
5469 
5470 			/* Start rejecting code */
5471 			*skip_addr = start_instr + cur_patch->skip_instr;
5472 			cur_patch += cur_patch->skip_patch;
5473 		} else {
5474 			/* Accepted this patch.  Advance to the next
5475 			 * one and wait for our instruction pointer to
5476 			 * hit this point.
5477 			 */
5478 			cur_patch++;
5479 		}
5480 	}
5481 
5482 	*start_patch = cur_patch;
5483 	if (start_instr < *skip_addr)
5484 		/* Still skipping */
5485 		return (0);
5486 
5487 	return (1);
5488 }
5489 
5490 static void
5491 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
5492 {
5493 	union	ins_formats instr;
5494 	struct	ins_format1 *fmt1_ins;
5495 	struct	ins_format3 *fmt3_ins;
5496 	u_int	opcode;
5497 
5498 	/* Structure copy */
5499 	instr = *(union ins_formats*)&seqprog[instrptr * 4];
5500 
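	/*
	 * The instruction image is stored in little-endian byte
	 * order.  On big-endian hosts, swap to host order so the
	 * bit-fields below decode correctly; the bytes are swapped
	 * back just before being written to SEQRAM.
	 */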
5501 #if BYTE_ORDER == BIG_ENDIAN
5502 	opcode = instr.format.bytes[0];
5503 	instr.format.bytes[0] = instr.format.bytes[3];
5504 	instr.format.bytes[3] = opcode;
5505 	opcode = instr.format.bytes[1];
5506 	instr.format.bytes[1] = instr.format.bytes[2];
5507 	instr.format.bytes[2] = opcode;
5508 #endif
5509 
5510 	fmt1_ins = &instr.format1;
5511 	fmt3_ins = NULL;
5512 
5513 	/* Pull the opcode */
5514 	opcode = instr.format1.opcode;
5515 	switch (opcode) {
5516 	case AIC_OP_JMP:
5517 	case AIC_OP_JC:
5518 	case AIC_OP_JNC:
5519 	case AIC_OP_CALL:
5520 	case AIC_OP_JNE:
5521 	case AIC_OP_JNZ:
5522 	case AIC_OP_JE:
5523 	case AIC_OP_JZ:
5524 	{
5525 		struct patch *cur_patch;
5526 		int address_offset;
5527 		u_int address;
5528 		u_int skip_addr;
5529 		u_int i;
5530 
5531 		fmt3_ins = &instr.format3;
5532 		address_offset = 0;
5533 		address = fmt3_ins->address;
5534 		cur_patch = patches;
5535 		skip_addr = 0;
5536 
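		/*
		 * Count the instructions ahead of this branch target
		 * that belong to rejected patches and reduce the target
		 * address by that amount so it refers to the program as
		 * actually downloaded.
		 */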
5537 		for (i = 0; i < address;) {
5538 
5539 			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
5540 
5541 			if (skip_addr > i) {
5542 				int end_addr;
5543 
5544 				end_addr = MIN(address, skip_addr);
5545 				address_offset += end_addr - i;
5546 				i = skip_addr;
5547 			} else {
5548 				i++;
5549 			}
5550 		}
5551 		address -= address_offset;
5552 		fmt3_ins->address = address;
5553 		/* FALLTHROUGH */
5554 	}
5555 	case AIC_OP_OR:
5556 	case AIC_OP_AND:
5557 	case AIC_OP_XOR:
5558 	case AIC_OP_ADD:
5559 	case AIC_OP_ADC:
5560 	case AIC_OP_BMOV:
5561 		if (fmt1_ins->parity != 0) {
5562 			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
5563 		}
5564 		fmt1_ins->parity = 0;
5565 		if ((ahc->features & AHC_CMD_CHAN) == 0
5566 		 && opcode == AIC_OP_BMOV) {
5567 			/*
5568 			 * Block move was added at the same time
5569 			 * as the command channel.  Verify that
5570 			 * this is only a move of a single element
5571 			 * and convert the BMOV to a MOV
5572 			 * (AND with an immediate of FF).
5573 			 */
5574 			if (fmt1_ins->immediate != 1)
5575 				panic("%s: BMOV not supported\n",
5576 				      ahc_name(ahc));
5577 			fmt1_ins->opcode = AIC_OP_AND;
5578 			fmt1_ins->immediate = 0xff;
5579 		}
5580 		/* FALLTHROUGH */
5581 	case AIC_OP_ROL:
5582 		if ((ahc->features & AHC_ULTRA2) != 0) {
5583 			int i, count;
5584 
5585 			/* Calculate odd parity for the instruction */
5586 			for (i = 0, count = 0; i < 31; i++) {
5587 				uint32_t mask;
5588 
5589 				mask = 0x01 << i;
5590 				if ((instr.integer & mask) != 0)
5591 					count++;
5592 			}
5593 			if ((count & 0x01) == 0)
5594 				instr.format1.parity = 1;
5595 		} else {
5596 			/* Compress the instruction for older sequencers */
5597 			if (fmt3_ins != NULL) {
5598 				instr.integer =
5599 					fmt3_ins->immediate
5600 				      | (fmt3_ins->source << 8)
5601 				      | (fmt3_ins->address << 16)
5602 				      |	(fmt3_ins->opcode << 25);
5603 			} else {
5604 				instr.integer =
5605 					fmt1_ins->immediate
5606 				      | (fmt1_ins->source << 8)
5607 				      | (fmt1_ins->destination << 16)
5608 				      |	(fmt1_ins->ret << 24)
5609 				      |	(fmt1_ins->opcode << 25);
5610 			}
5611 		}
5612 #if BYTE_ORDER == BIG_ENDIAN
5613 		opcode = instr.format.bytes[0];
5614 		instr.format.bytes[0] = instr.format.bytes[3];
5615 		instr.format.bytes[3] = opcode;
5616 		opcode = instr.format.bytes[1];
5617 		instr.format.bytes[1] = instr.format.bytes[2];
5618 		instr.format.bytes[2] = opcode;
5619 #endif
5620 		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
5621 		break;
5622 	default:
5623 		panic("Unknown opcode encountered in seq program");
5624 		break;
5625 	}
5626 }
5627 
5628 void
5629 ahc_dump_card_state(struct ahc_softc *ahc)
5630 {
5631 	struct scb *scb;
5632 	struct scb_tailq *untagged_q;
5633 	int target;
5634 	int maxtarget;
5635 	int i;
5636 	uint8_t qinpos;
5637 	uint8_t qintail;
5638 	uint8_t qoutpos;
5639 	uint8_t scb_index;
5640 	uint8_t saved_scbptr;
5641 
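	/*
	 * Walking the sequencer's SCB lists below modifies SCBPTR,
	 * so remember its current value and restore it once the dump
	 * is complete.
	 */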
5642 	saved_scbptr = ahc_inb(ahc, SCBPTR);
5643 
5644 	printf("%s: Dumping Card State at SEQADDR 0x%x\n",
5645 	       ahc_name(ahc),
5646 	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
5647 
5648 	printf("SCB count = %d\n", ahc->scb_data->numscbs);
5649 	/* QINFIFO */
5650 	printf("QINFIFO entries: ");
5651 	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5652 		qinpos = ahc_inb(ahc, SNSCB_QOFF);
5653 		ahc_outb(ahc, SNSCB_QOFF, qinpos);
5654 	} else
5655 		qinpos = ahc_inb(ahc, QINPOS);
5656 	qintail = ahc->qinfifonext;
5657 	while (qinpos != qintail) {
5658 		printf("%d ", ahc->qinfifo[qinpos]);
5659 		qinpos++;
5660 	}
5661 	printf("\n");
5662 
5663 	printf("Waiting Queue entries: ");
5664 	scb_index = ahc_inb(ahc, WAITING_SCBH);
5665 	i = 0;
5666 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
5667 		ahc_outb(ahc, SCBPTR, scb_index);
5668 		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
5669 		scb_index = ahc_inb(ahc, SCB_NEXT);
5670 	}
5671 	printf("\n");
5672 
5673 	printf("Disconnected Queue entries: ");
5674 	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
5675 	i = 0;
5676 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
5677 		ahc_outb(ahc, SCBPTR, scb_index);
5678 		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
5679 		scb_index = ahc_inb(ahc, SCB_NEXT);
5680 	}
5681 	printf("\n");
5682 
5683 	printf("QOUTFIFO entries: ");
5684 	qoutpos = ahc->qoutfifonext;
5685 	i = 0;
5686 	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
5687 		printf("%d ", ahc->qoutfifo[qoutpos]);
5688 		qoutpos++;
5689 	}
5690 	printf("\n");
5691 
5692 	printf("Sequencer Free SCB List: ");
5693 	scb_index = ahc_inb(ahc, FREE_SCBH);
5694 	i = 0;
5695 	while (scb_index != SCB_LIST_NULL && i++ < 256) {
5696 		ahc_outb(ahc, SCBPTR, scb_index);
5697 		printf("%d ", scb_index);
5698 		scb_index = ahc_inb(ahc, SCB_NEXT);
5699 	}
5700 	printf("\n");
5701 
5702 	printf("Pending list: ");
5703 	i = 0;
5704 	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
5705 		if (i++ > 256)
5706 			break;
5707 		printf("%d ", scb->hscb->tag);
5708 	}
5709 	printf("\n");
5710 
5711 	printf("Kernel Free SCB list: ");
5712 	i = 0;
5713 	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
5714 		if (i++ > 256)
5715 			break;
5716 		printf("%d ", scb->hscb->tag);
5717 	}
5718 	printf("\n");
5719 
5720 	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
5721 	for (target = 0; target <= maxtarget; target++) {
5722 		untagged_q = &ahc->untagged_queues[target];
5723 		if (TAILQ_FIRST(untagged_q) == NULL)
5724 			continue;
5725 		printf("Untagged Q(%d): ", target);
5726 		i = 0;
5727 		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
5728 			if (i++ > 256)
5729 				break;
5730 			printf("%d ", scb->hscb->tag);
5731 		}
5732 		printf("\n");
5733 	}
5734 
5735 	ahc_platform_dump_card_state(ahc);
5736 	ahc_outb(ahc, SCBPTR, saved_scbptr);
5737 }
5738 
5739 /************************* Target Mode ****************************************/
5740 #ifdef AHC_TARGET_MODE
5741 cam_status
5742 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
5743 		    struct tmode_tstate **tstate, struct tmode_lstate **lstate,
5744 		    int notfound_failure)
5745 {
5746 
5747 	if ((ahc->features & AHC_TARGETMODE) == 0)
5748 		return (CAM_REQ_INVALID);
5749 
5750 	/*
5751 	 * Handle the 'black hole' device that sucks up
5752 	 * requests to unattached luns on enabled targets.
5753 	 */
5754 	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
5755 	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
5756 		*tstate = NULL;
5757 		*lstate = ahc->black_hole;
5758 	} else {
5759 		u_int max_id;
5760 
5761 		max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
5762 		if (ccb->ccb_h.target_id > max_id)
5763 			return (CAM_TID_INVALID);
5764 
5765 		if (ccb->ccb_h.target_lun > 7)
5766 			return (CAM_LUN_INVALID);
5767 
5768 		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
5769 		*lstate = NULL;
5770 		if (*tstate != NULL)
5771 			*lstate =
5772 			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
5773 	}
5774 
5775 	if (notfound_failure != 0 && *lstate == NULL)
5776 		return (CAM_PATH_INVALID);
5777 
5778 	return (CAM_REQ_CMP);
5779 }
5780 
5781 void
5782 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
5783 {
5784 	struct	   tmode_tstate *tstate;
5785 	struct	   tmode_lstate *lstate;
5786 	struct	   ccb_en_lun *cel;
5787 	cam_status status;
5788 	u_int	   target;
5789 	u_int	   lun;
5790 	u_int	   target_mask;
5791 	u_long	   s;
5792 	char	   channel;
5793 
5794 	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
5795 				     /*notfound_failure*/FALSE);
5796 
5797 	if (status != CAM_REQ_CMP) {
5798 		ccb->ccb_h.status = status;
5799 		return;
5800 	}
5801 
5802 	if ((ahc->features & AHC_MULTIROLE) != 0) {
5803 		u_int	   our_id;
5804 
5805 		if (cam_sim_bus(sim) == 0)
5806 			our_id = ahc->our_id;
5807 		else
5808 			our_id = ahc->our_id_b;
5809 
5810 		if (ccb->ccb_h.target_id != our_id) {
5811 			if ((ahc->features & AHC_MULTI_TID) != 0
5812 		   	 && (ahc->flags & AHC_INITIATORROLE) != 0) {
5813 				/*
5814 				 * Only allow additional targets if
5815 				 * the initiator role is disabled.
5816 				 * The hardware cannot handle a re-select-in
5817 				 * on the initiator id during a re-select-out
5818 				 * on a different target id.
5819 				 */
5820 				status = CAM_TID_INVALID;
5821 			} else if ((ahc->flags & AHC_INITIATORROLE) != 0
5822 				|| ahc->enabled_luns > 0) {
5823 				/*
5824 				 * Only allow our target id to change
5825 				 * if the initiator role is not configured
5826 				 * and there are no enabled luns which
5827 				 * are attached to the currently registered
5828 				 * scsi id.
5829 				 */
5830 				status = CAM_TID_INVALID;
5831 			}
5832 		}
5833 	}
5834 
5835 	if (status != CAM_REQ_CMP) {
5836 		ccb->ccb_h.status = status;
5837 		return;
5838 	}
5839 
5840 	/*
5841 	 * We now have an id that is valid.
5842 	 * If we aren't in target mode, switch modes.
5843 	 */
5844 	if ((ahc->flags & AHC_TARGETROLE) == 0
5845 	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
5846 		u_long	s;
5847 
5848 		printf("Configuring Target Mode\n");
5849 		ahc_lock(ahc, &s);
5850 		if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
5851 			ccb->ccb_h.status = CAM_BUSY;
5852 			ahc_unlock(ahc, &s);
5853 			return;
5854 		}
5855 		ahc->flags |= AHC_TARGETROLE;
5856 		if ((ahc->features & AHC_MULTIROLE) == 0)
5857 			ahc->flags &= ~AHC_INITIATORROLE;
5858 		pause_sequencer(ahc);
5859 		ahc_loadseq(ahc);
5860 		ahc_unlock(ahc, &s);
5861 	}
5862 	cel = &ccb->cel;
5863 	target = ccb->ccb_h.target_id;
5864 	lun = ccb->ccb_h.target_lun;
5865 	channel = SIM_CHANNEL(ahc, sim);
5866 	target_mask = 0x01 << target;
5867 	if (channel == 'B')
5868 		target_mask <<= 8;
5869 
5870 	if (cel->enable != 0) {
5871 		u_int scsiseq;
5872 
5873 		/* Are we already enabled?? */
5874 		if (lstate != NULL) {
5875 			xpt_print_path(ccb->ccb_h.path);
5876 			printf("Lun already enabled\n");
5877 			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
5878 			return;
5879 		}
5880 
5881 		if (cel->grp6_len != 0
5882 		 || cel->grp7_len != 0) {
5883 			/*
5884 			 * Don't (yet?) support vendor
5885 			 * specific commands.
5886 			 */
5887 			ccb->ccb_h.status = CAM_REQ_INVALID;
5888 			printf("Non-zero Group Codes\n");
5889 			return;
5890 		}
5891 
5892 		/*
5893 		 * Seems to be okay.
5894 		 * Set up our data structures.
5895 		 */
5896 		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
5897 			tstate = ahc_alloc_tstate(ahc, target, channel);
5898 			if (tstate == NULL) {
5899 				xpt_print_path(ccb->ccb_h.path);
5900 				printf("Couldn't allocate tstate\n");
5901 				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5902 				return;
5903 			}
5904 		}
5905 		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
5906 		if (lstate == NULL) {
5907 			xpt_print_path(ccb->ccb_h.path);
5908 			printf("Couldn't allocate lstate\n");
5909 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5910 			return;
5911 		}
5912 		memset(lstate, 0, sizeof(*lstate));
5913 		status = xpt_create_path(&lstate->path, /*periph*/NULL,
5914 					 xpt_path_path_id(ccb->ccb_h.path),
5915 					 xpt_path_target_id(ccb->ccb_h.path),
5916 					 xpt_path_lun_id(ccb->ccb_h.path));
5917 		if (status != CAM_REQ_CMP) {
5918 			free(lstate, M_DEVBUF);
5919 			xpt_print_path(ccb->ccb_h.path);
5920 			printf("Couldn't allocate path\n");
5921 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5922 			return;
5923 		}
5924 		SLIST_INIT(&lstate->accept_tios);
5925 		SLIST_INIT(&lstate->immed_notifies);
5926 		ahc_lock(ahc, &s);
5927 		pause_sequencer(ahc);
5928 		if (target != CAM_TARGET_WILDCARD) {
5929 			tstate->enabled_luns[lun] = lstate;
5930 			ahc->enabled_luns++;
5931 
5932 			if ((ahc->features & AHC_MULTI_TID) != 0) {
5933 				u_int targid_mask;
5934 
5935 				targid_mask = ahc_inb(ahc, TARGID)
5936 					    | (ahc_inb(ahc, TARGID + 1) << 8);
5937 
5938 				targid_mask |= target_mask;
5939 				ahc_outb(ahc, TARGID, targid_mask);
5940 				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
5941 
5942 				ahc_update_scsiid(ahc, targid_mask);
5943 			} else {
5944 				u_int our_id;
5945 				char  channel;
5946 
5947 				channel = SIM_CHANNEL(ahc, sim);
5948 				our_id = SIM_SCSI_ID(ahc, sim);
5949 
5950 				/*
5951 				 * This can only happen if selections
5952 				 * are not enabled
5953 				 */
5954 				if (target != our_id) {
5955 					u_int sblkctl;
5956 					char  cur_channel;
5957 					int   swap;
5958 
5959 					sblkctl = ahc_inb(ahc, SBLKCTL);
5960 					cur_channel = (sblkctl & SELBUSB)
5961 						    ? 'B' : 'A';
5962 					if ((ahc->features & AHC_TWIN) == 0)
5963 						cur_channel = 'A';
5964 					swap = cur_channel != channel;
5965 					if (channel == 'A')
5966 						ahc->our_id = target;
5967 					else
5968 						ahc->our_id_b = target;
5969 
5970 					if (swap)
5971 						ahc_outb(ahc, SBLKCTL,
5972 							 sblkctl ^ SELBUSB);
5973 
5974 					ahc_outb(ahc, SCSIID, target);
5975 
5976 					if (swap)
5977 						ahc_outb(ahc, SBLKCTL, sblkctl);
5978 				}
5979 			}
5980 		} else
5981 			ahc->black_hole = lstate;
5982 		/* Allow select-in operations */
5983 		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
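			/*
			 * Set ENSELI in both the saved SCSISEQ template and
			 * the live SCSISEQ register.
			 */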
5984 			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
5985 			scsiseq |= ENSELI;
5986 			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
5987 			scsiseq = ahc_inb(ahc, SCSISEQ);
5988 			scsiseq |= ENSELI;
5989 			ahc_outb(ahc, SCSISEQ, scsiseq);
5990 		}
5991 		unpause_sequencer(ahc);
5992 		ahc_unlock(ahc, &s);
5993 		ccb->ccb_h.status = CAM_REQ_CMP;
5994 		xpt_print_path(ccb->ccb_h.path);
5995 		printf("Lun now enabled for target mode\n");
5996 	} else {
5997 		struct scb *scb;
5998 		int i, empty;
5999 
6000 		if (lstate == NULL) {
6001 			ccb->ccb_h.status = CAM_LUN_INVALID;
6002 			return;
6003 		}
6004 
6005 		ahc_lock(ahc, &s);
6006 
6007 		ccb->ccb_h.status = CAM_REQ_CMP;
6008 		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6009 			struct ccb_hdr *ccbh;
6010 
6011 			ccbh = &scb->io_ctx->ccb_h;
6012 			if (ccbh->func_code == XPT_CONT_TARGET_IO
6013 			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
6014 				printf("CTIO pending\n");
6015 				ccb->ccb_h.status = CAM_REQ_INVALID;
6016 				ahc_unlock(ahc, &s);
6017 				return;
6018 			}
6019 		}
6020 
6021 		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
6022 			printf("ATIOs pending\n");
6023 			ccb->ccb_h.status = CAM_REQ_INVALID;
6024 		}
6025 
6026 		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
6027 			printf("INOTs pending\n");
6028 			ccb->ccb_h.status = CAM_REQ_INVALID;
6029 		}
6030 
6031 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
6032 			ahc_unlock(ahc, &s);
6033 			return;
6034 		}
6035 
6036 		xpt_print_path(ccb->ccb_h.path);
6037 		printf("Target mode disabled\n");
6038 		xpt_free_path(lstate->path);
6039 		free(lstate, M_DEVBUF);
6040 
6041 		pause_sequencer(ahc);
6042 		/* Can we clean up the target too? */
6043 		if (target != CAM_TARGET_WILDCARD) {
6044 			tstate->enabled_luns[lun] = NULL;
6045 			ahc->enabled_luns--;
6046 			for (empty = 1, i = 0; i < 8; i++)
6047 				if (tstate->enabled_luns[i] != NULL) {
6048 					empty = 0;
6049 					break;
6050 				}
6051 
6052 			if (empty) {
6053 				ahc_free_tstate(ahc, target, channel,
6054 						/*force*/FALSE);
6055 				if (ahc->features & AHC_MULTI_TID) {
6056 					u_int targid_mask;
6057 
6058 					targid_mask = ahc_inb(ahc, TARGID)
6059 						    | (ahc_inb(ahc, TARGID + 1)
6060 						       << 8);
6061 
6062 					targid_mask &= ~target_mask;
6063 					ahc_outb(ahc, TARGID, targid_mask);
6064 					ahc_outb(ahc, TARGID+1,
6065 					 	 (targid_mask >> 8));
6066 					ahc_update_scsiid(ahc, targid_mask);
6067 				}
6068 			}
6069 		} else {
6070 
6071 			ahc->black_hole = NULL;
6072 
6073 			/*
6074 			 * We can't allow selections without
6075 			 * our black hole device.
6076 			 */
6077 			empty = TRUE;
6078 		}
6079 		if (ahc->enabled_luns == 0) {
6080 			/* Disallow select-in */
6081 			u_int scsiseq;
6082 
6083 			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6084 			scsiseq &= ~ENSELI;
6085 			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
6086 			scsiseq = ahc_inb(ahc, SCSISEQ);
6087 			scsiseq &= ~ENSELI;
6088 			ahc_outb(ahc, SCSISEQ, scsiseq);
6089 
6090 			if ((ahc->features & AHC_MULTIROLE) == 0) {
6091 				printf("Configuring Initiator Mode\n");
6092 				ahc->flags &= ~AHC_TARGETROLE;
6093 				ahc->flags |= AHC_INITIATORROLE;
6094 				pause_sequencer(ahc);
6095 				ahc_loadseq(ahc);
6096 			}
6097 		}
6098 		unpause_sequencer(ahc);
6099 		ahc_unlock(ahc, &s);
6100 	}
6101 }
6102 
6103 static void
6104 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
6105 {
6106 	u_int scsiid_mask;
6107 	u_int scsiid;
6108 
6109 	if ((ahc->features & AHC_MULTI_TID) == 0)
6110 		panic("ahc_update_scsiid called on non-multitid unit\n");
6111 
6112 	/*
6113 	 * Since we will rely on the TARGID mask
6114 	 * for selection enables, ensure that OID
6115 	 * in SCSIID is not set to some other ID
6116 	 * that we don't want to allow selections on.
6117 	 */
6118 	if ((ahc->features & AHC_ULTRA2) != 0)
6119 		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
6120 	else
6121 		scsiid = ahc_inb(ahc, SCSIID);
6122 	scsiid_mask = 0x1 << (scsiid & OID);
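	/*
	 * If the current OID is no longer among the enabled target
	 * IDs, switch it to the lowest enabled ID, falling back to
	 * our initiator ID when the mask is empty.
	 */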
6123 	if ((targid_mask & scsiid_mask) == 0) {
6124 		u_int our_id;
6125 
6126 		/* ffs counts from 1 */
6127 		our_id = ffs(targid_mask);
6128 		if (our_id == 0)
6129 			our_id = ahc->our_id;
6130 		else
6131 			our_id--;
6132 		scsiid &= TID;
6133 		scsiid |= our_id;
6134 	}
6135 	if ((ahc->features & AHC_ULTRA2) != 0)
6136 		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
6137 	else
6138 		ahc_outb(ahc, SCSIID, scsiid);
6139 }
6140 
6141 void
6142 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
6143 {
6144 	struct target_cmd *cmd;
6145 
6146 	/*
6147 	 * If the card supports auto-access pause,
6148 	 * we can access the card directly regardless
6149 	 * of whether it is paused or not.
6150 	 */
6151 	if ((ahc->features & AHC_AUTOPAUSE) != 0)
6152 		paused = TRUE;
6153 
6154 	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
6155 
6156 		/*
6157 		 * Only advance through the queue if we
6158 		 * have the resources to process the command.
6159 		 */
6160 		if (ahc_handle_target_cmd(ahc, cmd) != 0)
6161 			break;
6162 
6163 		ahc->tqinfifonext++;
6164 		cmd->cmd_valid = 0;
6165 
6166 		/*
6167 		 * Lazily update our position in the target mode incoming
6168 		 * command queue as seen by the sequencer.
6169 		 */
6170 		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
6171 			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
6172 				u_int hs_mailbox;
6173 
6174 				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
6175 				hs_mailbox &= ~HOST_TQINPOS;
6176 				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
6177 				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
6178 			} else {
6179 				if (!paused)
6180 					pause_sequencer(ahc);
6181 				ahc_outb(ahc, KERNEL_TQINPOS,
6182 					 ahc->tqinfifonext & HOST_TQINPOS);
6183 				if (!paused)
6184 					unpause_sequencer(ahc);
6185 			}
6186 		}
6187 	}
6188 }
6189 
6190 static int
6191 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
6192 {
6193 	struct	  tmode_tstate *tstate;
6194 	struct	  tmode_lstate *lstate;
6195 	struct	  ccb_accept_tio *atio;
6196 	uint8_t *byte;
6197 	int	  initiator;
6198 	int	  target;
6199 	int	  lun;
6200 
6201 	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
6202 	target = SCSIID_OUR_ID(cmd->scsiid);
6203 	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
6204 
6205 	byte = cmd->bytes;
6206 	tstate = ahc->enabled_targets[target];
6207 	lstate = NULL;
6208 	if (tstate != NULL)
6209 		lstate = tstate->enabled_luns[lun];
6210 
6211 	/*
6212 	 * Commands for disabled luns go to the black hole driver.
6213 	 */
6214 	if (lstate == NULL)
6215 		lstate = ahc->black_hole;
6216 
6217 	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
6218 	if (atio == NULL) {
6219 		ahc->flags |= AHC_TQINFIFO_BLOCKED;
6220 		/*
6221 		 * Wait for more ATIOs from the peripheral driver for this lun.
6222 		 */
6223 		return (1);
6224 	} else
6225 		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
6226 #if 0
6227 	printf("Incoming command from %d for %d:%d%s\n",
6228 	       initiator, target, lun,
6229 	       lstate == ahc->black_hole ? "(Black Holed)" : "");
6230 #endif
6231 	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
6232 
6233 	if (lstate == ahc->black_hole) {
6234 		/* Fill in the wildcards */
6235 		atio->ccb_h.target_id = target;
6236 		atio->ccb_h.target_lun = lun;
6237 	}
6238 
6239 	/*
6240 	 * Package it up and send it off to
6241 	 * whoever has this lun enabled.
6242 	 */
6243 	atio->sense_len = 0;
6244 	atio->init_id = initiator;
6245 	if (byte[0] != 0xFF) {
6246 		/* Tag was included */
6247 		atio->tag_action = *byte++;
6248 		atio->tag_id = *byte++;
6249 		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
6250 	} else {
6251 		atio->ccb_h.flags = 0;
6252 	}
6253 	byte++;
6254 
6255 	/* Okay.  Now determine the cdb size based on the command code */
6256 	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
6257 	case 0:
6258 		atio->cdb_len = 6;
6259 		break;
6260 	case 1:
6261 	case 2:
6262 		atio->cdb_len = 10;
6263 		break;
6264 	case 4:
6265 		atio->cdb_len = 16;
6266 		break;
6267 	case 5:
6268 		atio->cdb_len = 12;
6269 		break;
6270 	case 3:
6271 	default:
6272 		/* Only copy the opcode. */
6273 		atio->cdb_len = 1;
6274 		printf("Reserved or VU command code type encountered\n");
6275 		break;
6276 	}
6277 
6278 	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
6279 
6280 	atio->ccb_h.status |= CAM_CDB_RECVD;
6281 
6282 	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
6283 		/*
6284 		 * We weren't allowed to disconnect.
6285 		 * We're hanging on the bus until a
6286 		 * continue target I/O comes in response
6287 		 * to this accept tio.
6288 		 */
6289 #if 0
6290 		printf("Received Immediate Command %d:%d:%d - %p\n",
6291 		       initiator, target, lun, ahc->pending_device);
6292 #endif
6293 		ahc->pending_device = lstate;
6294 		ahc_freeze_ccb((union ccb *)atio);
6295 		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
6296 	}
6297 	xpt_done((union ccb*)atio);
6298 	return (0);
6299 }
6300 
6301 #endif
6302