xref: /freebsd/sys/dev/aic7xxx/aic79xx_osm.c (revision a3e8fd0b7f663db7eafff527d5c3ca3bcfa8a537)
1 /*
2  * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
3  *
4  * Copyright (c) 1994-2002 Justin T. Gibbs.
5  * Copyright (c) 2001-2002 Adaptec Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * Alternatively, this software may be distributed under the terms of the
18  * GNU Public License ("GPL").
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
24  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#22 $
33  *
34  * $FreeBSD$
35  */
36 
37 #include <dev/aic7xxx/aic79xx_osm.h>
38 #include <dev/aic7xxx/aic79xx_inline.h>
39 
40 #include "opt_ddb.h"
41 #ifdef DDB
42 #include <ddb/ddb.h>
43 #endif
44 
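/*
 * Unless overridden by the build configuration, default to leaving
 * target mode disabled.
 */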
45 #ifndef AHD_TMODE_ENABLE
46 #define AHD_TMODE_ENABLE 0
47 #endif
48 
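/*
 * Alias one of the CCB header's SIM private pointers so that each CCB
 * can carry a pointer back to the SCB servicing it (used to locate the
 * SCB when an abort is requested).
 */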
49 #define ccb_scb_ptr spriv_ptr0
50 
51 #if UNUSED
52 static void	ahd_dump_targcmd(struct target_cmd *cmd);
53 #endif
54 static int	ahd_modevent(module_t mod, int type, void *data);
55 static void	ahd_action(struct cam_sim *sim, union ccb *ccb);
56 static void	ahd_set_tran_settings(struct ahd_softc *ahd,
57 				      int our_id, char channel,
58 				      struct ccb_trans_settings *cts);
59 static void	ahd_get_tran_settings(struct ahd_softc *ahd,
60 				      int our_id, char channel,
61 				      struct ccb_trans_settings *cts);
62 static void	ahd_async(void *callback_arg, uint32_t code,
63 			  struct cam_path *path, void *arg);
64 static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
65 				int nsegments, int error);
66 static void	ahd_poll(struct cam_sim *sim);
67 static void	ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
68 			       struct ccb_scsiio *csio, struct scb *scb);
69 static void	ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim,
70 			      union ccb *ccb);
71 static int	ahd_create_path(struct ahd_softc *ahd,
72 				char channel, u_int target, u_int lun,
73 				struct cam_path **path);
74 
75 #if NOT_YET
76 static void	ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
77 #endif
78 
79 static int
80 ahd_create_path(struct ahd_softc *ahd, char channel, u_int target,
81 	        u_int lun, struct cam_path **path)
82 {
83 	path_id_t path_id;
84 
85 	if (channel == 'B')
86 		path_id = cam_sim_path(ahd->platform_data->sim_b);
87 	else
88 		path_id = cam_sim_path(ahd->platform_data->sim);
89 
90 	return (xpt_create_path(path, /*periph*/NULL,
91 				path_id, target, lun));
92 }
93 
94 int
95 ahd_map_int(struct ahd_softc *ahd)
96 {
97 	int error;
98 
99 	/* Hook up our interrupt handler */
100 	error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq,
101 			       INTR_TYPE_CAM, ahd_platform_intr, ahd,
102 			       &ahd->platform_data->ih);
103 	if (error != 0)
104 		device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n",
105 			      error);
106 	return (error);
107 }
108 
109 /*
110  * Attach all the sub-devices we can find
111  */
112 int
113 ahd_attach(struct ahd_softc *ahd)
114 {
115 	char   ahd_info[256];
116 	struct ccb_setasync csa;
117 	struct cam_devq *devq;
118 	struct cam_sim *sim;
119 	struct cam_path *path;
120 	long s;
121 	int count;
122 
123 	count = 0;
124 	sim = NULL;
125 
126 	ahd_controller_info(ahd, ahd_info);
127 	printf("%s\n", ahd_info);
128 	ahd_lock(ahd, &s);
129 
130 	/*
131 	 * Create the device queue for our SIM(s).
132 	 */
133 	devq = cam_simq_alloc(AHD_MAX_QUEUE);
134 	if (devq == NULL)
135 		goto fail;
136 
137 	/*
138 	 * Construct our SIM entry
139 	 */
140 	sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd,
141 			    device_get_unit(ahd->dev_softc),
142 			    1, /*XXX*/256, devq);
143 	if (sim == NULL) {
144 		cam_simq_free(devq);
145 		goto fail;
146 	}
147 
148 	if (xpt_bus_register(sim, /*bus_id*/0) != CAM_SUCCESS) {
149 		cam_sim_free(sim, /*free_devq*/TRUE);
150 		sim = NULL;
151 		goto fail;
152 	}
153 
154 	if (xpt_create_path(&path, /*periph*/NULL,
155 			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
156 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
157 		xpt_bus_deregister(cam_sim_path(sim));
158 		cam_sim_free(sim, /*free_devq*/TRUE);
159 		sim = NULL;
160 		goto fail;
161 	}
162 
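	/*
	 * Register for asynchronous event notification.  We only care
	 * about lost device events, so that ahd_async() can revert the
	 * departed device's transfer negotiation to async/narrow.
	 */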
163 	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
164 	csa.ccb_h.func_code = XPT_SASYNC_CB;
165 	csa.event_enable = AC_LOST_DEVICE;
166 	csa.callback = ahd_async;
167 	csa.callback_arg = sim;
168 	xpt_action((union ccb *)&csa);
169 	count++;
170 
171 fail:
172 	ahd->platform_data->sim = sim;
173 	ahd->platform_data->path = path;
174 	if (count != 0) {
175 		/* We have to wait until after any system dumps... */
176 		ahd->platform_data->eh =
177 		    EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown,
178 					  ahd, SHUTDOWN_PRI_DEFAULT);
179 		ahd_intr_enable(ahd, TRUE);
180 	}
181 
182 	ahd_unlock(ahd, &s);
183 
184 	return (count);
185 }
186 
187 /*
188  * Catch an interrupt from the adapter
189  */
190 void
191 ahd_platform_intr(void *arg)
192 {
193 	struct	ahd_softc *ahd;
194 
195 	ahd = (struct ahd_softc *)arg;
196 	ahd_intr(ahd);
197 }
198 
199 /*
200  * We have an scb which has been processed by the
201  * adapter; now we look to see how the operation
202  * went.
203  */
204 void
205 ahd_done(struct ahd_softc *ahd, struct scb *scb)
206 {
207 	union ccb *ccb;
208 
209 	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
210 		  ("ahd_done - scb %d\n", SCB_GET_TAG(scb)));
211 
212 	ccb = scb->io_ctx;
213 	LIST_REMOVE(scb, pending_links);
214 
215 	untimeout(ahd_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);
216 
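	/*
	 * If data was transferred, synchronize the DMA map for the CPU
	 * and unload it now that the controller is done with the buffer.
	 */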
217 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
218 		bus_dmasync_op_t op;
219 
220 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
221 			op = BUS_DMASYNC_POSTREAD;
222 		else
223 			op = BUS_DMASYNC_POSTWRITE;
224 		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
225 		bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
226 	}
227 
228 #ifdef AHD_TARGET_MODE
229 	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
230 		struct cam_path *ccb_path;
231 
232 		/*
233 		 * If we have finally disconnected, clean up our
234 		 * pending device state.
235 		 * XXX - There may be error states that cause us
236 		 *       to remain connected.
237 		 */
238 		ccb_path = ccb->ccb_h.path;
239 		if (ahd->pending_device != NULL
240 		 && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) {
241 
242 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
243 				ahd->pending_device = NULL;
244 			} else {
245 				xpt_print_path(ccb->ccb_h.path);
246 				printf("Still disconnected\n");
247 				ahd_freeze_ccb(ccb);
248 			}
249 		}
250 
251 		if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG)
252 			ccb->ccb_h.status |= CAM_REQ_CMP;
253 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
254 		ahd_free_scb(ahd, scb);
255 		xpt_done(ccb);
256 		return;
257 	}
258 #endif
259 
260 	/*
261 	 * If the recovery SCB completes, we have to be
262 	 * out of our timeout.
263 	 */
264 	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
265 		struct	scb *list_scb;
266 
267 		/*
268 		 * We were able to complete the command successfully,
269 		 * so reinstate the timeouts for all other pending
270 		 * commands.
271 		 */
272 		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
273 			union ccb *ccb;
274 			uint64_t time;
275 
276 			ccb = list_scb->io_ctx;
277 			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
278 				continue;
279 
280 			time = ccb->ccb_h.timeout;
281 			time *= hz;
282 			time /= 1000;
283 			ccb->ccb_h.timeout_ch =
284 			    timeout(ahd_timeout, list_scb, time);
285 		}
286 
287 		if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
288 		 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
289 			ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
290 		ahd_print_path(ahd, scb);
291 		printf("no longer in timeout, status = %x\n",
292 		       ccb->ccb_h.status);
293 	}
294 
295 	/* Don't clobber any existing error state */
296 	if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
297 		ccb->ccb_h.status |= CAM_REQ_CMP;
298 	} else if ((scb->flags & SCB_SENSE) != 0) {
299 		/*
300 		 * We performed autosense retrieval.
301 		 *
302 		 * Zero any sense not transferred by the
303 		 * device.  The SCSI spec mandates that any
304 		 * untransferred data should be assumed to be
305 		 * zero.  Complete the 'bounce' of sense information
306 		 * through buffers accessible via bus-space by
307 		 * copying it into the client's csio.
308 		 */
309 		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
310 		memcpy(&ccb->csio.sense_data,
311 		       ahd_get_sense_buf(ahd, scb),
312 /* XXX What size do we want to use??? */
313 			sizeof(ccb->csio.sense_data)
314 		       - ccb->csio.sense_resid);
315 		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
316 	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
317 		struct scsi_status_iu_header *siu;
318 		u_int sense_len;
319 		int i;
320 
321 		/*
322 		 * Copy only the sense data into the provided buffer.
323 		 */
324 		siu = (struct scsi_status_iu_header *)scb->sense_data;
325 		sense_len = MIN(scsi_4btoul(siu->sense_length),
326 				sizeof(ccb->csio.sense_data));
327 		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
328 		memcpy(&ccb->csio.sense_data,
329 		       ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu),
330 		       sense_len);
331 		printf("Copied %d bytes of sense data offset %d:", sense_len,
332 		       SIU_SENSE_OFFSET(siu));
333 		for (i = 0; i < sense_len; i++)
334 			printf(" 0x%x", ((uint8_t *)&ccb->csio.sense_data)[i]);
335 		printf("\n");
336 		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
337 	}
338 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
339 	ahd_free_scb(ahd, scb);
340 	xpt_done(ccb);
341 }
342 
343 static void
344 ahd_action(struct cam_sim *sim, union ccb *ccb)
345 {
346 	struct	ahd_softc *ahd;
347 #ifdef AHD_TARGET_MODE
348 	struct	ahd_tmode_lstate *lstate;
349 #endif
350 	u_int	target_id;
351 	u_int	our_id;
352 	long	s;
353 
354 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n"));
355 
356 	ahd = (struct ahd_softc *)cam_sim_softc(sim);
357 
358 	target_id = ccb->ccb_h.target_id;
359 	our_id = SIM_SCSI_ID(ahd, sim);
360 
361 	switch (ccb->ccb_h.func_code) {
362 	/* Common cases first */
363 #ifdef AHD_TARGET_MODE
364 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
365 	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
366 	{
367 		struct	   ahd_tmode_tstate *tstate;
368 		cam_status status;
369 
370 		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
371 					     &lstate, TRUE);
372 
373 		if (status != CAM_REQ_CMP) {
374 			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
375 				/* Response from the black hole device */
376 				tstate = NULL;
377 				lstate = ahd->black_hole;
378 			} else {
379 				ccb->ccb_h.status = status;
380 				xpt_done(ccb);
381 				break;
382 			}
383 		}
384 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
385 
386 			ahd_lock(ahd, &s);
387 			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
388 					  sim_links.sle);
389 			ccb->ccb_h.status = CAM_REQ_INPROG;
390 			if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0)
391 				ahd_run_tqinfifo(ahd, /*paused*/FALSE);
392 			ahd_unlock(ahd, &s);
393 			break;
394 		}
395 
396 		/*
397 		 * The target_id represents the target we attempt to
398 		 * select.  In target mode, this is the initiator of
399 		 * the original command.
400 		 */
401 		our_id = target_id;
402 		target_id = ccb->csio.init_id;
403 		/* FALLTHROUGH */
404 	}
405 #endif
406 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
407 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
408 	{
409 		struct	scb *scb;
410 		struct	hardware_scb *hscb;
411 		struct	ahd_initiator_tinfo *tinfo;
412 		struct	ahd_tmode_tstate *tstate;
413 		u_int	col_idx;
414 
415 		if ((ahd->flags & AHD_INITIATORROLE) == 0
416 		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
417 		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
418 			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
419 			xpt_done(ccb);
420 			return;
421 		}
422 
423 		/*
424 		 * get an scb to use.
425 		 */
426 		ahd_lock(ahd, &s);
427 		tinfo = ahd_fetch_transinfo(ahd, 'A', our_id,
428 					    target_id, &tstate);
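		/*
		 * Choose an SCB collision index: tagged, non-packetized
		 * initiator commands derive their index from the target
		 * and lun, while untagged, packetized (IU), and target
		 * mode requests use the "never collide" index.
		 */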
429 		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
430 		 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0
431 		 || ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
432 			col_idx = AHD_NEVER_COL_IDX;
433 		} else {
434 			col_idx = AHD_BUILD_COL_IDX(target_id,
435 						    ccb->ccb_h.target_lun);
436 		}
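		/*
		 * If no SCB is available, freeze the SIM queue and have
		 * CAM requeue this CCB once resources are released.
		 */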
437 		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
438 
439 			xpt_freeze_simq(sim, /*count*/1);
440 			ahd->flags |= AHD_RESOURCE_SHORTAGE;
441 			ahd_unlock(ahd, &s);
442 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
443 			xpt_done(ccb);
444 			return;
445 		}
446 		ahd_unlock(ahd, &s);
447 
448 		hscb = scb->hscb;
449 
450 		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
451 			  ("start scb(%p)\n", scb));
452 		scb->io_ctx = ccb;
453 		/*
454 		 * So we can find the SCB when an abort is requested
455 		 */
456 		ccb->ccb_h.ccb_scb_ptr = scb;
457 
458 		/*
459 		 * Put all the arguments for the xfer in the scb
460 		 */
461 		hscb->control = 0;
462 		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
463 		hscb->lun = ccb->ccb_h.target_lun;
464 		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
465 			hscb->cdb_len = 0;
466 			scb->flags |= SCB_DEVICE_RESET;
467 			hscb->control |= MK_MESSAGE;
468 			ahd_execute_scb(scb, NULL, 0, 0);
469 		} else {
470 #ifdef AHD_TARGET_MODE
471 			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
472 				struct target_data *tdata;
473 
474 				tdata = &hscb->shared_data.tdata;
475 				if (ahd->pending_device == lstate)
476 					scb->flags |= SCB_TARGET_IMMEDIATE;
477 				hscb->control |= TARGET_SCB;
478 				tdata->target_phases = 0;
479 				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
480 					tdata->target_phases |= SPHASE_PENDING;
481 					tdata->scsi_status =
482 					    ccb->csio.scsi_status;
483 				}
484 				if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
485 					tdata->target_phases |= NO_DISCONNECT;
486 
487 				tdata->initiator_tag =
488 				    ahd_htole16(ccb->csio.tag_id);
489 			}
490 #endif
491 			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
492 				hscb->control |= ccb->csio.tag_action;
493 
494 			ahd_setup_data(ahd, sim, &ccb->csio, scb);
495 		}
496 		break;
497 	}
498 #ifdef AHD_TARGET_MODE
499 	case XPT_NOTIFY_ACK:
500 	case XPT_IMMED_NOTIFY:
501 	{
502 		struct	   ahd_tmode_tstate *tstate;
503 		struct	   ahd_tmode_lstate *lstate;
504 		cam_status status;
505 
506 		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
507 					     &lstate, TRUE);
508 
509 		if (status != CAM_REQ_CMP) {
510 			ccb->ccb_h.status = status;
511 			xpt_done(ccb);
512 			break;
513 		}
514 		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
515 				  sim_links.sle);
516 		ccb->ccb_h.status = CAM_REQ_INPROG;
517 		ahd_send_lstate_events(ahd, lstate);
518 		break;
519 	}
520 	case XPT_EN_LUN:		/* Enable LUN as a target */
521 		ahd_handle_en_lun(ahd, sim, ccb);
522 		xpt_done(ccb);
523 		break;
524 #endif
525 	case XPT_ABORT:			/* Abort the specified CCB */
526 	{
527 		ahd_abort_ccb(ahd, sim, ccb);
528 		break;
529 	}
530 	case XPT_SET_TRAN_SETTINGS:
531 	{
532 		ahd_lock(ahd, &s);
533 		ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
534 				      SIM_CHANNEL(ahd, sim), &ccb->cts);
535 		ahd_unlock(ahd, &s);
536 		xpt_done(ccb);
537 		break;
538 	}
539 	case XPT_GET_TRAN_SETTINGS:
540 	/* Get default/user set transfer settings for the target */
541 	{
542 		ahd_lock(ahd, &s);
543 		ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
544 				      SIM_CHANNEL(ahd, sim), &ccb->cts);
545 		ahd_unlock(ahd, &s);
546 		xpt_done(ccb);
547 		break;
548 	}
549 	case XPT_CALC_GEOMETRY:
550 	{
551 		struct	  ccb_calc_geometry *ccg;
552 		uint32_t size_mb;
553 		uint32_t secs_per_cylinder;
554 		int	  extended;
555 
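		/*
		 * Synthesize a drive geometry: volumes over 1GB get the
		 * extended 255 head / 63 sector translation when extended
		 * translation is enabled; everything else gets 64 heads
		 * and 32 sectors per track.
		 */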
556 		ccg = &ccb->ccg;
557 		size_mb = ccg->volume_size
558 			/ ((1024L * 1024L) / ccg->block_size);
559 		extended = ahd->flags & AHD_EXTENDED_TRANS_A;
560 
561 		if (size_mb > 1024 && extended) {
562 			ccg->heads = 255;
563 			ccg->secs_per_track = 63;
564 		} else {
565 			ccg->heads = 64;
566 			ccg->secs_per_track = 32;
567 		}
568 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
569 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
570 		ccb->ccb_h.status = CAM_REQ_CMP;
571 		xpt_done(ccb);
572 		break;
573 	}
574 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
575 	{
576 		int  found;
577 
578 		ahd_lock(ahd, &s);
579 		found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
580 					  /*initiate reset*/TRUE);
581 		ahd_unlock(ahd, &s);
582 		if (bootverbose) {
583 			xpt_print_path(SIM_PATH(ahd, sim));
584 			printf("SCSI bus reset delivered. "
585 			       "%d SCBs aborted.\n", found);
586 		}
587 		ccb->ccb_h.status = CAM_REQ_CMP;
588 		xpt_done(ccb);
589 		break;
590 	}
591 	case XPT_TERM_IO:		/* Terminate the I/O process */
592 		/* XXX Implement */
593 		ccb->ccb_h.status = CAM_REQ_INVALID;
594 		xpt_done(ccb);
595 		break;
596 	case XPT_PATH_INQ:		/* Path routing inquiry */
597 	{
598 		struct ccb_pathinq *cpi = &ccb->cpi;
599 
600 		cpi->version_num = 1; /* XXX??? */
601 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
602 		if ((ahd->features & AHD_WIDE) != 0)
603 			cpi->hba_inquiry |= PI_WIDE_16;
604 		if ((ahd->features & AHD_TARGETMODE) != 0) {
605 			cpi->target_sprt = PIT_PROCESSOR
606 					 | PIT_DISCONNECT
607 					 | PIT_TERM_IO;
608 		} else {
609 			cpi->target_sprt = 0;
610 		}
611 		cpi->hba_misc = 0;
612 		cpi->hba_eng_cnt = 0;
613 		cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7;
614 		cpi->max_lun = AHD_NUM_LUNS - 1;
615 		cpi->initiator_id = ahd->our_id;
616 		if ((ahd->flags & AHD_RESET_BUS_A) == 0) {
617 			cpi->hba_misc |= PIM_NOBUSRESET;
618 		}
619 		cpi->bus_id = cam_sim_bus(sim);
620 		cpi->base_transfer_speed = 3300;
621 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
622 		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
623 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
624 		cpi->unit_number = cam_sim_unit(sim);
625 #ifdef AHD_NEW_TRAN_SETTINGS
626 		cpi->protocol = PROTO_SCSI;
627 		cpi->protocol_version = SCSI_REV_2;
628 		cpi->transport = XPORT_SPI;
631 		cpi->transport_version = 4;
632 		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST;
633 #endif
634 		cpi->ccb_h.status = CAM_REQ_CMP;
635 		xpt_done(ccb);
636 		break;
637 	}
638 	default:
639 		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
640 		xpt_done(ccb);
641 		break;
642 	}
643 }
644 
645 
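/*
 * Apply a CAM transfer settings request (disconnection, tagged queuing,
 * bus width, sync rate/offset, and PPR options) to either the target's
 * negotiation goal or the user defaults, validating each value against
 * controller limits before programming it.
 */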
646 static void
647 ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
648 		      struct ccb_trans_settings *cts)
649 {
650 #ifdef AHD_NEW_TRAN_SETTINGS
651 	struct	  ahd_devinfo devinfo;
652 	struct	  ccb_trans_settings_scsi *scsi;
653 	struct	  ccb_trans_settings_spi *spi;
654 	struct	  ahd_initiator_tinfo *tinfo;
655 	struct	  ahd_tmode_tstate *tstate;
656 	uint16_t *discenable;
657 	uint16_t *tagenable;
658 	u_int	  update_type;
659 
660 	scsi = &cts->proto_specific.scsi;
661 	spi = &cts->xport_specific.spi;
662 	ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
663 			    cts->ccb_h.target_id,
664 			    cts->ccb_h.target_lun,
665 			    SIM_CHANNEL(ahd, sim),
666 			    ROLE_UNKNOWN);
667 	tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
668 				    devinfo.our_scsiid,
669 				    devinfo.target, &tstate);
670 	update_type = 0;
671 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
672 		update_type |= AHD_TRANS_GOAL;
673 		discenable = &tstate->discenable;
674 		tagenable = &tstate->tagenable;
675 		tinfo->curr.protocol_version = cts->protocol_version;
676 		tinfo->curr.transport_version = cts->transport_version;
677 		tinfo->goal.protocol_version = cts->protocol_version;
678 		tinfo->goal.transport_version = cts->transport_version;
679 	} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
680 		update_type |= AHD_TRANS_USER;
681 		discenable = &ahd->user_discenable;
682 		tagenable = &ahd->user_tagenable;
683 		tinfo->user.protocol_version = cts->protocol_version;
684 		tinfo->user.transport_version = cts->transport_version;
685 	} else {
686 		cts->ccb_h.status = CAM_REQ_INVALID;
687 		return;
688 	}
689 
690 	if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
691 		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
692 			*discenable |= devinfo.target_mask;
693 		else
694 			*discenable &= ~devinfo.target_mask;
695 	}
696 
697 	if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
698 		if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
699 			*tagenable |= devinfo.target_mask;
700 		else
701 			*tagenable &= ~devinfo.target_mask;
702 	}
703 
704 	if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
705 		ahd_validate_width(ahd, /*tinfo limit*/NULL,
706 				   &spi->bus_width, ROLE_UNKNOWN);
707 		ahd_set_width(ahd, &devinfo, spi->bus_width,
708 			      update_type, /*paused*/FALSE);
709 	}
710 
711 	if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
712 		if (update_type == AHD_TRANS_USER)
713 			spi->ppr_options = tinfo->user.ppr_options;
714 		else
715 			spi->ppr_options = tinfo->goal.ppr_options;
716 	}
717 
718 	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
719 		if (update_type == AHD_TRANS_USER)
720 			spi->sync_offset = tinfo->user.offset;
721 		else
722 			spi->sync_offset = tinfo->goal.offset;
723 	}
724 
725 	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
726 		if (update_type == AHD_TRANS_USER)
727 			spi->sync_period = tinfo->user.period;
728 		else
729 			spi->sync_period = tinfo->goal.period;
730 	}
731 
732 	if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
733 	 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
734 		u_int	maxsync;
735 
736 		maxsync = AHD_SYNCRATE_MAX;
737 
738 		if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
739 			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
740 
741 		if ((*discenable & devinfo.target_mask) == 0)
742 			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
743 
744 		ahd_find_syncrate(ahd, &spi->sync_period,
745 				  &spi->ppr_options, maxsync);
746 		ahd_validate_offset(ahd, /*tinfo limit*/NULL,
747 				    spi->sync_period, &spi->sync_offset,
748 				    spi->bus_width, ROLE_UNKNOWN);
749 
750 		/* We use a period of 0 to represent async */
751 		if (spi->sync_offset == 0) {
752 			spi->sync_period = 0;
753 			spi->ppr_options = 0;
754 		}
755 
756 		ahd_set_syncrate(ahd, &devinfo, spi->sync_period,
757 				 spi->sync_offset, spi->ppr_options,
758 				 update_type, /*paused*/FALSE);
759 	}
760 	cts->ccb_h.status = CAM_REQ_CMP;
761 #else
762 	struct	  ahd_devinfo devinfo;
763 	struct	  ahd_initiator_tinfo *tinfo;
764 	struct	  ahd_tmode_tstate *tstate;
765 	uint16_t *discenable;
766 	uint16_t *tagenable;
767 	u_int	  update_type;
768 
769 	ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
770 			    cts->ccb_h.target_id,
771 			    cts->ccb_h.target_lun,
772 			    SIM_CHANNEL(ahd, sim),
773 			    ROLE_UNKNOWN);
774 	tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
775 				    devinfo.our_scsiid,
776 				    devinfo.target, &tstate);
777 	update_type = 0;
778 	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
779 		update_type |= AHD_TRANS_GOAL;
780 		discenable = &tstate->discenable;
781 		tagenable = &tstate->tagenable;
782 	} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
783 		update_type |= AHD_TRANS_USER;
784 		discenable = &ahd->user_discenable;
785 		tagenable = &ahd->user_tagenable;
786 	} else {
787 		cts->ccb_h.status = CAM_REQ_INVALID;
788 		return;
789 	}
790 
791 	if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
792 		if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
793 			*discenable |= devinfo.target_mask;
794 		else
795 			*discenable &= ~devinfo.target_mask;
796 	}
797 
798 	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
799 		if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
800 			*tagenable |= devinfo.target_mask;
801 		else
802 			*tagenable &= ~devinfo.target_mask;
803 	}
804 
805 	if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
806 		ahd_validate_width(ahd, /*tinfo limit*/NULL,
807 				   &cts->bus_width, ROLE_UNKNOWN);
808 		ahd_set_width(ahd, &devinfo, cts->bus_width,
809 			      update_type, /*paused*/FALSE);
810 	}
811 
812 	if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
813 		if (update_type == AHD_TRANS_USER)
814 			cts->sync_offset = tinfo->user.offset;
815 		else
816 			cts->sync_offset = tinfo->goal.offset;
817 	}
818 
819 	if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
820 		if (update_type == AHD_TRANS_USER)
821 			cts->sync_period = tinfo->user.period;
822 		else
823 			cts->sync_period = tinfo->goal.period;
824 	}
825 
826 	if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
827 	 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)
828 	 || ((cts->valid & CCB_TRANS_TQ_VALID) != 0)
829 	 || ((cts->valid & CCB_TRANS_DISC_VALID) != 0)) {
830 		u_int ppr_options;
831 		u_int maxsync;
832 
833 		maxsync = AHD_SYNCRATE_MAX;
834 		ppr_options = 0;
835 		if (cts->sync_period <= AHD_SYNCRATE_DT
836 		 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT) {
837 			ppr_options = tinfo->user.ppr_options
838 				    | MSG_EXT_PPR_DT_REQ;
839 		}
840 
841 		if ((*tagenable & devinfo.target_mask) == 0
842 		 || (*discenable & devinfo.target_mask) == 0)
843 			ppr_options &= ~MSG_EXT_PPR_IU_REQ;
844 
845 		ahd_find_syncrate(ahd, &cts->sync_period,
846 				  &ppr_options, maxsync);
847 		ahd_validate_offset(ahd, /*tinfo limit*/NULL,
848 				    cts->sync_period, &cts->sync_offset,
849 				    MSG_EXT_WDTR_BUS_8_BIT,
850 				    ROLE_UNKNOWN);
851 
852 		/* We use a period of 0 to represent async */
853 		if (cts->sync_offset == 0) {
854 			cts->sync_period = 0;
855 			ppr_options = 0;
856 		}
857 
858 		if (ppr_options != 0
859 		 && tinfo->user.transport_version >= 3) {
860 			tinfo->goal.transport_version =
861 			    tinfo->user.transport_version;
862 			tinfo->curr.transport_version =
863 			    tinfo->user.transport_version;
864 		}
865 
866 		ahd_set_syncrate(ahd, &devinfo, cts->sync_period,
867 				 cts->sync_offset, ppr_options,
868 				 update_type, /*paused*/FALSE);
869 	}
870 	cts->ccb_h.status = CAM_REQ_CMP;
871 #endif
872 }
873 
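/*
 * Report the current or user default transfer settings for a target
 * back to CAM.
 */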
874 static void
875 ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
876 		      struct ccb_trans_settings *cts)
877 {
878 #ifdef AHD_NEW_TRAN_SETTINGS
879 	struct	ahd_devinfo devinfo;
880 	struct	ccb_trans_settings_scsi *scsi;
881 	struct	ccb_trans_settings_spi *spi;
882 	struct	ahd_initiator_tinfo *targ_info;
883 	struct	ahd_tmode_tstate *tstate;
884 	struct	ahd_transinfo *tinfo;
885 
886 	scsi = &cts->proto_specific.scsi;
887 	spi = &cts->xport_specific.spi;
888 	ahd_compile_devinfo(&devinfo, our_id,
889 			    cts->ccb_h.target_id,
890 			    cts->ccb_h.target_lun,
891 			    channel, ROLE_UNKNOWN);
892 	targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
893 					devinfo.our_scsiid,
894 					devinfo.target, &tstate);
895 
896 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
897 		tinfo = &targ_info->curr;
898 	else
899 		tinfo = &targ_info->user;
900 
901 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
902 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
903 	if (cts->type == CTS_TYPE_USER_SETTINGS) {
904 		if ((ahd->user_discenable & devinfo.target_mask) != 0)
905 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
906 
907 		if ((ahd->user_tagenable & devinfo.target_mask) != 0)
908 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
909 	} else {
910 		if ((tstate->discenable & devinfo.target_mask) != 0)
911 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
912 
913 		if ((tstate->tagenable & devinfo.target_mask) != 0)
914 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
915 	}
916 	cts->protocol_version = tinfo->protocol_version;
917 	cts->transport_version = tinfo->transport_version;
918 
919 	spi->sync_period = tinfo->period;
920 	spi->sync_offset = tinfo->offset;
921 	spi->bus_width = tinfo->width;
922 	spi->ppr_options = tinfo->ppr_options;
923 
924 	cts->protocol = PROTO_SCSI;
925 	cts->transport = XPORT_SPI;
926 	spi->valid = CTS_SPI_VALID_SYNC_RATE
927 		   | CTS_SPI_VALID_SYNC_OFFSET
928 		   | CTS_SPI_VALID_BUS_WIDTH
929 		   | CTS_SPI_VALID_PPR_OPTIONS;
930 
931 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
932 		scsi->valid = CTS_SCSI_VALID_TQ;
933 		spi->valid |= CTS_SPI_VALID_DISC;
934 	} else {
935 		scsi->valid = 0;
936 	}
937 
938 	cts->ccb_h.status = CAM_REQ_CMP;
939 #else
940 	struct	ahd_devinfo devinfo;
941 	struct	ahd_initiator_tinfo *targ_info;
942 	struct	ahd_tmode_tstate *tstate;
943 	struct	ahd_transinfo *tinfo;
944 
945 	ahd_compile_devinfo(&devinfo, our_id,
946 			    cts->ccb_h.target_id,
947 			    cts->ccb_h.target_lun,
948 			    channel, ROLE_UNKNOWN);
949 	targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
950 					devinfo.our_scsiid,
951 					devinfo.target, &tstate);
952 
953 	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
954 		tinfo = &targ_info->curr;
955 	else
956 		tinfo = &targ_info->user;
957 
958 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
959 	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
960 		if ((ahd->user_discenable & devinfo.target_mask) != 0)
961 			cts->flags |= CCB_TRANS_DISC_ENB;
962 
963 		if ((ahd->user_tagenable & devinfo.target_mask) != 0)
964 			cts->flags |= CCB_TRANS_TAG_ENB;
965 	} else {
966 		if ((tstate->discenable & devinfo.target_mask) != 0)
967 			cts->flags |= CCB_TRANS_DISC_ENB;
968 
969 		if ((tstate->tagenable & devinfo.target_mask) != 0)
970 			cts->flags |= CCB_TRANS_TAG_ENB;
971 	}
972 	cts->sync_period = tinfo->period;
973 	cts->sync_offset = tinfo->offset;
974 	cts->bus_width = tinfo->width;
975 
976 	cts->valid = CCB_TRANS_SYNC_RATE_VALID
977 		   | CCB_TRANS_SYNC_OFFSET_VALID
978 		   | CCB_TRANS_BUS_WIDTH_VALID;
979 
980 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
981 		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;
982 
983 	cts->ccb_h.status = CAM_REQ_CMP;
984 #endif
985 }
986 
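/*
 * Asynchronous event callback.  On AC_LOST_DEVICE we revert the
 * departed device's transfer settings to asynchronous, narrow
 * transfers so the next device at that address starts clean.
 */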
987 static void
988 ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
989 {
990 	struct ahd_softc *ahd;
991 	struct cam_sim *sim;
992 
993 	sim = (struct cam_sim *)callback_arg;
994 	ahd = (struct ahd_softc *)cam_sim_softc(sim);
995 	switch (code) {
996 	case AC_LOST_DEVICE:
997 	{
998 		struct	ahd_devinfo devinfo;
999 		long	s;
1000 
1001 		ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
1002 				    xpt_path_target_id(path),
1003 				    xpt_path_lun_id(path),
1004 				    SIM_CHANNEL(ahd, sim),
1005 				    ROLE_UNKNOWN);
1006 
1007 		/*
1008 		 * Revert to async/narrow transfers
1009 		 * for the next device.
1010 		 */
1011 		ahd_lock(ahd, &s);
1012 		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1013 			      AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE);
1014 		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
1015 				 /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR,
1016 				 /*paused*/FALSE);
1017 		ahd_unlock(ahd, &s);
1018 		break;
1019 	}
1020 	default:
1021 		break;
1022 	}
1023 }
1024 
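/*
 * Completion callback for bus_dmamap_load(); also invoked directly when
 * no mapping is required.  Translate the DMA segments into the
 * controller's S/G format, perform a final abort check, arm the CCB
 * timeout, and hand the SCB to the sequencer.
 */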
1025 static void
1026 ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
1027 		int error)
1028 {
1029 	struct	scb *scb;
1030 	union	ccb *ccb;
1031 	struct	ahd_softc *ahd;
1032 	struct	ahd_initiator_tinfo *tinfo;
1033 	struct	ahd_tmode_tstate *tstate;
1034 	u_int	mask;
1035 	u_long	s;
1036 
1037 	scb = (struct scb *)arg;
1038 	ccb = scb->io_ctx;
1039 	ahd = scb->ahd_softc;
1040 
1041 	if (error != 0) {
1042 		if (error == EFBIG)
1043 			ahd_set_transaction_status(scb, CAM_REQ_TOO_BIG);
1044 		else
1045 			ahd_set_transaction_status(scb, CAM_REQ_CMP_ERR);
1046 		if (nsegments != 0)
1047 			bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
1048 		ahd_lock(ahd, &s);
1049 		ahd_free_scb(ahd, scb);
1050 		ahd_unlock(ahd, &s);
1051 		xpt_done(ccb);
1052 		return;
1053 	}
1054 	scb->sg_count = 0;
1055 	if (nsegments != 0) {
1056 		void *sg;
1057 		bus_dmasync_op_t op;
1058 		u_int i;
1059 
1060 		/* Copy the segments into our SG list */
1061 		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {
1062 
1063 			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
1064 					  dm_segs->ds_len,
1065 					  /*last*/i == 1);
1066 			dm_segs++;
1067 		}
1068 
1069 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1070 			op = BUS_DMASYNC_PREREAD;
1071 		else
1072 			op = BUS_DMASYNC_PREWRITE;
1073 
1074 		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
1075 
1076 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1077 			struct target_data *tdata;
1078 
1079 			tdata = &scb->hscb->shared_data.tdata;
1080 			tdata->target_phases |= DPHASE_PENDING;
1081 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1082 				tdata->data_phase = P_DATAOUT;
1083 			else
1084 				tdata->data_phase = P_DATAIN;
1085 		}
1086 	}
1087 
1088 	ahd_lock(ahd, &s);
1089 
1090 	/*
1091 	 * Last chance to check whether this SCB needs to
1092 	 * be aborted.
1093 	 */
1094 	if (ahd_get_transaction_status(scb) != CAM_REQ_INPROG) {
1095 		if (nsegments != 0)
1096 			bus_dmamap_unload(ahd->buffer_dmat,
1097 					  scb->dmamap);
1098 		ahd_free_scb(ahd, scb);
1099 		ahd_unlock(ahd, &s);
1100 		xpt_done(ccb);
1101 		return;
1102 	}
1103 
1104 	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
1105 				    SCSIID_OUR_ID(scb->hscb->scsiid),
1106 				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
1107 				    &tstate);
1108 
1109 	mask = SCB_GET_TARGET_MASK(ahd, scb);
1110 
1111 	if ((tstate->discenable & mask) != 0
1112 	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1113 		scb->hscb->control |= DISCENB;
1114 
1115 	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
1116 		scb->flags |= SCB_PACKETIZED;
1117 
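	/*
	 * If CAM explicitly requested negotiation, or our settings call
	 * for automatic renegotiation with this device, mark the SCB so
	 * a negotiation message is sent with this command.
	 */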
1118 	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
1119 	 && (tinfo->goal.width != 0
1120 	  || tinfo->goal.period != 0
1121 	  || tinfo->goal.ppr_options != 0)) {
1122 		scb->flags |= SCB_NEGOTIATE;
1123 		scb->hscb->control |= MK_MESSAGE;
1124 	} else if ((tstate->auto_negotiate & mask) != 0) {
1125 		scb->flags |= SCB_AUTO_NEGOTIATE;
1126 		scb->hscb->control |= MK_MESSAGE;
1127 	}
1128 
1129 	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1130 
1131 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1132 
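	/*
	 * Arm the CCB timeout, converting from milliseconds to ticks and
	 * substituting a 5 second timeout when CAM requests the default.
	 */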
1133 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1134 		uint64_t time;
1135 
1136 		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1137 			ccb->ccb_h.timeout = 5 * 1000;
1138 
1139 		time = ccb->ccb_h.timeout;
1140 		time *= hz;
1141 		time /= 1000;
1142 		ccb->ccb_h.timeout_ch =
1143 		    timeout(ahd_timeout, (caddr_t)scb, time);
1144 	}
1145 
1146 	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
1147 		/* Define a mapping from our tag to the SCB. */
1148 		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
1149 		ahd_pause(ahd);
1150 		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
1151 		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
1152 		ahd_unpause(ahd);
1153 	} else {
1154 		ahd_queue_scb(ahd, scb);
1155 	}
1156 
1157 	ahd_unlock(ahd, &s);
1158 }
1159 
1160 static void
1161 ahd_poll(struct cam_sim *sim)
1162 {
1163 	ahd_intr(cam_sim_softc(sim));
1164 }
1165 
1166 static void
1167 ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
1168 	       struct ccb_scsiio *csio, struct scb *scb)
1169 {
1170 	struct hardware_scb *hscb;
1171 	struct ccb_hdr *ccb_h;
1172 
1173 	hscb = scb->hscb;
1174 	ccb_h = &csio->ccb_h;
1175 
1176 	csio->resid = 0;
1177 	csio->sense_resid = 0;
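	/*
	 * Copy the CDB into the hardware SCB, either by physical pointer
	 * or by value, rejecting CDBs too large for the controller to
	 * embed.
	 */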
1178 	if (ccb_h->func_code == XPT_SCSI_IO) {
1179 		hscb->cdb_len = csio->cdb_len;
1180 		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
1181 
1182 			if (hscb->cdb_len > MAX_CDB_LEN
1183 			 && (ccb_h->flags & CAM_CDB_PHYS) == 0) {
1184 				u_long s;
1185 
1186 				ahd_set_transaction_status(scb,
1187 							   CAM_REQ_INVALID);
1188 				ahd_lock(ahd, &s);
1189 				ahd_free_scb(ahd, scb);
1190 				ahd_unlock(ahd, &s);
1191 				xpt_done((union ccb *)csio);
1192 				return;
1193 			}
1194 			if ((ccb_h->flags & CAM_CDB_PHYS) != 0) {
1195 				hscb->shared_data.idata.cdbptr =
1196 				   ahd_htole64((uintptr_t)csio->cdb_io.cdb_ptr);
1197 			} else {
1198 				memcpy(hscb->shared_data.idata.cdb,
1199 				       csio->cdb_io.cdb_ptr,
1200 				       hscb->cdb_len);
1201 			}
1202 		} else {
1203 			if (hscb->cdb_len > MAX_CDB_LEN) {
1204 				u_long s;
1205 
1206 				ahd_set_transaction_status(scb,
1207 							   CAM_REQ_INVALID);
1208 				ahd_lock(ahd, &s);
1209 				ahd_free_scb(ahd, scb);
1210 				ahd_unlock(ahd, &s);
1211 				xpt_done((union ccb *)csio);
1212 				return;
1213 			}
1214 			memcpy(hscb->shared_data.idata.cdb,
1215 			       csio->cdb_io.cdb_bytes, hscb->cdb_len);
1216 		}
1217 	}
1218 
1219 	/* Only use S/G if there is a transfer */
1220 	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1221 		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1222 			/* We've been given a pointer to a single buffer */
1223 			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1224 				int s;
1225 				int error;
1226 
1227 				s = splsoftvm();
1228 				error = bus_dmamap_load(ahd->buffer_dmat,
1229 							scb->dmamap,
1230 							csio->data_ptr,
1231 							csio->dxfer_len,
1232 							ahd_execute_scb,
1233 							scb, /*flags*/0);
1234 				if (error == EINPROGRESS) {
1235 					/*
1236 					 * So as to maintain ordering,
1237 					 * freeze the controller queue
1238 					 * until our mapping is
1239 					 * returned.
1240 					 */
1241 					xpt_freeze_simq(sim,
1242 							/*count*/1);
1243 					scb->io_ctx->ccb_h.status |=
1244 					    CAM_RELEASE_SIMQ;
1245 				}
1246 				splx(s);
1247 			} else {
1248 				struct bus_dma_segment seg;
1249 
1250 				/* Pointer to physical buffer */
1251 				if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE)
1252 					panic("ahd_setup_data - Transfer size "
1253 					      "larger than device max");
1254 
1255 				seg.ds_addr = (bus_addr_t)csio->data_ptr;
1256 				seg.ds_len = csio->dxfer_len;
1257 				ahd_execute_scb(scb, &seg, 1, 0);
1258 			}
1259 		} else {
1260 			struct bus_dma_segment *segs;
1261 
1262 			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
1263 				panic("ahd_setup_data - Physical segment "
1264 				      "pointers unsupported");
1265 
1266 			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
1267 				panic("ahd_setup_data - Virtual segment "
1268 				      "addresses unsupported");
1269 
1270 			/* Just use the segments provided */
1271 			segs = (struct bus_dma_segment *)csio->data_ptr;
1272 			ahd_execute_scb(scb, segs, csio->sglist_cnt, 0);
1273 		}
1274 	} else {
1275 		ahd_execute_scb(scb, NULL, 0, 0);
1276 	}
1277 }
1278 
1279 #if NOT_YET
1280 static void
1281 ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb)
1282 {
1283 	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1284 		struct scb *list_scb;
1285 
1286 		scb->flags |= SCB_RECOVERY_SCB;
1287 
1288 		/*
1289 		 * Take all queued, but not yet sent, SCBs out of the equation.
1290 		 * Also ensure that no new CCBs are queued to us while we
1291 		 * try to fix this problem.
1292 		 */
1293 		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1294 			xpt_freeze_simq(SCB_GET_SIM(ahd, scb), /*count*/1);
1295 			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1296 		}
1297 
1298 		/*
1299 		 * Go through all of our pending SCBs and remove
1300 		 * any scheduled timeouts for them.  We will reschedule
1301 		 * them after we've successfully fixed this problem.
1302 		 */
1303 		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
1304 			union ccb *ccb;
1305 
1306 			ccb = list_scb->io_ctx;
1307 			untimeout(ahd_timeout, list_scb, ccb->ccb_h.timeout_ch);
1308 		}
1309 	}
1310 }
1311 #endif
1312 
1313 void
1314 ahd_timeout(void *arg)
1315 {
1316 	struct	scb	  *scb;
1317 	struct	ahd_softc *ahd;
1318 	ahd_mode_state	   saved_modes;
1319 	long		   s;
1320 	int		   target;
1321 	int		   lun;
1322 	char		   channel;
1323 
1324 #if NOT_YET
1325 	int		   i;
1326 	int		   found;
1327 	u_int		   last_phase;
1328 #endif
1329 
1330 	scb = (struct scb *)arg;
1331 	ahd = (struct ahd_softc *)scb->ahd_softc;
1332 
1333 	ahd_lock(ahd, &s);
1334 
1335 	ahd_pause_and_flushwork(ahd);
1336 
1337 	saved_modes = ahd_save_modes(ahd);
1338 #if 0
1339 	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1340 	ahd_outb(ahd, SCSISIGO, ACKO);
1341 	printf("set ACK\n");
1342 	ahd_outb(ahd, SCSISIGO, 0);
1343 	printf("clearing Ack\n");
1344 	ahd_restore_modes(ahd, saved_modes);
1345 #endif
1346 	if ((scb->flags & SCB_ACTIVE) == 0) {
1347 		/* Previous timeout took care of me already */
1348 		printf("%s: Timedout SCB already complete. "
1349 		       "Interrupts may not be functioning.\n", ahd_name(ahd));
1350 		ahd_unpause(ahd);
1351 		ahd_unlock(ahd, &s);
1352 		return;
1353 	}
1354 
1355 	target = SCB_GET_TARGET(ahd, scb);
1356 	channel = SCB_GET_CHANNEL(ahd, scb);
1357 	lun = SCB_GET_LUN(scb);
1358 
1359 	ahd_print_path(ahd, scb);
1360 	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
1361 	ahd_dump_card_state(ahd);
1362 	ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
1363 			  /*initiate reset*/TRUE);
1364 	ahd_unlock(ahd, &s);
1365 	return;
1366 #if NOT_YET
1367 	last_phase = ahd_inb(ahd, LASTPHASE);
1368 	if (scb->sg_count > 0) {
1369 		for (i = 0; i < scb->sg_count; i++) {
1370 			printf("sg[%d] - Addr 0x%x : Length %d\n",
1371 			       i,
1372 			       ((struct ahd_dma_seg *)scb->sg_list)[i].addr,
1373 			       ((struct ahd_dma_seg *)scb->sg_list)[i].len
1374 				& AHD_SG_LEN_MASK);
1375 		}
1376 	}
1377 	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
1378 		/*
1379 		 * Been down this road before.
1380 		 * Do a full bus reset.
1381 		 */
1382 bus_reset:
1383 		ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
1384 		found = ahd_reset_channel(ahd, channel, /*Initiate Reset*/TRUE);
1385 		printf("%s: Issued Channel %c Bus Reset. "
1386 		       "%d SCBs aborted\n", ahd_name(ahd), channel, found);
1387 	} else {
1388 		/*
1389 		 * If we are a target, transition to bus free and report
1390 		 * the timeout.
1391 		 *
1392 		 * The target/initiator that is holding up the bus may not
1393 		 * be the same as the one that triggered this timeout
1394 		 * (different commands have different timeout lengths).
1395 		 * If the bus is idle and we are acting as the initiator
1396 		 * for this request, queue a BDR message to the timed out
1397 		 * target.  Otherwise, if the timed out transaction is
1398 		 * active:
1399 		 *   Initiator transaction:
1400 		 *	Stuff the message buffer with a BDR message and assert
1401 		 *	ATN in the hopes that the target will let go of the bus
1402 		 *	and go to the mesgout phase.  If this fails, we'll
1403 		 *	get another timeout 2 seconds later which will attempt
1404 		 *	a bus reset.
1405 		 *
1406 		 *   Target transaction:
1407 		 *	Transition to BUS FREE and report the error.
1408 		 *	It's good to be the target!
1409 		 */
1410 		u_int active_scb_index;
1411 		u_int saved_scbptr;
1412 
1413 		saved_scbptr = ahd_get_scbptr(ahd);
1414 		active_scb_index = saved_scbptr;
1415 
1416 		if (last_phase != P_BUSFREE
1417 		  && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
1418 		  && (active_scb_index < ahd->scb_data.numscbs)) {
1419 			struct scb *active_scb;
1420 
1421 			/*
1422 			 * If the active SCB is not us, assume that
1423 			 * the active SCB has a longer timeout than
1424 			 * the timed-out SCB, and wait for the active
1425 			 * SCB to timeout.
1426 			 */
1427 			active_scb = ahd_lookup_scb(ahd, active_scb_index);
1428 			if (active_scb != scb) {
1429 				struct	 ccb_hdr *ccbh;
1430 				uint64_t newtimeout;
1431 
1432 				ahd_print_path(ahd, scb);
1433 				printf("Other SCB Timeout%s",
1434 			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
1435 				       ? " again\n" : "\n");
1436 				scb->flags |= SCB_OTHERTCL_TIMEOUT;
1437 				newtimeout =
1438 				    MAX(active_scb->io_ctx->ccb_h.timeout,
1439 					scb->io_ctx->ccb_h.timeout);
1440 				newtimeout *= hz;
1441 				newtimeout /= 1000;
1442 				ccbh = &scb->io_ctx->ccb_h;
1443 				scb->io_ctx->ccb_h.timeout_ch =
1444 				    timeout(ahd_timeout, scb, newtimeout);
1445 				ahd_unpause(ahd);
1446 				ahd_unlock(ahd, &s);
1447 				return;
1448 			}
1449 
1450 			/* It's us */
1451 			if ((scb->hscb->control & TARGET_SCB) != 0) {
1452 
1453 				/*
1454 				 * Send back any queued up transactions
1455 				 * and properly record the error condition.
1456 				 */
1457 				ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
1458 					       SCB_GET_CHANNEL(ahd, scb),
1459 					       SCB_GET_LUN(scb),
1460 					       SCB_GET_TAG(scb),
1461 					       ROLE_TARGET,
1462 					       CAM_CMD_TIMEOUT);
1463 
1464 				/* Will clear us from the bus */
1465 				ahd_restart(ahd);
1466 				ahd_unlock(ahd, &s);
1467 				return;
1468 			}
1469 
1470 			ahd_set_recoveryscb(ahd, active_scb);
1471 			ahd_outb(ahd, MSG_OUT, HOST_MSG);
1472 			ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
1473 			ahd_print_path(ahd, active_scb);
1474 			printf("BDR message in message buffer\n");
1475 			active_scb->flags |= SCB_DEVICE_RESET;
1476 			active_scb->io_ctx->ccb_h.timeout_ch =
1477 			    timeout(ahd_timeout, (caddr_t)active_scb, 2 * hz);
1478 			ahd_unpause(ahd);
1479 		} else {
1480 			int	 disconnected;
1481 
1482 			/* XXX Shouldn't panic.  Just punt instead? */
1483 			if ((scb->hscb->control & TARGET_SCB) != 0)
1484 				panic("Timed-out target SCB but bus idle");
1485 
1486 			if (last_phase != P_BUSFREE
1487 			 && (ahd_inb(ahd, SSTAT0) & TARGET) != 0) {
1488 				/* XXX What happened to the SCB? */
1489 				/* Hung target selection.  Goto busfree */
1490 				printf("%s: Hung target selection\n",
1491 				       ahd_name(ahd));
1492 				ahd_restart(ahd);
1493 				ahd_unlock(ahd, &s);
1494 				return;
1495 			}
1496 
1497 			if (ahd_search_qinfifo(ahd, target, channel, lun,
1498 					       SCB_GET_TAG(scb), ROLE_INITIATOR,
1499 					       /*status*/0, SEARCH_COUNT) > 0) {
1500 				disconnected = FALSE;
1501 			} else {
1502 				disconnected = TRUE;
1503 			}
1504 
1505 			if (disconnected) {
1506 
1507 				ahd_set_recoveryscb(ahd, scb);
1508 				/*
1509 				 * Actually re-queue this SCB in an attempt
1510 				 * to select the device before it reconnects.
1511 				 * In either case (selection or reselection),
1512 				 * we will now issue a target reset to the
1513 				 * timed-out device.
1514 				 *
1515 				 * Set the MK_MESSAGE control bit indicating
1516 				 * that we desire to send a message.  We
1517 				 * also set the disconnected flag since
1518 				 * in the paging case there is no guarantee
1519 				 * that our SCB control byte matches the
1520 				 * version on the card.  We don't want the
1521 				 * sequencer to abort the command thinking
1522 				 * an unsolicited reselection occurred.
1523 				 */
1524 				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
1525 				scb->flags |= SCB_DEVICE_RESET;
1526 
1527 				/*
1528 				 * The sequencer will never re-reference the
1529 				 * in-core SCB.  To make sure we are notified
1530 				 * during reselection, set the MK_MESSAGE flag
1531 				 * in the card's copy of the SCB.
1532 				 */
1533 				ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
1534 				ahd_outb(ahd, SCB_CONTROL,
1535 					 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
1536 
1537 				/*
1538 				 * Clear out any entries in the QINFIFO first
1539 				 * so we are the next SCB for this target
1540 				 * to run.
1541 				 */
1542 				ahd_search_qinfifo(ahd,
1543 						   SCB_GET_TARGET(ahd, scb),
1544 						   channel, SCB_GET_LUN(scb),
1545 						   SCB_LIST_NULL,
1546 						   ROLE_INITIATOR,
1547 						   CAM_REQUEUE_REQ,
1548 						   SEARCH_COMPLETE);
1549 				ahd_print_path(ahd, scb);
1550 				printf("Queuing a BDR SCB\n");
1551 				ahd_qinfifo_requeue_tail(ahd, scb);
1552 				ahd_set_scbptr(ahd, saved_scbptr);
1553 				scb->io_ctx->ccb_h.timeout_ch =
1554 				    timeout(ahd_timeout, (caddr_t)scb, 2 * hz);
1555 				ahd_unpause(ahd);
1556 			} else {
1557 				/* Go "immediately" to the bus reset */
1558 				/* This shouldn't happen */
1559 				ahd_set_recoveryscb(ahd, scb);
1560 				ahd_print_path(ahd, scb);
1561 				printf("SCB %d: Immediate reset.  "
1562 					"Flags = 0x%x\n", SCB_GET_TAG(scb),
1563 					scb->flags);
1564 				goto bus_reset;
1565 			}
1566 		}
1567 	}
1568 	ahd_unlock(ahd, &s);
1569 #endif
1570 }
1571 
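/*
 * Abort the CCB referenced by an XPT_ABORT request.  Target mode CCBs
 * still sitting on our accept/notify queues are simply unlinked and
 * completed; aborting active SCSI I/O is not yet implemented, so such
 * abort requests complete with CAM_UA_ABORT.
 */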
1572 static void
1573 ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
1574 {
1575 	union ccb *abort_ccb;
1576 
1577 	abort_ccb = ccb->cab.abort_ccb;
1578 	switch (abort_ccb->ccb_h.func_code) {
1579 #ifdef AHD_TARGET_MODE
1580 	case XPT_ACCEPT_TARGET_IO:
1581 	case XPT_IMMED_NOTIFY:
1582 	case XPT_CONT_TARGET_IO:
1583 	{
1584 		struct ahd_tmode_tstate *tstate;
1585 		struct ahd_tmode_lstate *lstate;
1586 		struct ccb_hdr_slist *list;
1587 		cam_status status;
1588 
1589 		status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate,
1590 					     &lstate, TRUE);
1591 
1592 		if (status != CAM_REQ_CMP) {
1593 			ccb->ccb_h.status = status;
1594 			break;
1595 		}
1596 
1597 		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
1598 			list = &lstate->accept_tios;
1599 		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
1600 			list = &lstate->immed_notifies;
1601 		else
1602 			list = NULL;
1603 
1604 		if (list != NULL) {
1605 			struct ccb_hdr *curelm;
1606 			int found;
1607 
1608 			curelm = SLIST_FIRST(list);
1609 			found = 0;
1610 			if (curelm == &abort_ccb->ccb_h) {
1611 				found = 1;
1612 				SLIST_REMOVE_HEAD(list, sim_links.sle);
1613 			} else {
1614 				while(curelm != NULL) {
1615 					struct ccb_hdr *nextelm;
1616 
1617 					nextelm =
1618 					    SLIST_NEXT(curelm, sim_links.sle);
1619 
1620 					if (nextelm == &abort_ccb->ccb_h) {
1621 						found = 1;
1622 						SLIST_NEXT(curelm,
1623 							   sim_links.sle) =
1624 						    SLIST_NEXT(nextelm,
1625 							       sim_links.sle);
1626 						break;
1627 					}
1628 					curelm = nextelm;
1629 				}
1630 			}
1631 
1632 			if (found) {
1633 				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
1634 				xpt_done(abort_ccb);
1635 				ccb->ccb_h.status = CAM_REQ_CMP;
1636 			} else {
1637 				xpt_print_path(abort_ccb->ccb_h.path);
1638 				printf("Not found\n");
1639 				ccb->ccb_h.status = CAM_PATH_INVALID;
1640 			}
1641 			break;
1642 		}
1643 		/* FALLTHROUGH */
1644 	}
1645 #endif
1646 	case XPT_SCSI_IO:
1647 		/* XXX Fully implement the hard ones */
1648 		ccb->ccb_h.status = CAM_UA_ABORT;
1649 		break;
1650 	default:
1651 		ccb->ccb_h.status = CAM_REQ_INVALID;
1652 		break;
1653 	}
1654 	xpt_done(ccb);
1655 }
1656 
1657 void
1658 ahd_send_async(struct ahd_softc *ahd, char channel, u_int target,
1659 		u_int lun, ac_code code, void *opt_arg)
1660 {
1661 	struct	ccb_trans_settings cts;
1662 	struct cam_path *path;
1663 	void *arg;
1664 	int error;
1665 
1666 	arg = NULL;
1667 	error = ahd_create_path(ahd, channel, target, lun, &path);
1668 
1669 	if (error != CAM_REQ_CMP)
1670 		return;
1671 
1672 	switch (code) {
1673 	case AC_TRANSFER_NEG:
1674 	{
1675 #ifdef AHD_NEW_TRAN_SETTINGS
1676 		struct	ccb_trans_settings_scsi *scsi;
1677 
1678 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1679 		scsi = &cts.proto_specific.scsi;
1680 #else
1681 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1682 #endif
1683 		cts.ccb_h.path = path;
1684 		cts.ccb_h.target_id = target;
1685 		cts.ccb_h.target_lun = lun;
1686 		ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts);
1687 		arg = &cts;
1688 #ifdef AHD_NEW_TRAN_SETTINGS
1689 		scsi->valid &= ~CTS_SCSI_VALID_TQ;
1690 		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1691 #else
1692 		cts.valid &= ~CCB_TRANS_TQ_VALID;
1693 		cts.flags &= ~CCB_TRANS_TAG_ENB;
1694 #endif
1695 		if (opt_arg == NULL)
1696 			break;
1697 		if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED)
1698 #ifdef AHD_NEW_TRAN_SETTINGS
1699 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
1700 		scsi->valid |= CTS_SCSI_VALID_TQ;
1701 #else
1702 			cts.flags |= CCB_TRANS_TAG_ENB;
1703 		cts.valid |= CCB_TRANS_TQ_VALID;
1704 #endif
1705 		break;
1706 	}
1707 	case AC_SENT_BDR:
1708 	case AC_BUS_RESET:
1709 		break;
1710 	default:
1711 		panic("ahd_send_async: Unexpected async event");
1712 	}
1713 	xpt_async(code, path, arg);
1714 	xpt_free_path(path);
1715 }
1716 
1717 void
1718 ahd_platform_set_tags(struct ahd_softc *ahd,
1719 		      struct ahd_devinfo *devinfo, int enable)
1720 {
1721 }
1722 
1723 int
1724 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
1725 {
1726 	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
1727 	    M_NOWAIT | M_ZERO);
1728 	if (ahd->platform_data == NULL)
1729 		return (ENOMEM);
1730 	return (0);
1731 }
1732 
1733 void
1734 ahd_platform_free(struct ahd_softc *ahd)
1735 {
1736 	struct ahd_platform_data *pdata;
1737 
1738 	pdata = ahd->platform_data;
1739 	if (pdata != NULL) {
1740 		if (pdata->regs[0] != NULL)
1741 			bus_release_resource(ahd->dev_softc,
1742 					     pdata->regs_res_type[0],
1743 					     pdata->regs_res_id[0],
1744 					     pdata->regs[0]);
1745 
1746 		if (pdata->regs[1] != NULL)
1747 			bus_release_resource(ahd->dev_softc,
1748 					     pdata->regs_res_type[1],
1749 					     pdata->regs_res_id[1],
1750 					     pdata->regs[1]);
1751 
1752 		if (pdata->irq != NULL)
1753 			bus_release_resource(ahd->dev_softc,
1754 					     pdata->irq_res_type,
1755 					     0, pdata->irq);
1756 
1757 		if (pdata->sim_b != NULL) {
1758 			xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
1759 			xpt_free_path(pdata->path_b);
1760 			xpt_bus_deregister(cam_sim_path(pdata->sim_b));
1761 			cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
1762 		}
1763 		if (pdata->sim != NULL) {
1764 			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
1765 			xpt_free_path(pdata->path);
1766 			xpt_bus_deregister(cam_sim_path(pdata->sim));
1767 			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
1768 		}
1769 		if (pdata->eh != NULL)
1770 			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
1771 		free(ahd->platform_data, M_DEVBUF);
1772 	}
1773 }
1774 
1775 int
1776 ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
1777 {
1778 	/* We don't sort softcs under FreeBSD, so always report equal */
1779 	return (0);
1780 }
1781 
1782 int
1783 ahd_detach(device_t dev)
1784 {
1785 	struct ahd_softc *ahd;
1786 	u_long l;
1787 	u_long s;
1788 
1789 	ahd_list_lock(&l);
1790 	device_printf(dev, "detaching device\n");
1791 	ahd = device_get_softc(dev);
1792 	ahd = ahd_find_softc(ahd);
1793 	if (ahd == NULL) {
1794 		device_printf(dev, "aic7xxx already detached\n");
1795 		ahd_list_unlock(&l);
1796 		return (ENOENT);
1797 	}
1798 	ahd_lock(ahd, &s);
1799 	ahd_intr_enable(ahd, FALSE);
1800 	bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih);
1801 	ahd_unlock(ahd, &s);
1802 	ahd_free(ahd);
1803 	ahd_list_unlock(&l);
1804 	return (0);
1805 }
1806 
1807 #if UNUSED
1808 static void
1809 ahd_dump_targcmd(struct target_cmd *cmd)
1810 {
1811 	uint8_t *byte;
1812 	uint8_t *last_byte;
1813 	int i;
1814 
1815 	byte = &cmd->initiator_channel;
1816 	/* Debugging info for received commands */
1817 	last_byte = &cmd[1].initiator_channel;
1818 
1819 	i = 0;
1820 	while (byte < last_byte) {
1821 		if (i == 0)
1822 			printf("\t");
1823 		printf("%#x", *byte++);
1824 		i++;
1825 		if (i == 8) {
1826 			printf("\n");
1827 			i = 0;
1828 		} else {
1829 			printf(", ");
1830 		}
1831 	}
1832 }
1833 #endif
1834 
1835 static int
1836 ahd_modevent(module_t mod, int type, void *data)
1837 {
1838 	/* XXX Deal with busy status on unload. */
1839 	return 0;
1840 }
1841 
1842 static moduledata_t ahd_mod = {
1843 	"ahd",
1844 	ahd_modevent,
1845 	NULL
1846 };
1847 
1848 /********************************** DDB Hooks *********************************/
1849 #ifdef DDB
1850 static struct ahd_softc *ahd_ddb_softc;
1851 static int ahd_ddb_paused;
1852 static int ahd_ddb_paused_on_entry;
1853 DB_COMMAND(ahd_set_unit, ahd_ddb_set_unit)
1854 {
1855 	struct ahd_softc *list_ahd;
1856 
1857 	ahd_ddb_softc = NULL;
1858 	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
1859 		if (list_ahd->unit == addr)
1860 			ahd_ddb_softc = list_ahd;
1861 	}
1862 	if (ahd_ddb_softc == NULL)
1863 		db_error("No matching softc found!\n");
1864 }
1865 
1866 DB_COMMAND(ahd_pause, ahd_ddb_pause)
1867 {
1868 	if (ahd_ddb_softc == NULL) {
1869 		db_error("Must set unit with ahd_set_unit first!\n");
1870 		return;
1871 	}
1872 	if (ahd_ddb_paused == 0) {
1873 		ahd_ddb_paused++;
1874 		if (ahd_is_paused(ahd_ddb_softc)) {
1875 			ahd_ddb_paused_on_entry++;
1876 			return;
1877 		}
1878 		ahd_pause(ahd_ddb_softc);
1879 	}
1880 }
1881 
1882 DB_COMMAND(ahd_unpause, ahd_ddb_unpause)
1883 {
1884 	if (ahd_ddb_softc == NULL) {
1885 		db_error("Must set unit with ahd_set_unit first!\n");
1886 		return;
1887 	}
1888 	if (ahd_ddb_paused != 0) {
1889 		ahd_ddb_paused = 0;
1890 		if (ahd_ddb_paused_on_entry)
1891 			return;
1892 		ahd_unpause(ahd_ddb_softc);
1893 	} else if (ahd_ddb_paused_on_entry != 0) {
1894 		/* Two unpauses to clear a paused on entry. */
1895 		ahd_ddb_paused_on_entry = 0;
1896 		ahd_unpause(ahd_ddb_softc);
1897 	}
1898 }
1899 
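/*
 * Read a controller register from DDB.  The 'b', 'w', and 'l' modifiers
 * select byte, word, and long access sizes; a repeat count re-reads the
 * same register.
 */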
1900 DB_COMMAND(ahd_in, ahd_ddb_in)
1901 {
1902 	int c;
1903 	int size;
1904 
1905 	if (ahd_ddb_softc == NULL) {
1906 		db_error("Must set unit with ahd_set_unit first!\n");
1907 		return;
1908 	}
1909 	if (have_addr == 0)
1910 		return;
1911 
1912 	size = 1;
1913 	while ((c = *modif++) != '\0') {
1914 		switch (c) {
1915 		case 'b':
1916 			size = 1;
1917 			break;
1918 		case 'w':
1919 			size = 2;
1920 			break;
1921 		case 'l':
1922 			size = 4;
1923 			break;
1924 		}
1925 	}
1926 
1927 	if (count <= 0)
1928 		count = 1;
1929 	while (--count >= 0) {
1930 		db_printf("%04x (M)%x: \t", addr,
1931 			  ahd_inb(ahd_ddb_softc, MODE_PTR));
1932 		switch (size) {
1933 		case 1:
1934 			db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr));
1935 			break;
1936 		case 2:
1937 			db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr));
1938 			break;
1939 		case 4:
1940 			db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr));
1941 			break;
1942 		}
1943 	}
1944 }
1945 
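/*
 * Write controller registers from DDB.  The 'b', 'h', and 'l' modifiers
 * select the access size; each value written is echoed along with the
 * previous register contents and advances the target address.
 */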
1946 DB_SET(ahd_out, ahd_ddb_out, db_cmd_set, CS_MORE, NULL)
1947 {
1948 	db_expr_t old_value;
1949 	db_expr_t new_value;
1950 	int	  size;
1951 
1952 	if (ahd_ddb_softc == NULL) {
1953 		db_error("Must set unit with ahd_set_unit first!\n");
1954 		return;
1955 	}
1956 
1957 	switch (modif[0]) {
1958 	case '\0':
1959 	case 'b':
1960 		size = 1;
1961 		break;
1962 	case 'h':
1963 		size = 2;
1964 		break;
1965 	case 'l':
1966 		size = 4;
1967 		break;
1968 	default:
1969 		db_error("Unknown size\n");
1970 		return;
1971 	}
1972 
1973 	while (db_expression(&new_value)) {
1974 		switch (size) {
1975 		default:
1976 		case 1:
1977 			old_value = ahd_inb(ahd_ddb_softc, addr);
1978 			ahd_outb(ahd_ddb_softc, addr, new_value);
1979 			break;
1980 		case 2:
1981 			old_value = ahd_inw(ahd_ddb_softc, addr);
1982 			ahd_outw(ahd_ddb_softc, addr, new_value);
1983 			break;
1984 		case 4:
1985 			old_value = ahd_inl(ahd_ddb_softc, addr);
1986 			ahd_outl(ahd_ddb_softc, addr, new_value);
1987 			break;
1988 		}
1989 		db_printf("%04x (M)%x: \t0x%x\t=\t0x%x",
1990 			  addr, ahd_inb(ahd_ddb_softc, MODE_PTR),
1991 			  old_value, new_value);
1992 		addr += size;
1993 	}
1994 	db_skip_to_eol();
1995 }
1996 
1997 #endif
1998 
1999 
2000 DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
2001 MODULE_DEPEND(ahd, cam, 1, 1, 1);
2002 MODULE_VERSION(ahd, 1);
2003