1 /*
2  * Bus independent FreeBSD shim for the aic79xx based Adaptec SCSI controllers
3  *
4  * Copyright (c) 1994-2002 Justin T. Gibbs.
5  * Copyright (c) 2001-2002 Adaptec Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * Alternatively, this software may be distributed under the terms of the
18  * GNU Public License ("GPL").
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
24  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#35 $
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <dev/aic7xxx/aic79xx_osm.h>
39 #include <dev/aic7xxx/aic79xx_inline.h>
40 
41 #include <sys/kthread.h>
42 
43 #include "opt_ddb.h"
44 #ifdef DDB
45 #include <ddb/ddb.h>
46 #endif
47 
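/*
 * AHD_TMODE_ENABLE is a bitmask of controller units for which target
 * mode operation should be enabled.  It defaults to 0 (target mode
 * disabled) when the kernel option is not set.
 */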
48 #ifndef AHD_TMODE_ENABLE
49 #define AHD_TMODE_ENABLE 0
50 #endif
51 
52 #include <dev/aic7xxx/aic_osm_lib.c>
53 
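/*
 * Reuse the first SIM-private pointer in the CCB header to record the
 * SCB servicing a CCB, so the SCB can be found again when an abort is
 * requested (see ahd_action()).
 */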
54 #define ccb_scb_ptr spriv_ptr0
55 
56 #if UNUSED
57 static void	ahd_dump_targcmd(struct target_cmd *cmd);
58 #endif
59 static int	ahd_modevent(module_t mod, int type, void *data);
60 static void	ahd_action(struct cam_sim *sim, union ccb *ccb);
61 static void	ahd_set_tran_settings(struct ahd_softc *ahd,
62 				      int our_id, char channel,
63 				      struct ccb_trans_settings *cts);
64 static void	ahd_get_tran_settings(struct ahd_softc *ahd,
65 				      int our_id, char channel,
66 				      struct ccb_trans_settings *cts);
67 static void	ahd_async(void *callback_arg, uint32_t code,
68 			  struct cam_path *path, void *arg);
69 static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
70 				int nsegments, int error);
71 static void	ahd_poll(struct cam_sim *sim);
72 static void	ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
73 			       struct ccb_scsiio *csio, struct scb *scb);
74 static void	ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim,
75 			      union ccb *ccb);
76 static int	ahd_create_path(struct ahd_softc *ahd,
77 				char channel, u_int target, u_int lun,
78 				struct cam_path **path);
79 
80 static int
81 ahd_create_path(struct ahd_softc *ahd, char channel, u_int target,
82 	        u_int lun, struct cam_path **path)
83 {
84 	path_id_t path_id;
85 
86 	path_id = cam_sim_path(ahd->platform_data->sim);
87 	return (xpt_create_path(path, /*periph*/NULL,
88 				path_id, target, lun));
89 }
90 
91 int
92 ahd_map_int(struct ahd_softc *ahd)
93 {
94 	int error;
95 
96 	/* Hook up our interrupt handler */
97 	error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq,
98 			       INTR_TYPE_CAM, ahd_platform_intr, ahd,
99 			       &ahd->platform_data->ih);
100 	if (error != 0)
101 		device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n",
102 			      error);
103 	return (error);
104 }
105 
106 /*
107  * Attach all the sub-devices we can find
108  */
109 int
110 ahd_attach(struct ahd_softc *ahd)
111 {
112 	char   ahd_info[256];
113 	struct ccb_setasync csa;
114 	struct cam_devq *devq;
115 	struct cam_sim *sim;
116 	struct cam_path *path;
117 	long s;
118 	int count;
119 
120 	count = 0;
121 	devq = NULL;
122 	sim = NULL;
123 	sim = NULL;
	path = NULL;
124 	/*
125 	 * Create a thread to perform all recovery.
126 	 */
127 	if (ahd_spawn_recovery_thread(ahd) != 0)
128 		goto fail;
129 
130 	ahd_controller_info(ahd, ahd_info);
131 	printf("%s\n", ahd_info);
132 	ahd_lock(ahd, &s);
133 
134 	/*
135 	 * Create the device queue for our SIM(s).
136 	 */
137 	devq = cam_simq_alloc(AHD_MAX_QUEUE);
138 	if (devq == NULL)
139 		goto fail;
140 
141 	/*
142 	 * Construct our SIM entry
143 	 */
144 	sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd,
145 			    device_get_unit(ahd->dev_softc),
146 			    1, /*XXX*/256, devq);
147 	if (sim == NULL) {
148 		cam_simq_free(devq);
149 		goto fail;
150 	}
151 
152 	if (xpt_bus_register(sim, /*bus_id*/0) != CAM_SUCCESS) {
153 		cam_sim_free(sim, /*free_devq*/TRUE);
154 		sim = NULL;
155 		goto fail;
156 	}
157 
158 	if (xpt_create_path(&path, /*periph*/NULL,
159 			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
160 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
161 		xpt_bus_deregister(cam_sim_path(sim));
162 		cam_sim_free(sim, /*free_devq*/TRUE);
163 		sim = NULL;
164 		goto fail;
165 	}
166 
167 	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
168 	csa.ccb_h.func_code = XPT_SASYNC_CB;
169 	csa.event_enable = AC_LOST_DEVICE;
170 	csa.callback = ahd_async;
171 	csa.callback_arg = sim;
172 	xpt_action((union ccb *)&csa);
173 	count++;
174 
175 fail:
176 	ahd->platform_data->sim = sim;
177 	ahd->platform_data->path = path;
178 	if (count != 0) {
179 		/* We have to wait until after any system dumps... */
180 		ahd->platform_data->eh =
181 		    EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown,
182 					  ahd, SHUTDOWN_PRI_DEFAULT);
183 		ahd_intr_enable(ahd, TRUE);
184 	}
185 
186 	ahd_unlock(ahd, &s);
187 
188 	return (count);
189 }
190 
191 /*
192  * Catch an interrupt from the adapter
193  */
194 void
195 ahd_platform_intr(void *arg)
196 {
197 	struct	ahd_softc *ahd;
198 
199 	ahd = (struct ahd_softc *)arg;
200 	ahd_intr(ahd);
201 }
202 
203 /*
204  * We have an SCB which has been processed by the
205  * adapter; now we check to see how the operation
206  * went.
207  */
208 void
209 ahd_done(struct ahd_softc *ahd, struct scb *scb)
210 {
211 	union ccb *ccb;
212 
213 	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
214 		  ("ahd_done - scb %d\n", SCB_GET_TAG(scb)));
215 
216 	ccb = scb->io_ctx;
217 	LIST_REMOVE(scb, pending_links);
218 	if ((scb->flags & SCB_TIMEDOUT) != 0)
219 		LIST_REMOVE(scb, timedout_links);
220 
221 	untimeout(ahd_platform_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);
222 
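	/*
	 * If data was transferred, synchronize the buffer DMA map for the
	 * CPU and unload it now that the hardware is done with it.
	 */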
223 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
224 		bus_dmasync_op_t op;
225 
226 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
227 			op = BUS_DMASYNC_POSTREAD;
228 		else
229 			op = BUS_DMASYNC_POSTWRITE;
230 		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
231 		bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
232 	}
233 
234 #ifdef AHD_TARGET_MODE
235 	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
236 		struct cam_path *ccb_path;
237 
238 		/*
239 		 * If we have finally disconnected, clean up our
240 		 * pending device state.
241 		 * XXX - There may be error states in which
242 		 *       we remain connected.
243 		 */
244 		ccb_path = ccb->ccb_h.path;
245 		if (ahd->pending_device != NULL
246 		 && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) {
247 
248 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
249 				ahd->pending_device = NULL;
250 			} else {
251 				xpt_print_path(ccb->ccb_h.path);
252 				printf("Still disconnected\n");
253 				ahd_freeze_ccb(ccb);
254 			}
255 		}
256 
257 		if (aic_get_transaction_status(scb) == CAM_REQ_INPROG)
258 			ccb->ccb_h.status |= CAM_REQ_CMP;
259 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
260 		ahd_free_scb(ahd, scb);
261 		xpt_done(ccb);
262 		return;
263 	}
264 #endif
265 
266 	/*
267 	 * If the recovery SCB completes, we must no
268 	 * longer be in timeout recovery.
269 	 */
270 	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
271 		struct	scb *list_scb;
272 
273 		/*
274 		 * We were able to complete the command successfully,
275 		 * so reinstate the timeouts for all other pending
276 		 * commands.
277 		 */
278 		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
279 			union ccb *ccb;
280 			uint64_t time;
281 
282 			ccb = list_scb->io_ctx;
283 			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
284 				continue;
285 
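			/* Convert the CCB timeout from milliseconds to ticks. */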
286 			time = ccb->ccb_h.timeout;
287 			time *= hz;
288 			time /= 1000;
289 			ccb->ccb_h.timeout_ch =
290 			    timeout(ahd_platform_timeout, list_scb, time);
291 		}
292 
293 		if (aic_get_transaction_status(scb) == CAM_BDR_SENT
294 		 || aic_get_transaction_status(scb) == CAM_REQ_ABORTED)
295 			aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);
296 
297 		ahd_print_path(ahd, scb);
298 		printf("no longer in timeout, status = %x\n",
299 		       ccb->ccb_h.status);
300 	}
301 
302 	/* Don't clobber any existing error state */
303 	if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) {
304 		ccb->ccb_h.status |= CAM_REQ_CMP;
305 	} else if ((scb->flags & SCB_SENSE) != 0) {
306 		/*
307 		 * We performed autosense retrieval.
308 		 *
309 		 * Zero any sense not transferred by the
310 		 * device.  The SCSI spec mandates that any
311 		 * untransferred data should be assumed to be
312 		 * zero.  Complete the 'bounce' of sense information
313 		 * through buffers accessible via bus-space by
314 		 * copying it into the client's csio.
315 		 */
316 		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
317 		memcpy(&ccb->csio.sense_data,
318 		       ahd_get_sense_buf(ahd, scb),
319 /* XXX What size do we want to use??? */
320 			sizeof(ccb->csio.sense_data)
321 		       - ccb->csio.sense_resid);
322 		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
323 	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
324 		struct scsi_status_iu_header *siu;
325 		u_int sense_len;
326 		int i;
327 
328 		/*
329 		 * Copy only the sense data into the provided buffer.
330 		 */
331 		siu = (struct scsi_status_iu_header *)scb->sense_data;
332 		sense_len = MIN(scsi_4btoul(siu->sense_length),
333 				sizeof(ccb->csio.sense_data));
334 		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
335 		memcpy(&ccb->csio.sense_data,
336 		       ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu),
337 		       sense_len);
338 		printf("Copied %d bytes of sense data at offset %d:", sense_len,
339 		       SIU_SENSE_OFFSET(siu));
340 		for (i = 0; i < sense_len; i++)
341 			printf(" 0x%x", ((uint8_t *)&ccb->csio.sense_data)[i]);
342 		printf("\n");
343 		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
344 	}
345 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
346 	ahd_free_scb(ahd, scb);
347 	xpt_done(ccb);
348 }
349 
350 static void
351 ahd_action(struct cam_sim *sim, union ccb *ccb)
352 {
353 	struct	ahd_softc *ahd;
354 #ifdef AHD_TARGET_MODE
355 	struct	ahd_tmode_lstate *lstate;
356 #endif
357 	u_int	target_id;
358 	u_int	our_id;
359 	long	s;
360 
361 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n"));
362 
363 	ahd = (struct ahd_softc *)cam_sim_softc(sim);
364 
365 	target_id = ccb->ccb_h.target_id;
366 	our_id = SIM_SCSI_ID(ahd, sim);
367 
368 	switch (ccb->ccb_h.func_code) {
369 	/* Common cases first */
370 #ifdef AHD_TARGET_MODE
371 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
372 	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
373 	{
374 		struct	   ahd_tmode_tstate *tstate;
375 		cam_status status;
376 
377 		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
378 					     &lstate, TRUE);
379 
380 		if (status != CAM_REQ_CMP) {
381 			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
382 				/* Response from the black hole device */
383 				tstate = NULL;
384 				lstate = ahd->black_hole;
385 			} else {
386 				ccb->ccb_h.status = status;
387 				xpt_done(ccb);
388 				break;
389 			}
390 		}
391 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
392 
393 			ahd_lock(ahd, &s);
394 			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
395 					  sim_links.sle);
396 			ccb->ccb_h.status = CAM_REQ_INPROG;
397 			if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0)
398 				ahd_run_tqinfifo(ahd, /*paused*/FALSE);
399 			ahd_unlock(ahd, &s);
400 			break;
401 		}
402 
403 		/*
404 		 * The target_id represents the target we attempt to
405 		 * select.  In target mode, this is the initiator of
406 		 * the original command.
407 		 */
408 		our_id = target_id;
409 		target_id = ccb->csio.init_id;
410 		/* FALLTHROUGH */
411 	}
412 #endif
413 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
414 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
415 	{
416 		struct	scb *scb;
417 		struct	hardware_scb *hscb;
418 		struct	ahd_initiator_tinfo *tinfo;
419 		struct	ahd_tmode_tstate *tstate;
420 		u_int	col_idx;
421 
422 		if ((ahd->flags & AHD_INITIATORROLE) == 0
423 		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
424 		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
425 			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
426 			xpt_done(ccb);
427 			return;
428 		}
429 
430 		/*
431 		 * get an scb to use.
432 		 */
433 		ahd_lock(ahd, &s);
434 		tinfo = ahd_fetch_transinfo(ahd, 'A', our_id,
435 					    target_id, &tstate);
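		/*
		 * Select an SCB collision index: untagged, packetized
		 * (IU), and target-mode requests take AHD_NEVER_COL_IDX,
		 * while tagged requests are grouped by target and lun.
		 */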
436 		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
437 		 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0
438 		 || ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
439 			col_idx = AHD_NEVER_COL_IDX;
440 		} else {
441 			col_idx = AHD_BUILD_COL_IDX(target_id,
442 						    ccb->ccb_h.target_lun);
443 		}
444 		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
445 
446 			xpt_freeze_simq(sim, /*count*/1);
447 			ahd->flags |= AHD_RESOURCE_SHORTAGE;
448 			ahd_unlock(ahd, &s);
449 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
450 			xpt_done(ccb);
451 			return;
452 		}
453 		ahd_unlock(ahd, &s);
454 
455 		hscb = scb->hscb;
456 
457 		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
458 			  ("start scb(%p)\n", scb));
459 		scb->io_ctx = ccb;
460 		/*
461 		 * So we can find the SCB when an abort is requested
462 		 */
463 		ccb->ccb_h.ccb_scb_ptr = scb;
464 
465 		/*
466 		 * Put all the arguments for the xfer in the scb
467 		 */
468 		hscb->control = 0;
469 		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
470 		hscb->lun = ccb->ccb_h.target_lun;
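		/*
		 * Device resets carry no CDB and no data phase; mark the
		 * SCB for a bus device reset (a LUN reset task management
		 * function on packetized devices) and submit it directly.
		 */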
471 		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
472 			hscb->cdb_len = 0;
473 			scb->flags |= SCB_DEVICE_RESET;
474 			hscb->control |= MK_MESSAGE;
475 			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
476 			ahd_execute_scb(scb, NULL, 0, 0);
477 		} else {
478 #ifdef AHD_TARGET_MODE
479 			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
480 				struct target_data *tdata;
481 
482 				tdata = &hscb->shared_data.tdata;
483 				if (ahd->pending_device == lstate)
484 					scb->flags |= SCB_TARGET_IMMEDIATE;
485 				hscb->control |= TARGET_SCB;
486 				tdata->target_phases = 0;
487 				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
488 					tdata->target_phases |= SPHASE_PENDING;
489 					tdata->scsi_status =
490 					    ccb->csio.scsi_status;
491 				}
492 				if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
493 					tdata->target_phases |= NO_DISCONNECT;
494 
495 				tdata->initiator_tag =
496 				    ahd_htole16(ccb->csio.tag_id);
497 			}
498 #endif
499 			hscb->task_management = 0;
500 			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
501 				hscb->control |= ccb->csio.tag_action;
502 
503 			ahd_setup_data(ahd, sim, &ccb->csio, scb);
504 		}
505 		break;
506 	}
507 #ifdef AHD_TARGET_MODE
508 	case XPT_NOTIFY_ACK:
509 	case XPT_IMMED_NOTIFY:
510 	{
511 		struct	   ahd_tmode_tstate *tstate;
512 		struct	   ahd_tmode_lstate *lstate;
513 		cam_status status;
514 
515 		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
516 					     &lstate, TRUE);
517 
518 		if (status != CAM_REQ_CMP) {
519 			ccb->ccb_h.status = status;
520 			xpt_done(ccb);
521 			break;
522 		}
523 		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
524 				  sim_links.sle);
525 		ccb->ccb_h.status = CAM_REQ_INPROG;
526 		ahd_send_lstate_events(ahd, lstate);
527 		break;
528 	}
529 	case XPT_EN_LUN:		/* Enable LUN as a target */
530 		ahd_handle_en_lun(ahd, sim, ccb);
531 		xpt_done(ccb);
532 		break;
533 #endif
534 	case XPT_ABORT:			/* Abort the specified CCB */
535 	{
536 		ahd_abort_ccb(ahd, sim, ccb);
537 		break;
538 	}
539 	case XPT_SET_TRAN_SETTINGS:
540 	{
541 		ahd_lock(ahd, &s);
542 		ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
543 				      SIM_CHANNEL(ahd, sim), &ccb->cts);
544 		ahd_unlock(ahd, &s);
545 		xpt_done(ccb);
546 		break;
547 	}
548 	case XPT_GET_TRAN_SETTINGS:
549 	/* Get default/user set transfer settings for the target */
550 	{
551 		ahd_lock(ahd, &s);
552 		ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
553 				      SIM_CHANNEL(ahd, sim), &ccb->cts);
554 		ahd_unlock(ahd, &s);
555 		xpt_done(ccb);
556 		break;
557 	}
558 	case XPT_CALC_GEOMETRY:
559 	{
560 		aic_calc_geometry(&ccb->ccg, ahd->flags & AHD_EXTENDED_TRANS_A);
561 		xpt_done(ccb);
562 		break;
563 	}
564 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
565 	{
566 		int  found;
567 
568 		ahd_lock(ahd, &s);
569 		found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
570 					  /*initiate reset*/TRUE);
571 		ahd_unlock(ahd, &s);
572 		if (bootverbose) {
573 			xpt_print_path(SIM_PATH(ahd, sim));
574 			printf("SCSI bus reset delivered. "
575 			       "%d SCBs aborted.\n", found);
576 		}
577 		ccb->ccb_h.status = CAM_REQ_CMP;
578 		xpt_done(ccb);
579 		break;
580 	}
581 	case XPT_TERM_IO:		/* Terminate the I/O process */
582 		/* XXX Implement */
583 		ccb->ccb_h.status = CAM_REQ_INVALID;
584 		xpt_done(ccb);
585 		break;
586 	case XPT_PATH_INQ:		/* Path routing inquiry */
587 	{
588 		struct ccb_pathinq *cpi = &ccb->cpi;
589 
590 		cpi->version_num = 1; /* XXX??? */
591 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
592 		if ((ahd->features & AHD_WIDE) != 0)
593 			cpi->hba_inquiry |= PI_WIDE_16;
594 		if ((ahd->features & AHD_TARGETMODE) != 0) {
595 			cpi->target_sprt = PIT_PROCESSOR
596 					 | PIT_DISCONNECT
597 					 | PIT_TERM_IO;
598 		} else {
599 			cpi->target_sprt = 0;
600 		}
601 		cpi->hba_misc = 0;
602 		cpi->hba_eng_cnt = 0;
603 		cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7;
604 		cpi->max_lun = AHD_NUM_LUNS - 1;
605 		cpi->initiator_id = ahd->our_id;
606 		if ((ahd->flags & AHD_RESET_BUS_A) == 0) {
607 			cpi->hba_misc |= PIM_NOBUSRESET;
608 		}
609 		cpi->bus_id = cam_sim_bus(sim);
610 		cpi->base_transfer_speed = 3300;
611 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
612 		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
613 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
614 		cpi->unit_number = cam_sim_unit(sim);
615 #ifdef AHD_NEW_TRAN_SETTINGS
616 		cpi->protocol = PROTO_SCSI;
617 		cpi->protocol_version = SCSI_REV_2;
618 		cpi->transport = XPORT_SPI;
619 		/* U320 adapters speak SPI-4 and support DT clocking. */
620 		cpi->transport_version = 4;
621 		cpi->xport_specific.spi.ppr_options =
622 		    SID_SPI_CLOCK_DT_ST;
623 #endif
624 		cpi->ccb_h.status = CAM_REQ_CMP;
625 		xpt_done(ccb);
626 		break;
627 	}
628 	default:
629 		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
630 		xpt_done(ccb);
631 		break;
632 	}
633 }
634 
635 
636 static void
637 ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
638 		      struct ccb_trans_settings *cts)
639 {
640 #ifdef AHD_NEW_TRAN_SETTINGS
641 	struct	  ahd_devinfo devinfo;
642 	struct	  ccb_trans_settings_scsi *scsi;
643 	struct	  ccb_trans_settings_spi *spi;
644 	struct	  ahd_initiator_tinfo *tinfo;
645 	struct	  ahd_tmode_tstate *tstate;
646 	uint16_t *discenable;
647 	uint16_t *tagenable;
648 	u_int	  update_type;
649 
650 	scsi = &cts->proto_specific.scsi;
651 	spi = &cts->xport_specific.spi;
652 	ahd_compile_devinfo(&devinfo, our_id,
653 			    cts->ccb_h.target_id,
654 			    cts->ccb_h.target_lun,
655 			    channel,
656 			    ROLE_UNKNOWN);
657 	tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
658 				    devinfo.our_scsiid,
659 				    devinfo.target, &tstate);
660 	update_type = 0;
661 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
662 		update_type |= AHD_TRANS_GOAL;
663 		discenable = &tstate->discenable;
664 		tagenable = &tstate->tagenable;
665 		tinfo->curr.protocol_version = cts->protocol_version;
666 		tinfo->curr.transport_version = cts->transport_version;
667 		tinfo->goal.protocol_version = cts->protocol_version;
668 		tinfo->goal.transport_version = cts->transport_version;
669 	} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
670 		update_type |= AHD_TRANS_USER;
671 		discenable = &ahd->user_discenable;
672 		tagenable = &ahd->user_tagenable;
673 		tinfo->user.protocol_version = cts->protocol_version;
674 		tinfo->user.transport_version = cts->transport_version;
675 	} else {
676 		cts->ccb_h.status = CAM_REQ_INVALID;
677 		return;
678 	}
679 
680 	if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
681 		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
682 			*discenable |= devinfo.target_mask;
683 		else
684 			*discenable &= ~devinfo.target_mask;
685 	}
686 
687 	if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
688 		if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
689 			*tagenable |= devinfo.target_mask;
690 		else
691 			*tagenable &= ~devinfo.target_mask;
692 	}
693 
694 	if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
695 		ahd_validate_width(ahd, /*tinfo limit*/NULL,
696 				   &spi->bus_width, ROLE_UNKNOWN);
697 		ahd_set_width(ahd, &devinfo, spi->bus_width,
698 			      update_type, /*paused*/FALSE);
699 	}
700 
701 	if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
702 		if (update_type == AHD_TRANS_USER)
703 			spi->ppr_options = tinfo->user.ppr_options;
704 		else
705 			spi->ppr_options = tinfo->goal.ppr_options;
706 	}
707 
708 	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
709 		if (update_type == AHD_TRANS_USER)
710 			spi->sync_offset = tinfo->user.offset;
711 		else
712 			spi->sync_offset = tinfo->goal.offset;
713 	}
714 
715 	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
716 		if (update_type == AHD_TRANS_USER)
717 			spi->sync_period = tinfo->user.period;
718 		else
719 			spi->sync_period = tinfo->goal.period;
720 	}
721 
722 	if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
723 	 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
724 		u_int	maxsync;
725 
726 		maxsync = AHD_SYNCRATE_MAX;
727 
728 		if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
729 			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
730 
731 		if ((*discenable & devinfo.target_mask) == 0)
732 			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
733 
734 		ahd_find_syncrate(ahd, &spi->sync_period,
735 				  &spi->ppr_options, maxsync);
736 		ahd_validate_offset(ahd, /*tinfo limit*/NULL,
737 				    spi->sync_period, &spi->sync_offset,
738 				    spi->bus_width, ROLE_UNKNOWN);
739 
740 		/* We use a period of 0 to represent async */
741 		if (spi->sync_offset == 0) {
742 			spi->sync_period = 0;
743 			spi->ppr_options = 0;
744 		}
745 
746 		ahd_set_syncrate(ahd, &devinfo, spi->sync_period,
747 				 spi->sync_offset, spi->ppr_options,
748 				 update_type, /*paused*/FALSE);
749 	}
750 	cts->ccb_h.status = CAM_REQ_CMP;
751 #else
752 	struct	  ahd_devinfo devinfo;
753 	struct	  ahd_initiator_tinfo *tinfo;
754 	struct	  ahd_tmode_tstate *tstate;
755 	uint16_t *discenable;
756 	uint16_t *tagenable;
757 	u_int	  update_type;
758 
759 	ahd_compile_devinfo(&devinfo, our_id,
760 			    cts->ccb_h.target_id,
761 			    cts->ccb_h.target_lun,
762 			    channel,
763 			    ROLE_UNKNOWN);
764 	tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
765 				    devinfo.our_scsiid,
766 				    devinfo.target, &tstate);
767 	update_type = 0;
768 	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
769 		update_type |= AHD_TRANS_GOAL;
770 		discenable = &tstate->discenable;
771 		tagenable = &tstate->tagenable;
772 	} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
773 		update_type |= AHD_TRANS_USER;
774 		discenable = &ahd->user_discenable;
775 		tagenable = &ahd->user_tagenable;
776 	} else {
777 		cts->ccb_h.status = CAM_REQ_INVALID;
778 		return;
779 	}
780 
781 	if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
782 		if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
783 			*discenable |= devinfo.target_mask;
784 		else
785 			*discenable &= ~devinfo.target_mask;
786 	}
787 
788 	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
789 		if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
790 			*tagenable |= devinfo.target_mask;
791 		else
792 			*tagenable &= ~devinfo.target_mask;
793 	}
794 
795 	if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
796 		ahd_validate_width(ahd, /*tinfo limit*/NULL,
797 				   &cts->bus_width, ROLE_UNKNOWN);
798 		ahd_set_width(ahd, &devinfo, cts->bus_width,
799 			      update_type, /*paused*/FALSE);
800 	}
801 
802 	if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
803 		if (update_type == AHD_TRANS_USER)
804 			cts->sync_offset = tinfo->user.offset;
805 		else
806 			cts->sync_offset = tinfo->goal.offset;
807 	}
808 
809 	if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
810 		if (update_type == AHD_TRANS_USER)
811 			cts->sync_period = tinfo->user.period;
812 		else
813 			cts->sync_period = tinfo->goal.period;
814 	}
815 
816 	if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
817 	 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)
818 	 || ((cts->valid & CCB_TRANS_TQ_VALID) != 0)
819 	 || ((cts->valid & CCB_TRANS_DISC_VALID) != 0)) {
820 		u_int ppr_options;
821 		u_int maxsync;
822 
823 		maxsync = AHD_SYNCRATE_MAX;
824 		ppr_options = 0;
825 		if (cts->sync_period <= AHD_SYNCRATE_DT
826 		 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT) {
827 			ppr_options = tinfo->user.ppr_options
828 				    | MSG_EXT_PPR_DT_REQ;
829 		}
830 
831 		if ((*tagenable & devinfo.target_mask) == 0
832 		 || (*discenable & devinfo.target_mask) == 0)
833 			ppr_options &= ~MSG_EXT_PPR_IU_REQ;
834 
835 		ahd_find_syncrate(ahd, &cts->sync_period,
836 				  &ppr_options, maxsync);
837 		ahd_validate_offset(ahd, /*tinfo limit*/NULL,
838 				    cts->sync_period, &cts->sync_offset,
839 				    MSG_EXT_WDTR_BUS_8_BIT,
840 				    ROLE_UNKNOWN);
841 
842 		/* We use a period of 0 to represent async */
843 		if (cts->sync_offset == 0) {
844 			cts->sync_period = 0;
845 			ppr_options = 0;
846 		}
847 
848 		if (ppr_options != 0
849 		 && tinfo->user.transport_version >= 3) {
850 			tinfo->goal.transport_version =
851 			    tinfo->user.transport_version;
852 			tinfo->curr.transport_version =
853 			    tinfo->user.transport_version;
854 		}
855 
856 		ahd_set_syncrate(ahd, &devinfo, cts->sync_period,
857 				 cts->sync_offset, ppr_options,
858 				 update_type, /*paused*/FALSE);
859 	}
860 	cts->ccb_h.status = CAM_REQ_CMP;
861 #endif
862 }
863 
864 static void
865 ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
866 		      struct ccb_trans_settings *cts)
867 {
868 #ifdef AHD_NEW_TRAN_SETTINGS
869 	struct	ahd_devinfo devinfo;
870 	struct	ccb_trans_settings_scsi *scsi;
871 	struct	ccb_trans_settings_spi *spi;
872 	struct	ahd_initiator_tinfo *targ_info;
873 	struct	ahd_tmode_tstate *tstate;
874 	struct	ahd_transinfo *tinfo;
875 
876 	scsi = &cts->proto_specific.scsi;
877 	spi = &cts->xport_specific.spi;
878 	ahd_compile_devinfo(&devinfo, our_id,
879 			    cts->ccb_h.target_id,
880 			    cts->ccb_h.target_lun,
881 			    channel, ROLE_UNKNOWN);
882 	targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
883 					devinfo.our_scsiid,
884 					devinfo.target, &tstate);
885 
886 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
887 		tinfo = &targ_info->curr;
888 	else
889 		tinfo = &targ_info->user;
890 
891 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
892 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
893 	if (cts->type == CTS_TYPE_USER_SETTINGS) {
894 		if ((ahd->user_discenable & devinfo.target_mask) != 0)
895 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
896 
897 		if ((ahd->user_tagenable & devinfo.target_mask) != 0)
898 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
899 	} else {
900 		if ((tstate->discenable & devinfo.target_mask) != 0)
901 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
902 
903 		if ((tstate->tagenable & devinfo.target_mask) != 0)
904 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
905 	}
906 	cts->protocol_version = tinfo->protocol_version;
907 	cts->transport_version = tinfo->transport_version;
908 
909 	spi->sync_period = tinfo->period;
910 	spi->sync_offset = tinfo->offset;
911 	spi->bus_width = tinfo->width;
912 	spi->ppr_options = tinfo->ppr_options;
913 
914 	cts->protocol = PROTO_SCSI;
915 	cts->transport = XPORT_SPI;
916 	spi->valid = CTS_SPI_VALID_SYNC_RATE
917 		   | CTS_SPI_VALID_SYNC_OFFSET
918 		   | CTS_SPI_VALID_BUS_WIDTH
919 		   | CTS_SPI_VALID_PPR_OPTIONS;
920 
921 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
922 		scsi->valid = CTS_SCSI_VALID_TQ;
923 		spi->valid |= CTS_SPI_VALID_DISC;
924 	} else {
925 		scsi->valid = 0;
926 	}
927 
928 	cts->ccb_h.status = CAM_REQ_CMP;
929 #else
930 	struct	ahd_devinfo devinfo;
931 	struct	ahd_initiator_tinfo *targ_info;
932 	struct	ahd_tmode_tstate *tstate;
933 	struct	ahd_transinfo *tinfo;
934 
935 	ahd_compile_devinfo(&devinfo, our_id,
936 			    cts->ccb_h.target_id,
937 			    cts->ccb_h.target_lun,
938 			    channel, ROLE_UNKNOWN);
939 	targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
940 					devinfo.our_scsiid,
941 					devinfo.target, &tstate);
942 
943 	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
944 		tinfo = &targ_info->curr;
945 	else
946 		tinfo = &targ_info->user;
947 
948 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
949 	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
950 		if ((ahd->user_discenable & devinfo.target_mask) != 0)
951 			cts->flags |= CCB_TRANS_DISC_ENB;
952 
953 		if ((ahd->user_tagenable & devinfo.target_mask) != 0)
954 			cts->flags |= CCB_TRANS_TAG_ENB;
955 	} else {
956 		if ((tstate->discenable & devinfo.target_mask) != 0)
957 			cts->flags |= CCB_TRANS_DISC_ENB;
958 
959 		if ((tstate->tagenable & devinfo.target_mask) != 0)
960 			cts->flags |= CCB_TRANS_TAG_ENB;
961 	}
962 	cts->sync_period = tinfo->period;
963 	cts->sync_offset = tinfo->offset;
964 	cts->bus_width = tinfo->width;
965 
966 	cts->valid = CCB_TRANS_SYNC_RATE_VALID
967 		   | CCB_TRANS_SYNC_OFFSET_VALID
968 		   | CCB_TRANS_BUS_WIDTH_VALID;
969 
970 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
971 		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;
972 
973 	cts->ccb_h.status = CAM_REQ_CMP;
974 #endif
975 }
976 
977 static void
978 ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
979 {
980 	struct ahd_softc *ahd;
981 	struct cam_sim *sim;
982 
983 	sim = (struct cam_sim *)callback_arg;
984 	ahd = (struct ahd_softc *)cam_sim_softc(sim);
985 	switch (code) {
986 	case AC_LOST_DEVICE:
987 	{
988 		struct	ahd_devinfo devinfo;
989 		long	s;
990 
991 		ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
992 				    xpt_path_target_id(path),
993 				    xpt_path_lun_id(path),
994 				    SIM_CHANNEL(ahd, sim),
995 				    ROLE_UNKNOWN);
996 
997 		/*
998 		 * Revert to async/narrow transfers
999 		 * for the next device.
1000 		 */
1001 		ahd_lock(ahd, &s);
1002 		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1003 			      AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE);
1004 		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
1005 				 /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR,
1006 				 /*paused*/FALSE);
1007 		ahd_unlock(ahd, &s);
1008 		break;
1009 	}
1010 	default:
1011 		break;
1012 	}
1013 }
1014 
1015 static void
1016 ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
1017 		int error)
1018 {
1019 	struct	scb *scb;
1020 	union	ccb *ccb;
1021 	struct	ahd_softc *ahd;
1022 	struct	ahd_initiator_tinfo *tinfo;
1023 	struct	ahd_tmode_tstate *tstate;
1024 	u_int	mask;
1025 	u_long	s;
1026 
1027 	scb = (struct scb *)arg;
1028 	ccb = scb->io_ctx;
1029 	ahd = scb->ahd_softc;
1030 
1031 	if (error != 0) {
1032 		if (error == EFBIG)
1033 			aic_set_transaction_status(scb, CAM_REQ_TOO_BIG);
1034 		else
1035 			aic_set_transaction_status(scb, CAM_REQ_CMP_ERR);
1036 		if (nsegments != 0)
1037 			bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
1038 		ahd_lock(ahd, &s);
1039 		ahd_free_scb(ahd, scb);
1040 		ahd_unlock(ahd, &s);
1041 		xpt_done(ccb);
1042 		return;
1043 	}
1044 	scb->sg_count = 0;
1045 	if (nsegments != 0) {
1046 		void *sg;
1047 		bus_dmasync_op_t op;
1048 		u_int i;
1049 
1050 		/* Copy the segments into our SG list */
1051 		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {
1052 
1053 			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
1054 					  dm_segs->ds_len,
1055 					  /*last*/i == 1);
1056 			dm_segs++;
1057 		}
1058 
1059 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1060 			op = BUS_DMASYNC_PREREAD;
1061 		else
1062 			op = BUS_DMASYNC_PREWRITE;
1063 
1064 		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
1065 
1066 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1067 			struct target_data *tdata;
1068 
1069 			tdata = &scb->hscb->shared_data.tdata;
1070 			tdata->target_phases |= DPHASE_PENDING;
1071 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1072 				tdata->data_phase = P_DATAOUT;
1073 			else
1074 				tdata->data_phase = P_DATAIN;
1075 		}
1076 	}
1077 
1078 	ahd_lock(ahd, &s);
1079 
1080 	/*
1081 	 * Last time we need to check if this SCB needs to
1082 	 * be aborted.
1083 	 */
1084 	if (aic_get_transaction_status(scb) != CAM_REQ_INPROG) {
1085 		if (nsegments != 0)
1086 			bus_dmamap_unload(ahd->buffer_dmat,
1087 					  scb->dmamap);
1088 		ahd_free_scb(ahd, scb);
1089 		ahd_unlock(ahd, &s);
1090 		xpt_done(ccb);
1091 		return;
1092 	}
1093 
1094 	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
1095 				    SCSIID_OUR_ID(scb->hscb->scsiid),
1096 				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
1097 				    &tstate);
1098 
1099 	mask = SCB_GET_TARGET_MASK(ahd, scb);
1100 
1101 	if ((tstate->discenable & mask) != 0
1102 	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1103 		scb->hscb->control |= DISCENB;
1104 
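	/*
	 * Connections negotiated for information units run packetized.
	 * Task management requests travel in the IU itself, so MK_MESSAGE
	 * is not needed for them.
	 */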
1105 	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
1106 		scb->flags |= SCB_PACKETIZED;
1107 		if (scb->hscb->task_management != 0)
1108 			scb->hscb->control &= ~MK_MESSAGE;
1109 	}
1110 
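	/*
	 * Raise MK_MESSAGE so the sequencer allows a message-out phase,
	 * either because CAM explicitly requested negotiation or because
	 * our transfer state calls for auto-negotiation.
	 */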
1111 	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
1112 	 && (tinfo->goal.width != 0
1113 	  || tinfo->goal.period != 0
1114 	  || tinfo->goal.ppr_options != 0)) {
1115 		scb->flags |= SCB_NEGOTIATE;
1116 		scb->hscb->control |= MK_MESSAGE;
1117 	} else if ((tstate->auto_negotiate & mask) != 0) {
1118 		scb->flags |= SCB_AUTO_NEGOTIATE;
1119 		scb->hscb->control |= MK_MESSAGE;
1120 	}
1121 
1122 	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1123 
1124 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1125 
1126 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1127 		uint64_t time;
1128 
1129 		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1130 			ccb->ccb_h.timeout = 5 * 1000;
1131 
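		/* Convert the CCB timeout from milliseconds to ticks. */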
1132 		time = ccb->ccb_h.timeout;
1133 		time *= hz;
1134 		time /= 1000;
1135 		ccb->ccb_h.timeout_ch =
1136 		    timeout(ahd_platform_timeout, (caddr_t)scb, time);
1137 	}
1138 
1139 	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
1140 		/* Define a mapping from our tag to the SCB. */
1141 		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
1142 		ahd_pause(ahd);
1143 		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
1144 		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
1145 		ahd_unpause(ahd);
1146 	} else {
1147 		ahd_queue_scb(ahd, scb);
1148 	}
1149 
1150 	ahd_unlock(ahd, &s);
1151 }
1152 
1153 static void
1154 ahd_poll(struct cam_sim *sim)
1155 {
1156 	ahd_intr(cam_sim_softc(sim));
1157 }
1158 
1159 static void
1160 ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
1161 	       struct ccb_scsiio *csio, struct scb *scb)
1162 {
1163 	struct hardware_scb *hscb;
1164 	struct ccb_hdr *ccb_h;
1165 
1166 	hscb = scb->hscb;
1167 	ccb_h = &csio->ccb_h;
1168 
1169 	csio->resid = 0;
1170 	csio->sense_resid = 0;
1171 	if (ccb_h->func_code == XPT_SCSI_IO) {
1172 		hscb->cdb_len = csio->cdb_len;
1173 		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
1174 
1175 			if (hscb->cdb_len > MAX_CDB_LEN
1176 			 && (ccb_h->flags & CAM_CDB_PHYS) == 0) {
1177 				u_long s;
1178 
1179 				/*
1180 				 * Should CAM start to support CDB sizes
1181 				 * greater than 16 bytes, we could use
1182 				 * the sense buffer to store the CDB.
1183 				 */
1184 				aic_set_transaction_status(scb,
1185 							   CAM_REQ_INVALID);
1186 				ahd_lock(ahd, &s);
1187 				ahd_free_scb(ahd, scb);
1188 				ahd_unlock(ahd, &s);
1189 				xpt_done((union ccb *)csio);
1190 				return;
1191 			}
1192 			if ((ccb_h->flags & CAM_CDB_PHYS) != 0) {
1193 				hscb->shared_data.idata.cdb_from_host.cdbptr =
1194 				   aic_htole64((uintptr_t)csio->cdb_io.cdb_ptr);
1195 				hscb->shared_data.idata.cdb_from_host.cdblen =
1196 				   csio->cdb_len;
1197 				hscb->cdb_len |= SCB_CDB_LEN_PTR;
1198 			} else {
1199 				memcpy(hscb->shared_data.idata.cdb,
1200 				       csio->cdb_io.cdb_ptr,
1201 				       hscb->cdb_len);
1202 			}
1203 		} else {
1204 			if (hscb->cdb_len > MAX_CDB_LEN) {
1205 				u_long s;
1206 
1207 				aic_set_transaction_status(scb,
1208 							   CAM_REQ_INVALID);
1209 				ahd_lock(ahd, &s);
1210 				ahd_free_scb(ahd, scb);
1211 				ahd_unlock(ahd, &s);
1212 				xpt_done((union ccb *)csio);
1213 				return;
1214 			}
1215 			memcpy(hscb->shared_data.idata.cdb,
1216 			       csio->cdb_io.cdb_bytes, hscb->cdb_len);
1217 		}
1218 	}
1219 
1220 	/* Only use S/G if there is a transfer */
1221 	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1222 		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1223 			/* We've been given a pointer to a single buffer */
1224 			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1225 				int s;
1226 				int error;
1227 
1228 				s = splsoftvm();
1229 				error = bus_dmamap_load(ahd->buffer_dmat,
1230 							scb->dmamap,
1231 							csio->data_ptr,
1232 							csio->dxfer_len,
1233 							ahd_execute_scb,
1234 							scb, /*flags*/0);
1235 				if (error == EINPROGRESS) {
1236 					/*
1237 					 * So as to maintain ordering,
1238 					 * freeze the controller queue
1239 					 * until our mapping is
1240 					 * returned.
1241 					 */
1242 					xpt_freeze_simq(sim,
1243 							/*count*/1);
1244 					scb->io_ctx->ccb_h.status |=
1245 					    CAM_RELEASE_SIMQ;
1246 				}
1247 				splx(s);
1248 			} else {
1249 				struct bus_dma_segment seg;
1250 
1251 				/* Pointer to physical buffer */
1252 				if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE)
1253 					panic("ahd_setup_data - Transfer size "
1254 					      "larger than device max");
1255 
1256 				seg.ds_addr =
1257 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1258 				seg.ds_len = csio->dxfer_len;
1259 				ahd_execute_scb(scb, &seg, 1, 0);
1260 			}
1261 		} else {
1262 			struct bus_dma_segment *segs;
1263 
1264 			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
1265 				panic("ahd_setup_data - Physical segment "
1266 				      "pointers unsupported");
1267 
1268 			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
1269 				panic("ahd_setup_data - Virtual segment "
1270 				      "addresses unsupported");
1271 
1272 			/* Just use the segments provided */
1273 			segs = (struct bus_dma_segment *)csio->data_ptr;
1274 			ahd_execute_scb(scb, segs, csio->sglist_cnt, 0);
1275 		}
1276 	} else {
1277 		ahd_execute_scb(scb, NULL, 0, 0);
1278 	}
1279 }
1280 
1281 static void
1282 ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
1283 {
1284 	union ccb *abort_ccb;
1285 
1286 	abort_ccb = ccb->cab.abort_ccb;
1287 	switch (abort_ccb->ccb_h.func_code) {
1288 #ifdef AHD_TARGET_MODE
1289 	case XPT_ACCEPT_TARGET_IO:
1290 	case XPT_IMMED_NOTIFY:
1291 	case XPT_CONT_TARGET_IO:
1292 	{
1293 		struct ahd_tmode_tstate *tstate;
1294 		struct ahd_tmode_lstate *lstate;
1295 		struct ccb_hdr_slist *list;
1296 		cam_status status;
1297 
1298 		status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate,
1299 					     &lstate, TRUE);
1300 
1301 		if (status != CAM_REQ_CMP) {
1302 			ccb->ccb_h.status = status;
1303 			break;
1304 		}
1305 
1306 		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
1307 			list = &lstate->accept_tios;
1308 		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
1309 			list = &lstate->immed_notifies;
1310 		else
1311 			list = NULL;
1312 
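		/*
		 * Walk the selected SLIST looking for the CCB to abort
		 * and unlink it if found.
		 */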
1313 		if (list != NULL) {
1314 			struct ccb_hdr *curelm;
1315 			int found;
1316 
1317 			curelm = SLIST_FIRST(list);
1318 			found = 0;
1319 			if (curelm == &abort_ccb->ccb_h) {
1320 				found = 1;
1321 				SLIST_REMOVE_HEAD(list, sim_links.sle);
1322 			} else {
1323 				while (curelm != NULL) {
1324 					struct ccb_hdr *nextelm;
1325 
1326 					nextelm =
1327 					    SLIST_NEXT(curelm, sim_links.sle);
1328 
1329 					if (nextelm == &abort_ccb->ccb_h) {
1330 						found = 1;
1331 						SLIST_NEXT(curelm,
1332 							   sim_links.sle) =
1333 						    SLIST_NEXT(nextelm,
1334 							       sim_links.sle);
1335 						break;
1336 					}
1337 					curelm = nextelm;
1338 				}
1339 			}
1340 
1341 			if (found) {
1342 				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
1343 				xpt_done(abort_ccb);
1344 				ccb->ccb_h.status = CAM_REQ_CMP;
1345 			} else {
1346 				xpt_print_path(abort_ccb->ccb_h.path);
1347 				printf("Not found\n");
1348 				ccb->ccb_h.status = CAM_PATH_INVALID;
1349 			}
1350 			break;
1351 		}
1352 		/* FALLTHROUGH */
1353 	}
1354 #endif
1355 	case XPT_SCSI_IO:
1356 		/* XXX Fully implement the hard ones */
1357 		ccb->ccb_h.status = CAM_UA_ABORT;
1358 		break;
1359 	default:
1360 		ccb->ccb_h.status = CAM_REQ_INVALID;
1361 		break;
1362 	}
1363 	xpt_done(ccb);
1364 }
1365 
1366 void
1367 ahd_send_async(struct ahd_softc *ahd, char channel, u_int target,
1368 		u_int lun, ac_code code, void *opt_arg)
1369 {
1370 	struct	ccb_trans_settings cts;
1371 	struct cam_path *path;
1372 	void *arg;
1373 	int error;
1374 
1375 	arg = NULL;
1376 	error = ahd_create_path(ahd, channel, target, lun, &path);
1377 
1378 	if (error != CAM_REQ_CMP)
1379 		return;
1380 
1381 	switch (code) {
1382 	case AC_TRANSFER_NEG:
1383 	{
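		/*
		 * Build a transfer settings CCB describing the current
		 * negotiation state so peripheral drivers receiving this
		 * async callback can pick up the new parameters.
		 */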
1384 #ifdef AHD_NEW_TRAN_SETTINGS
1385 		struct	ccb_trans_settings_scsi *scsi;
1386 
1387 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1388 		scsi = &cts.proto_specific.scsi;
1389 #else
1390 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1391 #endif
1392 		cts.ccb_h.path = path;
1393 		cts.ccb_h.target_id = target;
1394 		cts.ccb_h.target_lun = lun;
1395 		ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts);
1396 		arg = &cts;
1397 #ifdef AHD_NEW_TRAN_SETTINGS
1398 		scsi->valid &= ~CTS_SCSI_VALID_TQ;
1399 		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1400 #else
1401 		cts.valid &= ~CCB_TRANS_TQ_VALID;
1402 		cts.flags &= ~CCB_TRANS_TAG_ENB;
1403 #endif
1404 		if (opt_arg == NULL)
1405 			break;
1406 		if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED)
1407 #ifdef AHD_NEW_TRAN_SETTINGS
1408 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
1409 		scsi->valid |= CTS_SCSI_VALID_TQ;
1410 #else
1411 			cts.flags |= CCB_TRANS_TAG_ENB;
1412 		cts.valid |= CCB_TRANS_TQ_VALID;
1413 #endif
1414 		break;
1415 	}
1416 	case AC_SENT_BDR:
1417 	case AC_BUS_RESET:
1418 		break;
1419 	default:
1420 		panic("ahd_send_async: Unexpected async event");
1421 	}
1422 	xpt_async(code, path, arg);
1423 	xpt_free_path(path);
1424 }
1425 
1426 void
1427 ahd_platform_set_tags(struct ahd_softc *ahd,
1428 		      struct ahd_devinfo *devinfo, int enable)
1429 {
1430 }
1431 
1432 int
1433 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
1434 {
1435 	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
1436 	    M_NOWAIT | M_ZERO);
1437 	if (ahd->platform_data == NULL)
1438 		return (ENOMEM);
1439 	return (0);
1440 }
1441 
1442 void
1443 ahd_platform_free(struct ahd_softc *ahd)
1444 {
1445 	struct ahd_platform_data *pdata;
1446 
1447 	pdata = ahd->platform_data;
1448 	if (pdata != NULL) {
1449 		if (pdata->regs[0] != NULL)
1450 			bus_release_resource(ahd->dev_softc,
1451 					     pdata->regs_res_type[0],
1452 					     pdata->regs_res_id[0],
1453 					     pdata->regs[0]);
1454 
1455 		if (pdata->regs[1] != NULL)
1456 			bus_release_resource(ahd->dev_softc,
1457 					     pdata->regs_res_type[1],
1458 					     pdata->regs_res_id[1],
1459 					     pdata->regs[1]);
1460 
1461 		if (pdata->irq != NULL)
1462 			bus_release_resource(ahd->dev_softc,
1463 					     pdata->irq_res_type,
1464 					     0, pdata->irq);
1465 
1466 		if (pdata->sim != NULL) {
1467 			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
1468 			xpt_free_path(pdata->path);
1469 			xpt_bus_deregister(cam_sim_path(pdata->sim));
1470 			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
1471 		}
1472 		if (pdata->eh != NULL)
1473 			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
1474 		free(ahd->platform_data, M_DEVBUF);
1475 	}
1476 }
1477 
1478 int
1479 ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
1480 {
1481 	/* We don't sort softcs under FreeBSD, so always report equal. */
1482 	return (0);
1483 }
1484 
1485 int
1486 ahd_detach(device_t dev)
1487 {
1488 	struct ahd_softc *ahd;
1489 	u_long l;
1490 	u_long s;
1491 
1492 	ahd_list_lock(&l);
1493 	device_printf(dev, "detaching device\n");
1494 	ahd = device_get_softc(dev);
1495 	ahd = ahd_find_softc(ahd);
1496 	if (ahd == NULL) {
1497 		device_printf(dev, "aic7xxx already detached\n");
1498 		ahd_list_unlock(&l);
1499 		return (ENOENT);
1500 	}
1501 	TAILQ_REMOVE(&ahd_tailq, ahd, links);
1502 	ahd_list_unlock(&l);
1503 	ahd_lock(ahd, &s);
1504 	ahd_intr_enable(ahd, FALSE);
1505 	bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih);
1506 	ahd_unlock(ahd, &s);
1507 	ahd_free(ahd);
1508 	return (0);
1509 }
1510 
1511 #if UNUSED
1512 static void
1513 ahd_dump_targcmd(struct target_cmd *cmd)
1514 {
1515 	uint8_t *byte;
1516 	uint8_t *last_byte;
1517 	int i;
1518 
1519 	byte = &cmd->initiator_channel;
1520 	/* Debugging info for received commands */
1521 	last_byte = &cmd[1].initiator_channel;
1522 
1523 	i = 0;
1524 	while (byte < last_byte) {
1525 		if (i == 0)
1526 			printf("\t");
1527 		printf("%#x", *byte++);
1528 		i++;
1529 		if (i == 8) {
1530 			printf("\n");
1531 			i = 0;
1532 		} else {
1533 			printf(", ");
1534 		}
1535 	}
1536 }
1537 #endif
1538 
1539 static int
1540 ahd_modevent(module_t mod, int type, void *data)
1541 {
1542 	/* XXX Deal with busy status on unload. */
1543 	return (0);
1544 }
1545 
1546 static moduledata_t ahd_mod = {
1547 	"ahd",
1548 	ahd_modevent,
1549 	NULL
1550 };
1551 
1552 /********************************** DDB Hooks *********************************/
1553 #ifdef DDB
1554 static struct ahd_softc *ahd_ddb_softc;
1555 static int ahd_ddb_paused;
1556 static int ahd_ddb_paused_on_entry;
1557 DB_COMMAND(ahd_set_unit, ahd_ddb_set_unit)
1558 {
1559 	struct ahd_softc *list_ahd;
1560 
1561 	ahd_ddb_softc = NULL;
1562 	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
1563 		if (list_ahd->unit == addr)
1564 			ahd_ddb_softc = list_ahd;
1565 	}
1566 	if (ahd_ddb_softc == NULL)
1567 		db_error("No matching softc found!\n");
1568 }
1569 
1570 DB_COMMAND(ahd_pause, ahd_ddb_pause)
1571 {
1572 	if (ahd_ddb_softc == NULL) {
1573 		db_error("Must set unit with ahd_set_unit first!\n");
1574 		return;
1575 	}
1576 	if (ahd_ddb_paused == 0) {
1577 		ahd_ddb_paused++;
1578 		if (ahd_is_paused(ahd_ddb_softc)) {
1579 			ahd_ddb_paused_on_entry++;
1580 			return;
1581 		}
1582 		ahd_pause(ahd_ddb_softc);
1583 	}
1584 }
1585 
1586 DB_COMMAND(ahd_unpause, ahd_ddb_unpause)
1587 {
1588 	if (ahd_ddb_softc == NULL) {
1589 		db_error("Must set unit with ahd_set_unit first!\n");
1590 		return;
1591 	}
1592 	if (ahd_ddb_paused != 0) {
1593 		ahd_ddb_paused = 0;
1594 		if (ahd_ddb_paused_on_entry)
1595 			return;
1596 		ahd_unpause(ahd_ddb_softc);
1597 	} else if (ahd_ddb_paused_on_entry != 0) {
1598 		/* Two unpauses to clear a paused on entry. */
1599 		ahd_ddb_paused_on_entry = 0;
1600 		ahd_unpause(ahd_ddb_softc);
1601 	}
1602 }
1603 
1604 DB_COMMAND(ahd_in, ahd_ddb_in)
1605 {
1606 	int c;
1607 	int size;
1608 
1609 	if (ahd_ddb_softc == NULL) {
1610 		db_error("Must set unit with ahd_set_unit first!\n");
1611 		return;
1612 	}
1613 	if (have_addr == 0)
1614 		return;
1615 
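	/* Parse the size modifier: b(yte), w(ord), or l(ong word). */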
1616 	size = 1;
1617 	while ((c = *modif++) != '\0') {
1618 		switch (c) {
1619 		case 'b':
1620 			size = 1;
1621 			break;
1622 		case 'w':
1623 			size = 2;
1624 			break;
1625 		case 'l':
1626 			size = 4;
1627 			break;
1628 		}
1629 	}
1630 
1631 	if (count <= 0)
1632 		count = 1;
1633 	while (--count >= 0) {
1634 		db_printf("%04lx (M)%x: \t", (u_long)addr,
1635 			  ahd_inb(ahd_ddb_softc, MODE_PTR));
1636 		switch (size) {
1637 		case 1:
1638 			db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr));
1639 			break;
1640 		case 2:
1641 			db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr));
1642 			break;
1643 		case 4:
1644 			db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr));
1645 			break;
1646 		}
1647 	}
1648 }
1649 
1650 DB_SET(ahd_out, ahd_ddb_out, db_cmd_set, CS_MORE, NULL)
1651 {
1652 	db_expr_t old_value;
1653 	db_expr_t new_value;
1654 	int	  size;
1655 
1656 	if (ahd_ddb_softc == NULL) {
1657 		db_error("Must set unit with ahd_set_unit first!\n");
1658 		return;
1659 	}
1660 
1661 	switch (modif[0]) {
1662 	case '\0':
1663 	case 'b':
1664 		size = 1;
1665 		break;
1666 	case 'h':
1667 		size = 2;
1668 		break;
1669 	case 'l':
1670 		size = 4;
1671 		break;
1672 	default:
1673 		db_error("Unknown size\n");
1674 		return;
1675 	}
1676 
1677 	while (db_expression(&new_value)) {
1678 		switch (size) {
1679 		default:
1680 		case 1:
1681 			old_value = ahd_inb(ahd_ddb_softc, addr);
1682 			ahd_outb(ahd_ddb_softc, addr, new_value);
1683 			break;
1684 		case 2:
1685 			old_value = ahd_inw(ahd_ddb_softc, addr);
1686 			ahd_outw(ahd_ddb_softc, addr, new_value);
1687 			break;
1688 		case 4:
1689 			old_value = ahd_inl(ahd_ddb_softc, addr);
1690 			ahd_outl(ahd_ddb_softc, addr, new_value);
1691 			break;
1692 		}
1693 		db_printf("%04lx (M)%x: \t0x%lx\t=\t0x%lx\n",
1694 			  (u_long)addr, ahd_inb(ahd_ddb_softc, MODE_PTR),
1695 			  (u_long)old_value, (u_long)new_value);
1696 		addr += size;
1697 	}
1698 	db_skip_to_eol();
1699 }
1700 
1701 #endif
1702 
1703 
1704 DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
1705 MODULE_DEPEND(ahd, cam, 1, 1, 1);
1706 MODULE_VERSION(ahd, 1);
1707