1 /*-
2  * Bus independent FreeBSD shim for the aic79xx based Adaptec SCSI controllers
3  *
4  * Copyright (c) 1994-2002, 2004 Justin T. Gibbs.
5  * Copyright (c) 2001-2002 Adaptec Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * Alternatively, this software may be distributed under the terms of the
18  * GNU Public License ("GPL").
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
24  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#35 $
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <dev/aic7xxx/aic79xx_osm.h>
39 #include <dev/aic7xxx/aic79xx_inline.h>
40 
41 #include <sys/kthread.h>
42 
43 #include "opt_ddb.h"
44 #ifdef DDB
45 #include <ddb/ddb.h>
46 #endif
47 
48 #ifndef AHD_TMODE_ENABLE
49 #define AHD_TMODE_ENABLE 0
50 #endif
51 
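/*
 * The bus-independent OSM support routines shared with the aic7xxx
 * driver are pulled in as source here rather than built separately.
 */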
52 #include <dev/aic7xxx/aic_osm_lib.c>
53 
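/*
 * Alias one of the CCB header's SIM-private pointer fields; ahd_action()
 * stores the SCB pointer there so the SCB for a given CCB can be found
 * again, e.g. when an abort is requested.
 */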
54 #define ccb_scb_ptr spriv_ptr0
55 
56 #if 0
57 static void	ahd_dump_targcmd(struct target_cmd *cmd);
58 #endif
59 static int	ahd_modevent(module_t mod, int type, void *data);
60 static void	ahd_action(struct cam_sim *sim, union ccb *ccb);
61 static void	ahd_set_tran_settings(struct ahd_softc *ahd,
62 				      int our_id, char channel,
63 				      struct ccb_trans_settings *cts);
64 static void	ahd_get_tran_settings(struct ahd_softc *ahd,
65 				      int our_id, char channel,
66 				      struct ccb_trans_settings *cts);
67 static void	ahd_async(void *callback_arg, uint32_t code,
68 			  struct cam_path *path, void *arg);
69 static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
70 				int nsegments, int error);
71 static void	ahd_poll(struct cam_sim *sim);
72 static void	ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
73 			       struct ccb_scsiio *csio, struct scb *scb);
74 static void	ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim,
75 			      union ccb *ccb);
76 static int	ahd_create_path(struct ahd_softc *ahd,
77 				char channel, u_int target, u_int lun,
78 				struct cam_path **path);
79 
80 static const char *ahd_sysctl_node_elements[] = {
81 	"root",
82 	"summary",
83 	"debug"
84 };
85 
86 #ifndef NO_SYSCTL_DESCR
87 static const char *ahd_sysctl_node_descriptions[] = {
88 	"root error collection for aic79xx controllers",
89 	"summary collection for aic79xx controllers",
90 	"debug collection for aic79xx controllers"
91 };
92 #endif
93 
94 static const char *ahd_sysctl_errors_elements[] = {
95 	"Cerrors",
96 	"Uerrors",
97 	"Ferrors"
98 };
99 
100 #ifndef NO_SYSCTL_DESCR
101 static const char *ahd_sysctl_errors_descriptions[] = {
102 	"Correctable errors",
103 	"Uncorrectable errors",
104 	"Fatal errors"
105 };
106 #endif
107 
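/*
 * Sysctl handler backing the writable per-error counters under the
 * "debug" node; arg2 selects which entry of summerr[] to overwrite.
 */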
108 static int
109 ahd_set_debugcounters(SYSCTL_HANDLER_ARGS)
110 {
111 	struct ahd_softc *sc;
112 	int error, tmpv;
113 
114 	tmpv = 0;
115 	sc = arg1;
116 	error = sysctl_handle_int(oidp, &tmpv, 0, req);
117 	if (error != 0 || req->newptr == NULL)
118 		return (error);
119 	if (tmpv < 0 || tmpv >= AHD_ERRORS_NUMBER)
120 		return (EINVAL);
121 	sc->summerr[arg2] = tmpv;
122 	return (0);
123 }
124 
125 static int
126 ahd_clear_allcounters(SYSCTL_HANDLER_ARGS)
127 {
128 	struct ahd_softc *sc;
129 	int error, tmpv;
130 
131 	tmpv = 0;
132 	sc = arg1;
133 	error = sysctl_handle_int(oidp, &tmpv, 0, req);
134 	if (error != 0 || req->newptr == NULL)
135 		return (error);
136 	if (tmpv != 0)
137 		bzero(sc->summerr, sizeof(sc->summerr));
138 	return (0);
139 }
140 
141 static int
142 ahd_create_path(struct ahd_softc *ahd, char channel, u_int target,
143 	        u_int lun, struct cam_path **path)
144 {
145 	path_id_t path_id;
146 
147 	path_id = cam_sim_path(ahd->platform_data->sim);
148 	return (xpt_create_path(path, /*periph*/NULL,
149 				path_id, target, lun));
150 }
151 
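/*
 * Build the per-controller sysctl tree: a hw.<nameunit> root node with
 * "summary" and "debug" children.  The root carries a "clear" handler
 * that zeroes every counter, "summary" exports the correctable,
 * uncorrectable and fatal error counts read-only, and "debug" exposes
 * writable handlers for the same counters.
 */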
152 void
153 ahd_sysctl(struct ahd_softc *ahd)
154 {
155 	u_int i;
156 
157 	for (i = 0; i < AHD_SYSCTL_NUMBER; i++)
158 		sysctl_ctx_init(&ahd->sysctl_ctx[i]);
159 
160 	ahd->sysctl_tree[AHD_SYSCTL_ROOT] =
161 	    SYSCTL_ADD_NODE(&ahd->sysctl_ctx[AHD_SYSCTL_ROOT],
162 			    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
163 			    device_get_nameunit(ahd->dev_softc), CTLFLAG_RD, 0,
164 			    ahd_sysctl_node_descriptions[AHD_SYSCTL_ROOT]);
165 	    SYSCTL_ADD_PROC(&ahd->sysctl_ctx[AHD_SYSCTL_ROOT],
166 			    SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_ROOT]),
167 			    OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW, ahd,
168 			    0, ahd_clear_allcounters, "IU",
169 			    "Clear all counters");
170 
171 	for (i = AHD_SYSCTL_SUMMARY; i < AHD_SYSCTL_NUMBER; i++)
172 		ahd->sysctl_tree[i] =
173 		    SYSCTL_ADD_NODE(&ahd->sysctl_ctx[i],
174 				    SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_ROOT]),
175 				    OID_AUTO, ahd_sysctl_node_elements[i],
176 				    CTLFLAG_RD, 0,
177 				    ahd_sysctl_node_descriptions[i]);
178 
179 	for (i = AHD_ERRORS_CORRECTABLE; i < AHD_ERRORS_NUMBER; i++) {
180 		SYSCTL_ADD_UINT(&ahd->sysctl_ctx[AHD_SYSCTL_SUMMARY],
181 				SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_SUMMARY]),
182 				OID_AUTO, ahd_sysctl_errors_elements[i],
183 				CTLFLAG_RD, &ahd->summerr[i], i,
184 				ahd_sysctl_errors_descriptions[i]);
185 		SYSCTL_ADD_PROC(&ahd->sysctl_ctx[AHD_SYSCTL_DEBUG],
186 				SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_DEBUG]),
187 				OID_AUTO, ahd_sysctl_errors_elements[i],
188 				CTLFLAG_RW | CTLTYPE_UINT, ahd, i,
189 				ahd_set_debugcounters, "IU",
190 				ahd_sysctl_errors_descriptions[i]);
191 	}
192 }
193 
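/*
 * Wire the previously allocated IRQ resource to ahd_platform_intr() as
 * an MPSAFE CAM interrupt handler.
 */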
194 int
195 ahd_map_int(struct ahd_softc *ahd)
196 {
197 	int error;
198 
199 	/* Hook up our interrupt handler */
200 	error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq,
201 			       INTR_TYPE_CAM|INTR_MPSAFE, NULL,
202 			       ahd_platform_intr, ahd, &ahd->platform_data->ih);
203 	if (error != 0)
204 		device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n",
205 			      error);
206 	return (error);
207 }
208 
209 /*
210  * Attach all the sub-devices we can find
211  */
212 int
213 ahd_attach(struct ahd_softc *ahd)
214 {
215 	char   ahd_info[256];
216 	struct ccb_setasync csa;
217 	struct cam_devq *devq;
218 	struct cam_sim *sim;
219 	struct cam_path *path;
220 	int count;
221 
222 	count = 0;
223 	devq = NULL;
224 	sim = NULL;
225 	path = NULL;
226 
227 	/*
228 	 * Create a thread to perform all recovery.
229 	 */
230 	if (ahd_spawn_recovery_thread(ahd) != 0)
231 		goto fail;
232 
233 	ahd_controller_info(ahd, ahd_info);
234 	printf("%s\n", ahd_info);
235 	ahd_lock(ahd);
236 
237 	/*
238 	 * Create the device queue for our SIM(s).
239 	 */
240 	devq = cam_simq_alloc(AHD_MAX_QUEUE);
241 	if (devq == NULL)
242 		goto fail;
243 
244 	/*
245 	 * Construct our SIM entry
246 	 */
247 	sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd,
248 			    device_get_unit(ahd->dev_softc),
249 			    &ahd->platform_data->mtx, 1, /*XXX*/256, devq);
250 	if (sim == NULL) {
251 		cam_simq_free(devq);
252 		goto fail;
253 	}
254 
255 	if (xpt_bus_register(sim, ahd->dev_softc, /*bus_id*/0) != CAM_SUCCESS) {
256 		cam_sim_free(sim, /*free_devq*/TRUE);
257 		sim = NULL;
258 		goto fail;
259 	}
260 
261 	if (xpt_create_path(&path, /*periph*/NULL,
262 			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
263 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
264 		xpt_bus_deregister(cam_sim_path(sim));
265 		cam_sim_free(sim, /*free_devq*/TRUE);
266 		sim = NULL;
267 		goto fail;
268 	}
269 
270 	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
271 	csa.ccb_h.func_code = XPT_SASYNC_CB;
272 	csa.event_enable = AC_LOST_DEVICE;
273 	csa.callback = ahd_async;
274 	csa.callback_arg = sim;
275 	xpt_action((union ccb *)&csa);
276 	count++;
277 
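	/*
	 * Common exit path: record whatever SIM and path were set up.  count
	 * is non-zero only when bus registration fully succeeded; in that
	 * case register the shutdown handler, enable interrupts, and report
	 * one attached bus to the caller.
	 */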
278 fail:
279 	ahd->platform_data->sim = sim;
280 	ahd->platform_data->path = path;
281 	ahd_unlock(ahd);
282 	if (count != 0) {
283 		/* We have to wait until after any system dumps... */
284 		ahd->platform_data->eh =
285 		    EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown,
286 					  ahd, SHUTDOWN_PRI_DEFAULT);
287 		ahd_intr_enable(ahd, TRUE);
288 	}
289 
290 
291 	return (count);
292 }
293 
294 /*
295  * Catch an interrupt from the adapter
296  */
297 void
298 ahd_platform_intr(void *arg)
299 {
300 	struct	ahd_softc *ahd;
301 
302 	ahd = (struct ahd_softc *)arg;
303 	ahd_lock(ahd);
304 	ahd_intr(ahd);
305 	ahd_unlock(ahd);
306 }
307 
308 /*
309  * We have an scb which has been processed by the
310  * adapter; now we look to see how the operation
311  * went.
312  */
313 void
314 ahd_done(struct ahd_softc *ahd, struct scb *scb)
315 {
316 	union ccb *ccb;
317 
318 	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
319 		  ("ahd_done - scb %d\n", SCB_GET_TAG(scb)));
320 
321 	ccb = scb->io_ctx;
322 	LIST_REMOVE(scb, pending_links);
323 	if ((scb->flags & SCB_TIMEDOUT) != 0)
324 		LIST_REMOVE(scb, timedout_links);
325 
326 	callout_stop(&scb->io_timer);
327 
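	/*
	 * If the command moved data, synchronize and unload its DMA map
	 * before the buffer is handed back to CAM.
	 */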
328 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
329 		bus_dmasync_op_t op;
330 
331 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
332 			op = BUS_DMASYNC_POSTREAD;
333 		else
334 			op = BUS_DMASYNC_POSTWRITE;
335 		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
336 		bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
337 	}
338 
339 #ifdef AHD_TARGET_MODE
340 	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
341 		struct cam_path *ccb_path;
342 
343 		/*
344 		 * If we have finally disconnected, clean up our
345 		 * pending device state.
346 		 * XXX - There may be error states in which we will
347 		 *       remain connected.
348 		 */
349 		ccb_path = ccb->ccb_h.path;
350 		if (ahd->pending_device != NULL
351 		 && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) {
352 
353 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
354 				ahd->pending_device = NULL;
355 			} else {
356 				xpt_print_path(ccb->ccb_h.path);
357 				printf("Still disconnected\n");
358 				ahd_freeze_ccb(ccb);
359 			}
360 		}
361 
362 		if (aic_get_transaction_status(scb) == CAM_REQ_INPROG)
363 			ccb->ccb_h.status |= CAM_REQ_CMP;
364 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
365 		ahd_free_scb(ahd, scb);
366 		xpt_done(ccb);
367 		return;
368 	}
369 #endif
370 
371 	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
372 		struct	scb *list_scb;
373 
374 		ahd->scb_data.recovery_scbs--;
375 
376 		if (aic_get_transaction_status(scb) == CAM_BDR_SENT
377 		 || aic_get_transaction_status(scb) == CAM_REQ_ABORTED)
378 			aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);
379 
380 		if (ahd->scb_data.recovery_scbs == 0) {
381 			/*
382 			 * All recovery actions have completed successfully,
383 			 * so reinstate the timeouts for all other pending
384 			 * commands.
385 			 */
386 			LIST_FOREACH(list_scb,
387 				     &ahd->pending_scbs, pending_links) {
388 
389 				aic_scb_timer_reset(list_scb,
390 						    aic_get_timeout(scb));
391 			}
392 
393 			ahd_print_path(ahd, scb);
394 			printf("no longer in timeout, status = %x\n",
395 			       ccb->ccb_h.status);
396 		}
397 	}
398 
399 	/* Don't clobber any existing error state */
400 	if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) {
401 		ccb->ccb_h.status |= CAM_REQ_CMP;
402 	} else if ((scb->flags & SCB_SENSE) != 0) {
403 		/*
404 		 * We performed autosense retrieval.
405 		 *
406 		 * Zero any sense not transferred by the
407 		 * device.  The SCSI spec mandates that any
408 		 * untransferred data should be assumed to be
409 		 * zero.  Complete the 'bounce' of sense information
410 		 * through buffers accessible via bus-space by
411 		 * copying it into the client's csio.
412 		 */
413 		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
414 		memcpy(&ccb->csio.sense_data,
415 		       ahd_get_sense_buf(ahd, scb),
416 /* XXX What size do we want to use??? */
417 			sizeof(ccb->csio.sense_data)
418 		       - ccb->csio.sense_resid);
419 		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
420 	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
421 		struct scsi_status_iu_header *siu;
422 		u_int sense_len;
423 
424 		/*
425 		 * Copy only the sense data into the provided buffer.
426 		 */
427 		siu = (struct scsi_status_iu_header *)scb->sense_data;
428 		sense_len = MIN(scsi_4btoul(siu->sense_length),
429 				sizeof(ccb->csio.sense_data));
430 		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
431 		memcpy(&ccb->csio.sense_data,
432 		       ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu),
433 		       sense_len);
434 #ifdef AHD_DEBUG
435 		if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
436 			uint8_t *sense_data = (uint8_t *)&ccb->csio.sense_data;
437 			u_int i;
438 
439 			printf("Copied %d bytes of sense data offset %d:",
440 			       sense_len, SIU_SENSE_OFFSET(siu));
441 			for (i = 0; i < sense_len; i++)
442 				printf(" 0x%x", *sense_data++);
443 			printf("\n");
444 		}
445 #endif
446 		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
447 	}
448 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
449 	ahd_free_scb(ahd, scb);
450 	xpt_done(ccb);
451 }
452 
453 static void
454 ahd_action(struct cam_sim *sim, union ccb *ccb)
455 {
456 	struct	ahd_softc *ahd;
457 #ifdef AHD_TARGET_MODE
458 	struct	ahd_tmode_lstate *lstate;
459 #endif
460 	u_int	target_id;
461 	u_int	our_id;
462 
463 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n"));
464 
465 	ahd = (struct ahd_softc *)cam_sim_softc(sim);
466 
467 	target_id = ccb->ccb_h.target_id;
468 	our_id = SIM_SCSI_ID(ahd, sim);
469 
470 	switch (ccb->ccb_h.func_code) {
471 	/* Common cases first */
472 #ifdef AHD_TARGET_MODE
473 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
474 	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
475 	{
476 		struct	   ahd_tmode_tstate *tstate;
477 		cam_status status;
478 
479 		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
480 					     &lstate, TRUE);
481 
482 		if (status != CAM_REQ_CMP) {
483 			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
484 				/* Response from the black hole device */
485 				tstate = NULL;
486 				lstate = ahd->black_hole;
487 			} else {
488 				ccb->ccb_h.status = status;
489 				xpt_done(ccb);
490 				break;
491 			}
492 		}
493 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
494 
495 			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
496 					  sim_links.sle);
497 			ccb->ccb_h.status = CAM_REQ_INPROG;
498 			if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0)
499 				ahd_run_tqinfifo(ahd, /*paused*/FALSE);
500 			break;
501 		}
502 
503 		/*
504 		 * The target_id represents the target we attempt to
505 		 * select.  In target mode, this is the initiator of
506 		 * the original command.
507 		 */
508 		our_id = target_id;
509 		target_id = ccb->csio.init_id;
510 		/* FALLTHROUGH */
511 	}
512 #endif
513 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
514 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
515 	{
516 		struct	scb *scb;
517 		struct	hardware_scb *hscb;
518 		struct	ahd_initiator_tinfo *tinfo;
519 		struct	ahd_tmode_tstate *tstate;
520 		u_int	col_idx;
521 
522 		if ((ahd->flags & AHD_INITIATORROLE) == 0
523 		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
524 		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
525 			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
526 			xpt_done(ccb);
527 			return;
528 		}
529 
530 		/*
531 		 * get an scb to use.
532 		 */
533 		tinfo = ahd_fetch_transinfo(ahd, 'A', our_id,
534 					    target_id, &tstate);
535 		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
536 		 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0
537 		 || ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
538 			col_idx = AHD_NEVER_COL_IDX;
539 		} else {
540 			col_idx = AHD_BUILD_COL_IDX(target_id,
541 						    ccb->ccb_h.target_lun);
542 		}
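		/*
		 * No free SCBs: freeze our SIM queue so CAM requeues the
		 * request, and note the shortage so the queue can be
		 * released once an SCB is returned to the pool.
		 */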
543 		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
544 
545 			xpt_freeze_simq(sim, /*count*/1);
546 			ahd->flags |= AHD_RESOURCE_SHORTAGE;
547 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
548 			xpt_done(ccb);
549 			return;
550 		}
551 
552 		hscb = scb->hscb;
553 
554 		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
555 			  ("start scb(%p)\n", scb));
556 		scb->io_ctx = ccb;
557 		/*
558 		 * So we can find the SCB when an abort is requested
559 		 */
560 		ccb->ccb_h.ccb_scb_ptr = scb;
561 
562 		/*
563 		 * Put all the arguments for the xfer in the scb
564 		 */
565 		hscb->control = 0;
566 		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
567 		hscb->lun = ccb->ccb_h.target_lun;
568 		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
569 			hscb->cdb_len = 0;
570 			scb->flags |= SCB_DEVICE_RESET;
571 			hscb->control |= MK_MESSAGE;
572 			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
573 			ahd_execute_scb(scb, NULL, 0, 0);
574 		} else {
575 #ifdef AHD_TARGET_MODE
576 			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
577 				struct target_data *tdata;
578 
579 				tdata = &hscb->shared_data.tdata;
580 				if (ahd->pending_device == lstate)
581 					scb->flags |= SCB_TARGET_IMMEDIATE;
582 				hscb->control |= TARGET_SCB;
583 				tdata->target_phases = 0;
584 				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
585 					tdata->target_phases |= SPHASE_PENDING;
586 					tdata->scsi_status =
587 					    ccb->csio.scsi_status;
588 				}
589 	 			if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
590 					tdata->target_phases |= NO_DISCONNECT;
591 
592 				tdata->initiator_tag =
593 				    ahd_htole16(ccb->csio.tag_id);
594 			}
595 #endif
596 			hscb->task_management = 0;
597 			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
598 				hscb->control |= ccb->csio.tag_action;
599 
600 			ahd_setup_data(ahd, sim, &ccb->csio, scb);
601 		}
602 		break;
603 	}
604 #ifdef AHD_TARGET_MODE
605 	case XPT_NOTIFY_ACKNOWLEDGE:
606 	case XPT_IMMEDIATE_NOTIFY:
607 	{
608 		struct	   ahd_tmode_tstate *tstate;
609 		struct	   ahd_tmode_lstate *lstate;
610 		cam_status status;
611 
612 		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
613 					     &lstate, TRUE);
614 
615 		if (status != CAM_REQ_CMP) {
616 			ccb->ccb_h.status = status;
617 			xpt_done(ccb);
618 			break;
619 		}
620 		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
621 				  sim_links.sle);
622 		ccb->ccb_h.status = CAM_REQ_INPROG;
623 		ahd_send_lstate_events(ahd, lstate);
624 		break;
625 	}
626 	case XPT_EN_LUN:		/* Enable LUN as a target */
627 		ahd_handle_en_lun(ahd, sim, ccb);
628 		xpt_done(ccb);
629 		break;
630 #endif
631 	case XPT_ABORT:			/* Abort the specified CCB */
632 	{
633 		ahd_abort_ccb(ahd, sim, ccb);
634 		break;
635 	}
636 	case XPT_SET_TRAN_SETTINGS:
637 	{
638 		ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
639 				      SIM_CHANNEL(ahd, sim), &ccb->cts);
640 		xpt_done(ccb);
641 		break;
642 	}
643 	case XPT_GET_TRAN_SETTINGS:
644 	/* Get default/user set transfer settings for the target */
645 	{
646 		ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
647 				      SIM_CHANNEL(ahd, sim), &ccb->cts);
648 		xpt_done(ccb);
649 		break;
650 	}
651 	case XPT_CALC_GEOMETRY:
652 	{
653 		aic_calc_geometry(&ccb->ccg, ahd->flags & AHD_EXTENDED_TRANS_A);
654 		xpt_done(ccb);
655 		break;
656 	}
657 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
658 	{
659 		int  found;
660 
661 		found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
662 					  /*initiate reset*/TRUE);
663 		if (bootverbose) {
664 			xpt_print_path(SIM_PATH(ahd, sim));
665 			printf("SCSI bus reset delivered. "
666 			       "%d SCBs aborted.\n", found);
667 		}
668 		ccb->ccb_h.status = CAM_REQ_CMP;
669 		xpt_done(ccb);
670 		break;
671 	}
672 	case XPT_TERM_IO:		/* Terminate the I/O process */
673 		/* XXX Implement */
674 		ccb->ccb_h.status = CAM_REQ_INVALID;
675 		xpt_done(ccb);
676 		break;
677 	case XPT_PATH_INQ:		/* Path routing inquiry */
678 	{
679 		struct ccb_pathinq *cpi = &ccb->cpi;
680 
681 		cpi->version_num = 1; /* XXX??? */
682 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
683 		if ((ahd->features & AHD_WIDE) != 0)
684 			cpi->hba_inquiry |= PI_WIDE_16;
685 		if ((ahd->features & AHD_TARGETMODE) != 0) {
686 			cpi->target_sprt = PIT_PROCESSOR
687 					 | PIT_DISCONNECT
688 					 | PIT_TERM_IO;
689 		} else {
690 			cpi->target_sprt = 0;
691 		}
692 		cpi->hba_misc = 0;
693 		cpi->hba_eng_cnt = 0;
694 		cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7;
695 		cpi->max_lun = AHD_NUM_LUNS_NONPKT - 1;
696 		cpi->initiator_id = ahd->our_id;
697 		if ((ahd->flags & AHD_RESET_BUS_A) == 0) {
698 			cpi->hba_misc |= PIM_NOBUSRESET;
699 		}
700 		cpi->bus_id = cam_sim_bus(sim);
701 		cpi->base_transfer_speed = 3300;
702 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
703 		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
704 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
705 		cpi->unit_number = cam_sim_unit(sim);
706 		cpi->protocol = PROTO_SCSI;
707 		cpi->protocol_version = SCSI_REV_2;
708 		cpi->transport = XPORT_SPI;
709 		cpi->transport_version = 4;
710 		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST
711 						    | SID_SPI_IUS
712 						    | SID_SPI_QAS;
713 		cpi->ccb_h.status = CAM_REQ_CMP;
714 		xpt_done(ccb);
715 		break;
716 	}
717 	default:
718 		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
719 		xpt_done(ccb);
720 		break;
721 	}
722 }
723 
724 
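/*
 * Handle XPT_SET_TRAN_SETTINGS: fold the request into the negotiation
 * goal (for CTS_TYPE_CURRENT_SETTINGS) or the user defaults (for
 * CTS_TYPE_USER_SETTINGS), then validate and program bus width, sync
 * rate/offset and PPR options through the core negotiation routines.
 */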
725 static void
726 ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
727 		      struct ccb_trans_settings *cts)
728 {
729 	struct	  ahd_devinfo devinfo;
730 	struct	  ccb_trans_settings_scsi *scsi;
731 	struct	  ccb_trans_settings_spi *spi;
732 	struct	  ahd_initiator_tinfo *tinfo;
733 	struct	  ahd_tmode_tstate *tstate;
734 	uint16_t *discenable;
735 	uint16_t *tagenable;
736 	u_int	  update_type;
737 
738 	scsi = &cts->proto_specific.scsi;
739 	spi = &cts->xport_specific.spi;
740 	ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
741 			    cts->ccb_h.target_id,
742 			    cts->ccb_h.target_lun,
743 			    SIM_CHANNEL(ahd, sim),
744 			    ROLE_UNKNOWN);
745 	tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
746 				    devinfo.our_scsiid,
747 				    devinfo.target, &tstate);
748 	update_type = 0;
749 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
750 		update_type |= AHD_TRANS_GOAL;
751 		discenable = &tstate->discenable;
752 		tagenable = &tstate->tagenable;
753 		tinfo->curr.protocol_version = cts->protocol_version;
754 		tinfo->curr.transport_version = cts->transport_version;
755 		tinfo->goal.protocol_version = cts->protocol_version;
756 		tinfo->goal.transport_version = cts->transport_version;
757 	} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
758 		update_type |= AHD_TRANS_USER;
759 		discenable = &ahd->user_discenable;
760 		tagenable = &ahd->user_tagenable;
761 		tinfo->user.protocol_version = cts->protocol_version;
762 		tinfo->user.transport_version = cts->transport_version;
763 	} else {
764 		cts->ccb_h.status = CAM_REQ_INVALID;
765 		return;
766 	}
767 
768 	if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
769 		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
770 			*discenable |= devinfo.target_mask;
771 		else
772 			*discenable &= ~devinfo.target_mask;
773 	}
774 
775 	if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
776 		if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
777 			*tagenable |= devinfo.target_mask;
778 		else
779 			*tagenable &= ~devinfo.target_mask;
780 	}
781 
782 	if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
783 		ahd_validate_width(ahd, /*tinfo limit*/NULL,
784 				   &spi->bus_width, ROLE_UNKNOWN);
785 		ahd_set_width(ahd, &devinfo, spi->bus_width,
786 			      update_type, /*paused*/FALSE);
787 	}
788 
789 	if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
790 		if (update_type == AHD_TRANS_USER)
791 			spi->ppr_options = tinfo->user.ppr_options;
792 		else
793 			spi->ppr_options = tinfo->goal.ppr_options;
794 	}
795 
796 	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
797 		if (update_type == AHD_TRANS_USER)
798 			spi->sync_offset = tinfo->user.offset;
799 		else
800 			spi->sync_offset = tinfo->goal.offset;
801 	}
802 
803 	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
804 		if (update_type == AHD_TRANS_USER)
805 			spi->sync_period = tinfo->user.period;
806 		else
807 			spi->sync_period = tinfo->goal.period;
808 	}
809 
810 	if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
811 	 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
812 		u_int	maxsync;
813 
814 		maxsync = AHD_SYNCRATE_MAX;
815 
816 		if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
817 			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
818 
819 		if ((*discenable & devinfo.target_mask) == 0)
820 			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
821 
822 		ahd_find_syncrate(ahd, &spi->sync_period,
823 				  &spi->ppr_options, maxsync);
824 		ahd_validate_offset(ahd, /*tinfo limit*/NULL,
825 				    spi->sync_period, &spi->sync_offset,
826 				    spi->bus_width, ROLE_UNKNOWN);
827 
828 		/* We use a period of 0 to represent async */
829 		if (spi->sync_offset == 0) {
830 			spi->sync_period = 0;
831 			spi->ppr_options = 0;
832 		}
833 
834 		ahd_set_syncrate(ahd, &devinfo, spi->sync_period,
835 				 spi->sync_offset, spi->ppr_options,
836 				 update_type, /*paused*/FALSE);
837 	}
838 	cts->ccb_h.status = CAM_REQ_CMP;
839 }
840 
841 static void
842 ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
843 		      struct ccb_trans_settings *cts)
844 {
845 	struct	ahd_devinfo devinfo;
846 	struct	ccb_trans_settings_scsi *scsi;
847 	struct	ccb_trans_settings_spi *spi;
848 	struct	ahd_initiator_tinfo *targ_info;
849 	struct	ahd_tmode_tstate *tstate;
850 	struct	ahd_transinfo *tinfo;
851 
852 	scsi = &cts->proto_specific.scsi;
853 	spi = &cts->xport_specific.spi;
854 	ahd_compile_devinfo(&devinfo, our_id,
855 			    cts->ccb_h.target_id,
856 			    cts->ccb_h.target_lun,
857 			    channel, ROLE_UNKNOWN);
858 	targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
859 					devinfo.our_scsiid,
860 					devinfo.target, &tstate);
861 
862 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
863 		tinfo = &targ_info->curr;
864 	else
865 		tinfo = &targ_info->user;
866 
867 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
868 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
869 	if (cts->type == CTS_TYPE_USER_SETTINGS) {
870 		if ((ahd->user_discenable & devinfo.target_mask) != 0)
871 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
872 
873 		if ((ahd->user_tagenable & devinfo.target_mask) != 0)
874 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
875 	} else {
876 		if ((tstate->discenable & devinfo.target_mask) != 0)
877 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
878 
879 		if ((tstate->tagenable & devinfo.target_mask) != 0)
880 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
881 	}
882 	cts->protocol_version = tinfo->protocol_version;
883 	cts->transport_version = tinfo->transport_version;
884 
885 	spi->sync_period = tinfo->period;
886 	spi->sync_offset = tinfo->offset;
887 	spi->bus_width = tinfo->width;
888 	spi->ppr_options = tinfo->ppr_options;
889 
890 	cts->protocol = PROTO_SCSI;
891 	cts->transport = XPORT_SPI;
892 	spi->valid = CTS_SPI_VALID_SYNC_RATE
893 		   | CTS_SPI_VALID_SYNC_OFFSET
894 		   | CTS_SPI_VALID_BUS_WIDTH
895 		   | CTS_SPI_VALID_PPR_OPTIONS;
896 
897 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
898 		scsi->valid = CTS_SCSI_VALID_TQ;
899 		spi->valid |= CTS_SPI_VALID_DISC;
900 	} else {
901 		scsi->valid = 0;
902 	}
903 
904 	cts->ccb_h.status = CAM_REQ_CMP;
905 }
906 
907 static void
908 ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
909 {
910 	struct ahd_softc *ahd;
911 	struct cam_sim *sim;
912 
913 	sim = (struct cam_sim *)callback_arg;
914 	ahd = (struct ahd_softc *)cam_sim_softc(sim);
915 	switch (code) {
916 	case AC_LOST_DEVICE:
917 	{
918 		struct	ahd_devinfo devinfo;
919 
920 		ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
921 				    xpt_path_target_id(path),
922 				    xpt_path_lun_id(path),
923 				    SIM_CHANNEL(ahd, sim),
924 				    ROLE_UNKNOWN);
925 
926 		/*
927 		 * Revert to async/narrow transfers
928 		 * for the next device.
929 		 */
930 		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
931 			      AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE);
932 		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
933 				 /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR,
934 				 /*paused*/FALSE);
935 		break;
936 	}
937 	default:
938 		break;
939 	}
940 }
941 
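/*
 * Callback for bus_dmamap_load() (also called directly for transfers
 * without data or with pre-built segment lists): build the hardware S/G
 * list, apply disconnect/packetized/negotiation settings to the SCB,
 * start its timeout, and queue it to the controller.
 */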
942 static void
943 ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
944 		int error)
945 {
946 	struct	scb *scb;
947 	union	ccb *ccb;
948 	struct	ahd_softc *ahd;
949 	struct	ahd_initiator_tinfo *tinfo;
950 	struct	ahd_tmode_tstate *tstate;
951 	u_int	mask;
952 
953 	scb = (struct scb *)arg;
954 	ccb = scb->io_ctx;
955 	ahd = scb->ahd_softc;
956 
957 	if (error != 0) {
958 		if (error == EFBIG)
959 			aic_set_transaction_status(scb, CAM_REQ_TOO_BIG);
960 		else
961 			aic_set_transaction_status(scb, CAM_REQ_CMP_ERR);
962 		if (nsegments != 0)
963 			bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
964 		ahd_free_scb(ahd, scb);
965 		xpt_done(ccb);
966 		return;
967 	}
968 	scb->sg_count = 0;
969 	if (nsegments != 0) {
970 		void *sg;
971 		bus_dmasync_op_t op;
972 		u_int i;
973 
974 		/* Copy the segments into our SG list */
975 		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {
976 
977 			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
978 					  dm_segs->ds_len,
979 					  /*last*/i == 1);
980 			dm_segs++;
981 		}
982 
983 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
984 			op = BUS_DMASYNC_PREREAD;
985 		else
986 			op = BUS_DMASYNC_PREWRITE;
987 
988 		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
989 
990 		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
991 			struct target_data *tdata;
992 
993 			tdata = &scb->hscb->shared_data.tdata;
994 			tdata->target_phases |= DPHASE_PENDING;
995 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
996 				tdata->data_phase = P_DATAOUT;
997 			else
998 				tdata->data_phase = P_DATAIN;
999 		}
1000 	}
1001 
1002 	/*
1003 	 * Last time we need to check if this SCB needs to
1004 	 * be aborted.
1005 	 */
1006 	if (aic_get_transaction_status(scb) != CAM_REQ_INPROG) {
1007 		if (nsegments != 0)
1008 			bus_dmamap_unload(ahd->buffer_dmat,
1009 					  scb->dmamap);
1010 		ahd_free_scb(ahd, scb);
1011 		xpt_done(ccb);
1012 		return;
1013 	}
1014 
1015 	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
1016 				    SCSIID_OUR_ID(scb->hscb->scsiid),
1017 				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
1018 				    &tstate);
1019 
1020 	mask = SCB_GET_TARGET_MASK(ahd, scb);
1021 
1022 	if ((tstate->discenable & mask) != 0
1023 	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1024 		scb->hscb->control |= DISCENB;
1025 
1026 	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
1027 		scb->flags |= SCB_PACKETIZED;
1028 		if (scb->hscb->task_management != 0)
1029 			scb->hscb->control &= ~MK_MESSAGE;
1030 	}
1031 
1032 	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
1033 	 && (tinfo->goal.width != 0
1034 	  || tinfo->goal.period != 0
1035 	  || tinfo->goal.ppr_options != 0)) {
1036 		scb->flags |= SCB_NEGOTIATE;
1037 		scb->hscb->control |= MK_MESSAGE;
1038 	} else if ((tstate->auto_negotiate & mask) != 0) {
1039 		scb->flags |= SCB_AUTO_NEGOTIATE;
1040 		scb->hscb->control |= MK_MESSAGE;
1041 	}
1042 
1043 	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1044 
1045 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1046 
1047 	aic_scb_timer_start(scb);
1048 
1049 	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
1050 		/* Define a mapping from our tag to the SCB. */
1051 		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
1052 		ahd_pause(ahd);
1053 		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
1054 		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
1055 		ahd_unpause(ahd);
1056 	} else {
1057 		ahd_queue_scb(ahd, scb);
1058 	}
1059 
1060 }
1061 
1062 static void
1063 ahd_poll(struct cam_sim *sim)
1064 {
1065 	ahd_intr(cam_sim_softc(sim));
1066 }
1067 
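/*
 * Copy the CDB into the hardware SCB (inline, or by physical pointer
 * when CAM_CDB_PHYS is set) and map the data buffer, arranging for
 * ahd_execute_scb() to run once the S/G segments are available.
 */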
1068 static void
1069 ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
1070 	       struct ccb_scsiio *csio, struct scb *scb)
1071 {
1072 	struct hardware_scb *hscb;
1073 	struct ccb_hdr *ccb_h;
1074 
1075 	hscb = scb->hscb;
1076 	ccb_h = &csio->ccb_h;
1077 
1078 	csio->resid = 0;
1079 	csio->sense_resid = 0;
1080 	if (ccb_h->func_code == XPT_SCSI_IO) {
1081 		hscb->cdb_len = csio->cdb_len;
1082 		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
1083 
1084 			if (hscb->cdb_len > MAX_CDB_LEN
1085 			 && (ccb_h->flags & CAM_CDB_PHYS) == 0) {
1086 
1087 				/*
1088 				 * Should CAM start to support CDB sizes
1089 				 * greater than 16 bytes, we could use
1090 				 * the sense buffer to store the CDB.
1091 				 */
1092 				aic_set_transaction_status(scb,
1093 							   CAM_REQ_INVALID);
1094 				ahd_free_scb(ahd, scb);
1095 				xpt_done((union ccb *)csio);
1096 				return;
1097 			}
1098 			if ((ccb_h->flags & CAM_CDB_PHYS) != 0) {
1099 				hscb->shared_data.idata.cdb_from_host.cdbptr =
1100 				   aic_htole64((uintptr_t)csio->cdb_io.cdb_ptr);
1101 				hscb->shared_data.idata.cdb_from_host.cdblen =
1102 				   csio->cdb_len;
1103 				hscb->cdb_len |= SCB_CDB_LEN_PTR;
1104 			} else {
1105 				memcpy(hscb->shared_data.idata.cdb,
1106 				       csio->cdb_io.cdb_ptr,
1107 				       hscb->cdb_len);
1108 			}
1109 		} else {
1110 			if (hscb->cdb_len > MAX_CDB_LEN) {
1111 
1112 				aic_set_transaction_status(scb,
1113 							   CAM_REQ_INVALID);
1114 				ahd_free_scb(ahd, scb);
1115 				xpt_done((union ccb *)csio);
1116 				return;
1117 			}
1118 			memcpy(hscb->shared_data.idata.cdb,
1119 			       csio->cdb_io.cdb_bytes, hscb->cdb_len);
1120 		}
1121 	}
1122 
1123 	/* Only use S/G if there is a transfer */
1124 	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1125 		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1126 			/* We've been given a pointer to a single buffer */
1127 			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1128 				int s;
1129 				int error;
1130 
1131 				s = splsoftvm();
1132 				error = bus_dmamap_load(ahd->buffer_dmat,
1133 							scb->dmamap,
1134 							csio->data_ptr,
1135 							csio->dxfer_len,
1136 							ahd_execute_scb,
1137 							scb, /*flags*/0);
1138 				if (error == EINPROGRESS) {
1139 					/*
1140 					 * So as to maintain ordering,
1141 					 * freeze the controller queue
1142 					 * until our mapping is
1143 					 * returned.
1144 					 */
1145 					xpt_freeze_simq(sim,
1146 							/*count*/1);
1147 					scb->io_ctx->ccb_h.status |=
1148 					    CAM_RELEASE_SIMQ;
1149 				}
1150 				splx(s);
1151 			} else {
1152 				struct bus_dma_segment seg;
1153 
1154 				/* Pointer to physical buffer */
1155 				if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE)
1156 					panic("ahd_setup_data - Transfer size "
1157 					      "larger than the controller max");
1158 
1159 				seg.ds_addr =
1160 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1161 				seg.ds_len = csio->dxfer_len;
1162 				ahd_execute_scb(scb, &seg, 1, 0);
1163 			}
1164 		} else {
1165 			struct bus_dma_segment *segs;
1166 
1167 			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
1168 				panic("ahd_setup_data - Physical segment "
1169 				      "pointers unsupported");
1170 
1171 			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
1172 				panic("ahd_setup_data - Virtual segment "
1173 				      "addresses unsupported");
1174 
1175 			/* Just use the segments provided */
1176 			segs = (struct bus_dma_segment *)csio->data_ptr;
1177 			ahd_execute_scb(scb, segs, csio->sglist_cnt, 0);
1178 		}
1179 	} else {
1180 		ahd_execute_scb(scb, NULL, 0, 0);
1181 	}
1182 }
1183 
1184 static void
1185 ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
1186 {
1187 	union ccb *abort_ccb;
1188 
1189 	abort_ccb = ccb->cab.abort_ccb;
1190 	switch (abort_ccb->ccb_h.func_code) {
1191 #ifdef AHD_TARGET_MODE
1192 	case XPT_ACCEPT_TARGET_IO:
1193 	case XPT_IMMEDIATE_NOTIFY:
1194 	case XPT_CONT_TARGET_IO:
1195 	{
1196 		struct ahd_tmode_tstate *tstate;
1197 		struct ahd_tmode_lstate *lstate;
1198 		struct ccb_hdr_slist *list;
1199 		cam_status status;
1200 
1201 		status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate,
1202 					     &lstate, TRUE);
1203 
1204 		if (status != CAM_REQ_CMP) {
1205 			ccb->ccb_h.status = status;
1206 			break;
1207 		}
1208 
1209 		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
1210 			list = &lstate->accept_tios;
1211 		else if (abort_ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
1212 			list = &lstate->immed_notifies;
1213 		else
1214 			list = NULL;
1215 
1216 		if (list != NULL) {
1217 			struct ccb_hdr *curelm;
1218 			int found;
1219 
1220 			curelm = SLIST_FIRST(list);
1221 			found = 0;
1222 			if (curelm == &abort_ccb->ccb_h) {
1223 				found = 1;
1224 				SLIST_REMOVE_HEAD(list, sim_links.sle);
1225 			} else {
1226 				while (curelm != NULL) {
1227 					struct ccb_hdr *nextelm;
1228 
1229 					nextelm =
1230 					    SLIST_NEXT(curelm, sim_links.sle);
1231 
1232 					if (nextelm == &abort_ccb->ccb_h) {
1233 						found = 1;
1234 						SLIST_NEXT(curelm,
1235 							   sim_links.sle) =
1236 						    SLIST_NEXT(nextelm,
1237 							       sim_links.sle);
1238 						break;
1239 					}
1240 					curelm = nextelm;
1241 				}
1242 			}
1243 
1244 			if (found) {
1245 				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
1246 				xpt_done(abort_ccb);
1247 				ccb->ccb_h.status = CAM_REQ_CMP;
1248 			} else {
1249 				xpt_print_path(abort_ccb->ccb_h.path);
1250 				printf("Not found\n");
1251 				ccb->ccb_h.status = CAM_PATH_INVALID;
1252 			}
1253 			break;
1254 		}
1255 		/* FALLTHROUGH */
1256 	}
1257 #endif
1258 	case XPT_SCSI_IO:
1259 		/* XXX Fully implement the hard ones */
1260 		ccb->ccb_h.status = CAM_UA_ABORT;
1261 		break;
1262 	default:
1263 		ccb->ccb_h.status = CAM_REQ_INVALID;
1264 		break;
1265 	}
1266 	xpt_done(ccb);
1267 }
1268 
1269 void
1270 ahd_send_async(struct ahd_softc *ahd, char channel, u_int target,
1271 		u_int lun, ac_code code, void *opt_arg)
1272 {
1273 	struct	ccb_trans_settings cts;
1274 	struct cam_path *path;
1275 	void *arg;
1276 	int error;
1277 
1278 	arg = NULL;
1279 	error = ahd_create_path(ahd, channel, target, lun, &path);
1280 
1281 	if (error != CAM_REQ_CMP)
1282 		return;
1283 
1284 	switch (code) {
1285 	case AC_TRANSFER_NEG:
1286 	{
1287 		struct	ccb_trans_settings_scsi *scsi;
1288 
1289 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1290 		scsi = &cts.proto_specific.scsi;
1291 		cts.ccb_h.path = path;
1292 		cts.ccb_h.target_id = target;
1293 		cts.ccb_h.target_lun = lun;
1294 		ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts);
1295 		arg = &cts;
1296 		scsi->valid &= ~CTS_SCSI_VALID_TQ;
1297 		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1298 		if (opt_arg == NULL)
1299 			break;
1300 		if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED)
1301 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
1302 		scsi->valid |= CTS_SCSI_VALID_TQ;
1303 		break;
1304 	}
1305 	case AC_SENT_BDR:
1306 	case AC_BUS_RESET:
1307 		break;
1308 	default:
1309 		panic("ahd_send_async: Unexpected async event");
1310 	}
1311 	xpt_async(code, path, arg);
1312 	xpt_free_path(path);
1313 }
1314 
1315 void
1316 ahd_platform_set_tags(struct ahd_softc *ahd,
1317 		      struct ahd_devinfo *devinfo, int enable)
1318 {
1319 }
1320 
1321 int
1322 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
1323 {
1324 	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
1325 	    M_NOWAIT | M_ZERO);
1326 	if (ahd->platform_data == NULL)
1327 		return (ENOMEM);
1328 	return (0);
1329 }
1330 
1331 void
1332 ahd_platform_free(struct ahd_softc *ahd)
1333 {
1334 	struct ahd_platform_data *pdata;
1335 
1336 	pdata = ahd->platform_data;
1337 	if (pdata != NULL) {
1338 		if (pdata->regs[0] != NULL)
1339 			bus_release_resource(ahd->dev_softc,
1340 					     pdata->regs_res_type[0],
1341 					     pdata->regs_res_id[0],
1342 					     pdata->regs[0]);
1343 
1344 		if (pdata->regs[1] != NULL)
1345 			bus_release_resource(ahd->dev_softc,
1346 					     pdata->regs_res_type[1],
1347 					     pdata->regs_res_id[1],
1348 					     pdata->regs[1]);
1349 
1350 		if (pdata->irq != NULL)
1351 			bus_release_resource(ahd->dev_softc,
1352 					     pdata->irq_res_type,
1353 					     0, pdata->irq);
1354 
1355 		if (pdata->sim != NULL) {
1356 			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
1357 			xpt_free_path(pdata->path);
1358 			xpt_bus_deregister(cam_sim_path(pdata->sim));
1359 			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
1360 		}
1361 		if (pdata->eh != NULL)
1362 			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
1363 		free(ahd->platform_data, M_DEVBUF);
1364 	}
1365 }
1366 
1367 int
1368 ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
1369 {
1370 	/* We don't sort softcs under FreeBSD, so always report equal. */
1371 	return (0);
1372 }
1373 
1374 int
1375 ahd_detach(device_t dev)
1376 {
1377 	struct ahd_softc *ahd;
1378 
1379 	device_printf(dev, "detaching device\n");
1380 	ahd = device_get_softc(dev);
1381 	ahd_lock(ahd);
1382 	TAILQ_REMOVE(&ahd_tailq, ahd, links);
1383 	ahd_intr_enable(ahd, FALSE);
1384 	bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih);
1385 	ahd_unlock(ahd);
1386 	ahd_free(ahd);
1387 	return (0);
1388 }
1389 
1390 #if 0
1391 static void
1392 ahd_dump_targcmd(struct target_cmd *cmd)
1393 {
1394 	uint8_t *byte;
1395 	uint8_t *last_byte;
1396 	int i;
1397 
1398 	byte = &cmd->initiator_channel;
1399 	/* Debugging info for received commands */
1400 	last_byte = &cmd[1].initiator_channel;
1401 
1402 	i = 0;
1403 	while (byte < last_byte) {
1404 		if (i == 0)
1405 			printf("\t");
1406 		printf("%#x", *byte++);
1407 		i++;
1408 		if (i == 8) {
1409 			printf("\n");
1410 			i = 0;
1411 		} else {
1412 			printf(", ");
1413 		}
1414 	}
1415 }
1416 #endif
1417 
1418 static int
1419 ahd_modevent(module_t mod, int type, void *data)
1420 {
1421 	/* XXX Deal with busy status on unload. */
1422 	/* XXX Deal with unknown events */
1423 	return 0;
1424 }
1425 
1426 static moduledata_t ahd_mod = {
1427 	"ahd",
1428 	ahd_modevent,
1429 	NULL
1430 };
1431 
1432 /********************************** DDB Hooks *********************************/
1433 #ifdef DDB
1434 static struct ahd_softc *ahd_ddb_softc;
1435 static int ahd_ddb_paused;
1436 static int ahd_ddb_paused_on_entry;
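/*
 * Debugger helpers: "ahd_sunit <unit>" selects the controller to operate
 * on, "ahd_pause"/"ahd_unpause" stop and restart its sequencer, "ahd_in"
 * and "ahd_out" read or write chip registers using the size modifiers
 * handled below, and "ahd_dump" prints the card state.
 */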
1437 DB_COMMAND(ahd_sunit, ahd_ddb_sunit)
1438 {
1439 	struct ahd_softc *list_ahd;
1440 
1441 	ahd_ddb_softc = NULL;
1442 	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
1443 		if (list_ahd->unit == addr)
1444 			ahd_ddb_softc = list_ahd;
1445 	}
1446 	if (ahd_ddb_softc == NULL)
1447 		db_error("No matching softc found!\n");
1448 }
1449 
1450 DB_COMMAND(ahd_pause, ahd_ddb_pause)
1451 {
1452 	if (ahd_ddb_softc == NULL) {
1453 		db_error("Must set unit with ahd_sunit first!\n");
1454 		return;
1455 	}
1456 	if (ahd_ddb_paused == 0) {
1457 		ahd_ddb_paused++;
1458 		if (ahd_is_paused(ahd_ddb_softc)) {
1459 			ahd_ddb_paused_on_entry++;
1460 			return;
1461 		}
1462 		ahd_pause(ahd_ddb_softc);
1463 	}
1464 }
1465 
1466 DB_COMMAND(ahd_unpause, ahd_ddb_unpause)
1467 {
1468 	if (ahd_ddb_softc == NULL) {
1469 		db_error("Must set unit with ahd_sunit first!\n");
1470 		return;
1471 	}
1472 	if (ahd_ddb_paused != 0) {
1473 		ahd_ddb_paused = 0;
1474 		if (ahd_ddb_paused_on_entry)
1475 			return;
1476 		ahd_unpause(ahd_ddb_softc);
1477 	} else if (ahd_ddb_paused_on_entry != 0) {
1478 		/* Two unpauses to clear a paused on entry. */
1479 		ahd_ddb_paused_on_entry = 0;
1480 		ahd_unpause(ahd_ddb_softc);
1481 	}
1482 }
1483 
1484 DB_COMMAND(ahd_in, ahd_ddb_in)
1485 {
1486 	int c;
1487 	int size;
1488 
1489 	if (ahd_ddb_softc == NULL) {
1490 		db_error("Must set unit with ahd_sunit first!\n");
1491 		return;
1492 	}
1493 	if (have_addr == 0)
1494 		return;
1495 
1496 	size = 1;
1497 	while ((c = *modif++) != '\0') {
1498 		switch (c) {
1499 		case 'b':
1500 			size = 1;
1501 			break;
1502 		case 'w':
1503 			size = 2;
1504 			break;
1505 		case 'l':
1506 			size = 4;
1507 			break;
1508 		}
1509 	}
1510 
1511 	if (count <= 0)
1512 		count = 1;
1513 	while (--count >= 0) {
1514 		db_printf("%04lx (M)%x: \t", (u_long)addr,
1515 			  ahd_inb(ahd_ddb_softc, MODE_PTR));
1516 		switch (size) {
1517 		case 1:
1518 			db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr));
1519 			break;
1520 		case 2:
1521 			db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr));
1522 			break;
1523 		case 4:
1524 			db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr));
1525 			break;
1526 		}
1527 	}
1528 }
1529 
1530 DB_FUNC(ahd_out, ahd_ddb_out, db_cmd_table, CS_MORE, NULL)
1531 {
1532 	db_expr_t old_value;
1533 	db_expr_t new_value;
1534 	int	  size;
1535 
1536 	if (ahd_ddb_softc == NULL) {
1537 		db_error("Must set unit with ahd_sunit first!\n");
1538 		return;
1539 	}
1540 
1541 	switch (modif[0]) {
1542 	case '\0':
1543 	case 'b':
1544 		size = 1;
1545 		break;
1546 	case 'h':
1547 		size = 2;
1548 		break;
1549 	case 'l':
1550 		size = 4;
1551 		break;
1552 	default:
1553 		db_error("Unknown size\n");
1554 		return;
1555 	}
1556 
1557 	while (db_expression(&new_value)) {
1558 		switch (size) {
1559 		default:
1560 		case 1:
1561 			old_value = ahd_inb(ahd_ddb_softc, addr);
1562 			ahd_outb(ahd_ddb_softc, addr, new_value);
1563 			break;
1564 		case 2:
1565 			old_value = ahd_inw(ahd_ddb_softc, addr);
1566 			ahd_outw(ahd_ddb_softc, addr, new_value);
1567 			break;
1568 		case 4:
1569 			old_value = ahd_inl(ahd_ddb_softc, addr);
1570 			ahd_outl(ahd_ddb_softc, addr, new_value);
1571 			break;
1572 		}
1573 		db_printf("%04lx (M)%x: \t0x%lx\t=\t0x%lx",
1574 			  (u_long)addr, ahd_inb(ahd_ddb_softc, MODE_PTR),
1575 			  (u_long)old_value, (u_long)new_value);
1576 		addr += size;
1577 	}
1578 	db_skip_to_eol();
1579 }
1580 
1581 DB_COMMAND(ahd_dump, ahd_ddb_dump)
1582 {
1583 	if (ahd_ddb_softc == NULL) {
1584 		db_error("Must set unit with ahd_sunit first!\n");
1585 		return;
1586 	}
1587 	ahd_dump_card_state(ahd_ddb_softc);
1588 }
1589 
1590 #endif
1591 
1592 
1593 DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
1594 MODULE_DEPEND(ahd, cam, 1, 1, 1);
1595 MODULE_VERSION(ahd, 1);
1596