xref: /freebsd/sys/dev/ahci/ahci.c (revision 39ee7a7a6bdd1557b1c3532abf60d139798ac88b)
1 /*-
2  * Copyright (c) 2009-2012 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/module.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/bus.h>
35 #include <sys/conf.h>
36 #include <sys/endian.h>
37 #include <sys/malloc.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <machine/stdarg.h>
41 #include <machine/resource.h>
42 #include <machine/bus.h>
43 #include <sys/rman.h>
44 #include "ahci.h"
45 
46 #include <cam/cam.h>
47 #include <cam/cam_ccb.h>
48 #include <cam/cam_sim.h>
49 #include <cam/cam_xpt_sim.h>
50 #include <cam/cam_debug.h>
51 
52 /* local prototypes */
53 static void ahci_intr(void *data);
54 static void ahci_intr_one(void *data);
55 static void ahci_intr_one_edge(void *data);
56 static int ahci_ch_init(device_t dev);
57 static int ahci_ch_deinit(device_t dev);
58 static int ahci_ch_suspend(device_t dev);
59 static int ahci_ch_resume(device_t dev);
60 static void ahci_ch_pm(void *arg);
61 static void ahci_ch_intr(void *arg);
62 static void ahci_ch_intr_direct(void *arg);
63 static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus);
64 static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb);
65 static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
66 static void ahci_execute_transaction(struct ahci_slot *slot);
67 static void ahci_timeout(struct ahci_slot *slot);
68 static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et);
69 static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag);
70 static void ahci_dmainit(device_t dev);
71 static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
72 static void ahci_dmafini(device_t dev);
73 static void ahci_slotsalloc(device_t dev);
74 static void ahci_slotsfree(device_t dev);
75 static void ahci_reset(struct ahci_channel *ch);
76 static void ahci_start(struct ahci_channel *ch, int fbs);
77 static void ahci_stop(struct ahci_channel *ch);
78 static void ahci_clo(struct ahci_channel *ch);
79 static void ahci_start_fr(struct ahci_channel *ch);
80 static void ahci_stop_fr(struct ahci_channel *ch);
81 
82 static int ahci_sata_connect(struct ahci_channel *ch);
83 static int ahci_sata_phy_reset(struct ahci_channel *ch);
84 static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0);
85 
86 static void ahci_issue_recovery(struct ahci_channel *ch);
87 static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb);
88 static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb);
89 
90 static void ahciaction(struct cam_sim *sim, union ccb *ccb);
91 static void ahcipoll(struct cam_sim *sim);
92 
93 static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers");
94 
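/*
 * The following macros overlay the CCB's SIM-private fields to track error
 * recovery state: recovery_type records whether a CCB is a READ LOG EXT
 * (NCQ error) or REQUEST SENSE (ATAPI) recovery command, and recovery_slot
 * remembers the slot of the command being recovered.
 */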
95 #define recovery_type		spriv_field0
96 #define RECOVERY_NONE		0
97 #define RECOVERY_READ_LOG	1
98 #define RECOVERY_REQUEST_SENSE	2
99 #define recovery_slot		spriv_field1
100 
101 int
102 ahci_ctlr_setup(device_t dev)
103 {
104 	struct ahci_controller *ctlr = device_get_softc(dev);
105 	/* Clear interrupts */
106 	ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS));
107 	/* Configure CCC */
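	/*
	 * Command Completion Coalescing merges completion interrupts from
	 * the ports listed in AHCI_CCCP: an interrupt is raised once either
	 * the timeout (ctlr->ccc milliseconds) or the command count (4) is
	 * reached.  The interval comes from a device hint (for example
	 * hint.ahci.0.ccc in loader.conf) and coalescing is off by default.
	 */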
108 	if (ctlr->ccc) {
109 		ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI));
110 		ATA_OUTL(ctlr->r_mem, AHCI_CCCC,
111 		    (ctlr->ccc << AHCI_CCCC_TV_SHIFT) |
112 		    (4 << AHCI_CCCC_CC_SHIFT) |
113 		    AHCI_CCCC_EN);
114 		ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) &
115 		    AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT;
116 		if (bootverbose) {
117 			device_printf(dev,
118 			    "CCC with %dms/4cmd enabled on vector %d\n",
119 			    ctlr->ccc, ctlr->cccv);
120 		}
121 	}
122 	/* Enable AHCI interrupts */
123 	ATA_OUTL(ctlr->r_mem, AHCI_GHC,
124 	    ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE);
125 	return (0);
126 }
127 
128 int
129 ahci_ctlr_reset(device_t dev)
130 {
131 	struct ahci_controller *ctlr = device_get_softc(dev);
132 	int timeout;
133 
134 	/* Enable AHCI mode */
135 	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);
136 	/* Reset AHCI controller */
137 	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR);
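	/* The HBA clears GHC.HR when the reset completes; poll for up to 1s. */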
138 	for (timeout = 1000; timeout > 0; timeout--) {
139 		DELAY(1000);
140 		if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0)
141 			break;
142 	}
143 	if (timeout == 0) {
144 		device_printf(dev, "AHCI controller reset failure\n");
145 		return (ENXIO);
146 	}
147 	/* Reenable AHCI mode */
148 	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);
149 
150 	if (ctlr->quirks & AHCI_Q_RESTORE_CAP) {
151 		/*
152 		 * Restore capability field.
153 		 * This is a write to a read-only register to restore its state.
154 		 * On fully standard-compliant hardware this is not needed and
155 		 * this operation shall not take place. See ahci_pci.c for
156 		 * platforms using this quirk.
157 		 */
158 		ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps);
159 	}
160 
161 	return (0);
162 }
163 
164 
165 int
166 ahci_attach(device_t dev)
167 {
168 	struct ahci_controller *ctlr = device_get_softc(dev);
169 	int error, i, u, speed, unit;
170 	u_int32_t version;
171 	device_t child;
172 
173 	ctlr->dev = dev;
174 	ctlr->ccc = 0;
175 	resource_int_value(device_get_name(dev),
176 	    device_get_unit(dev), "ccc", &ctlr->ccc);
177 
178 	/* Setup our own memory management for channels. */
179 	ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem);
180 	ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem);
181 	ctlr->sc_iomem.rm_type = RMAN_ARRAY;
182 	ctlr->sc_iomem.rm_descr = "I/O memory addresses";
183 	if ((error = rman_init(&ctlr->sc_iomem)) != 0) {
184 		ahci_free_mem(dev);
185 		return (error);
186 	}
187 	if ((error = rman_manage_region(&ctlr->sc_iomem,
188 	    rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) {
189 		ahci_free_mem(dev);
190 		rman_fini(&ctlr->sc_iomem);
191 		return (error);
192 	}
193 	/* Get the HW capabilities */
194 	version = ATA_INL(ctlr->r_mem, AHCI_VS);
195 	ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP);
196 	if (version >= 0x00010200)
197 		ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2);
198 	if (ctlr->caps & AHCI_CAP_EMS)
199 		ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL);
200 
201 	if (ctlr->quirks & AHCI_Q_FORCE_PI) {
202 		/*
203 		 * Enable ports.
204 		 * The spec says that BIOS sets up bits corresponding to
205 		 * available ports. On platforms where this information
206 		 * is missing, the driver can define available ports on its own.
207 		 */
208 		int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1;
209 		int nmask = (1 << nports) - 1;
210 
211 		ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask);
212 		device_printf(dev, "Forcing PI to %d ports (mask = %x)\n",
213 		    nports, nmask);
214 	}
215 
216 	ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI);
217 
218 	/* Identify and set separate quirks for HBA and RAID f/w Marvells. */
219 	if ((ctlr->quirks & AHCI_Q_ALTSIG) &&
220 	    (ctlr->caps & AHCI_CAP_SPM) == 0)
221 		ctlr->quirks |= AHCI_Q_NOBSYRES;
222 
223 	if (ctlr->quirks & AHCI_Q_1CH) {
224 		ctlr->caps &= ~AHCI_CAP_NPMASK;
225 		ctlr->ichannels &= 0x01;
226 	}
227 	if (ctlr->quirks & AHCI_Q_2CH) {
228 		ctlr->caps &= ~AHCI_CAP_NPMASK;
229 		ctlr->caps |= 1;
230 		ctlr->ichannels &= 0x03;
231 	}
232 	if (ctlr->quirks & AHCI_Q_4CH) {
233 		ctlr->caps &= ~AHCI_CAP_NPMASK;
234 		ctlr->caps |= 3;
235 		ctlr->ichannels &= 0x0f;
236 	}
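	/*
	 * Expose as many channels as the larger of the highest implemented
	 * port number (from PI) and the port count advertised in CAP.NP.
	 */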
237 	ctlr->channels = MAX(flsl(ctlr->ichannels),
238 	    (ctlr->caps & AHCI_CAP_NPMASK) + 1);
239 	if (ctlr->quirks & AHCI_Q_NOPMP)
240 		ctlr->caps &= ~AHCI_CAP_SPM;
241 	if (ctlr->quirks & AHCI_Q_NONCQ)
242 		ctlr->caps &= ~AHCI_CAP_SNCQ;
243 	if ((ctlr->caps & AHCI_CAP_CCCS) == 0)
244 		ctlr->ccc = 0;
245 	ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC);
246 
247 	/* Create controller-wide DMA tag. */
248 	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
249 	    (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR :
250 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
251 	    BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE,
252 	    0, NULL, NULL, &ctlr->dma_tag)) {
253 		ahci_free_mem(dev);
254 		rman_fini(&ctlr->sc_iomem);
255 		return (ENXIO);
256 	}
257 
258 	ahci_ctlr_setup(dev);
259 
260 	/* Setup interrupts. */
261 	if ((error = ahci_setup_interrupt(dev)) != 0) {
262 		bus_dma_tag_destroy(ctlr->dma_tag);
263 		ahci_free_mem(dev);
264 		rman_fini(&ctlr->sc_iomem);
265 		return (error);
266 	}
267 
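	/*
	 * Count implemented channels.  "Direct" completion (finishing CCBs
	 * with xpt_done_direct() from interrupt context) is enabled by
	 * default when MSI provides per-channel vectors or only a few
	 * channels share one; the "direct" hint can override this choice.
	 */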
268 	i = 0;
269 	for (u = ctlr->ichannels; u != 0; u >>= 1)
270 		i += (u & 1);
271 	ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3));
272 	resource_int_value(device_get_name(dev), device_get_unit(dev),
273 	    "direct", &ctlr->direct);
274 	/* Announce HW capabilities. */
275 	speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT;
276 	device_printf(dev,
277 		    "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n",
278 		    ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f),
279 		    ((version >> 4) & 0xf0) + (version & 0x0f),
280 		    (ctlr->caps & AHCI_CAP_NPMASK) + 1,
281 		    ((speed == 1) ? "1.5":((speed == 2) ? "3":
282 		    ((speed == 3) ? "6":"?"))),
283 		    (ctlr->caps & AHCI_CAP_SPM) ?
284 		    "supported" : "not supported",
285 		    (ctlr->caps & AHCI_CAP_FBSS) ?
286 		    " with FBS" : "");
287 	if (ctlr->quirks != 0) {
288 		device_printf(dev, "quirks=0x%b\n", ctlr->quirks,
289 		    AHCI_Q_BIT_STRING);
290 	}
291 	if (bootverbose) {
292 		device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps",
293 		    (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"",
294 		    (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"",
295 		    (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"",
296 		    (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"",
297 		    (ctlr->caps & AHCI_CAP_SSS) ? " SS":"",
298 		    (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"",
299 		    (ctlr->caps & AHCI_CAP_SAL) ? " AL":"",
300 		    (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"",
301 		    ((speed == 1) ? "1.5":((speed == 2) ? "3":
302 		    ((speed == 3) ? "6":"?"))));
303 		printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n",
304 		    (ctlr->caps & AHCI_CAP_SAM) ? " AM":"",
305 		    (ctlr->caps & AHCI_CAP_SPM) ? " PM":"",
306 		    (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"",
307 		    (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"",
308 		    (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"",
309 		    (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"",
310 		    ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1,
311 		    (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"",
312 		    (ctlr->caps & AHCI_CAP_EMS) ? " EM":"",
313 		    (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"",
314 		    (ctlr->caps & AHCI_CAP_NPMASK) + 1);
315 	}
316 	if (bootverbose && version >= 0x00010200) {
317 		device_printf(dev, "Caps2:%s%s%s%s%s%s\n",
318 		    (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"",
319 		    (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"",
320 		    (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"",
321 		    (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"",
322 		    (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"",
323 		    (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":"");
324 	}
325 	/* Attach all channels on this controller */
326 	for (unit = 0; unit < ctlr->channels; unit++) {
327 		child = device_add_child(dev, "ahcich", -1);
328 		if (child == NULL) {
329 			device_printf(dev, "failed to add channel device\n");
330 			continue;
331 		}
332 		device_set_ivars(child, (void *)(intptr_t)unit);
333 		if ((ctlr->ichannels & (1 << unit)) == 0)
334 			device_disable(child);
335 	}
336 	if (ctlr->caps & AHCI_CAP_EMS) {
337 		child = device_add_child(dev, "ahciem", -1);
338 		if (child == NULL)
339 			device_printf(dev, "failed to add enclosure device\n");
340 		else
341 			device_set_ivars(child, (void *)(intptr_t)-1);
342 	}
343 	bus_generic_attach(dev);
344 	return (0);
345 }
346 
347 int
348 ahci_detach(device_t dev)
349 {
350 	struct ahci_controller *ctlr = device_get_softc(dev);
351 	int i;
352 
353 	/* Detach & delete all children */
354 	device_delete_children(dev);
355 
356 	/* Free interrupts. */
357 	for (i = 0; i < ctlr->numirqs; i++) {
358 		if (ctlr->irqs[i].r_irq) {
359 			bus_teardown_intr(dev, ctlr->irqs[i].r_irq,
360 			    ctlr->irqs[i].handle);
361 			bus_release_resource(dev, SYS_RES_IRQ,
362 			    ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq);
363 		}
364 	}
365 	bus_dma_tag_destroy(ctlr->dma_tag);
366 	/* Free memory. */
367 	rman_fini(&ctlr->sc_iomem);
368 	ahci_free_mem(dev);
369 	return (0);
370 }
371 
372 void
373 ahci_free_mem(device_t dev)
374 {
375 	struct ahci_controller *ctlr = device_get_softc(dev);
376 
377 	/* Release memory resources */
378 	if (ctlr->r_mem)
379 		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
380 	if (ctlr->r_msix_table)
381 		bus_release_resource(dev, SYS_RES_MEMORY,
382 		    ctlr->r_msix_tab_rid, ctlr->r_msix_table);
383 	if (ctlr->r_msix_pba)
384 		bus_release_resource(dev, SYS_RES_MEMORY,
385 		    ctlr->r_msix_pba_rid, ctlr->r_msix_pba);
386 
387 	ctlr->r_msix_pba = ctlr->r_mem = ctlr->r_msix_table = NULL;
388 }
389 
390 int
391 ahci_setup_interrupt(device_t dev)
392 {
393 	struct ahci_controller *ctlr = device_get_softc(dev);
394 	int i;
395 
396 	/* Check for single MSI vector fallback. */
397 	if (ctlr->numirqs > 1 &&
398 	    (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) {
399 		device_printf(dev, "Falling back to one MSI\n");
400 		ctlr->numirqs = 1;
401 	}
402 
403 	/* Ensure we don't overrun irqs. */
404 	if (ctlr->numirqs > AHCI_MAX_IRQS) {
405 		device_printf(dev, "Too many irqs %d > %d (clamping)\n",
406 		    ctlr->numirqs, AHCI_MAX_IRQS);
407 		ctlr->numirqs = AHCI_MAX_IRQS;
408 	}
409 
410 	/* Allocate all IRQs. */
411 	for (i = 0; i < ctlr->numirqs; i++) {
412 		ctlr->irqs[i].ctlr = ctlr;
413 		ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0);
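		/*
		 * Pick an interrupt mode for this vector: MODE_ONE serves
		 * exactly one channel, MODE_ALL scans every channel, and
		 * MODE_AFTER (the last allocated vector) serves its own
		 * channel plus all higher-numbered ones.
		 */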
414 		if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi)
415 			ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE;
416 		else if (ctlr->numirqs == 1 || i >= ctlr->channels ||
417 		    (ctlr->ccc && i == ctlr->cccv))
418 			ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL;
419 		else if (i == ctlr->numirqs - 1)
420 			ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER;
421 		else
422 			ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE;
423 		if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
424 		    &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) {
425 			device_printf(dev, "unable to map interrupt\n");
426 			return (ENXIO);
427 		}
428 		if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL,
429 		    (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr :
430 		     ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge :
431 		      ahci_intr_one),
432 		    &ctlr->irqs[i], &ctlr->irqs[i].handle))) {
433 			/* SOS XXX release r_irq */
434 			device_printf(dev, "unable to setup interrupt\n");
435 			return (ENXIO);
436 		}
437 		if (ctlr->numirqs > 1) {
438 			bus_describe_intr(dev, ctlr->irqs[i].r_irq,
439 			    ctlr->irqs[i].handle,
440 			    ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ?
441 			    "ch%d" : "%d", i);
442 		}
443 	}
444 	return (0);
445 }
446 
447 /*
448  * Common case interrupt handler.
449  */
450 static void
451 ahci_intr(void *data)
452 {
453 	struct ahci_controller_irq *irq = data;
454 	struct ahci_controller *ctlr = irq->ctlr;
455 	u_int32_t is, ise = 0;
456 	void *arg;
457 	int unit;
458 
459 	if (irq->mode == AHCI_IRQ_MODE_ALL) {
460 		unit = 0;
461 		if (ctlr->ccc)
462 			is = ctlr->ichannels;
463 		else
464 			is = ATA_INL(ctlr->r_mem, AHCI_IS);
465 	} else {	/* AHCI_IRQ_MODE_AFTER */
466 		unit = irq->r_irq_rid - 1;
467 		is = ATA_INL(ctlr->r_mem, AHCI_IS);
468 	}
469 	/* CCC interrupt is edge triggered. */
470 	if (ctlr->ccc)
471 		ise = 1 << ctlr->cccv;
472 	/* Some controllers have edge triggered IS. */
473 	if (ctlr->quirks & AHCI_Q_EDGEIS)
474 		ise |= is;
475 	if (ise != 0)
476 		ATA_OUTL(ctlr->r_mem, AHCI_IS, ise);
477 	for (; unit < ctlr->channels; unit++) {
478 		if ((is & (1 << unit)) != 0 &&
479 		    (arg = ctlr->interrupt[unit].argument)) {
480 			ctlr->interrupt[unit].function(arg);
481 		}
482 	}
483 	/* AHCI declares level triggered IS. */
484 	if (!(ctlr->quirks & AHCI_Q_EDGEIS))
485 		ATA_OUTL(ctlr->r_mem, AHCI_IS, is);
486 }
487 
488 /*
489  * Simplified interrupt handler for multivector MSI mode.
490  */
491 static void
492 ahci_intr_one(void *data)
493 {
494 	struct ahci_controller_irq *irq = data;
495 	struct ahci_controller *ctlr = irq->ctlr;
496 	void *arg;
497 	int unit;
498 
499 	unit = irq->r_irq_rid - 1;
500 	if ((arg = ctlr->interrupt[unit].argument))
501 		ctlr->interrupt[unit].function(arg);
502 	/* AHCI declares level triggered IS. */
503 	ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
504 }
505 
506 static void
507 ahci_intr_one_edge(void *data)
508 {
509 	struct ahci_controller_irq *irq = data;
510 	struct ahci_controller *ctlr = irq->ctlr;
511 	void *arg;
512 	int unit;
513 
514 	unit = irq->r_irq_rid - 1;
515 	/* Some controllers have edge triggered IS. */
516 	ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
517 	if ((arg = ctlr->interrupt[unit].argument))
518 		ctlr->interrupt[unit].function(arg);
519 }
520 
521 struct resource *
522 ahci_alloc_resource(device_t dev, device_t child, int type, int *rid,
523     u_long start, u_long end, u_long count, u_int flags)
524 {
525 	struct ahci_controller *ctlr = device_get_softc(dev);
526 	struct resource *res;
527 	long st;
528 	int offset, size, unit;
529 
530 	unit = (intptr_t)device_get_ivars(child);
531 	res = NULL;
532 	switch (type) {
533 	case SYS_RES_MEMORY:
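		/*
		 * Channel children (unit >= 0) get their 128-byte port
		 * register window at AHCI_OFFSET; the enclosure child
		 * (unit == -1) gets AHCI_EM_CTL for rid 0 and the enclosure
		 * management buffer described by AHCI_EM_LOC for rids 1/2.
		 */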
534 		if (unit >= 0) {
535 			offset = AHCI_OFFSET + (unit << 7);
536 			size = 128;
537 		} else if (*rid == 0) {
538 			offset = AHCI_EM_CTL;
539 			size = 4;
540 		} else {
541 			offset = (ctlr->emloc & 0xffff0000) >> 14;
542 			size = (ctlr->emloc & 0x0000ffff) << 2;
543 			if (*rid != 1) {
544 				if (*rid == 2 && (ctlr->capsem &
545 				    (AHCI_EM_XMT | AHCI_EM_SMB)) == 0)
546 					offset += size;
547 				else
548 					break;
549 			}
550 		}
551 		st = rman_get_start(ctlr->r_mem);
552 		res = rman_reserve_resource(&ctlr->sc_iomem, st + offset,
553 		    st + offset + size - 1, size, RF_ACTIVE, child);
554 		if (res) {
555 			bus_space_handle_t bsh;
556 			bus_space_tag_t bst;
557 			bsh = rman_get_bushandle(ctlr->r_mem);
558 			bst = rman_get_bustag(ctlr->r_mem);
559 			bus_space_subregion(bst, bsh, offset, 128, &bsh);
560 			rman_set_bushandle(res, bsh);
561 			rman_set_bustag(res, bst);
562 		}
563 		break;
564 	case SYS_RES_IRQ:
565 		if (*rid == ATA_IRQ_RID)
566 			res = ctlr->irqs[0].r_irq;
567 		break;
568 	}
569 	return (res);
570 }
571 
572 int
573 ahci_release_resource(device_t dev, device_t child, int type, int rid,
574     struct resource *r)
575 {
576 
577 	switch (type) {
578 	case SYS_RES_MEMORY:
579 		rman_release_resource(r);
580 		return (0);
581 	case SYS_RES_IRQ:
582 		if (rid != ATA_IRQ_RID)
583 			return (ENOENT);
584 		return (0);
585 	}
586 	return (EINVAL);
587 }
588 
589 int
590 ahci_setup_intr(device_t dev, device_t child, struct resource *irq,
591     int flags, driver_filter_t *filter, driver_intr_t *function,
592     void *argument, void **cookiep)
593 {
594 	struct ahci_controller *ctlr = device_get_softc(dev);
595 	int unit = (intptr_t)device_get_ivars(child);
596 
597 	if (filter != NULL) {
598 		printf("ahci.c: we cannot use a filter here\n");
599 		return (EINVAL);
600 	}
601 	ctlr->interrupt[unit].function = function;
602 	ctlr->interrupt[unit].argument = argument;
603 	return (0);
604 }
605 
606 int
607 ahci_teardown_intr(device_t dev, device_t child, struct resource *irq,
608     void *cookie)
609 {
610 	struct ahci_controller *ctlr = device_get_softc(dev);
611 	int unit = (intptr_t)device_get_ivars(child);
612 
613 	ctlr->interrupt[unit].function = NULL;
614 	ctlr->interrupt[unit].argument = NULL;
615 	return (0);
616 }
617 
618 int
619 ahci_print_child(device_t dev, device_t child)
620 {
621 	int retval, channel;
622 
623 	retval = bus_print_child_header(dev, child);
624 	channel = (int)(intptr_t)device_get_ivars(child);
625 	if (channel >= 0)
626 		retval += printf(" at channel %d", channel);
627 	retval += bus_print_child_footer(dev, child);
628 	return (retval);
629 }
630 
631 int
632 ahci_child_location_str(device_t dev, device_t child, char *buf,
633     size_t buflen)
634 {
635 	int channel;
636 
637 	channel = (int)(intptr_t)device_get_ivars(child);
638 	if (channel >= 0)
639 		snprintf(buf, buflen, "channel=%d", channel);
640 	return (0);
641 }
642 
643 bus_dma_tag_t
644 ahci_get_dma_tag(device_t dev, device_t child)
645 {
646 	struct ahci_controller *ctlr = device_get_softc(dev);
647 
648 	return (ctlr->dma_tag);
649 }
650 
651 static int
652 ahci_ch_probe(device_t dev)
653 {
654 
655 	device_set_desc_copy(dev, "AHCI channel");
656 	return (BUS_PROBE_DEFAULT);
657 }
658 
659 static int
660 ahci_ch_attach(device_t dev)
661 {
662 	struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev));
663 	struct ahci_channel *ch = device_get_softc(dev);
664 	struct cam_devq *devq;
665 	int rid, error, i, sata_rev = 0;
666 	u_int32_t version;
667 
668 	ch->dev = dev;
669 	ch->unit = (intptr_t)device_get_ivars(dev);
670 	ch->caps = ctlr->caps;
671 	ch->caps2 = ctlr->caps2;
672 	ch->start = ctlr->ch_start;
673 	ch->quirks = ctlr->quirks;
674 	ch->vendorid = ctlr->vendorid;
675 	ch->deviceid = ctlr->deviceid;
676 	ch->subvendorid = ctlr->subvendorid;
677 	ch->subdeviceid = ctlr->subdeviceid;
678 	ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1;
679 	mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF);
680 	ch->pm_level = 0;
681 	resource_int_value(device_get_name(dev),
682 	    device_get_unit(dev), "pm_level", &ch->pm_level);
683 	STAILQ_INIT(&ch->doneq);
684 	if (ch->pm_level > 3)
685 		callout_init_mtx(&ch->pm_timer, &ch->mtx, 0);
686 	callout_init_mtx(&ch->reset_timer, &ch->mtx, 0);
687 	/* JMicron external ports (unit 0) are sometimes limited to SATA 1 speed. */
688 	if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0)
689 		sata_rev = 1;
690 	if (ch->quirks & AHCI_Q_SATA2)
691 		sata_rev = 2;
692 	resource_int_value(device_get_name(dev),
693 	    device_get_unit(dev), "sata_rev", &sata_rev);
694 	for (i = 0; i < 16; i++) {
695 		ch->user[i].revision = sata_rev;
696 		ch->user[i].mode = 0;
697 		ch->user[i].bytecount = 8192;
698 		ch->user[i].tags = ch->numslots;
699 		ch->user[i].caps = 0;
700 		ch->curr[i] = ch->user[i];
701 		if (ch->pm_level) {
702 			ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ |
703 			    CTS_SATA_CAPS_H_APST |
704 			    CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST;
705 		}
706 		ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA |
707 		    CTS_SATA_CAPS_H_AN;
708 	}
709 	rid = 0;
710 	if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
711 	    &rid, RF_ACTIVE)))
712 		return (ENXIO);
713 	ahci_dmainit(dev);
714 	ahci_slotsalloc(dev);
715 	mtx_lock(&ch->mtx);
716 	ahci_ch_init(dev);
717 	rid = ATA_IRQ_RID;
718 	if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
719 	    &rid, RF_SHAREABLE | RF_ACTIVE))) {
720 		device_printf(dev, "Unable to map interrupt\n");
721 		error = ENXIO;
722 		goto err0;
723 	}
724 	if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
725 	    ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr,
726 	    ch, &ch->ih))) {
727 		device_printf(dev, "Unable to setup interrupt\n");
728 		error = ENXIO;
729 		goto err1;
730 	}
731 	ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD);
732 	version = ATA_INL(ctlr->r_mem, AHCI_VS);
733 	if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS))
734 		ch->chcaps |= AHCI_P_CMD_FBSCP;
735 	if (ch->caps2 & AHCI_CAP2_SDS)
736 		ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP);
737 	if (bootverbose) {
738 		device_printf(dev, "Caps:%s%s%s%s%s%s\n",
739 		    (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"",
740 		    (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"",
741 		    (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"",
742 		    (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"",
743 		    (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"",
744 		    (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":"");
745 	}
746 	/* Create the device queue for our SIM. */
747 	devq = cam_simq_alloc(ch->numslots);
748 	if (devq == NULL) {
749 		device_printf(dev, "Unable to allocate simq\n");
750 		error = ENOMEM;
751 		goto err1;
752 	}
753 	/* Construct SIM entry */
754 	ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch,
755 	    device_get_unit(dev), (struct mtx *)&ch->mtx,
756 	    min(2, ch->numslots),
757 	    (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0,
758 	    devq);
759 	if (ch->sim == NULL) {
760 		cam_simq_free(devq);
761 		device_printf(dev, "unable to allocate sim\n");
762 		error = ENOMEM;
763 		goto err1;
764 	}
765 	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
766 		device_printf(dev, "unable to register xpt bus\n");
767 		error = ENXIO;
768 		goto err2;
769 	}
770 	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
771 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
772 		device_printf(dev, "unable to create path\n");
773 		error = ENXIO;
774 		goto err3;
775 	}
776 	if (ch->pm_level > 3) {
777 		callout_reset(&ch->pm_timer,
778 		    (ch->pm_level == 4) ? hz / 1000 : hz / 8,
779 		    ahci_ch_pm, ch);
780 	}
781 	mtx_unlock(&ch->mtx);
782 	return (0);
783 
784 err3:
785 	xpt_bus_deregister(cam_sim_path(ch->sim));
786 err2:
787 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
788 err1:
789 	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
790 err0:
791 	bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
792 	mtx_unlock(&ch->mtx);
793 	mtx_destroy(&ch->mtx);
794 	return (error);
795 }
796 
797 static int
798 ahci_ch_detach(device_t dev)
799 {
800 	struct ahci_channel *ch = device_get_softc(dev);
801 
802 	mtx_lock(&ch->mtx);
803 	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
804 	/* Forget about reset. */
805 	if (ch->resetting) {
806 		ch->resetting = 0;
807 		xpt_release_simq(ch->sim, TRUE);
808 	}
809 	xpt_free_path(ch->path);
810 	xpt_bus_deregister(cam_sim_path(ch->sim));
811 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
812 	mtx_unlock(&ch->mtx);
813 
814 	if (ch->pm_level > 3)
815 		callout_drain(&ch->pm_timer);
816 	callout_drain(&ch->reset_timer);
817 	bus_teardown_intr(dev, ch->r_irq, ch->ih);
818 	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
819 
820 	ahci_ch_deinit(dev);
821 	ahci_slotsfree(dev);
822 	ahci_dmafini(dev);
823 
824 	bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
825 	mtx_destroy(&ch->mtx);
826 	return (0);
827 }
828 
829 static int
830 ahci_ch_init(device_t dev)
831 {
832 	struct ahci_channel *ch = device_get_softc(dev);
833 	uint64_t work;
834 
835 	/* Disable port interrupts */
836 	ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
837 	/* Setup work areas */
838 	work = ch->dma.work_bus + AHCI_CL_OFFSET;
839 	ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff);
840 	ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32);
841 	work = ch->dma.rfis_bus;
842 	ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff);
843 	ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32);
844 	/* Activate the channel and power/spin up device */
845 	ATA_OUTL(ch->r_mem, AHCI_P_CMD,
846 	     (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD |
847 	     ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) |
848 	     ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 )));
849 	ahci_start_fr(ch);
850 	ahci_start(ch, 1);
851 	return (0);
852 }
853 
854 static int
855 ahci_ch_deinit(device_t dev)
856 {
857 	struct ahci_channel *ch = device_get_softc(dev);
858 
859 	/* Disable port interrupts. */
860 	ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
861 	/* Reset command register. */
862 	ahci_stop(ch);
863 	ahci_stop_fr(ch);
864 	ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0);
865 	/* Allow everything, including partial and slumber modes. */
866 	ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0);
867 	/* Request slumber mode transition and give some time to get there. */
868 	ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER);
869 	DELAY(100);
870 	/* Disable PHY. */
871 	ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE);
872 	return (0);
873 }
874 
875 static int
876 ahci_ch_suspend(device_t dev)
877 {
878 	struct ahci_channel *ch = device_get_softc(dev);
879 
880 	mtx_lock(&ch->mtx);
881 	xpt_freeze_simq(ch->sim, 1);
882 	/* Forget about reset. */
883 	if (ch->resetting) {
884 		ch->resetting = 0;
885 		callout_stop(&ch->reset_timer);
886 		xpt_release_simq(ch->sim, TRUE);
887 	}
888 	while (ch->oslots)
889 		msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100);
890 	ahci_ch_deinit(dev);
891 	mtx_unlock(&ch->mtx);
892 	return (0);
893 }
894 
895 static int
896 ahci_ch_resume(device_t dev)
897 {
898 	struct ahci_channel *ch = device_get_softc(dev);
899 
900 	mtx_lock(&ch->mtx);
901 	ahci_ch_init(dev);
902 	ahci_reset(ch);
903 	xpt_release_simq(ch->sim, TRUE);
904 	mtx_unlock(&ch->mtx);
905 	return (0);
906 }
907 
908 devclass_t ahcich_devclass;
909 static device_method_t ahcich_methods[] = {
910 	DEVMETHOD(device_probe,     ahci_ch_probe),
911 	DEVMETHOD(device_attach,    ahci_ch_attach),
912 	DEVMETHOD(device_detach,    ahci_ch_detach),
913 	DEVMETHOD(device_suspend,   ahci_ch_suspend),
914 	DEVMETHOD(device_resume,    ahci_ch_resume),
915 	DEVMETHOD_END
916 };
917 static driver_t ahcich_driver = {
918         "ahcich",
919         ahcich_methods,
920         sizeof(struct ahci_channel)
921 };
922 DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, NULL, NULL);
923 
924 struct ahci_dc_cb_args {
925 	bus_addr_t maddr;
926 	int error;
927 };
928 
929 static void
930 ahci_dmainit(device_t dev)
931 {
932 	struct ahci_channel *ch = device_get_softc(dev);
933 	struct ahci_dc_cb_args dcba;
934 	size_t rfsize;
935 
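	/*
	 * The work area holds the command list followed by one command table
	 * per slot.  The FIS receive area must be 4KB (one 256-byte area per
	 * PMP target) when FIS-based switching is supported, 256 bytes
	 * otherwise.
	 */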
936 	/* Command area. */
937 	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0,
938 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
939 	    NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE,
940 	    0, NULL, NULL, &ch->dma.work_tag))
941 		goto error;
942 	if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work,
943 	    BUS_DMA_ZERO, &ch->dma.work_map))
944 		goto error;
945 	if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
946 	    AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) {
947 		bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
948 		goto error;
949 	}
950 	ch->dma.work_bus = dcba.maddr;
951 	/* FIS receive area. */
952 	if (ch->chcaps & AHCI_P_CMD_FBSCP)
953 		rfsize = 4096;
954 	else
955 		rfsize = 256;
956 	if (bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0,
957 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
958 	    NULL, NULL, rfsize, 1, rfsize,
959 	    0, NULL, NULL, &ch->dma.rfis_tag))
960 		goto error;
961 	if (bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0,
962 	    &ch->dma.rfis_map))
963 		goto error;
964 	if (bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis,
965 	    rfsize, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) {
966 		bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map);
967 		goto error;
968 	}
969 	ch->dma.rfis_bus = dcba.maddr;
970 	/* Data area. */
971 	if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
972 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
973 	    NULL, NULL,
974 	    AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots,
975 	    AHCI_SG_ENTRIES, AHCI_PRD_MAX,
976 	    0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) {
977 		goto error;
978 	}
979 	return;
980 
981 error:
982 	device_printf(dev, "WARNING - DMA initialization failed\n");
983 	ahci_dmafini(dev);
984 }
985 
986 static void
987 ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
988 {
989 	struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc;
990 
991 	if (!(dcba->error = error))
992 		dcba->maddr = segs[0].ds_addr;
993 }
994 
995 static void
996 ahci_dmafini(device_t dev)
997 {
998 	struct ahci_channel *ch = device_get_softc(dev);
999 
1000 	if (ch->dma.data_tag) {
1001 		bus_dma_tag_destroy(ch->dma.data_tag);
1002 		ch->dma.data_tag = NULL;
1003 	}
1004 	if (ch->dma.rfis_bus) {
1005 		bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map);
1006 		bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map);
1007 		ch->dma.rfis_bus = 0;
1008 		ch->dma.rfis = NULL;
1009 	}
1010 	if (ch->dma.work_bus) {
1011 		bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map);
1012 		bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
1013 		ch->dma.work_bus = 0;
1014 		ch->dma.work = NULL;
1015 	}
1016 	if (ch->dma.work_tag) {
1017 		bus_dma_tag_destroy(ch->dma.work_tag);
1018 		ch->dma.work_tag = NULL;
1019 	}
1020 }
1021 
1022 static void
1023 ahci_slotsalloc(device_t dev)
1024 {
1025 	struct ahci_channel *ch = device_get_softc(dev);
1026 	int i;
1027 
1028 	/* Alloc and setup command/dma slots */
1029 	bzero(ch->slot, sizeof(ch->slot));
1030 	for (i = 0; i < ch->numslots; i++) {
1031 		struct ahci_slot *slot = &ch->slot[i];
1032 
1033 		slot->ch = ch;
1034 		slot->slot = i;
1035 		slot->state = AHCI_SLOT_EMPTY;
1036 		slot->ccb = NULL;
1037 		callout_init_mtx(&slot->timeout, &ch->mtx, 0);
1038 
1039 		if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map))
1040 			device_printf(ch->dev, "FAILURE - create data_map\n");
1041 	}
1042 }
1043 
1044 static void
1045 ahci_slotsfree(device_t dev)
1046 {
1047 	struct ahci_channel *ch = device_get_softc(dev);
1048 	int i;
1049 
1050 	/* Free all dma slots */
1051 	for (i = 0; i < ch->numslots; i++) {
1052 		struct ahci_slot *slot = &ch->slot[i];
1053 
1054 		callout_drain(&slot->timeout);
1055 		if (slot->dma.data_map) {
1056 			bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map);
1057 			slot->dma.data_map = NULL;
1058 		}
1059 	}
1060 }
1061 
1062 static int
1063 ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr)
1064 {
1065 
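	/*
	 * With power management disabled a PhyRdy change signals hot-plug.
	 * With PM enabled (or while listening for new devices) PhyRdy also
	 * toggles on interface power state transitions, so the device
	 * exchange bit is used instead.
	 */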
1066 	if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) ||
1067 	    ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) {
1068 		u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS);
1069 		union ccb *ccb;
1070 
1071 		if (bootverbose) {
1072 			if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE)
1073 				device_printf(ch->dev, "CONNECT requested\n");
1074 			else
1075 				device_printf(ch->dev, "DISCONNECT requested\n");
1076 		}
1077 		ahci_reset(ch);
1078 		if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
1079 			return (0);
1080 		if (xpt_create_path(&ccb->ccb_h.path, NULL,
1081 		    cam_sim_path(ch->sim),
1082 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1083 			xpt_free_ccb(ccb);
1084 			return (0);
1085 		}
1086 		xpt_rescan(ccb);
1087 		return (1);
1088 	}
1089 	return (0);
1090 }
1091 
1092 static void
1093 ahci_cpd_check_events(struct ahci_channel *ch)
1094 {
1095 	u_int32_t status;
1096 	union ccb *ccb;
1097 	device_t dev;
1098 
1099 	if (ch->pm_level == 0)
1100 		return;
1101 
1102 	status = ATA_INL(ch->r_mem, AHCI_P_CMD);
1103 	if ((status & AHCI_P_CMD_CPD) == 0)
1104 		return;
1105 
1106 	if (bootverbose) {
1107 		dev = ch->dev;
1108 		if (status & AHCI_P_CMD_CPS) {
1109 			device_printf(dev, "COLD CONNECT requested\n");
1110 		} else
1111 			device_printf(dev, "COLD DISCONNECT requested\n");
1112 	}
1113 	ahci_reset(ch);
1114 	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
1115 		return;
1116 	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim),
1117 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1118 		xpt_free_ccb(ccb);
1119 		return;
1120 	}
1121 	xpt_rescan(ccb);
1122 }
1123 
1124 static void
1125 ahci_notify_events(struct ahci_channel *ch, u_int32_t status)
1126 {
1127 	struct cam_path *dpath;
1128 	int i;
1129 
1130 	if (ch->caps & AHCI_CAP_SSNTF)
1131 		ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status);
1132 	if (bootverbose)
1133 		device_printf(ch->dev, "SNTF 0x%04x\n", status);
1134 	for (i = 0; i < 16; i++) {
1135 		if ((status & (1 << i)) == 0)
1136 			continue;
1137 		if (xpt_create_path(&dpath, NULL,
1138 		    xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) {
1139 			xpt_async(AC_SCSI_AEN, dpath, NULL);
1140 			xpt_free_path(dpath);
1141 		}
1142 	}
1143 }
1144 
1145 static void
1146 ahci_done(struct ahci_channel *ch, union ccb *ccb)
1147 {
1148 
1149 	mtx_assert(&ch->mtx, MA_OWNED);
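	/*
	 * While batching (direct interrupt processing) queued I/O
	 * completions are deferred to ch->doneq and finished with
	 * xpt_done_direct() after the channel lock is dropped; everything
	 * else is completed here.
	 */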
1150 	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 ||
1151 	    ch->batch == 0) {
1152 		xpt_done(ccb);
1153 		return;
1154 	}
1155 
1156 	STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe);
1157 }
1158 
1159 static void
1160 ahci_ch_intr(void *arg)
1161 {
1162 	struct ahci_channel *ch = (struct ahci_channel *)arg;
1163 	uint32_t istatus;
1164 
1165 	/* Read interrupt statuses. */
1166 	istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
1167 	if (istatus == 0)
1168 		return;
1169 
1170 	mtx_lock(&ch->mtx);
1171 	ahci_ch_intr_main(ch, istatus);
1172 	mtx_unlock(&ch->mtx);
1173 }
1174 
1175 static void
1176 ahci_ch_intr_direct(void *arg)
1177 {
1178 	struct ahci_channel *ch = (struct ahci_channel *)arg;
1179 	struct ccb_hdr *ccb_h;
1180 	uint32_t istatus;
1181 	STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq);
1182 
1183 	/* Read interrupt statuses. */
1184 	istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
1185 	if (istatus == 0)
1186 		return;
1187 
1188 	mtx_lock(&ch->mtx);
1189 	ch->batch = 1;
1190 	ahci_ch_intr_main(ch, istatus);
1191 	ch->batch = 0;
1192 	/*
1193 	 * Prevent the possibility of issues caused by processing the queue
1194 	 * while unlocked below by moving the contents to a local queue.
1195 	 */
1196 	STAILQ_CONCAT(&tmp_doneq, &ch->doneq);
1197 	mtx_unlock(&ch->mtx);
1198 	while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) {
1199 		STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe);
1200 		xpt_done_direct((union ccb *)ccb_h);
1201 	}
1202 }
1203 
1204 static void
1205 ahci_ch_pm(void *arg)
1206 {
1207 	struct ahci_channel *ch = (struct ahci_channel *)arg;
1208 	uint32_t work;
1209 
1210 	if (ch->numrslots != 0)
1211 		return;
1212 	work = ATA_INL(ch->r_mem, AHCI_P_CMD);
1213 	if (ch->pm_level == 4)
1214 		work |= AHCI_P_CMD_PARTIAL;
1215 	else
1216 		work |= AHCI_P_CMD_SLUMBER;
1217 	ATA_OUTL(ch->r_mem, AHCI_P_CMD, work);
1218 }
1219 
1220 static void
1221 ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus)
1222 {
1223 	uint32_t cstatus, serr = 0, sntf = 0, ok, err;
1224 	enum ahci_err_type et;
1225 	int i, ccs, port, reset = 0;
1226 
1227 	/* Clear interrupt statuses. */
1228 	ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus);
1229 	/* Read command statuses. */
1230 	if (ch->numtslots != 0)
1231 		cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
1232 	else
1233 		cstatus = 0;
1234 	if (ch->numrslots != ch->numtslots)
1235 		cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI);
1236 	/* Read SNTF in one of the possible ways. */
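	/*
	 * Either read the PxSNTF register (if the HBA supports SNotification)
	 * or check the notification bit of the received Set Device Bits FIS:
	 * one 256-byte receive area per PMP target with FBS enabled, a
	 * single area otherwise.
	 */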
1237 	if ((istatus & AHCI_P_IX_SDB) &&
1238 	    (ch->pm_present || ch->curr[0].atapi != 0)) {
1239 		if (ch->caps & AHCI_CAP_SSNTF)
1240 			sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF);
1241 		else if (ch->fbs_enabled) {
1242 			u_int8_t *fis = ch->dma.rfis + 0x58;
1243 
1244 			for (i = 0; i < 16; i++) {
1245 				if (fis[1] & 0x80) {
1246 					fis[1] &= 0x7f;
1247 					sntf |= 1 << i;
1248 				}
1249 				fis += 256;
1250 			}
1251 		} else {
1252 			u_int8_t *fis = ch->dma.rfis + 0x58;
1253 
1254 			if (fis[1] & 0x80)
1255 				sntf = (1 << (fis[1] & 0x0f));
1256 		}
1257 	}
1258 	/* Process PHY events */
1259 	if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF |
1260 	    AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
1261 		serr = ATA_INL(ch->r_mem, AHCI_P_SERR);
1262 		if (serr) {
1263 			ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr);
1264 			reset = ahci_phy_check_events(ch, serr);
1265 		}
1266 	}
1267 	/* Process cold presence detection events */
1268 	if ((istatus & AHCI_P_IX_CPD) && !reset)
1269 		ahci_cpd_check_events(ch);
1270 	/* Process command errors */
1271 	if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF |
1272 	    AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
1273 		ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
1274 		    >> AHCI_P_CMD_CCS_SHIFT;
1275 //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n",
1276 //    __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD),
1277 //    serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs);
1278 		port = -1;
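		/*
		 * With FBS the failing PMP port is reported in PxFBS.DWE when
		 * SDE is set; otherwise guess it from which ports still have
		 * outstanding commands.  port == -2 means the port is
		 * ambiguous and every outstanding command is suspect.
		 */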
1279 		if (ch->fbs_enabled) {
1280 			uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS);
1281 			if (fbs & AHCI_P_FBS_SDE) {
1282 				port = (fbs & AHCI_P_FBS_DWE)
1283 				    >> AHCI_P_FBS_DWE_SHIFT;
1284 			} else {
1285 				for (i = 0; i < 16; i++) {
1286 					if (ch->numrslotspd[i] == 0)
1287 						continue;
1288 					if (port == -1)
1289 						port = i;
1290 					else if (port != i) {
1291 						port = -2;
1292 						break;
1293 					}
1294 				}
1295 			}
1296 		}
1297 		err = ch->rslots & cstatus;
1298 	} else {
1299 		ccs = 0;
1300 		err = 0;
1301 		port = -1;
1302 	}
1303 	/* Complete all successful commands. */
1304 	ok = ch->rslots & ~cstatus;
1305 	for (i = 0; i < ch->numslots; i++) {
1306 		if ((ok >> i) & 1)
1307 			ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE);
1308 	}
1309 	/* On error, complete the rest of commands with error statuses. */
1310 	if (err) {
1311 		if (ch->frozen) {
1312 			union ccb *fccb = ch->frozen;
1313 			ch->frozen = NULL;
1314 			fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
1315 			if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
1316 				xpt_freeze_devq(fccb->ccb_h.path, 1);
1317 				fccb->ccb_h.status |= CAM_DEV_QFRZN;
1318 			}
1319 			ahci_done(ch, fccb);
1320 		}
1321 		for (i = 0; i < ch->numslots; i++) {
1322 			/* XXX: requests in loading state. */
1323 			if (((err >> i) & 1) == 0)
1324 				continue;
1325 			if (port >= 0 &&
1326 			    ch->slot[i].ccb->ccb_h.target_id != port)
1327 				continue;
1328 			if (istatus & AHCI_P_IX_TFE) {
1329 			    if (port != -2) {
1330 				/* Task File Error */
1331 				if (ch->numtslotspd[
1332 				    ch->slot[i].ccb->ccb_h.target_id] == 0) {
1333 					/* Untagged operation. */
1334 					if (i == ccs)
1335 						et = AHCI_ERR_TFE;
1336 					else
1337 						et = AHCI_ERR_INNOCENT;
1338 				} else {
1339 					/* Tagged operation. */
1340 					et = AHCI_ERR_NCQ;
1341 				}
1342 			    } else {
1343 				et = AHCI_ERR_TFE;
1344 				ch->fatalerr = 1;
1345 			    }
1346 			} else if (istatus & AHCI_P_IX_IF) {
1347 				if (ch->numtslots == 0 && i != ccs && port != -2)
1348 					et = AHCI_ERR_INNOCENT;
1349 				else
1350 					et = AHCI_ERR_SATA;
1351 			} else
1352 				et = AHCI_ERR_INVALID;
1353 			ahci_end_transaction(&ch->slot[i], et);
1354 		}
1355 		/*
1356 		 * We can't reinit the port while some other commands
1357 		 * are still active; use resume to complete them.
1358 		 */
1359 		if (ch->rslots != 0 && !ch->recoverycmd)
1360 			ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC);
1361 	}
1362 	/* Process NOTIFY events */
1363 	if (sntf)
1364 		ahci_notify_events(ch, sntf);
1365 }
1366 
1367 /* Must be called with channel locked. */
1368 static int
1369 ahci_check_collision(struct ahci_channel *ch, union ccb *ccb)
1370 {
1371 	int t = ccb->ccb_h.target_id;
1372 
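	/*
	 * A collision means this command cannot be issued alongside the
	 * commands already running: NCQ and non-NCQ commands may not be
	 * mixed (per target with FBS, per channel otherwise), and atomic
	 * (reset/result-bearing) commands must run alone.
	 */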
1373 	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1374 	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1375 		/* Tagged command while no supported tag is free. */
1376 		if (((~ch->oslots) & (0xffffffff >> (32 -
1377 		    ch->curr[t].tags))) == 0)
1378 			return (1);
1379 		/* If we have FBS */
1380 		if (ch->fbs_enabled) {
1381 			/* Tagged command while untagged are active. */
1382 			if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0)
1383 				return (1);
1384 		} else {
1385 			/* Tagged command while untagged are active. */
1386 			if (ch->numrslots != 0 && ch->numtslots == 0)
1387 				return (1);
1388 			/* Tagged command while tagged to other target is active. */
1389 			if (ch->numtslots != 0 &&
1390 			    ch->taggedtarget != ccb->ccb_h.target_id)
1391 				return (1);
1392 		}
1393 	} else {
1394 		/* If we have FBS */
1395 		if (ch->fbs_enabled) {
1396 			/* Untagged command while tagged are active. */
1397 			if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0)
1398 				return (1);
1399 		} else {
1400 			/* Untagged command while tagged are active. */
1401 			if (ch->numrslots != 0 && ch->numtslots != 0)
1402 				return (1);
1403 		}
1404 	}
1405 	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1406 	    (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) {
1407 		/* Atomic command while anything active. */
1408 		if (ch->numrslots != 0)
1409 			return (1);
1410 	}
1411 	/* We have some atomic command running. */
1412 	if (ch->aslots != 0)
1413 		return (1);
1414 	return (0);
1415 }
1416 
1417 /* Must be called with channel locked. */
1418 static void
1419 ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb)
1420 {
1421 	struct ahci_slot *slot;
1422 	int tag, tags;
1423 
1424 	/* Choose empty slot. */
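	/*
	 * Slots are picked in round-robin order starting after the last
	 * used one, so tags are spread across the available range instead
	 * of always reusing the lowest free slot.
	 */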
1425 	tags = ch->numslots;
1426 	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1427 	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA))
1428 		tags = ch->curr[ccb->ccb_h.target_id].tags;
1429 	if (ch->lastslot + 1 < tags)
1430 		tag = ffs(~(ch->oslots >> (ch->lastslot + 1)));
1431 	else
1432 		tag = 0;
1433 	if (tag == 0 || tag + ch->lastslot >= tags)
1434 		tag = ffs(~ch->oslots) - 1;
1435 	else
1436 		tag += ch->lastslot;
1437 	ch->lastslot = tag;
1438 	/* Occupy chosen slot. */
1439 	slot = &ch->slot[tag];
1440 	slot->ccb = ccb;
1441 	/* Stop PM timer. */
1442 	if (ch->numrslots == 0 && ch->pm_level > 3)
1443 		callout_stop(&ch->pm_timer);
1444 	/* Update channel stats. */
1445 	ch->oslots |= (1 << tag);
1446 	ch->numrslots++;
1447 	ch->numrslotspd[ccb->ccb_h.target_id]++;
1448 	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1449 	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1450 		ch->numtslots++;
1451 		ch->numtslotspd[ccb->ccb_h.target_id]++;
1452 		ch->taggedtarget = ccb->ccb_h.target_id;
1453 	}
1454 	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1455 	    (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT)))
1456 		ch->aslots |= (1 << tag);
1457 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1458 		slot->state = AHCI_SLOT_LOADING;
1459 		bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb,
1460 		    ahci_dmasetprd, slot, 0);
1461 	} else {
1462 		slot->dma.nsegs = 0;
1463 		ahci_execute_transaction(slot);
1464 	}
1465 }
1466 
1467 /* Locked by busdma engine. */
1468 static void
1469 ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1470 {
1471 	struct ahci_slot *slot = arg;
1472 	struct ahci_channel *ch = slot->ch;
1473 	struct ahci_cmd_tab *ctp;
1474 	struct ahci_dma_prd *prd;
1475 	int i;
1476 
1477 	if (error) {
1478 		device_printf(ch->dev, "DMA load error\n");
1479 		ahci_end_transaction(slot, AHCI_ERR_INVALID);
1480 		return;
1481 	}
1482 	KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n"));
1483 	/* Get a piece of the workspace for this request */
1484 	ctp = (struct ahci_cmd_tab *)
1485 		(ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot));
1486 	/* Fill S/G table */
1487 	prd = &ctp->prd_tab[0];
1488 	for (i = 0; i < nsegs; i++) {
1489 		prd[i].dba = htole64(segs[i].ds_addr);
1490 		prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK);
1491 	}
1492 	slot->dma.nsegs = nsegs;
1493 	bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
1494 	    ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ?
1495 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
1496 	ahci_execute_transaction(slot);
1497 }
1498 
1499 /* Must be called with channel locked. */
1500 static void
1501 ahci_execute_transaction(struct ahci_slot *slot)
1502 {
1503 	struct ahci_channel *ch = slot->ch;
1504 	struct ahci_cmd_tab *ctp;
1505 	struct ahci_cmd_list *clp;
1506 	union ccb *ccb = slot->ccb;
1507 	int port = ccb->ccb_h.target_id & 0x0f;
1508 	int fis_size, i, softreset;
1509 	uint8_t *fis = ch->dma.rfis + 0x40;
1510 	uint8_t val;
1511 
1512 	/* Get a piece of the workspace for this request */
1513 	ctp = (struct ahci_cmd_tab *)
1514 		(ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot));
1515 	/* Setup the FIS for this request */
1516 	if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) {
1517 		device_printf(ch->dev, "Setting up SATA FIS failed\n");
1518 		ahci_end_transaction(slot, AHCI_ERR_INVALID);
1519 		return;
1520 	}
1521 	/* Setup the command list entry */
1522 	clp = (struct ahci_cmd_list *)
1523 	    (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
1524 	clp->cmd_flags = htole16(
1525 		    (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) |
1526 		    (ccb->ccb_h.func_code == XPT_SCSI_IO ?
1527 		     (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) |
1528 		    (fis_size / sizeof(u_int32_t)) |
1529 		    (port << 12));
1530 	clp->prd_length = htole16(slot->dma.nsegs);
1531 	/* Special handling for Soft Reset command. */
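	/*
	 * Soft reset is a two-phase sequence: phase 1 (SRST set) restarts
	 * the controller with a Command List Override, phase 2 (SRST
	 * cleared) is polled for the device signature, so the FIS receive
	 * area is pre-filled with 0xff to detect its arrival.
	 */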
1532 	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1533 	    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) {
1534 		if (ccb->ataio.cmd.control & ATA_A_RESET) {
1535 			softreset = 1;
1536 			/* Kick controller into sane state */
1537 			ahci_stop(ch);
1538 			ahci_clo(ch);
1539 			ahci_start(ch, 0);
1540 			clp->cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY;
1541 		} else {
1542 			softreset = 2;
1543 			/* Prepare FIS receive area for check. */
1544 			for (i = 0; i < 20; i++)
1545 				fis[i] = 0xff;
1546 		}
1547 	} else
1548 		softreset = 0;
1549 	clp->bytecount = 0;
1550 	clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET +
1551 				  (AHCI_CT_SIZE * slot->slot));
1552 	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
1553 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1554 	bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
1555 	    BUS_DMASYNC_PREREAD);
1556 	/* Set ACTIVE bit for NCQ commands. */
1557 	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1558 	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1559 		ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot);
1560 	}
1561 	/* If FBS is enabled, set PMP port. */
1562 	if (ch->fbs_enabled) {
1563 		ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN |
1564 		    (port << AHCI_P_FBS_DEV_SHIFT));
1565 	}
1566 	/* Issue command to the controller. */
1567 	slot->state = AHCI_SLOT_RUNNING;
1568 	ch->rslots |= (1 << slot->slot);
1569 	ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot));
1570 	/* Device reset commands don't interrupt. Poll them. */
1571 	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1572 	    (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) {
1573 		int count, timeout = ccb->ccb_h.timeout * 100;
1574 		enum ahci_err_type et = AHCI_ERR_NONE;
1575 
1576 		for (count = 0; count < timeout; count++) {
1577 			DELAY(10);
1578 			if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot)))
1579 				break;
1580 			if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) &&
1581 			    softreset != 1) {
1582 #if 0
1583 				device_printf(ch->dev,
1584 				    "Poll error on slot %d, TFD: %04x\n",
1585 				    slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD));
1586 #endif
1587 				et = AHCI_ERR_TFE;
1588 				break;
1589 			}
1590 			/* Workaround for ATI SB600/SB700 chipsets. */
1591 			if (ccb->ccb_h.target_id == 15 &&
1592 			    (ch->quirks & AHCI_Q_ATI_PMP_BUG) &&
1593 			    (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) {
1594 				et = AHCI_ERR_TIMEOUT;
1595 				break;
1596 			}
1597 		}
1598 
1599 		/*
1600 		 * Marvell HBAs with non-RAID firmware do not wait for
1601 		 * readiness after soft reset, so we have to wait here.
1602 		 * Marvell RAIDs do not have this problem, but instead
1603 		 * sometimes forget to update FIS receive area, breaking
1604 		 * this wait.
1605 		 */
1606 		if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 &&
1607 		    (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 &&
1608 		    softreset == 2 && et == AHCI_ERR_NONE) {
1609 			while ((val = fis[2]) & ATA_S_BUSY) {
1610 				DELAY(10);
1611 				if (count++ >= timeout)
1612 					break;
1613 			}
1614 		}
1615 
1616 		if (timeout && (count >= timeout)) {
1617 			device_printf(ch->dev, "Poll timeout on slot %d port %d\n",
1618 			    slot->slot, port);
1619 			device_printf(ch->dev, "is %08x cs %08x ss %08x "
1620 			    "rs %08x tfd %02x serr %08x cmd %08x\n",
1621 			    ATA_INL(ch->r_mem, AHCI_P_IS),
1622 			    ATA_INL(ch->r_mem, AHCI_P_CI),
1623 			    ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
1624 			    ATA_INL(ch->r_mem, AHCI_P_TFD),
1625 			    ATA_INL(ch->r_mem, AHCI_P_SERR),
1626 			    ATA_INL(ch->r_mem, AHCI_P_CMD));
1627 			et = AHCI_ERR_TIMEOUT;
1628 		}
1629 
1630 		/* Kick controller into sane state and enable FBS. */
1631 		if (softreset == 2)
1632 			ch->eslots |= (1 << slot->slot);
1633 		ahci_end_transaction(slot, et);
1634 		return;
1635 	}
1636 	/* Start command execution timeout */
1637 	callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2,
1638 	    0, (timeout_t*)ahci_timeout, slot, 0);
1639 	return;
1640 }
1641 
1642 /* Must be called with channel locked. */
1643 static void
1644 ahci_process_timeout(struct ahci_channel *ch)
1645 {
1646 	int i;
1647 
1648 	mtx_assert(&ch->mtx, MA_OWNED);
1649 	/* Handle the rest of commands. */
1650 	for (i = 0; i < ch->numslots; i++) {
1651 		/* Do we have a running request on slot? */
1652 		if (ch->slot[i].state < AHCI_SLOT_RUNNING)
1653 			continue;
1654 		ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT);
1655 	}
1656 }
1657 
1658 /* Must be called with channel locked. */
1659 static void
1660 ahci_rearm_timeout(struct ahci_channel *ch)
1661 {
1662 	int i;
1663 
1664 	mtx_assert(&ch->mtx, MA_OWNED);
1665 	for (i = 0; i < ch->numslots; i++) {
1666 		struct ahci_slot *slot = &ch->slot[i];
1667 
1668 		/* Do we have a running request on slot? */
1669 		if (slot->state < AHCI_SLOT_RUNNING)
1670 			continue;
1671 		if ((ch->toslots & (1 << i)) == 0)
1672 			continue;
1673 		callout_reset_sbt(&slot->timeout,
1674 		    SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0,
1675 		    (timeout_t*)ahci_timeout, slot, 0);
1676 	}
1677 }
1678 
1679 /* Locked by callout mechanism. */
1680 static void
1681 ahci_timeout(struct ahci_slot *slot)
1682 {
1683 	struct ahci_channel *ch = slot->ch;
1684 	device_t dev = ch->dev;
1685 	uint32_t sstatus;
1686 	int ccs;
1687 	int i;
1688 
1689 	/* Check for stale timeout. */
1690 	if (slot->state < AHCI_SLOT_RUNNING)
1691 		return;
1692 
1693 	/* Check if slot was not being executed last time we checked. */
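	/*
	 * The callout fires at half the CCB timeout: the first expiry may
	 * only mark the slot as executing (it can legitimately be queued
	 * behind other commands) and re-arm; a timeout is reported only
	 * once the slot has been seen executing.
	 */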
1694 	if (slot->state < AHCI_SLOT_EXECUTING) {
1695 		/* Check if slot started executing. */
1696 		sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
1697 		ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
1698 		    >> AHCI_P_CMD_CCS_SHIFT;
1699 		if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot ||
1700 		    ch->fbs_enabled || ch->wrongccs)
1701 			slot->state = AHCI_SLOT_EXECUTING;
1702 		else if ((ch->rslots & (1 << ccs)) == 0) {
1703 			ch->wrongccs = 1;
1704 			slot->state = AHCI_SLOT_EXECUTING;
1705 		}
1706 
1707 		callout_reset_sbt(&slot->timeout,
1708 	    	    SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0,
1709 		    (timeout_t*)ahci_timeout, slot, 0);
1710 		return;
1711 	}
1712 
1713 	device_printf(dev, "Timeout on slot %d port %d\n",
1714 	    slot->slot, slot->ccb->ccb_h.target_id & 0x0f);
1715 	device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x "
1716 	    "serr %08x cmd %08x\n",
1717 	    ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI),
1718 	    ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
1719 	    ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR),
1720 	    ATA_INL(ch->r_mem, AHCI_P_CMD));
1721 
1722 	/* Handle frozen command. */
1723 	if (ch->frozen) {
1724 		union ccb *fccb = ch->frozen;
1725 		ch->frozen = NULL;
1726 		fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
1727 		if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
1728 			xpt_freeze_devq(fccb->ccb_h.path, 1);
1729 			fccb->ccb_h.status |= CAM_DEV_QFRZN;
1730 		}
1731 		ahci_done(ch, fccb);
1732 	}
1733 	if (!ch->fbs_enabled && !ch->wrongccs) {
1734 		/* Without FBS we know the real source of the timeout. */
1735 		ch->fatalerr = 1;
1736 		/* Handle command with timeout. */
1737 		ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT);
1738 		/* Handle the remaining commands. */
1739 		for (i = 0; i < ch->numslots; i++) {
1740 			/* Do we have a running request on slot? */
1741 			if (ch->slot[i].state < AHCI_SLOT_RUNNING)
1742 				continue;
1743 			ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
1744 		}
1745 	} else {
1746 		/* With FBS we wait for the other commands to time out and pray. */
1747 		if (ch->toslots == 0)
1748 			xpt_freeze_simq(ch->sim, 1);
1749 		ch->toslots |= (1 << slot->slot);
1750 		if ((ch->rslots & ~ch->toslots) == 0)
1751 			ahci_process_timeout(ch);
1752 		else
1753 			device_printf(dev, " ... waiting for slots %08x\n",
1754 			    ch->rslots & ~ch->toslots);
1755 	}
1756 }
1757 
1758 /* Must be called with channel locked. */
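/*
 * Complete the command in the given slot: collect result and residual data,
 * unload the DMA map, translate the AHCI error type into a CAM status, free
 * the slot, and trigger any follow-up work (reset sequence continuation,
 * error recovery, port reset/reinit, frozen command restart, PM timer).
 */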
1759 static void
1760 ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
1761 {
1762 	struct ahci_channel *ch = slot->ch;
1763 	union ccb *ccb = slot->ccb;
1764 	struct ahci_cmd_list *clp;
1765 	int lastto;
1766 	uint32_t sig;
1767 
1768 	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
1769 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1770 	clp = (struct ahci_cmd_list *)
1771 	    (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
1772 	/* Read the result registers into the result structure.
1773 	 * They may be incorrect if several commands finished at the same
1774 	 * time, so read them only when we are sure, or when we have to.
1775 	 */
1776 	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1777 		struct ata_res *res = &ccb->ataio.res;
1778 
1779 		if ((et == AHCI_ERR_TFE) ||
1780 		    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) {
1781 			u_int8_t *fis = ch->dma.rfis + 0x40;
1782 
1783 			bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
1784 			    BUS_DMASYNC_POSTREAD);
1785 			if (ch->fbs_enabled) {
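				/* With FIS-based switching each PM target has
				 * its own 256-byte received FIS area. */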
1786 				fis += ccb->ccb_h.target_id * 256;
1787 				res->status = fis[2];
1788 				res->error = fis[3];
1789 			} else {
1790 				uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD);
1791 
1792 				res->status = tfd;
1793 				res->error = tfd >> 8;
1794 			}
1795 			res->lba_low = fis[4];
1796 			res->lba_mid = fis[5];
1797 			res->lba_high = fis[6];
1798 			res->device = fis[7];
1799 			res->lba_low_exp = fis[8];
1800 			res->lba_mid_exp = fis[9];
1801 			res->lba_high_exp = fis[10];
1802 			res->sector_count = fis[12];
1803 			res->sector_count_exp = fis[13];
1804 
1805 			/*
1806 			 * Some weird controllers do not return signature in
1807 			 * FIS receive area. Read it from PxSIG register.
1808 			 */
1809 			if ((ch->quirks & AHCI_Q_ALTSIG) &&
1810 			    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1811 			    (ccb->ataio.cmd.control & ATA_A_RESET) == 0) {
1812 				sig = ATA_INL(ch->r_mem,  AHCI_P_SIG);
1813 				res->lba_high = sig >> 24;
1814 				res->lba_mid = sig >> 16;
1815 				res->lba_low = sig >> 8;
1816 				res->sector_count = sig;
1817 			}
1818 		} else
1819 			bzero(res, sizeof(*res));
1820 		if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 &&
1821 		    (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1822 		    (ch->quirks & AHCI_Q_NOCOUNT) == 0) {
1823 			ccb->ataio.resid =
1824 			    ccb->ataio.dxfer_len - le32toh(clp->bytecount);
1825 		}
1826 	} else {
1827 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1828 		    (ch->quirks & AHCI_Q_NOCOUNT) == 0) {
1829 			ccb->csio.resid =
1830 			    ccb->csio.dxfer_len - le32toh(clp->bytecount);
1831 		}
1832 	}
1833 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1834 		bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
1835 		    (ccb->ccb_h.flags & CAM_DIR_IN) ?
1836 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1837 		bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map);
1838 	}
1839 	if (et != AHCI_ERR_NONE)
1840 		ch->eslots |= (1 << slot->slot);
1841 	/* In case of error, freeze device for proper recovery. */
1842 	if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) &&
1843 	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1844 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1845 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1846 	}
1847 	/* Set proper result status. */
1848 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1849 	switch (et) {
1850 	case AHCI_ERR_NONE:
1851 		ccb->ccb_h.status |= CAM_REQ_CMP;
1852 		if (ccb->ccb_h.func_code == XPT_SCSI_IO)
1853 			ccb->csio.scsi_status = SCSI_STATUS_OK;
1854 		break;
1855 	case AHCI_ERR_INVALID:
1856 		ch->fatalerr = 1;
1857 		ccb->ccb_h.status |= CAM_REQ_INVALID;
1858 		break;
1859 	case AHCI_ERR_INNOCENT:
1860 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1861 		break;
1862 	case AHCI_ERR_TFE:
1863 	case AHCI_ERR_NCQ:
1864 		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1865 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1866 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1867 		} else {
1868 			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
1869 		}
1870 		break;
1871 	case AHCI_ERR_SATA:
1872 		ch->fatalerr = 1;
1873 		if (!ch->recoverycmd) {
1874 			xpt_freeze_simq(ch->sim, 1);
1875 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1876 			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1877 		}
1878 		ccb->ccb_h.status |= CAM_UNCOR_PARITY;
1879 		break;
1880 	case AHCI_ERR_TIMEOUT:
1881 		if (!ch->recoverycmd) {
1882 			xpt_freeze_simq(ch->sim, 1);
1883 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1884 			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1885 		}
1886 		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1887 		break;
1888 	default:
1889 		ch->fatalerr = 1;
1890 		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1891 	}
1892 	/* Free slot. */
1893 	ch->oslots &= ~(1 << slot->slot);
1894 	ch->rslots &= ~(1 << slot->slot);
1895 	ch->aslots &= ~(1 << slot->slot);
1896 	slot->state = AHCI_SLOT_EMPTY;
1897 	slot->ccb = NULL;
1898 	/* Update channel stats. */
1899 	ch->numrslots--;
1900 	ch->numrslotspd[ccb->ccb_h.target_id]--;
1901 	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1902 	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1903 		ch->numtslots--;
1904 		ch->numtslotspd[ccb->ccb_h.target_id]--;
1905 	}
1906 	/* Cancel timeout state if request completed normally. */
1907 	if (et != AHCI_ERR_TIMEOUT) {
1908 		lastto = (ch->toslots == (1 << slot->slot));
1909 		ch->toslots &= ~(1 << slot->slot);
1910 		if (lastto)
1911 			xpt_release_simq(ch->sim, TRUE);
1912 	}
1913 	/* If it was the first request of a reset sequence and there was no
1914 	 * error, proceed to the second request. */
1915 	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1916 	    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1917 	    (ccb->ataio.cmd.control & ATA_A_RESET) &&
1918 	    et == AHCI_ERR_NONE) {
1919 		ccb->ataio.cmd.control &= ~ATA_A_RESET;
1920 		ahci_begin_transaction(ch, ccb);
1921 		return;
1922 	}
1923 	/* If it was our READ LOG command - process it. */
1924 	if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) {
1925 		ahci_process_read_log(ch, ccb);
1926 	/* If it was our REQUEST SENSE command - process it. */
1927 	} else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) {
1928 		ahci_process_request_sense(ch, ccb);
1929 	/* If it was an NCQ or ATAPI command error, put the result on hold. */
1930 	} else if (et == AHCI_ERR_NCQ ||
1931 	    ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
1932 	     (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) {
1933 		ch->hold[slot->slot] = ccb;
1934 		ch->numhslots++;
1935 	} else
1936 		ahci_done(ch, ccb);
1937 	/* If we have no other active commands, ... */
1938 	if (ch->rslots == 0) {
1939 		/* If there was a fatal error, reset the port. */
1940 		if (ch->toslots != 0 || ch->fatalerr) {
1941 			ahci_reset(ch);
1942 		} else {
1943 			/* If we have slots in error, we can reinit the port. */
1944 			if (ch->eslots != 0) {
1945 				ahci_stop(ch);
1946 				ahci_clo(ch);
1947 				ahci_start(ch, 1);
1948 			}
1949 			/* If there are commands on hold, we can issue READ LOG. */
1950 			if (!ch->recoverycmd && ch->numhslots)
1951 				ahci_issue_recovery(ch);
1952 		}
1953 	/* If all the remaining commands have timed out, give them a chance. */
1954 	} else if ((ch->rslots & ~ch->toslots) == 0 &&
1955 	    et != AHCI_ERR_TIMEOUT)
1956 		ahci_rearm_timeout(ch);
1957 	/* Unfreeze frozen command. */
1958 	if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) {
1959 		union ccb *fccb = ch->frozen;
1960 		ch->frozen = NULL;
1961 		ahci_begin_transaction(ch, fccb);
1962 		xpt_release_simq(ch->sim, TRUE);
1963 	}
1964 	/* Start PM timer. */
1965 	if (ch->numrslots == 0 && ch->pm_level > 3 &&
1966 	    (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) {
1967 		callout_schedule(&ch->pm_timer,
1968 		    (ch->pm_level == 4) ? hz / 1000 : hz / 8);
1969 	}
1970 }
1971 
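/*
 * Start error recovery for commands kept on hold: issue READ LOG EXT to
 * fetch NCQ error details for ATA commands, or REQUEST SENSE for an ATAPI
 * command, reusing the CCB header of one of the held commands.
 */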
1972 static void
1973 ahci_issue_recovery(struct ahci_channel *ch)
1974 {
1975 	union ccb *ccb;
1976 	struct ccb_ataio *ataio;
1977 	struct ccb_scsiio *csio;
1978 	int i;
1979 
1980 	/* Find some held command. */
1981 	for (i = 0; i < ch->numslots; i++) {
1982 		if (ch->hold[i])
1983 			break;
1984 	}
1985 	ccb = xpt_alloc_ccb_nowait();
1986 	if (ccb == NULL) {
1987 		device_printf(ch->dev, "Unable to allocate recovery command\n");
1988 completeall:
1989 		/* We can't do anything -- complete held commands. */
1990 		for (i = 0; i < ch->numslots; i++) {
1991 			if (ch->hold[i] == NULL)
1992 				continue;
1993 			ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
1994 			ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL;
1995 			ahci_done(ch, ch->hold[i]);
1996 			ch->hold[i] = NULL;
1997 			ch->numhslots--;
1998 		}
1999 		ahci_reset(ch);
2000 		return;
2001 	}
2002 	ccb->ccb_h = ch->hold[i]->ccb_h;	/* Reuse old header. */
2003 	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
2004 		/* READ LOG */
2005 		ccb->ccb_h.recovery_type = RECOVERY_READ_LOG;
2006 		ccb->ccb_h.func_code = XPT_ATA_IO;
2007 		ccb->ccb_h.flags = CAM_DIR_IN;
2008 		ccb->ccb_h.timeout = 1000;	/* 1s should be enough. */
2009 		ataio = &ccb->ataio;
2010 		ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT);
2011 		if (ataio->data_ptr == NULL) {
2012 			xpt_free_ccb(ccb);
2013 			device_printf(ch->dev,
2014 			    "Unable to allocate memory for READ LOG command\n");
2015 			goto completeall;
2016 		}
2017 		ataio->dxfer_len = 512;
2018 		bzero(&ataio->cmd, sizeof(ataio->cmd));
2019 		ataio->cmd.flags = CAM_ATAIO_48BIT;
2020 		ataio->cmd.command = 0x2F;	/* READ LOG EXT */
2021 		ataio->cmd.sector_count = 1;
2022 		ataio->cmd.sector_count_exp = 0;
2023 		ataio->cmd.lba_low = 0x10;
2024 		ataio->cmd.lba_mid = 0;
2025 		ataio->cmd.lba_mid_exp = 0;
2026 	} else {
2027 		/* REQUEST SENSE */
2028 		ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE;
2029 		ccb->ccb_h.recovery_slot = i;
2030 		ccb->ccb_h.func_code = XPT_SCSI_IO;
2031 		ccb->ccb_h.flags = CAM_DIR_IN;
2032 		ccb->ccb_h.status = 0;
2033 		ccb->ccb_h.timeout = 1000;	/* 1s should be enough. */
2034 		csio = &ccb->csio;
2035 		csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data;
2036 		csio->dxfer_len = ch->hold[i]->csio.sense_len;
2037 		csio->cdb_len = 6;
2038 		bzero(&csio->cdb_io, sizeof(csio->cdb_io));
2039 		csio->cdb_io.cdb_bytes[0] = 0x03;
2040 		csio->cdb_io.cdb_bytes[4] = csio->dxfer_len;
2041 	}
2042 	/* Freeze SIM while doing recovery. */
2043 	ch->recoverycmd = 1;
2044 	xpt_freeze_simq(ch->sim, 1);
2045 	ahci_begin_transaction(ch, ccb);
2046 }
2047 
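/*
 * Process a completed READ LOG EXT recovery command: if the NCQ error log is
 * valid, copy the saved taskfile into the failed command's result and requeue
 * the other held commands; otherwise complete the held commands unchanged.
 */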
2048 static void
2049 ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb)
2050 {
2051 	uint8_t *data;
2052 	struct ata_res *res;
2053 	int i;
2054 
2055 	ch->recoverycmd = 0;
2056 
2057 	data = ccb->ataio.data_ptr;
2058 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2059 	    (data[0] & 0x80) == 0) {
2060 		for (i = 0; i < ch->numslots; i++) {
2061 			if (!ch->hold[i])
2062 				continue;
2063 			if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
2064 				continue;
2065 			if ((data[0] & 0x1F) == i) {
2066 				res = &ch->hold[i]->ataio.res;
2067 				res->status = data[2];
2068 				res->error = data[3];
2069 				res->lba_low = data[4];
2070 				res->lba_mid = data[5];
2071 				res->lba_high = data[6];
2072 				res->device = data[7];
2073 				res->lba_low_exp = data[8];
2074 				res->lba_mid_exp = data[9];
2075 				res->lba_high_exp = data[10];
2076 				res->sector_count = data[12];
2077 				res->sector_count_exp = data[13];
2078 			} else {
2079 				ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
2080 				ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ;
2081 			}
2082 			ahci_done(ch, ch->hold[i]);
2083 			ch->hold[i] = NULL;
2084 			ch->numhslots--;
2085 		}
2086 	} else {
2087 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
2088 			device_printf(ch->dev, "Error while READ LOG EXT\n");
2089 		else if ((data[0] & 0x80) != 0) {	/* Error was for a non-queued command. */
2090 			device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n");
2091 		}
2092 		for (i = 0; i < ch->numslots; i++) {
2093 			if (!ch->hold[i])
2094 				continue;
2095 			if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
2096 				continue;
2097 			ahci_done(ch, ch->hold[i]);
2098 			ch->hold[i] = NULL;
2099 			ch->numhslots--;
2100 		}
2101 	}
2102 	free(ccb->ataio.data_ptr, M_AHCI);
2103 	xpt_free_ccb(ccb);
2104 	xpt_release_simq(ch->sim, TRUE);
2105 }
2106 
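/*
 * Process a completed REQUEST SENSE recovery command: mark the held ATAPI
 * command's sense data valid on success, or flag the autosense as failed.
 */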
2107 static void
2108 ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb)
2109 {
2110 	int i;
2111 
2112 	ch->recoverycmd = 0;
2113 
2114 	i = ccb->ccb_h.recovery_slot;
2115 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
2116 		ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID;
2117 	} else {
2118 		ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
2119 		ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2120 	}
2121 	ahci_done(ch, ch->hold[i]);
2122 	ch->hold[i] = NULL;
2123 	ch->numhslots--;
2124 	xpt_free_ccb(ccb);
2125 	xpt_release_simq(ch->sim, TRUE);
2126 }
2127 
2128 static void
2129 ahci_start(struct ahci_channel *ch, int fbs)
2130 {
2131 	u_int32_t cmd;
2132 
2133 	/* Run the channel start callback, if any. */
2134 	if (ch->start)
2135 		ch->start(ch);
2136 
2137 	/* Clear SATA error register */
2138 	ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF);
2139 	/* Clear any interrupts pending on this channel */
2140 	ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF);
2141 	/* Configure FIS-based switching if supported. */
2142 	if (ch->chcaps & AHCI_P_CMD_FBSCP) {
2143 		ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0;
2144 		ATA_OUTL(ch->r_mem, AHCI_P_FBS,
2145 		    ch->fbs_enabled ? AHCI_P_FBS_EN : 0);
2146 	}
2147 	/* Start operations on this channel */
2148 	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2149 	cmd &= ~AHCI_P_CMD_PMA;
2150 	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST |
2151 	    (ch->pm_present ? AHCI_P_CMD_PMA : 0));
2152 }
2153 
2154 static void
2155 ahci_stop(struct ahci_channel *ch)
2156 {
2157 	u_int32_t cmd;
2158 	int timeout;
2159 
2160 	/* Kill all activity on this channel */
2161 	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2162 	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST);
2163 	/* Wait for activity to stop. */
2164 	timeout = 0;
2165 	do {
2166 		DELAY(10);
2167 		if (timeout++ > 50000) {
2168 			device_printf(ch->dev, "stopping AHCI engine failed\n");
2169 			break;
2170 		}
2171 	} while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR);
2172 	ch->eslots = 0;
2173 }
2174 
2175 static void
2176 ahci_clo(struct ahci_channel *ch)
2177 {
2178 	u_int32_t cmd;
2179 	int timeout;
2180 
2181 	/* Issue Command List Override if supported */
2182 	if (ch->caps & AHCI_CAP_SCLO) {
2183 		cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2184 		cmd |= AHCI_P_CMD_CLO;
2185 		ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd);
2186 		timeout = 0;
2187 		do {
2188 			DELAY(10);
2189 			if (timeout++ > 50000) {
2190 			    device_printf(ch->dev, "executing CLO failed\n");
2191 			    break;
2192 			}
2193 		} while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO);
2194 	}
2195 }
2196 
2197 static void
2198 ahci_stop_fr(struct ahci_channel *ch)
2199 {
2200 	u_int32_t cmd;
2201 	int timeout;
2202 
2203 	/* Kill all FIS reception on this channel */
2204 	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2205 	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE);
2206 	/* Wait for FIS reception to stop. */
2207 	timeout = 0;
2208 	do {
2209 		DELAY(10);
2210 		if (timeout++ > 50000) {
2211 			device_printf(ch->dev, "stopping AHCI FR engine failed\n");
2212 			break;
2213 		}
2214 	} while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR);
2215 }
2216 
2217 static void
2218 ahci_start_fr(struct ahci_channel *ch)
2219 {
2220 	u_int32_t cmd;
2221 
2222 	/* Start FIS reception on this channel */
2223 	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2224 	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE);
2225 }
2226 
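/*
 * Wait up to roughly t milliseconds for the device to clear BSY and DRQ in
 * PxTFD; t0 is only added to the reported elapsed time.  Returns 0 on
 * success or EBUSY on timeout.
 */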
2227 static int
2228 ahci_wait_ready(struct ahci_channel *ch, int t, int t0)
2229 {
2230 	int timeout = 0;
2231 	uint32_t val;
2232 
2233 	while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) &
2234 	    (ATA_S_BUSY | ATA_S_DRQ)) {
2235 		if (timeout > t) {
2236 			if (t != 0) {
2237 				device_printf(ch->dev,
2238 				    "AHCI reset: device not ready after %dms "
2239 				    "(tfd = %08x)\n",
2240 				    MAX(t, 0) + t0, val);
2241 			}
2242 			return (EBUSY);
2243 		}
2244 		DELAY(1000);
2245 		timeout++;
2246 	}
2247 	if (bootverbose)
2248 		device_printf(ch->dev, "AHCI reset: device ready after %dms\n",
2249 		    timeout + t0);
2250 	return (0);
2251 }
2252 
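/*
 * Reset timeout continuation, rescheduled every 100ms: poll for the device
 * to become ready for up to 31 seconds (ch->resetting counts down from 310),
 * then start the port, forcing a Command List Override if it never did.
 */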
2253 static void
2254 ahci_reset_to(void *arg)
2255 {
2256 	struct ahci_channel *ch = arg;
2257 
2258 	if (ch->resetting == 0)
2259 		return;
2260 	ch->resetting--;
2261 	if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0,
2262 	    (310 - ch->resetting) * 100) == 0) {
2263 		ch->resetting = 0;
2264 		ahci_start(ch, 1);
2265 		xpt_release_simq(ch->sim, TRUE);
2266 		return;
2267 	}
2268 	if (ch->resetting == 0) {
2269 		ahci_clo(ch);
2270 		ahci_start(ch, 1);
2271 		xpt_release_simq(ch->sim, TRUE);
2272 		return;
2273 	}
2274 	callout_schedule(&ch->reset_timer, hz / 10);
2275 }
2276 
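/*
 * Hard-reset the port: abort a reset already in progress, requeue the frozen
 * and running commands, reset the SATA PHY and, if a device is detected,
 * re-enable port interrupts and restart command processing (deferred through
 * ahci_reset_to() while the device stays busy).
 */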
2277 static void
2278 ahci_reset(struct ahci_channel *ch)
2279 {
2280 	struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev));
2281 	int i;
2282 
2283 	xpt_freeze_simq(ch->sim, 1);
2284 	if (bootverbose)
2285 		device_printf(ch->dev, "AHCI reset...\n");
2286 	/* Forget about previous reset. */
2287 	if (ch->resetting) {
2288 		ch->resetting = 0;
2289 		callout_stop(&ch->reset_timer);
2290 		xpt_release_simq(ch->sim, TRUE);
2291 	}
2292 	/* Requeue the frozen command. */
2293 	if (ch->frozen) {
2294 		union ccb *fccb = ch->frozen;
2295 		ch->frozen = NULL;
2296 		fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
2297 		if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
2298 			xpt_freeze_devq(fccb->ccb_h.path, 1);
2299 			fccb->ccb_h.status |= CAM_DEV_QFRZN;
2300 		}
2301 		ahci_done(ch, fccb);
2302 	}
2303 	/* Kill the engine and requeue all running commands. */
2304 	ahci_stop(ch);
2305 	for (i = 0; i < ch->numslots; i++) {
2306 		/* Do we have a running request on slot? */
2307 		if (ch->slot[i].state < AHCI_SLOT_RUNNING)
2308 			continue;
2309 		/* XXX; Commands in loading state. */
2310 		ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
2311 	}
2312 	for (i = 0; i < ch->numslots; i++) {
2313 		if (!ch->hold[i])
2314 			continue;
2315 		ahci_done(ch, ch->hold[i]);
2316 		ch->hold[i] = NULL;
2317 		ch->numhslots--;
2318 	}
2319 	if (ch->toslots != 0)
2320 		xpt_release_simq(ch->sim, TRUE);
2321 	ch->eslots = 0;
2322 	ch->toslots = 0;
2323 	ch->wrongccs = 0;
2324 	ch->fatalerr = 0;
2325 	/* Tell the XPT about the event */
2326 	xpt_async(AC_BUS_RESET, ch->path, NULL);
2327 	/* Disable port interrupts */
2328 	ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
2329 	/* Reset and reconnect the PHY. */
2330 	if (!ahci_sata_phy_reset(ch)) {
2331 		if (bootverbose)
2332 			device_printf(ch->dev,
2333 			    "AHCI reset: device not found\n");
2334 		ch->devices = 0;
2335 		/* Enable wanted port interrupts */
2336 		ATA_OUTL(ch->r_mem, AHCI_P_IE,
2337 		    (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
2338 		     AHCI_P_IX_PRC | AHCI_P_IX_PC));
2339 		xpt_release_simq(ch->sim, TRUE);
2340 		return;
2341 	}
2342 	if (bootverbose)
2343 		device_printf(ch->dev, "AHCI reset: device found\n");
2344 	/* Wait for the busy status to clear. */
2345 	if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) {
2346 		if (dumping)
2347 			ahci_clo(ch);
2348 		else
2349 			ch->resetting = 310;
2350 	}
2351 	ch->devices = 1;
2352 	/* Enable wanted port interrupts */
2353 	ATA_OUTL(ch->r_mem, AHCI_P_IE,
2354 	     (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
2355 	      AHCI_P_IX_TFE | AHCI_P_IX_HBF |
2356 	      AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF |
2357 	      ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC |
2358 	      AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) |
2359 	      AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR)));
2360 	if (ch->resetting)
2361 		callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch);
2362 	else {
2363 		ahci_start(ch, 1);
2364 		xpt_release_simq(ch->sim, TRUE);
2365 	}
2366 }
2367 
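/*
 * Build the 20-byte host-to-device register FIS (and the ATAPI CDB, if any)
 * in the command table from the CCB.  Returns the FIS size in bytes.
 */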
2368 static int
2369 ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag)
2370 {
2371 	u_int8_t *fis = &ctp->cfis[0];
2372 
2373 	bzero(fis, 20);
2374 	fis[0] = 0x27;  		/* host to device */
2375 	fis[1] = (ccb->ccb_h.target_id & 0x0f);
2376 	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2377 		fis[1] |= 0x80;
2378 		fis[2] = ATA_PACKET_CMD;
2379 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
2380 		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
2381 			fis[3] = ATA_F_DMA;
2382 		else {
2383 			fis[5] = ccb->csio.dxfer_len;
2384 		        fis[6] = ccb->csio.dxfer_len >> 8;
2385 		}
2386 		fis[7] = ATA_D_LBA;
2387 		fis[15] = ATA_A_4BIT;
2388 		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
2389 		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
2390 		    ctp->acmd, ccb->csio.cdb_len);
2391 		bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len);
2392 	} else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) {
2393 		fis[1] |= 0x80;
2394 		fis[2] = ccb->ataio.cmd.command;
2395 		fis[3] = ccb->ataio.cmd.features;
2396 		fis[4] = ccb->ataio.cmd.lba_low;
2397 		fis[5] = ccb->ataio.cmd.lba_mid;
2398 		fis[6] = ccb->ataio.cmd.lba_high;
2399 		fis[7] = ccb->ataio.cmd.device;
2400 		fis[8] = ccb->ataio.cmd.lba_low_exp;
2401 		fis[9] = ccb->ataio.cmd.lba_mid_exp;
2402 		fis[10] = ccb->ataio.cmd.lba_high_exp;
2403 		fis[11] = ccb->ataio.cmd.features_exp;
2404 		if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
2405 			fis[12] = tag << 3;
2406 			fis[13] = 0;
2407 		} else {
2408 			fis[12] = ccb->ataio.cmd.sector_count;
2409 			fis[13] = ccb->ataio.cmd.sector_count_exp;
2410 		}
2411 		fis[15] = ATA_A_4BIT;
2412 	} else {
2413 		fis[15] = ccb->ataio.cmd.control;
2414 	}
2415 	return (20);
2416 }
2417 
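/*
 * Wait up to 100ms for the SATA PHY to report an established connection.
 * Returns 1 if a device is present, 0 on timeout or if the PHY is offline.
 */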
2418 static int
2419 ahci_sata_connect(struct ahci_channel *ch)
2420 {
2421 	u_int32_t status;
2422 	int timeout, found = 0;
2423 
2424 	/* Wait up to 100ms for the connection to become well established. */
2425 	for (timeout = 0; timeout < 1000 ; timeout++) {
2426 		status = ATA_INL(ch->r_mem, AHCI_P_SSTS);
2427 		if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE)
2428 			found = 1;
2429 		if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) &&
2430 		    ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) &&
2431 		    ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE))
2432 			break;
2433 		if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) {
2434 			if (bootverbose) {
2435 				device_printf(ch->dev, "SATA offline status=%08x\n",
2436 				    status);
2437 			}
2438 			return (0);
2439 		}
2440 		if (found == 0 && timeout >= 100)
2441 			break;
2442 		DELAY(100);
2443 	}
2444 	if (timeout >= 1000 || !found) {
2445 		if (bootverbose) {
2446 			device_printf(ch->dev,
2447 			    "SATA connect timeout time=%dus status=%08x\n",
2448 			    timeout * 100, status);
2449 		}
2450 		return (0);
2451 	}
2452 	if (bootverbose) {
2453 		device_printf(ch->dev, "SATA connect time=%dus status=%08x\n",
2454 		    timeout * 100, status);
2455 	}
2456 	/* Clear SATA error register */
2457 	ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff);
2458 	return (1);
2459 }
2460 
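/*
 * Perform a SATA link reset (COMRESET) through SControl, honoring the
 * configured speed limit.  If no device connects, spin the port down again
 * (with staggered spin-up support) or disable detection to save power.
 */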
2461 static int
2462 ahci_sata_phy_reset(struct ahci_channel *ch)
2463 {
2464 	int sata_rev;
2465 	uint32_t val;
2466 
2467 	if (ch->listening) {
2468 		val = ATA_INL(ch->r_mem, AHCI_P_CMD);
2469 		val |= AHCI_P_CMD_SUD;
2470 		ATA_OUTL(ch->r_mem, AHCI_P_CMD, val);
2471 		ch->listening = 0;
2472 	}
2473 	sata_rev = ch->user[ch->pm_present ? 15 : 0].revision;
2474 	if (sata_rev == 1)
2475 		val = ATA_SC_SPD_SPEED_GEN1;
2476 	else if (sata_rev == 2)
2477 		val = ATA_SC_SPD_SPEED_GEN2;
2478 	else if (sata_rev == 3)
2479 		val = ATA_SC_SPD_SPEED_GEN3;
2480 	else
2481 		val = 0;
2482 	ATA_OUTL(ch->r_mem, AHCI_P_SCTL,
2483 	    ATA_SC_DET_RESET | val |
2484 	    ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER);
2485 	DELAY(1000);
2486 	ATA_OUTL(ch->r_mem, AHCI_P_SCTL,
2487 	    ATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 0 :
2488 	    (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER)));
2489 	if (!ahci_sata_connect(ch)) {
2490 		if (ch->caps & AHCI_CAP_SSS) {
2491 			val = ATA_INL(ch->r_mem, AHCI_P_CMD);
2492 			val &= ~AHCI_P_CMD_SUD;
2493 			ATA_OUTL(ch->r_mem, AHCI_P_CMD, val);
2494 			ch->listening = 1;
2495 		} else if (ch->pm_level > 0)
2496 			ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE);
2497 		return (0);
2498 	}
2499 	return (1);
2500 }
2501 
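/*
 * Validate the CCB's target and LUN: target IDs above 0 are valid only with
 * port multiplier support and only LUN 0 exists.  Completes the CCB and
 * returns -1 on error, 0 otherwise.
 */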
2502 static int
2503 ahci_check_ids(struct ahci_channel *ch, union ccb *ccb)
2504 {
2505 
2506 	if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 15 : 0)) {
2507 		ccb->ccb_h.status = CAM_TID_INVALID;
2508 		ahci_done(ch, ccb);
2509 		return (-1);
2510 	}
2511 	if (ccb->ccb_h.target_lun != 0) {
2512 		ccb->ccb_h.status = CAM_LUN_INVALID;
2513 		ahci_done(ch, ccb);
2514 		return (-1);
2515 	}
2516 	return (0);
2517 }
2518 
2519 static void
2520 ahciaction(struct cam_sim *sim, union ccb *ccb)
2521 {
2522 	struct ahci_channel *ch;
2523 
2524 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n",
2525 	    ccb->ccb_h.func_code));
2526 
2527 	ch = (struct ahci_channel *)cam_sim_softc(sim);
2528 	switch (ccb->ccb_h.func_code) {
2529 	/* Common cases first */
2530 	case XPT_ATA_IO:	/* Execute the requested I/O operation */
2531 	case XPT_SCSI_IO:
2532 		if (ahci_check_ids(ch, ccb))
2533 			return;
2534 		if (ch->devices == 0 ||
2535 		    (ch->pm_present == 0 &&
2536 		     ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) {
2537 			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2538 			break;
2539 		}
2540 		ccb->ccb_h.recovery_type = RECOVERY_NONE;
2541 		/* Check for command collision. */
2542 		if (ahci_check_collision(ch, ccb)) {
2543 			/* Freeze command. */
2544 			ch->frozen = ccb;
2545 			/* We have only one frozen slot, so freeze simq also. */
2546 			xpt_freeze_simq(ch->sim, 1);
2547 			return;
2548 		}
2549 		ahci_begin_transaction(ch, ccb);
2550 		return;
2551 	case XPT_EN_LUN:		/* Enable LUN as a target */
2552 	case XPT_TARGET_IO:		/* Execute target I/O request */
2553 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
2554 	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
2555 	case XPT_ABORT:			/* Abort the specified CCB */
2556 		/* XXX Implement */
2557 		ccb->ccb_h.status = CAM_REQ_INVALID;
2558 		break;
2559 	case XPT_SET_TRAN_SETTINGS:
2560 	{
2561 		struct	ccb_trans_settings *cts = &ccb->cts;
2562 		struct	ahci_device *d;
2563 
2564 		if (ahci_check_ids(ch, ccb))
2565 			return;
2566 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
2567 			d = &ch->curr[ccb->ccb_h.target_id];
2568 		else
2569 			d = &ch->user[ccb->ccb_h.target_id];
2570 		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
2571 			d->revision = cts->xport_specific.sata.revision;
2572 		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE)
2573 			d->mode = cts->xport_specific.sata.mode;
2574 		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
2575 			d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
2576 		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS)
2577 			d->tags = min(ch->numslots, cts->xport_specific.sata.tags);
2578 		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM)
2579 			ch->pm_present = cts->xport_specific.sata.pm_present;
2580 		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
2581 			d->atapi = cts->xport_specific.sata.atapi;
2582 		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
2583 			d->caps = cts->xport_specific.sata.caps;
2584 		ccb->ccb_h.status = CAM_REQ_CMP;
2585 		break;
2586 	}
2587 	case XPT_GET_TRAN_SETTINGS:
2588 	/* Get default/user set transfer settings for the target */
2589 	{
2590 		struct	ccb_trans_settings *cts = &ccb->cts;
2591 		struct  ahci_device *d;
2592 		uint32_t status;
2593 
2594 		if (ahci_check_ids(ch, ccb))
2595 			return;
2596 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
2597 			d = &ch->curr[ccb->ccb_h.target_id];
2598 		else
2599 			d = &ch->user[ccb->ccb_h.target_id];
2600 		cts->protocol = PROTO_UNSPECIFIED;
2601 		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
2602 		cts->transport = XPORT_SATA;
2603 		cts->transport_version = XPORT_VERSION_UNSPECIFIED;
2604 		cts->proto_specific.valid = 0;
2605 		cts->xport_specific.sata.valid = 0;
2606 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS &&
2607 		    (ccb->ccb_h.target_id == 15 ||
2608 		    (ccb->ccb_h.target_id == 0 && !ch->pm_present))) {
2609 			status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK;
2610 			if (status & 0x0f0) {
2611 				cts->xport_specific.sata.revision =
2612 				    (status & 0x0f0) >> 4;
2613 				cts->xport_specific.sata.valid |=
2614 				    CTS_SATA_VALID_REVISION;
2615 			}
2616 			cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D;
2617 			if (ch->pm_level) {
2618 				if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC))
2619 					cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ;
2620 				if (ch->caps2 & AHCI_CAP2_APST)
2621 					cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST;
2622 			}
2623 			if ((ch->caps & AHCI_CAP_SNCQ) &&
2624 			    (ch->quirks & AHCI_Q_NOAA) == 0)
2625 				cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA;
2626 			cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN;
2627 			cts->xport_specific.sata.caps &=
2628 			    ch->user[ccb->ccb_h.target_id].caps;
2629 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
2630 		} else {
2631 			cts->xport_specific.sata.revision = d->revision;
2632 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
2633 			cts->xport_specific.sata.caps = d->caps;
2634 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
2635 		}
2636 		cts->xport_specific.sata.mode = d->mode;
2637 		cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
2638 		cts->xport_specific.sata.bytecount = d->bytecount;
2639 		cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
2640 		cts->xport_specific.sata.pm_present = ch->pm_present;
2641 		cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM;
2642 		cts->xport_specific.sata.tags = d->tags;
2643 		cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS;
2644 		cts->xport_specific.sata.atapi = d->atapi;
2645 		cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
2646 		ccb->ccb_h.status = CAM_REQ_CMP;
2647 		break;
2648 	}
2649 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
2650 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
2651 		ahci_reset(ch);
2652 		ccb->ccb_h.status = CAM_REQ_CMP;
2653 		break;
2654 	case XPT_TERM_IO:		/* Terminate the I/O process */
2655 		/* XXX Implement */
2656 		ccb->ccb_h.status = CAM_REQ_INVALID;
2657 		break;
2658 	case XPT_PATH_INQ:		/* Path routing inquiry */
2659 	{
2660 		struct ccb_pathinq *cpi = &ccb->cpi;
2661 
2662 		cpi->version_num = 1; /* XXX??? */
2663 		cpi->hba_inquiry = PI_SDTR_ABLE;
2664 		if (ch->caps & AHCI_CAP_SNCQ)
2665 			cpi->hba_inquiry |= PI_TAG_ABLE;
2666 		if (ch->caps & AHCI_CAP_SPM)
2667 			cpi->hba_inquiry |= PI_SATAPM;
2668 		cpi->target_sprt = 0;
2669 		cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
2670 		cpi->hba_eng_cnt = 0;
2671 		if (ch->caps & AHCI_CAP_SPM)
2672 			cpi->max_target = 15;
2673 		else
2674 			cpi->max_target = 0;
2675 		cpi->max_lun = 0;
2676 		cpi->initiator_id = 0;
2677 		cpi->bus_id = cam_sim_bus(sim);
2678 		cpi->base_transfer_speed = 150000;
2679 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2680 		strncpy(cpi->hba_vid, "AHCI", HBA_IDLEN);
2681 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2682 		cpi->unit_number = cam_sim_unit(sim);
2683 		cpi->transport = XPORT_SATA;
2684 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
2685 		cpi->protocol = PROTO_ATA;
2686 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
2687 		cpi->maxio = MAXPHYS;
2688 		/* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */
2689 		if (ch->quirks & AHCI_Q_MAXIO_64K)
2690 			cpi->maxio = min(cpi->maxio, 128 * 512);
2691 		cpi->hba_vendor = ch->vendorid;
2692 		cpi->hba_device = ch->deviceid;
2693 		cpi->hba_subvendor = ch->subvendorid;
2694 		cpi->hba_subdevice = ch->subdeviceid;
2695 		cpi->ccb_h.status = CAM_REQ_CMP;
2696 		break;
2697 	}
2698 	default:
2699 		ccb->ccb_h.status = CAM_REQ_INVALID;
2700 		break;
2701 	}
2702 	ahci_done(ch, ccb);
2703 }
2704 
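/*
 * CAM SIM poll routine: process any pending port interrupt status without
 * interrupts and periodically advance an in-progress reset.
 */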
2705 static void
2706 ahcipoll(struct cam_sim *sim)
2707 {
2708 	struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim);
2709 	uint32_t istatus;
2710 
2711 	/* Read the interrupt status and process it, if any. */
2712 	istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
2713 	if (istatus != 0)
2714 		ahci_ch_intr_main(ch, istatus);
2715 	if (ch->resetting != 0 &&
2716 	    (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) {
2717 		ch->resetpolldiv = 1000;
2718 		ahci_reset_to(ch);
2719 	}
2720 }
2721 MODULE_VERSION(ahci, 1);
2722 MODULE_DEPEND(ahci, cam, 1, 1, 1);
2723