1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2009-2012 Alexander Motin <mav@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/param.h>
30 #include <sys/module.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/bus.h>
34 #include <sys/conf.h>
35 #include <sys/endian.h>
36 #include <sys/malloc.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/sbuf.h>
40 #include <sys/sysctl.h>
41 #include <machine/stdarg.h>
42 #include <machine/resource.h>
43 #include <machine/bus.h>
44 #include <sys/rman.h>
45 #include "ahci.h"
46
47 #include <cam/cam.h>
48 #include <cam/cam_ccb.h>
49 #include <cam/cam_sim.h>
50 #include <cam/cam_xpt_sim.h>
51 #include <cam/cam_debug.h>
52
53 /* local prototypes */
54 static void ahci_intr(void *data);
55 static void ahci_intr_one(void *data);
56 static void ahci_intr_one_edge(void *data);
57 static int ahci_ch_init(device_t dev);
58 static int ahci_ch_deinit(device_t dev);
59 static int ahci_ch_suspend(device_t dev);
60 static int ahci_ch_resume(device_t dev);
61 static void ahci_ch_pm(void *arg);
62 static void ahci_ch_intr(void *arg);
63 static void ahci_ch_intr_direct(void *arg);
64 static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus);
65 static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb);
66 static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
67 static void ahci_execute_transaction(struct ahci_slot *slot);
68 static void ahci_timeout(void *arg);
69 static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et);
70 static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag);
71 static void ahci_dmainit(device_t dev);
72 static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
73 static void ahci_dmafini(device_t dev);
74 static void ahci_slotsalloc(device_t dev);
75 static void ahci_slotsfree(device_t dev);
76 static void ahci_reset(struct ahci_channel *ch);
77 static void ahci_start(struct ahci_channel *ch, int fbs);
78 static void ahci_stop(struct ahci_channel *ch);
79 static void ahci_clo(struct ahci_channel *ch);
80 static void ahci_start_fr(struct ahci_channel *ch);
81 static void ahci_stop_fr(struct ahci_channel *ch);
82 static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr);
83 static uint32_t ahci_ch_detval(struct ahci_channel *ch, uint32_t val);
84
85 static int ahci_sata_connect(struct ahci_channel *ch);
86 static int ahci_sata_phy_reset(struct ahci_channel *ch);
87 static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0);
88
89 static void ahci_issue_recovery(struct ahci_channel *ch);
90 static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb);
91 static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb);
92
93 static void ahciaction(struct cam_sim *sim, union ccb *ccb);
94 static void ahcipoll(struct cam_sim *sim);
95
96 static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers");
97
98 #define recovery_type spriv_field0
99 #define RECOVERY_NONE 0
100 #define RECOVERY_READ_LOG 1
101 #define RECOVERY_REQUEST_SENSE 2
102 #define recovery_slot spriv_field1
103
104 static uint32_t
105 ahci_ch_detval(struct ahci_channel *ch, uint32_t val)
106 {
107
108 return ch->disablephy ? ATA_SC_DET_DISABLE : val;
109 }
110
111 int
112 ahci_ctlr_setup(device_t dev)
113 {
114 struct ahci_controller *ctlr = device_get_softc(dev);
115 /* Clear interrupts */
116 ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS));
117 /* Configure CCC */
118 if (ctlr->ccc) {
119 ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI));
120 ATA_OUTL(ctlr->r_mem, AHCI_CCCC,
121 (ctlr->ccc << AHCI_CCCC_TV_SHIFT) |
122 (4 << AHCI_CCCC_CC_SHIFT) |
123 AHCI_CCCC_EN);
124 ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) &
125 AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT;
126 if (bootverbose) {
127 device_printf(dev,
128 "CCC with %dms/4cmd enabled on vector %d\n",
129 ctlr->ccc, ctlr->cccv);
130 }
131 }
132 /* Enable AHCI interrupts */
133 ATA_OUTL(ctlr->r_mem, AHCI_GHC,
134 ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE);
135 return (0);
136 }
137
138 int
139 ahci_ctlr_reset(device_t dev)
140 {
141 struct ahci_controller *ctlr = device_get_softc(dev);
142 uint32_t v;
143 int timeout;
144
145 /* BIOS/OS Handoff */
146 if ((ATA_INL(ctlr->r_mem, AHCI_VS) >= 0x00010200) &&
147 (ATA_INL(ctlr->r_mem, AHCI_CAP2) & AHCI_CAP2_BOH) &&
148 ((v = ATA_INL(ctlr->r_mem, AHCI_BOHC)) & AHCI_BOHC_OOS) == 0) {
149 /* Request OS ownership. */
150 ATA_OUTL(ctlr->r_mem, AHCI_BOHC, v | AHCI_BOHC_OOS);
151
152 /* Wait up to 2s for BIOS ownership release. */
153 for (timeout = 0; timeout < 80; timeout++) {
154 DELAY(25000);
155 v = ATA_INL(ctlr->r_mem, AHCI_BOHC);
156 if ((v & AHCI_BOHC_BOS) == 0)
157 break;
158 if ((v & AHCI_BOHC_BB) == 0)
159 break;
160 }
161 }
162
163 /* Enable AHCI mode */
164 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);
165 /* Reset AHCI controller */
166 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR);
167 for (timeout = 1000; timeout > 0; timeout--) {
168 DELAY(1000);
169 if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0)
170 break;
171 }
172 if (timeout == 0) {
173 device_printf(dev, "AHCI controller reset failure\n");
174 return (ENXIO);
175 }
176 /* Reenable AHCI mode */
177 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);
178
179 if (ctlr->quirks & AHCI_Q_RESTORE_CAP) {
180 /*
181 * Restore capability field.
182 * This is a write to a read-only register to restore its state.
183 * On fully standard-compliant hardware this is not needed and
184 * this operation shall not take place. See ahci_pci.c for
185 * platforms using this quirk.
186 */
187 ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps);
188 }
189
190 return (0);
191 }
192
193 int
194 ahci_attach(device_t dev)
195 {
196 struct ahci_controller *ctlr = device_get_softc(dev);
197 int error, i, speed, unit;
198 uint32_t u, version;
199 device_t child;
200
201 ctlr->dev = dev;
202 ctlr->ccc = 0;
203 resource_int_value(device_get_name(dev),
204 device_get_unit(dev), "ccc", &ctlr->ccc);
205 mtx_init(&ctlr->ch_mtx, "AHCI channels lock", NULL, MTX_DEF);
206
207 /* Setup our own memory management for channels. */
208 ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem);
209 ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem);
210 ctlr->sc_iomem.rm_type = RMAN_ARRAY;
211 ctlr->sc_iomem.rm_descr = "I/O memory addresses";
212 if ((error = rman_init(&ctlr->sc_iomem)) != 0) {
213 ahci_free_mem(dev);
214 return (error);
215 }
216 if ((error = rman_manage_region(&ctlr->sc_iomem,
217 rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) {
218 ahci_free_mem(dev);
219 rman_fini(&ctlr->sc_iomem);
220 return (error);
221 }
222 /* Get the HW capabilities */
223 version = ATA_INL(ctlr->r_mem, AHCI_VS);
224 ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP);
225 if (version >= 0x00010200)
226 ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2);
227 if (ctlr->caps & AHCI_CAP_EMS)
228 ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL);
229
230 if (ctlr->quirks & AHCI_Q_FORCE_PI) {
231 /*
232 * Enable ports.
233 * The spec says that BIOS sets up bits corresponding to
234 * available ports. On platforms where this information
235 * is missing, the driver can define available ports on its own.
236 */
237 int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1;
238 int nmask = (1 << nports) - 1;
239
240 ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask);
241 device_printf(dev, "Forcing PI to %d ports (mask = %x)\n",
242 nports, nmask);
243 }
244
245 ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI);
246
247 /* Identify and set separate quirks for HBA and RAID f/w Marvells. */
248 if ((ctlr->quirks & AHCI_Q_ALTSIG) &&
249 (ctlr->caps & AHCI_CAP_SPM) == 0)
250 ctlr->quirks |= AHCI_Q_NOBSYRES;
251
252 if (ctlr->quirks & AHCI_Q_1CH) {
253 ctlr->caps &= ~AHCI_CAP_NPMASK;
254 ctlr->ichannels &= 0x01;
255 }
256 if (ctlr->quirks & AHCI_Q_2CH) {
257 ctlr->caps &= ~AHCI_CAP_NPMASK;
258 ctlr->caps |= 1;
259 ctlr->ichannels &= 0x03;
260 }
261 if (ctlr->quirks & AHCI_Q_4CH) {
262 ctlr->caps &= ~AHCI_CAP_NPMASK;
263 ctlr->caps |= 3;
264 ctlr->ichannels &= 0x0f;
265 }
266 ctlr->channels = MAX(flsl(ctlr->ichannels),
267 (ctlr->caps & AHCI_CAP_NPMASK) + 1);
268 if (ctlr->quirks & AHCI_Q_NOPMP)
269 ctlr->caps &= ~AHCI_CAP_SPM;
270 if (ctlr->quirks & AHCI_Q_NONCQ)
271 ctlr->caps &= ~AHCI_CAP_SNCQ;
272 if ((ctlr->caps & AHCI_CAP_CCCS) == 0)
273 ctlr->ccc = 0;
274 ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC);
275
276 /* Create controller-wide DMA tag. */
277 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
278 (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR :
279 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
280 BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE,
281 ctlr->dma_coherent ? BUS_DMA_COHERENT : 0, NULL, NULL,
282 &ctlr->dma_tag)) {
283 ahci_free_mem(dev);
284 rman_fini(&ctlr->sc_iomem);
285 return (ENXIO);
286 }
287
288 ahci_ctlr_setup(dev);
289
290 /* Setup interrupts. */
291 if ((error = ahci_setup_interrupt(dev)) != 0) {
292 bus_dma_tag_destroy(ctlr->dma_tag);
293 ahci_free_mem(dev);
294 rman_fini(&ctlr->sc_iomem);
295 return (error);
296 }
297
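/*
 * Count implemented ports; prefer direct command completion when MSI
 * provides multiple vectors or only a few ports are present.
 */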
298 i = 0;
299 for (u = ctlr->ichannels; u != 0; u >>= 1)
300 i += (u & 1);
301 ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3));
302 resource_int_value(device_get_name(dev), device_get_unit(dev),
303 "direct", &ctlr->direct);
304 /* Announce HW capabilities. */
305 speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT;
306 device_printf(dev,
307 "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n",
308 ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f),
309 ((version >> 4) & 0xf0) + (version & 0x0f),
310 (ctlr->caps & AHCI_CAP_NPMASK) + 1,
311 ((speed == 1) ? "1.5":((speed == 2) ? "3":
312 ((speed == 3) ? "6":"?"))),
313 (ctlr->caps & AHCI_CAP_SPM) ?
314 "supported" : "not supported",
315 (ctlr->caps & AHCI_CAP_FBSS) ?
316 " with FBS" : "");
317 if (ctlr->quirks != 0) {
318 device_printf(dev, "quirks=0x%b\n", ctlr->quirks,
319 AHCI_Q_BIT_STRING);
320 }
321 if (bootverbose) {
322 device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps",
323 (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"",
324 (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"",
325 (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"",
326 (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"",
327 (ctlr->caps & AHCI_CAP_SSS) ? " SS":"",
328 (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"",
329 (ctlr->caps & AHCI_CAP_SAL) ? " AL":"",
330 (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"",
331 ((speed == 1) ? "1.5":((speed == 2) ? "3":
332 ((speed == 3) ? "6":"?"))));
333 printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n",
334 (ctlr->caps & AHCI_CAP_SAM) ? " AM":"",
335 (ctlr->caps & AHCI_CAP_SPM) ? " PM":"",
336 (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"",
337 (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"",
338 (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"",
339 (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"",
340 ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1,
341 (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"",
342 (ctlr->caps & AHCI_CAP_EMS) ? " EM":"",
343 (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"",
344 (ctlr->caps & AHCI_CAP_NPMASK) + 1);
345 }
346 if (bootverbose && version >= 0x00010200) {
347 device_printf(dev, "Caps2:%s%s%s%s%s%s\n",
348 (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"",
349 (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"",
350 (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"",
351 (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"",
352 (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"",
353 (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":"");
354 }
355 /* Attach all channels on this controller */
356 for (unit = 0; unit < ctlr->channels; unit++) {
357 child = device_add_child(dev, "ahcich", DEVICE_UNIT_ANY);
358 if (child == NULL) {
359 device_printf(dev, "failed to add channel device\n");
360 continue;
361 }
362 device_set_ivars(child, (void *)(intptr_t)unit);
363 if ((ctlr->ichannels & (1 << unit)) == 0)
364 device_disable(child);
365 }
366 /* Attach any remapped NVME device */
367 for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) {
368 child = device_add_child(dev, "nvme", DEVICE_UNIT_ANY);
369 if (child == NULL) {
370 device_printf(dev, "failed to add remapped NVMe device");
371 continue;
372 }
373 device_set_ivars(child, (void *)(intptr_t)(unit | AHCI_REMAPPED_UNIT));
374 }
375
376 int em = (ctlr->caps & AHCI_CAP_EMS) != 0;
377 resource_int_value(device_get_name(dev), device_get_unit(dev),
378 "em", &em);
379 if (em) {
380 child = device_add_child(dev, "ahciem", DEVICE_UNIT_ANY);
381 if (child == NULL)
382 device_printf(dev, "failed to add enclosure device\n");
383 else
384 device_set_ivars(child, (void *)(intptr_t)AHCI_EM_UNIT);
385 }
386 bus_attach_children(dev);
387 return (0);
388 }
389
390 int
391 ahci_detach(device_t dev)
392 {
393 struct ahci_controller *ctlr = device_get_softc(dev);
394 int error, i;
395
396 /* Detach & delete all children */
397 error = bus_generic_detach(dev);
398 if (error != 0)
399 return (error);
400
401 /* Free interrupts. */
402 for (i = 0; i < ctlr->numirqs; i++) {
403 if (ctlr->irqs[i].r_irq) {
404 bus_teardown_intr(dev, ctlr->irqs[i].r_irq,
405 ctlr->irqs[i].handle);
406 bus_release_resource(dev, SYS_RES_IRQ,
407 ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq);
408 }
409 }
410 bus_dma_tag_destroy(ctlr->dma_tag);
411 /* Free memory. */
412 rman_fini(&ctlr->sc_iomem);
413 ahci_free_mem(dev);
414 mtx_destroy(&ctlr->ch_mtx);
415 return (0);
416 }
417
418 void
419 ahci_free_mem(device_t dev)
420 {
421 struct ahci_controller *ctlr = device_get_softc(dev);
422
423 /* Release memory resources */
424 if (ctlr->r_mem)
425 bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
426 if (ctlr->r_msix_table)
427 bus_release_resource(dev, SYS_RES_MEMORY,
428 ctlr->r_msix_tab_rid, ctlr->r_msix_table);
429 if (ctlr->r_msix_pba)
430 bus_release_resource(dev, SYS_RES_MEMORY,
431 ctlr->r_msix_pba_rid, ctlr->r_msix_pba);
432
433 ctlr->r_msix_pba = ctlr->r_mem = ctlr->r_msix_table = NULL;
434 }
435
436 int
437 ahci_setup_interrupt(device_t dev)
438 {
439 struct ahci_controller *ctlr = device_get_softc(dev);
440 int i;
441
442 /* Check for single MSI vector fallback. */
443 if (ctlr->numirqs > 1 &&
444 (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) {
445 device_printf(dev, "Falling back to one MSI\n");
446 ctlr->numirqs = 1;
447 }
448
449 /* Ensure we don't overrun irqs. */
450 if (ctlr->numirqs > AHCI_MAX_IRQS) {
451 device_printf(dev, "Too many irqs %d > %d (clamping)\n",
452 ctlr->numirqs, AHCI_MAX_IRQS);
453 ctlr->numirqs = AHCI_MAX_IRQS;
454 }
455
456 /* Allocate all IRQs. */
457 for (i = 0; i < ctlr->numirqs; i++) {
458 ctlr->irqs[i].ctlr = ctlr;
459 ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0);
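/*
 * Select how this vector is serviced: MODE_ONE handles a single
 * channel, MODE_ALL scans every channel, and MODE_AFTER covers this
 * channel plus all following ones when vectors are scarce.
 */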
460 if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi)
461 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE;
462 else if (ctlr->numirqs == 1 || i >= ctlr->channels ||
463 (ctlr->ccc && i == ctlr->cccv))
464 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL;
465 else if (ctlr->channels > ctlr->numirqs &&
466 i == ctlr->numirqs - 1)
467 ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER;
468 else
469 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE;
470 if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
471 &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) {
472 device_printf(dev, "unable to map interrupt\n");
473 return (ENXIO);
474 }
475 if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL,
476 (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr :
477 ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge :
478 ahci_intr_one),
479 &ctlr->irqs[i], &ctlr->irqs[i].handle))) {
480 /* SOS XXX release r_irq */
481 device_printf(dev, "unable to setup interrupt\n");
482 return (ENXIO);
483 }
484 if (ctlr->numirqs > 1) {
485 bus_describe_intr(dev, ctlr->irqs[i].r_irq,
486 ctlr->irqs[i].handle,
487 ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ?
488 "ch%d" : "%d", i);
489 }
490 }
491 return (0);
492 }
493
494 /*
495 * Common case interrupt handler.
496 */
497 static void
498 ahci_intr(void *data)
499 {
500 struct ahci_controller_irq *irq = data;
501 struct ahci_controller *ctlr = irq->ctlr;
502 u_int32_t is, ise = 0;
503 void *arg;
504 int unit;
505
506 if (irq->mode == AHCI_IRQ_MODE_ALL) {
507 unit = 0;
508 if (ctlr->ccc)
509 is = ctlr->ichannels;
510 else
511 is = ATA_INL(ctlr->r_mem, AHCI_IS);
512 } else { /* AHCI_IRQ_MODE_AFTER */
513 unit = irq->r_irq_rid - 1;
514 is = ATA_INL(ctlr->r_mem, AHCI_IS);
515 is &= (0xffffffff << unit);
516 }
517 /* CCC interrupt is edge triggered. */
518 if (ctlr->ccc)
519 ise = 1 << ctlr->cccv;
520 /* Some controllers have edge triggered IS. */
521 if (ctlr->quirks & AHCI_Q_EDGEIS)
522 ise |= is;
523 if (ise != 0)
524 ATA_OUTL(ctlr->r_mem, AHCI_IS, ise);
525 for (; unit < ctlr->channels; unit++) {
526 if ((is & (1 << unit)) != 0 &&
527 (arg = ctlr->interrupt[unit].argument)) {
528 ctlr->interrupt[unit].function(arg);
529 }
530 }
531 for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) {
532 if ((arg = ctlr->interrupt[unit].argument)) {
533 ctlr->interrupt[unit].function(arg);
534 }
535 }
536
537 /* AHCI declares level triggered IS. */
538 if (!(ctlr->quirks & AHCI_Q_EDGEIS))
539 ATA_OUTL(ctlr->r_mem, AHCI_IS, is);
540 ATA_RBL(ctlr->r_mem, AHCI_IS);
541 }
542
543 /*
544 * Simplified interrupt handler for multivector MSI mode.
545 */
546 static void
547 ahci_intr_one(void *data)
548 {
549 struct ahci_controller_irq *irq = data;
550 struct ahci_controller *ctlr = irq->ctlr;
551 void *arg;
552 int unit;
553
554 unit = irq->r_irq_rid - 1;
555 if ((arg = ctlr->interrupt[unit].argument))
556 ctlr->interrupt[unit].function(arg);
557 /* AHCI declares level triggered IS. */
558 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
559 ATA_RBL(ctlr->r_mem, AHCI_IS);
560 }
561
562 static void
563 ahci_intr_one_edge(void *data)
564 {
565 struct ahci_controller_irq *irq = data;
566 struct ahci_controller *ctlr = irq->ctlr;
567 void *arg;
568 int unit;
569
570 unit = irq->r_irq_rid - 1;
571 /* Some controllers have edge triggered IS. */
572 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
573 if ((arg = ctlr->interrupt[unit].argument))
574 ctlr->interrupt[unit].function(arg);
575 ATA_RBL(ctlr->r_mem, AHCI_IS);
576 }
577
578 struct resource *
579 ahci_alloc_resource(device_t dev, device_t child, int type, int *rid,
580 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
581 {
582 struct ahci_controller *ctlr = device_get_softc(dev);
583 struct resource *res;
584 rman_res_t st;
585 int offset, size, unit;
586 bool is_em, is_remapped;
587
588 unit = (intptr_t)device_get_ivars(child);
589 is_em = is_remapped = false;
590 if (unit & AHCI_REMAPPED_UNIT) {
591 unit &= AHCI_UNIT;
592 unit -= ctlr->channels;
593 is_remapped = true;
594 } else if (unit & AHCI_EM_UNIT) {
595 unit &= AHCI_UNIT;
596 is_em = true;
597 }
598 res = NULL;
599 switch (type) {
600 case SYS_RES_MEMORY:
601 if (is_remapped) {
602 offset = ctlr->remap_offset + unit * ctlr->remap_size;
603 size = ctlr->remap_size;
604 } else if (!is_em) {
605 offset = AHCI_OFFSET + (unit << 7);
606 size = 128;
607 } else if ((ctlr->caps & AHCI_CAP_EMS) == 0) {
608 break;
609 } else if (*rid == 0) {
610 offset = AHCI_EM_CTL;
611 size = 4;
612 } else {
613 offset = (ctlr->emloc & 0xffff0000) >> 14;
614 size = (ctlr->emloc & 0x0000ffff) << 2;
615 if (*rid != 1) {
616 if (*rid == 2 && (ctlr->capsem &
617 (AHCI_EM_XMT | AHCI_EM_SMB)) == 0)
618 offset += size;
619 else
620 break;
621 }
622 }
623 st = rman_get_start(ctlr->r_mem);
624 res = rman_reserve_resource(&ctlr->sc_iomem, st + offset,
625 st + offset + size - 1, size, RF_ACTIVE, child);
626 if (res) {
627 bus_space_handle_t bsh;
628 bus_space_tag_t bst;
629 bsh = rman_get_bushandle(ctlr->r_mem);
630 bst = rman_get_bustag(ctlr->r_mem);
631 bus_space_subregion(bst, bsh, offset, 128, &bsh);
632 rman_set_bushandle(res, bsh);
633 rman_set_bustag(res, bst);
634 }
635 break;
636 case SYS_RES_IRQ:
637 if (*rid == ATA_IRQ_RID)
638 res = ctlr->irqs[0].r_irq;
639 break;
640 }
641 return (res);
642 }
643
644 int
645 ahci_release_resource(device_t dev, device_t child, struct resource *r)
646 {
647
648 switch (rman_get_type(r)) {
649 case SYS_RES_MEMORY:
650 rman_release_resource(r);
651 return (0);
652 case SYS_RES_IRQ:
653 if (rman_get_rid(r) != ATA_IRQ_RID)
654 return (ENOENT);
655 return (0);
656 }
657 return (EINVAL);
658 }
659
660 int
661 ahci_setup_intr(device_t dev, device_t child, struct resource *irq,
662 int flags, driver_filter_t *filter, driver_intr_t *function,
663 void *argument, void **cookiep)
664 {
665 struct ahci_controller *ctlr = device_get_softc(dev);
666 int unit = (intptr_t)device_get_ivars(child) & AHCI_UNIT;
667
668 if (filter != NULL) {
669 printf("ahci.c: we cannot use a filter here\n");
670 return (EINVAL);
671 }
672 ctlr->interrupt[unit].function = function;
673 ctlr->interrupt[unit].argument = argument;
674 return (0);
675 }
676
677 int
678 ahci_teardown_intr(device_t dev, device_t child, struct resource *irq,
679 void *cookie)
680 {
681 struct ahci_controller *ctlr = device_get_softc(dev);
682 int unit = (intptr_t)device_get_ivars(child) & AHCI_UNIT;
683
684 ctlr->interrupt[unit].function = NULL;
685 ctlr->interrupt[unit].argument = NULL;
686 return (0);
687 }
688
689 int
690 ahci_print_child(device_t dev, device_t child)
691 {
692 intptr_t ivars;
693 int retval;
694
695 retval = bus_print_child_header(dev, child);
696 ivars = (intptr_t)device_get_ivars(child);
697 if ((ivars & AHCI_EM_UNIT) == 0)
698 retval += printf(" at channel %d", (int)ivars & AHCI_UNIT);
699 retval += bus_print_child_footer(dev, child);
700 return (retval);
701 }
702
703 int
704 ahci_child_location(device_t dev, device_t child, struct sbuf *sb)
705 {
706 intptr_t ivars;
707
708 ivars = (intptr_t)device_get_ivars(child);
709 if ((ivars & AHCI_EM_UNIT) == 0)
710 sbuf_printf(sb, "channel=%d", (int)ivars & AHCI_UNIT);
711 return (0);
712 }
713
714 bus_dma_tag_t
715 ahci_get_dma_tag(device_t dev, device_t child)
716 {
717 struct ahci_controller *ctlr = device_get_softc(dev);
718
719 return (ctlr->dma_tag);
720 }
721
722 void
723 ahci_attached(device_t dev, struct ahci_channel *ch)
724 {
725 struct ahci_controller *ctlr = device_get_softc(dev);
726
727 mtx_lock(&ctlr->ch_mtx);
728 ctlr->ch[ch->unit] = ch;
729 mtx_unlock(&ctlr->ch_mtx);
730 }
731
732 void
733 ahci_detached(device_t dev, struct ahci_channel *ch)
734 {
735 struct ahci_controller *ctlr = device_get_softc(dev);
736
737 mtx_lock(&ctlr->ch_mtx);
738 mtx_lock(&ch->mtx);
739 ctlr->ch[ch->unit] = NULL;
740 mtx_unlock(&ch->mtx);
741 mtx_unlock(&ctlr->ch_mtx);
742 }
743
744 struct ahci_channel *
745 ahci_getch(device_t dev, int n)
746 {
747 struct ahci_controller *ctlr = device_get_softc(dev);
748 struct ahci_channel *ch;
749
750 KASSERT(n >= 0 && n < AHCI_MAX_PORTS, ("Bad channel number %d", n));
751 mtx_lock(&ctlr->ch_mtx);
752 ch = ctlr->ch[n];
753 if (ch != NULL)
754 mtx_lock(&ch->mtx);
755 mtx_unlock(&ctlr->ch_mtx);
756 return (ch);
757 }
758
759 void
760 ahci_putch(struct ahci_channel *ch)
761 {
762
763 mtx_unlock(&ch->mtx);
764 }
765
766 static int
767 ahci_ch_probe(device_t dev)
768 {
769
770 device_set_desc(dev, "AHCI channel");
771 return (BUS_PROBE_DEFAULT);
772 }
773
774 static int
775 ahci_ch_disablephy_proc(SYSCTL_HANDLER_ARGS)
776 {
777 struct ahci_channel *ch;
778 int error, value;
779
780 ch = arg1;
781 value = ch->disablephy;
782 error = sysctl_handle_int(oidp, &value, 0, req);
783 if (error != 0 || req->newptr == NULL || (value != 0 && value != 1))
784 return (error);
785
786 mtx_lock(&ch->mtx);
787 ch->disablephy = value;
788 if (value) {
789 ahci_ch_deinit(ch->dev);
790 } else {
791 ahci_ch_init(ch->dev);
792 ahci_phy_check_events(ch, ATA_SE_PHY_CHANGED | ATA_SE_EXCHANGED);
793 }
794 mtx_unlock(&ch->mtx);
795
796 return (0);
797 }
798
799 static int
800 ahci_ch_attach(device_t dev)
801 {
802 struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev));
803 struct ahci_channel *ch = device_get_softc(dev);
804 struct cam_devq *devq;
805 struct sysctl_ctx_list *ctx;
806 struct sysctl_oid *tree;
807 int rid, error, i, sata_rev = 0;
808 u_int32_t version;
809
810 ch->dev = dev;
811 ch->unit = (intptr_t)device_get_ivars(dev);
812 ch->caps = ctlr->caps;
813 ch->caps2 = ctlr->caps2;
814 ch->start = ctlr->ch_start;
815 ch->quirks = ctlr->quirks;
816 ch->vendorid = ctlr->vendorid;
817 ch->deviceid = ctlr->deviceid;
818 ch->subvendorid = ctlr->subvendorid;
819 ch->subdeviceid = ctlr->subdeviceid;
820 ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1;
821 mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF);
822 ch->pm_level = 0;
823 resource_int_value(device_get_name(dev),
824 device_get_unit(dev), "pm_level", &ch->pm_level);
825 STAILQ_INIT(&ch->doneq);
826 if (ch->pm_level > 3)
827 callout_init_mtx(&ch->pm_timer, &ch->mtx, 0);
828 callout_init_mtx(&ch->reset_timer, &ch->mtx, 0);
829 /* JMicron external ports (0) sometimes limited */
830 if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0)
831 sata_rev = 1;
832 if (ch->quirks & AHCI_Q_SATA2)
833 sata_rev = 2;
834 resource_int_value(device_get_name(dev),
835 device_get_unit(dev), "sata_rev", &sata_rev);
836 for (i = 0; i < 16; i++) {
837 ch->user[i].revision = sata_rev;
838 ch->user[i].mode = 0;
839 ch->user[i].bytecount = 8192;
840 ch->user[i].tags = ch->numslots;
841 ch->user[i].caps = 0;
842 ch->curr[i] = ch->user[i];
843 if (ch->pm_level) {
844 ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ |
845 CTS_SATA_CAPS_H_APST |
846 CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST;
847 }
848 ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA |
849 CTS_SATA_CAPS_H_AN;
850 }
851 rid = 0;
852 if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
853 &rid, RF_ACTIVE)))
854 return (ENXIO);
855 ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD);
856 version = ATA_INL(ctlr->r_mem, AHCI_VS);
857 if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS))
858 ch->chcaps |= AHCI_P_CMD_FBSCP;
859 if (ch->caps2 & AHCI_CAP2_SDS)
860 ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP);
861 if (bootverbose) {
862 device_printf(dev, "Caps:%s%s%s%s%s%s\n",
863 (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"",
864 (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"",
865 (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"",
866 (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"",
867 (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"",
868 (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":"");
869 }
870 ahci_dmainit(dev);
871 ahci_slotsalloc(dev);
872 mtx_lock(&ch->mtx);
873 ahci_ch_init(dev);
874 rid = ATA_IRQ_RID;
875 if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
876 &rid, RF_SHAREABLE | RF_ACTIVE))) {
877 device_printf(dev, "Unable to map interrupt\n");
878 error = ENXIO;
879 goto err0;
880 }
881 if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
882 ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr,
883 ch, &ch->ih))) {
884 device_printf(dev, "Unable to setup interrupt\n");
885 error = ENXIO;
886 goto err1;
887 }
888 /* Create the device queue for our SIM. */
889 devq = cam_simq_alloc(ch->numslots);
890 if (devq == NULL) {
891 device_printf(dev, "Unable to allocate simq\n");
892 error = ENOMEM;
893 goto err1;
894 }
895 /* Construct SIM entry */
896 ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch,
897 device_get_unit(dev), (struct mtx *)&ch->mtx,
898 (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots),
899 (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0,
900 devq);
901 if (ch->sim == NULL) {
902 cam_simq_free(devq);
903 device_printf(dev, "unable to allocate sim\n");
904 error = ENOMEM;
905 goto err1;
906 }
907 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
908 device_printf(dev, "unable to register xpt bus\n");
909 error = ENXIO;
910 goto err2;
911 }
912 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
913 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
914 device_printf(dev, "unable to create path\n");
915 error = ENXIO;
916 goto err3;
917 }
918 if (ch->pm_level > 3) {
919 callout_reset(&ch->pm_timer,
920 (ch->pm_level == 4) ? hz / 1000 : hz / 8,
921 ahci_ch_pm, ch);
922 }
923 mtx_unlock(&ch->mtx);
924 ahci_attached(device_get_parent(dev), ch);
925 ctx = device_get_sysctl_ctx(dev);
926 tree = device_get_sysctl_tree(dev);
927 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "disable_phy",
928 CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, ch,
929 0, ahci_ch_disablephy_proc, "IU", "Disable PHY");
930 return (0);
931
932 err3:
933 xpt_bus_deregister(cam_sim_path(ch->sim));
934 err2:
935 cam_sim_free(ch->sim, /*free_devq*/TRUE);
936 err1:
937 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
938 err0:
939 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
940 mtx_unlock(&ch->mtx);
941 mtx_destroy(&ch->mtx);
942 return (error);
943 }
944
945 static int
946 ahci_ch_detach(device_t dev)
947 {
948 struct ahci_channel *ch = device_get_softc(dev);
949
950 ahci_detached(device_get_parent(dev), ch);
951 mtx_lock(&ch->mtx);
952 xpt_async(AC_LOST_DEVICE, ch->path, NULL);
953 /* Forget about reset. */
954 if (ch->resetting) {
955 ch->resetting = 0;
956 xpt_release_simq(ch->sim, TRUE);
957 }
958 xpt_free_path(ch->path);
959 xpt_bus_deregister(cam_sim_path(ch->sim));
960 cam_sim_free(ch->sim, /*free_devq*/TRUE);
961 mtx_unlock(&ch->mtx);
962
963 if (ch->pm_level > 3)
964 callout_drain(&ch->pm_timer);
965 callout_drain(&ch->reset_timer);
966 bus_teardown_intr(dev, ch->r_irq, ch->ih);
967 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
968
969 ahci_ch_deinit(dev);
970 ahci_slotsfree(dev);
971 ahci_dmafini(dev);
972
973 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
974 mtx_destroy(&ch->mtx);
975 return (0);
976 }
977
978 static int
979 ahci_ch_init(device_t dev)
980 {
981 struct ahci_channel *ch = device_get_softc(dev);
982 uint64_t work;
983
984 /* Disable port interrupts */
985 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
986 /* Setup work areas */
987 work = ch->dma.work_bus + AHCI_CL_OFFSET;
988 ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff);
989 ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32);
990 work = ch->dma.rfis_bus;
991 ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff);
992 ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32);
993 /* Activate the channel and power/spin up device */
994 ATA_OUTL(ch->r_mem, AHCI_P_CMD,
995 (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD |
996 ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) |
997 ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 )));
998 ahci_start_fr(ch);
999 ahci_start(ch, 1);
1000 return (0);
1001 }
1002
1003 static int
1004 ahci_ch_deinit(device_t dev)
1005 {
1006 struct ahci_channel *ch = device_get_softc(dev);
1007
1008 /* Disable port interrupts. */
1009 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
1010 /* Reset command register. */
1011 ahci_stop(ch);
1012 ahci_stop_fr(ch);
1013 ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0);
1014 /* Allow everything, including partial and slumber modes. */
1015 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0);
1016 /* Request slumber mode transition and give some time to get there. */
1017 ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER);
1018 DELAY(100);
1019 /* Disable PHY. */
1020 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE);
1021 return (0);
1022 }
1023
1024 static int
1025 ahci_ch_suspend(device_t dev)
1026 {
1027 struct ahci_channel *ch = device_get_softc(dev);
1028
1029 mtx_lock(&ch->mtx);
1030 xpt_freeze_simq(ch->sim, 1);
1031 /* Forget about reset. */
1032 if (ch->resetting) {
1033 ch->resetting = 0;
1034 callout_stop(&ch->reset_timer);
1035 xpt_release_simq(ch->sim, TRUE);
1036 }
1037 while (ch->oslots)
1038 msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100);
1039 ahci_ch_deinit(dev);
1040 mtx_unlock(&ch->mtx);
1041 return (0);
1042 }
1043
1044 static int
1045 ahci_ch_resume(device_t dev)
1046 {
1047 struct ahci_channel *ch = device_get_softc(dev);
1048
1049 mtx_lock(&ch->mtx);
1050 ahci_ch_init(dev);
1051 ahci_reset(ch);
1052 xpt_release_simq(ch->sim, TRUE);
1053 mtx_unlock(&ch->mtx);
1054 return (0);
1055 }
1056
1057 static device_method_t ahcich_methods[] = {
1058 DEVMETHOD(device_probe, ahci_ch_probe),
1059 DEVMETHOD(device_attach, ahci_ch_attach),
1060 DEVMETHOD(device_detach, ahci_ch_detach),
1061 DEVMETHOD(device_suspend, ahci_ch_suspend),
1062 DEVMETHOD(device_resume, ahci_ch_resume),
1063 DEVMETHOD_END
1064 };
1065 static driver_t ahcich_driver = {
1066 "ahcich",
1067 ahcich_methods,
1068 sizeof(struct ahci_channel)
1069 };
1070 DRIVER_MODULE(ahcich, ahci, ahcich_driver, NULL, NULL);
1071
1072 struct ahci_dc_cb_args {
1073 bus_addr_t maddr;
1074 int error;
1075 };
1076
1077 static void
1078 ahci_dmainit(device_t dev)
1079 {
1080 struct ahci_channel *ch = device_get_softc(dev);
1081 struct ahci_dc_cb_args dcba;
1082 size_t rfsize;
1083 int error;
1084
1085 /* Command area. */
1086 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0,
1087 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1088 NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE,
1089 0, NULL, NULL, &ch->dma.work_tag);
1090 if (error != 0)
1091 goto error;
1092 error = bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work,
1093 BUS_DMA_ZERO, &ch->dma.work_map);
1094 if (error != 0)
1095 goto error;
1096 error = bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
1097 AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT);
1098 if (error != 0 || (error = dcba.error) != 0) {
1099 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
1100 goto error;
1101 }
1102 ch->dma.work_bus = dcba.maddr;
1103 /* FIS receive area. */
1104 if (ch->chcaps & AHCI_P_CMD_FBSCP)
1105 rfsize = 4096;
1106 else
1107 rfsize = 256;
1108 error = bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0,
1109 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1110 NULL, NULL, rfsize, 1, rfsize,
1111 0, NULL, NULL, &ch->dma.rfis_tag);
1112 if (error != 0)
1113 goto error;
1114 error = bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0,
1115 &ch->dma.rfis_map);
1116 if (error != 0)
1117 goto error;
1118 error = bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis,
1119 rfsize, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT);
1120 if (error != 0 || (error = dcba.error) != 0) {
1121 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map);
1122 goto error;
1123 }
1124 ch->dma.rfis_bus = dcba.maddr;
1125 /* Data area. */
1126 error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
1127 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1128 NULL, NULL,
1129 AHCI_SG_ENTRIES * PAGE_SIZE, AHCI_SG_ENTRIES, AHCI_PRD_MAX,
1130 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag);
1131 if (error != 0)
1132 goto error;
1133 return;
1134
1135 error:
1136 device_printf(dev, "WARNING - DMA initialization failed, error %d\n",
1137 error);
1138 ahci_dmafini(dev);
1139 }
1140
1141 static void
1142 ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
1143 {
1144 struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc;
1145
1146 if (!(dcba->error = error))
1147 dcba->maddr = segs[0].ds_addr;
1148 }
1149
1150 static void
1151 ahci_dmafini(device_t dev)
1152 {
1153 struct ahci_channel *ch = device_get_softc(dev);
1154
1155 if (ch->dma.data_tag) {
1156 bus_dma_tag_destroy(ch->dma.data_tag);
1157 ch->dma.data_tag = NULL;
1158 }
1159 if (ch->dma.rfis_bus) {
1160 bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map);
1161 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map);
1162 ch->dma.rfis_bus = 0;
1163 ch->dma.rfis = NULL;
1164 }
1165 if (ch->dma.work_bus) {
1166 bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map);
1167 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
1168 ch->dma.work_bus = 0;
1169 ch->dma.work = NULL;
1170 }
1171 if (ch->dma.work_tag) {
1172 bus_dma_tag_destroy(ch->dma.work_tag);
1173 ch->dma.work_tag = NULL;
1174 }
1175 }
1176
1177 static void
1178 ahci_slotsalloc(device_t dev)
1179 {
1180 struct ahci_channel *ch = device_get_softc(dev);
1181 int i;
1182
1183 /* Alloc and setup command/dma slots */
1184 bzero(ch->slot, sizeof(ch->slot));
1185 for (i = 0; i < ch->numslots; i++) {
1186 struct ahci_slot *slot = &ch->slot[i];
1187
1188 slot->ch = ch;
1189 slot->slot = i;
1190 slot->state = AHCI_SLOT_EMPTY;
1191 slot->ct_offset = AHCI_CT_OFFSET + AHCI_CT_SIZE * i;
1192 slot->ccb = NULL;
1193 callout_init_mtx(&slot->timeout, &ch->mtx, 0);
1194
1195 if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map))
1196 device_printf(ch->dev, "FAILURE - create data_map\n");
1197 }
1198 }
1199
1200 static void
1201 ahci_slotsfree(device_t dev)
1202 {
1203 struct ahci_channel *ch = device_get_softc(dev);
1204 int i;
1205
1206 /* Free all dma slots */
1207 for (i = 0; i < ch->numslots; i++) {
1208 struct ahci_slot *slot = &ch->slot[i];
1209
1210 callout_drain(&slot->timeout);
1211 if (slot->dma.data_map) {
1212 bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map);
1213 slot->dma.data_map = NULL;
1214 }
1215 }
1216 }
1217
1218 static int
1219 ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr)
1220 {
1221
1222 if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) ||
1223 ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) {
1224 u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS);
1225 union ccb *ccb;
1226
1227 if (bootverbose) {
1228 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE)
1229 device_printf(ch->dev, "CONNECT requested\n");
1230 else
1231 device_printf(ch->dev, "DISCONNECT requested\n");
1232 }
1233 ahci_reset(ch);
1234 if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
1235 return (0);
1236 if (xpt_create_path(&ccb->ccb_h.path, NULL,
1237 cam_sim_path(ch->sim),
1238 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1239 xpt_free_ccb(ccb);
1240 return (0);
1241 }
1242 xpt_rescan(ccb);
1243 return (1);
1244 }
1245 return (0);
1246 }
1247
1248 static void
1249 ahci_cpd_check_events(struct ahci_channel *ch)
1250 {
1251 u_int32_t status;
1252 union ccb *ccb;
1253 device_t dev;
1254
1255 if (ch->pm_level == 0)
1256 return;
1257
1258 status = ATA_INL(ch->r_mem, AHCI_P_CMD);
1259 if ((status & AHCI_P_CMD_CPD) == 0)
1260 return;
1261
1262 if (bootverbose) {
1263 dev = ch->dev;
1264 if (status & AHCI_P_CMD_CPS) {
1265 device_printf(dev, "COLD CONNECT requested\n");
1266 } else
1267 device_printf(dev, "COLD DISCONNECT requested\n");
1268 }
1269 ahci_reset(ch);
1270 if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
1271 return;
1272 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim),
1273 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1274 xpt_free_ccb(ccb);
1275 return;
1276 }
1277 xpt_rescan(ccb);
1278 }
1279
1280 static void
1281 ahci_notify_events(struct ahci_channel *ch, u_int32_t status)
1282 {
1283 struct cam_path *dpath;
1284 int i;
1285
1286 if (ch->caps & AHCI_CAP_SSNTF)
1287 ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status);
1288 if (bootverbose)
1289 device_printf(ch->dev, "SNTF 0x%04x\n", status);
1290 for (i = 0; i < 16; i++) {
1291 if ((status & (1 << i)) == 0)
1292 continue;
1293 if (xpt_create_path(&dpath, NULL,
1294 xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) {
1295 xpt_async(AC_SCSI_AEN, dpath, NULL);
1296 xpt_free_path(dpath);
1297 }
1298 }
1299 }
1300
1301 static void
1302 ahci_done(struct ahci_channel *ch, union ccb *ccb)
1303 {
1304
1305 mtx_assert(&ch->mtx, MA_OWNED);
1306 if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 ||
1307 ch->batch == 0) {
1308 xpt_done(ccb);
1309 return;
1310 }
1311
1312 STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe);
1313 }
1314
1315 static void
1316 ahci_ch_intr(void *arg)
1317 {
1318 struct ahci_channel *ch = (struct ahci_channel *)arg;
1319 uint32_t istatus;
1320
1321 /* Read interrupt statuses. */
1322 istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
1323
1324 mtx_lock(&ch->mtx);
1325 ahci_ch_intr_main(ch, istatus);
1326 mtx_unlock(&ch->mtx);
1327 }
1328
1329 static void
1330 ahci_ch_intr_direct(void *arg)
1331 {
1332 struct ahci_channel *ch = (struct ahci_channel *)arg;
1333 struct ccb_hdr *ccb_h;
1334 uint32_t istatus;
1335 STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq);
1336
1337 /* Read interrupt statuses. */
1338 istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
1339
1340 mtx_lock(&ch->mtx);
1341 ch->batch = 1;
1342 ahci_ch_intr_main(ch, istatus);
1343 ch->batch = 0;
1344 /*
1345 * Prevent the possibility of issues caused by processing the queue
1346 * while unlocked below by moving the contents to a local queue.
1347 */
1348 STAILQ_CONCAT(&tmp_doneq, &ch->doneq);
1349 mtx_unlock(&ch->mtx);
1350 while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) {
1351 STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe);
1352 xpt_done_direct((union ccb *)ccb_h);
1353 }
1354 }
1355
1356 static void
1357 ahci_ch_pm(void *arg)
1358 {
1359 struct ahci_channel *ch = (struct ahci_channel *)arg;
1360 uint32_t work;
1361
1362 if (ch->numrslots != 0)
1363 return;
1364 work = ATA_INL(ch->r_mem, AHCI_P_CMD);
1365 if (ch->pm_level == 4)
1366 work |= AHCI_P_CMD_PARTIAL;
1367 else
1368 work |= AHCI_P_CMD_SLUMBER;
1369 ATA_OUTL(ch->r_mem, AHCI_P_CMD, work);
1370 }
1371
1372 static void
1373 ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus)
1374 {
1375 uint32_t cstatus, serr = 0, sntf = 0, ok, err;
1376 enum ahci_err_type et;
1377 int i, ccs, port, reset = 0;
1378
1379 /* Clear interrupt statuses. */
1380 ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus);
1381 /* Read command statuses. */
1382 if (ch->numtslots != 0)
1383 cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
1384 else
1385 cstatus = 0;
1386 if (ch->numrslots != ch->numtslots)
1387 cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI);
1388 /* Read SNTF in one of possible ways. */
1389 if ((istatus & AHCI_P_IX_SDB) &&
1390 (ch->pm_present || ch->curr[0].atapi != 0)) {
1391 if (ch->caps & AHCI_CAP_SSNTF)
1392 sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF);
1393 else if (ch->fbs_enabled) {
1394 u_int8_t *fis = ch->dma.rfis + 0x58;
1395
1396 for (i = 0; i < 16; i++) {
1397 if (fis[1] & 0x80) {
1398 fis[1] &= 0x7f;
1399 sntf |= 1 << i;
1400 }
1401 fis += 256;
1402 }
1403 } else {
1404 u_int8_t *fis = ch->dma.rfis + 0x58;
1405
1406 if (fis[1] & 0x80)
1407 sntf = (1 << (fis[1] & 0x0f));
1408 }
1409 }
1410 /* Process PHY events */
1411 if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF |
1412 AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
1413 serr = ATA_INL(ch->r_mem, AHCI_P_SERR);
1414 if (serr) {
1415 ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr);
1416 reset = ahci_phy_check_events(ch, serr);
1417 }
1418 }
1419 /* Process cold presence detection events */
1420 if ((istatus & AHCI_P_IX_CPD) && !reset)
1421 ahci_cpd_check_events(ch);
1422 /* Process command errors */
1423 if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF |
1424 AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
1425 if (ch->quirks & AHCI_Q_NOCCS) {
1426 /*
1427 * ASMedia chips sometimes report failed commands as
1428 * completed. Count all running commands as failed.
1429 */
1430 cstatus |= ch->rslots;
1431
1432 /* They also report wrong CCS, so try to guess one. */
1433 ccs = powerof2(cstatus) ? ffs(cstatus) - 1 : -1;
1434 } else {
1435 ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) &
1436 AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT;
1437 }
1438 //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n",
1439 // __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD),
1440 // serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs);
1441 port = -1;
1442 if (ch->fbs_enabled) {
1443 uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS);
1444 if (fbs & AHCI_P_FBS_SDE) {
1445 port = (fbs & AHCI_P_FBS_DWE)
1446 >> AHCI_P_FBS_DWE_SHIFT;
1447 } else {
1448 for (i = 0; i < 16; i++) {
1449 if (ch->numrslotspd[i] == 0)
1450 continue;
1451 if (port == -1)
1452 port = i;
1453 else if (port != i) {
1454 port = -2;
1455 break;
1456 }
1457 }
1458 }
1459 }
1460 err = ch->rslots & cstatus;
1461 } else {
1462 ccs = 0;
1463 err = 0;
1464 port = -1;
1465 }
1466 /* Complete all successful commands. */
1467 ok = ch->rslots & ~cstatus;
1468 for (i = 0; i < ch->numslots; i++) {
1469 if ((ok >> i) & 1)
1470 ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE);
1471 }
1472 /* On error, complete the rest of commands with error statuses. */
1473 if (err) {
1474 if (ch->frozen) {
1475 union ccb *fccb = ch->frozen;
1476 ch->frozen = NULL;
1477 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
1478 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
1479 xpt_freeze_devq(fccb->ccb_h.path, 1);
1480 fccb->ccb_h.status |= CAM_DEV_QFRZN;
1481 }
1482 ahci_done(ch, fccb);
1483 }
1484 for (i = 0; i < ch->numslots; i++) {
1485 /* XXX: requests in loading state. */
1486 if (((err >> i) & 1) == 0)
1487 continue;
1488 if (port >= 0 &&
1489 ch->slot[i].ccb->ccb_h.target_id != port)
1490 continue;
1491 if (istatus & AHCI_P_IX_TFE) {
1492 if (port != -2) {
1493 /* Task File Error */
1494 if (ch->numtslotspd[
1495 ch->slot[i].ccb->ccb_h.target_id] == 0) {
1496 /* Untagged operation. */
1497 if (i == ccs)
1498 et = AHCI_ERR_TFE;
1499 else
1500 et = AHCI_ERR_INNOCENT;
1501 } else {
1502 /* Tagged operation. */
1503 et = AHCI_ERR_NCQ;
1504 }
1505 } else {
1506 et = AHCI_ERR_TFE;
1507 ch->fatalerr = 1;
1508 }
1509 } else if (istatus & AHCI_P_IX_IF) {
1510 if (ch->numtslots == 0 && i != ccs && port != -2)
1511 et = AHCI_ERR_INNOCENT;
1512 else
1513 et = AHCI_ERR_SATA;
1514 } else
1515 et = AHCI_ERR_INVALID;
1516 ahci_end_transaction(&ch->slot[i], et);
1517 }
1518 /*
1519 * We can't reinit port if there are some other
1520 * commands active, use resume to complete them.
1521 */
1522 if (ch->rslots != 0 && !ch->recoverycmd)
1523 ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC);
1524 }
1525 /* Process NOTIFY events */
1526 if (sntf)
1527 ahci_notify_events(ch, sntf);
1528 }
1529
1530 /* Must be called with channel locked. */
1531 static int
1532 ahci_check_collision(struct ahci_channel *ch, union ccb *ccb)
1533 {
1534 int t = ccb->ccb_h.target_id;
1535
1536 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1537 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1538 /* Tagged command while we have no supported tag free. */
1539 if (((~ch->oslots) & (0xffffffff >> (32 -
1540 ch->curr[t].tags))) == 0)
1541 return (1);
1542 /* If we have FBS */
1543 if (ch->fbs_enabled) {
1544 /* Tagged command while untagged are active. */
1545 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0)
1546 return (1);
1547 } else {
1548 /* Tagged command while untagged are active. */
1549 if (ch->numrslots != 0 && ch->numtslots == 0)
1550 return (1);
1551 /* Tagged command while tagged to other target is active. */
1552 if (ch->numtslots != 0 &&
1553 ch->taggedtarget != ccb->ccb_h.target_id)
1554 return (1);
1555 }
1556 } else {
1557 /* If we have FBS */
1558 if (ch->fbs_enabled) {
1559 /* Untagged command while tagged are active. */
1560 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0)
1561 return (1);
1562 } else {
1563 /* Untagged command while tagged are active. */
1564 if (ch->numrslots != 0 && ch->numtslots != 0)
1565 return (1);
1566 }
1567 }
1568 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1569 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) {
1570 /* Atomic command while anything active. */
1571 if (ch->numrslots != 0)
1572 return (1);
1573 }
1574 /* We have some atomic command running. */
1575 if (ch->aslots != 0)
1576 return (1);
1577 return (0);
1578 }
1579
1580 /* Must be called with channel locked. */
1581 static void
1582 ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb)
1583 {
1584 struct ahci_slot *slot;
1585 int tag, tags;
1586
1587 /* Choose empty slot. */
1588 tags = ch->numslots;
1589 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1590 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA))
1591 tags = ch->curr[ccb->ccb_h.target_id].tags;
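/* Pick the next free tag after the last one used, wrapping to the lowest free tag, to spread tag usage evenly. */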
1592 if (ch->lastslot + 1 < tags)
1593 tag = ffs(~(ch->oslots >> (ch->lastslot + 1)));
1594 else
1595 tag = 0;
1596 if (tag == 0 || tag + ch->lastslot >= tags)
1597 tag = ffs(~ch->oslots) - 1;
1598 else
1599 tag += ch->lastslot;
1600 ch->lastslot = tag;
1601 /* Occupy chosen slot. */
1602 slot = &ch->slot[tag];
1603 slot->ccb = ccb;
1604 /* Stop PM timer. */
1605 if (ch->numrslots == 0 && ch->pm_level > 3)
1606 callout_stop(&ch->pm_timer);
1607 /* Update channel stats. */
1608 ch->oslots |= (1 << tag);
1609 ch->numrslots++;
1610 ch->numrslotspd[ccb->ccb_h.target_id]++;
1611 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1612 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1613 ch->numtslots++;
1614 ch->numtslotspd[ccb->ccb_h.target_id]++;
1615 ch->taggedtarget = ccb->ccb_h.target_id;
1616 }
1617 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1618 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT)))
1619 ch->aslots |= (1 << tag);
1620 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1621 slot->state = AHCI_SLOT_LOADING;
1622 bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb,
1623 ahci_dmasetprd, slot, 0);
1624 } else {
1625 slot->dma.nsegs = 0;
1626 ahci_execute_transaction(slot);
1627 }
1628 }
1629
1630 /* Locked by busdma engine. */
1631 static void
1632 ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1633 {
1634 struct ahci_slot *slot = arg;
1635 struct ahci_channel *ch = slot->ch;
1636 struct ahci_cmd_tab *ctp;
1637 struct ahci_dma_prd *prd;
1638 int i;
1639
1640 if (error) {
1641 device_printf(ch->dev, "DMA load error\n");
1642 ahci_end_transaction(slot, AHCI_ERR_INVALID);
1643 return;
1644 }
1645 KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n"));
1646 /* Get a piece of the workspace for this request */
1647 ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset);
1648 /* Fill S/G table */
1649 prd = &ctp->prd_tab[0];
1650 for (i = 0; i < nsegs; i++) {
1651 prd[i].dba = htole64(segs[i].ds_addr);
1652 prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK);
1653 }
1654 slot->dma.nsegs = nsegs;
1655 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
1656 ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ?
1657 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
1658 ahci_execute_transaction(slot);
1659 }
1660
1661 /* Must be called with channel locked. */
1662 static void
1663 ahci_execute_transaction(struct ahci_slot *slot)
1664 {
1665 struct ahci_channel *ch = slot->ch;
1666 struct ahci_cmd_tab *ctp;
1667 struct ahci_cmd_list *clp;
1668 union ccb *ccb = slot->ccb;
1669 int port = ccb->ccb_h.target_id & 0x0f;
1670 int fis_size, i, softreset;
1671 uint8_t *fis = ch->dma.rfis + 0x40;
1672 uint8_t val;
1673 uint16_t cmd_flags;
1674
1675 /* Get a piece of the workspace for this request */
1676 ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset);
1677 /* Setup the FIS for this request */
1678 if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) {
1679 device_printf(ch->dev, "Setting up SATA FIS failed\n");
1680 ahci_end_transaction(slot, AHCI_ERR_INVALID);
1681 return;
1682 }
1683 /* Setup the command list entry */
1684 clp = (struct ahci_cmd_list *)
1685 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
1686 cmd_flags =
1687 (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) |
1688 (ccb->ccb_h.func_code == XPT_SCSI_IO ?
1689 (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) |
1690 (fis_size / sizeof(u_int32_t)) |
1691 (port << 12);
1692 clp->prd_length = htole16(slot->dma.nsegs);
1693 /* Special handling for Soft Reset command. */
1694 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1695 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) {
1696 if (ccb->ataio.cmd.control & ATA_A_RESET) {
1697 softreset = 1;
1698 /* Kick controller into sane state */
1699 ahci_stop(ch);
1700 ahci_clo(ch);
1701 ahci_start(ch, 0);
1702 cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY;
1703 } else {
1704 softreset = 2;
1705 /* Prepare FIS receive area for check. */
1706 for (i = 0; i < 20; i++)
1707 fis[i] = 0xff;
1708 }
1709 } else
1710 softreset = 0;
1711 clp->bytecount = 0;
1712 clp->cmd_flags = htole16(cmd_flags);
1713 clp->cmd_table_phys = htole64(ch->dma.work_bus + slot->ct_offset);
1714 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
1715 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1716 bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
1717 BUS_DMASYNC_PREREAD);
1718 /* Set ACTIVE bit for NCQ commands. */
1719 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1720 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1721 ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot);
1722 }
1723 /* If FBS is enabled, set PMP port. */
1724 if (ch->fbs_enabled) {
1725 ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN |
1726 (port << AHCI_P_FBS_DEV_SHIFT));
1727 }
1728 /* Issue command to the controller. */
1729 slot->state = AHCI_SLOT_RUNNING;
1730 ch->rslots |= (1 << slot->slot);
1731 ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot));
1732 /* Device reset commands doesn't interrupt. Poll them. */
1733 if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1734 (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) {
1735 int count, timeout = ccb->ccb_h.timeout * 100;
1736 enum ahci_err_type et = AHCI_ERR_NONE;
1737
1738 for (count = 0; count < timeout; count++) {
1739 DELAY(10);
1740 if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot)))
1741 break;
1742 if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) &&
1743 softreset != 1) {
1744 #if 0
1745 device_printf(ch->dev,
1746 "Poll error on slot %d, TFD: %04x\n",
1747 slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD));
1748 #endif
1749 et = AHCI_ERR_TFE;
1750 break;
1751 }
1752 /* Workaround for ATI SB600/SB700 chipsets. */
1753 if (ccb->ccb_h.target_id == 15 &&
1754 (ch->quirks & AHCI_Q_ATI_PMP_BUG) &&
1755 (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) {
1756 et = AHCI_ERR_TIMEOUT;
1757 break;
1758 }
1759 }
1760
1761 /*
1762 * Some Marvell controllers require additional time
1763 * after soft reset to work properly. Delay for
1764 * 50ms after soft reset.
1765 */
1766 if (ch->quirks & AHCI_Q_MRVL_SR_DEL)
1767 DELAY(50000);
1768
1769 /*
1770 * Marvell HBAs with non-RAID firmware do not wait for
1771 * readiness after soft reset, so we have to wait here.
1772 * Marvell RAIDs do not have this problem, but instead
1773 * sometimes forget to update FIS receive area, breaking
1774 * this wait.
1775 */
1776 if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 &&
1777 (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 &&
1778 softreset == 2 && et == AHCI_ERR_NONE) {
1779 for ( ; count < timeout; count++) {
1780 bus_dmamap_sync(ch->dma.rfis_tag,
1781 ch->dma.rfis_map, BUS_DMASYNC_POSTREAD);
1782 val = fis[2];
1783 bus_dmamap_sync(ch->dma.rfis_tag,
1784 ch->dma.rfis_map, BUS_DMASYNC_PREREAD);
1785 if ((val & ATA_S_BUSY) == 0)
1786 break;
1787 DELAY(10);
1788 }
1789 }
1790
1791 if (timeout && (count >= timeout)) {
1792 device_printf(ch->dev, "Poll timeout on slot %d port %d\n",
1793 slot->slot, port);
1794 device_printf(ch->dev, "is %08x cs %08x ss %08x "
1795 "rs %08x tfd %02x serr %08x cmd %08x\n",
1796 ATA_INL(ch->r_mem, AHCI_P_IS),
1797 ATA_INL(ch->r_mem, AHCI_P_CI),
1798 ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
1799 ATA_INL(ch->r_mem, AHCI_P_TFD),
1800 ATA_INL(ch->r_mem, AHCI_P_SERR),
1801 ATA_INL(ch->r_mem, AHCI_P_CMD));
1802 et = AHCI_ERR_TIMEOUT;
1803 }
1804
1805 /* Kick controller into sane state and enable FBS. */
1806 if (softreset == 2)
1807 ch->eslots |= (1 << slot->slot);
1808 ahci_end_transaction(slot, et);
1809 return;
1810 }
1811 /* Start command execution timeout */
1812 callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2,
1813 0, ahci_timeout, slot, 0);
1814 return;
1815 }
1816
1817 /* Must be called with channel locked. */
1818 static void
1819 ahci_process_timeout(struct ahci_channel *ch)
1820 {
1821 int i;
1822
1823 mtx_assert(&ch->mtx, MA_OWNED);
1824 /* Handle the rest of commands. */
1825 for (i = 0; i < ch->numslots; i++) {
1826 /* Do we have a running request on slot? */
1827 if (ch->slot[i].state < AHCI_SLOT_RUNNING)
1828 continue;
1829 ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT);
1830 }
1831 }
1832
1833 /* Must be called with channel locked. */
1834 static void
1835 ahci_rearm_timeout(struct ahci_channel *ch)
1836 {
1837 int i;
1838
1839 mtx_assert(&ch->mtx, MA_OWNED);
1840 for (i = 0; i < ch->numslots; i++) {
1841 struct ahci_slot *slot = &ch->slot[i];
1842
1843 /* Do we have a running request on slot? */
1844 if (slot->state < AHCI_SLOT_RUNNING)
1845 continue;
1846 if ((ch->toslots & (1 << i)) == 0)
1847 continue;
1848 callout_reset_sbt(&slot->timeout,
1849 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0,
1850 ahci_timeout, slot, 0);
1851 }
1852 }
1853
1854 /* Locked by callout mechanism. */
1855 static void
1856 ahci_timeout(void *arg)
1857 {
1858 struct ahci_slot *slot = arg;
1859 struct ahci_channel *ch = slot->ch;
1860 device_t dev = ch->dev;
1861 uint32_t sstatus;
1862 int ccs;
1863 int i;
1864
1865 /* Check for stale timeout. */
1866 if (slot->state < AHCI_SLOT_RUNNING)
1867 return;
1868
1869 /* Check if slot was not being executed last time we checked. */
1870 if (slot->state < AHCI_SLOT_EXECUTING) {
1871 /* Check if slot started executing. */
1872 sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
1873 ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
1874 >> AHCI_P_CMD_CCS_SHIFT;
1875 if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot ||
1876 ch->fbs_enabled || ch->wrongccs)
1877 slot->state = AHCI_SLOT_EXECUTING;
1878 else if ((ch->rslots & (1 << ccs)) == 0) {
1879 ch->wrongccs = 1;
1880 slot->state = AHCI_SLOT_EXECUTING;
1881 }
1882
1883 callout_reset_sbt(&slot->timeout,
1884 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0,
1885 ahci_timeout, slot, 0);
1886 return;
1887 }
1888
1889 device_printf(dev, "Timeout on slot %d port %d\n",
1890 slot->slot, slot->ccb->ccb_h.target_id & 0x0f);
1891 device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x "
1892 "serr %08x cmd %08x\n",
1893 ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI),
1894 ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
1895 ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR),
1896 ATA_INL(ch->r_mem, AHCI_P_CMD));
1897
1898 /* Handle frozen command. */
1899 if (ch->frozen) {
1900 union ccb *fccb = ch->frozen;
1901 ch->frozen = NULL;
1902 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
1903 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
1904 xpt_freeze_devq(fccb->ccb_h.path, 1);
1905 fccb->ccb_h.status |= CAM_DEV_QFRZN;
1906 }
1907 ahci_done(ch, fccb);
1908 }
1909 if (!ch->fbs_enabled && !ch->wrongccs) {
1910 /* Without FBS we know the real timeout source. */
1911 ch->fatalerr = 1;
1912 /* Handle command with timeout. */
1913 ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT);
1914 /* Handle the rest of commands. */
1915 for (i = 0; i < ch->numslots; i++) {
1916 /* Do we have a running request on slot? */
1917 if (ch->slot[i].state < AHCI_SLOT_RUNNING)
1918 continue;
1919 ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
1920 }
1921 } else {
1922 /* With FBS we wait for the other commands to time out and pray. */
1923 if (ch->toslots == 0)
1924 xpt_freeze_simq(ch->sim, 1);
1925 ch->toslots |= (1 << slot->slot);
1926 if ((ch->rslots & ~ch->toslots) == 0)
1927 ahci_process_timeout(ch);
1928 else
1929 device_printf(dev, " ... waiting for slots %08x\n",
1930 ch->rslots & ~ch->toslots);
1931 }
1932 }
1933
1934 /* Must be called with channel locked. */
1935 static void
1936 ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
1937 {
1938 struct ahci_channel *ch = slot->ch;
1939 union ccb *ccb = slot->ccb;
1940 struct ahci_cmd_list *clp;
1941 int lastto;
1942 uint32_t sig;
1943
1944 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
1945 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1946 clp = (struct ahci_cmd_list *)
1947 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
1948 /* Read result registers into the result struct.
1949 * May be incorrect if several commands finished at the same time,
1950 * so read only when sure or when we have to.
1951 */
1952 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1953 struct ata_res *res = &ccb->ataio.res;
1954
1955 if ((et == AHCI_ERR_TFE) ||
1956 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) {
1957 u_int8_t *fis = ch->dma.rfis + 0x40;
1958
1959 bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
1960 BUS_DMASYNC_POSTREAD);
1961 if (ch->fbs_enabled) {
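/*
 * With FIS-based switching each PMP port gets its own 256-byte
 * slice of the receive FIS area, so index by target_id here.
 */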
1962 fis += ccb->ccb_h.target_id * 256;
1963 res->status = fis[2];
1964 res->error = fis[3];
1965 } else {
1966 uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD);
1967
1968 res->status = tfd;
1969 res->error = tfd >> 8;
1970 }
1971 res->lba_low = fis[4];
1972 res->lba_mid = fis[5];
1973 res->lba_high = fis[6];
1974 res->device = fis[7];
1975 res->lba_low_exp = fis[8];
1976 res->lba_mid_exp = fis[9];
1977 res->lba_high_exp = fis[10];
1978 res->sector_count = fis[12];
1979 res->sector_count_exp = fis[13];
1980
1981 /*
1982 * Some weird controllers do not return signature in
1983 * FIS receive area. Read it from PxSIG register.
1984 */
1985 if ((ch->quirks & AHCI_Q_ALTSIG) &&
1986 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1987 (ccb->ataio.cmd.control & ATA_A_RESET) == 0) {
1988 sig = ATA_INL(ch->r_mem, AHCI_P_SIG);
1989 res->lba_high = sig >> 24;
1990 res->lba_mid = sig >> 16;
1991 res->lba_low = sig >> 8;
1992 res->sector_count = sig;
1993 }
1994 } else
1995 bzero(res, sizeof(*res));
1996 if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 &&
1997 (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1998 (ch->quirks & AHCI_Q_NOCOUNT) == 0) {
1999 ccb->ataio.resid =
2000 ccb->ataio.dxfer_len - le32toh(clp->bytecount);
2001 }
2002 } else {
2003 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
2004 (ch->quirks & AHCI_Q_NOCOUNT) == 0) {
2005 ccb->csio.resid =
2006 ccb->csio.dxfer_len - le32toh(clp->bytecount);
2007 }
2008 }
2009 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2010 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
2011 (ccb->ccb_h.flags & CAM_DIR_IN) ?
2012 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2013 bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map);
2014 }
2015 if (et != AHCI_ERR_NONE)
2016 ch->eslots |= (1 << slot->slot);
2017 /* In case of error, freeze device for proper recovery. */
2018 if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) &&
2019 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
2020 xpt_freeze_devq(ccb->ccb_h.path, 1);
2021 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2022 }
2023 /* Set proper result status. */
2024 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2025 switch (et) {
2026 case AHCI_ERR_NONE:
2027 ccb->ccb_h.status |= CAM_REQ_CMP;
2028 if (ccb->ccb_h.func_code == XPT_SCSI_IO)
2029 ccb->csio.scsi_status = SCSI_STATUS_OK;
2030 break;
2031 case AHCI_ERR_INVALID:
2032 ch->fatalerr = 1;
2033 ccb->ccb_h.status |= CAM_REQ_INVALID;
2034 break;
2035 case AHCI_ERR_INNOCENT:
2036 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2037 break;
2038 case AHCI_ERR_TFE:
2039 case AHCI_ERR_NCQ:
2040 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2041 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2042 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2043 } else {
2044 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
2045 }
2046 break;
2047 case AHCI_ERR_SATA:
2048 ch->fatalerr = 1;
2049 if (!ch->recoverycmd) {
2050 xpt_freeze_simq(ch->sim, 1);
2051 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2052 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2053 }
2054 ccb->ccb_h.status |= CAM_UNCOR_PARITY;
2055 break;
2056 case AHCI_ERR_TIMEOUT:
2057 if (!ch->recoverycmd) {
2058 xpt_freeze_simq(ch->sim, 1);
2059 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2060 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2061 }
2062 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
2063 break;
2064 default:
2065 ch->fatalerr = 1;
2066 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
2067 }
2068 /* Free slot. */
2069 ch->oslots &= ~(1 << slot->slot);
2070 ch->rslots &= ~(1 << slot->slot);
2071 ch->aslots &= ~(1 << slot->slot);
2072 slot->state = AHCI_SLOT_EMPTY;
2073 slot->ccb = NULL;
2074 /* Update channel stats. */
2075 ch->numrslots--;
2076 ch->numrslotspd[ccb->ccb_h.target_id]--;
2077 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
2078 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
2079 ch->numtslots--;
2080 ch->numtslotspd[ccb->ccb_h.target_id]--;
2081 }
2082 /* Cancel timeout state if request completed normally. */
2083 if (et != AHCI_ERR_TIMEOUT) {
2084 lastto = (ch->toslots == (1 << slot->slot));
2085 ch->toslots &= ~(1 << slot->slot);
2086 if (lastto)
2087 xpt_release_simq(ch->sim, TRUE);
2088 }
2089 /* If it was the first request of a reset sequence and there was no error,
2090 * proceed to the second request. */
2091 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
2092 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
2093 (ccb->ataio.cmd.control & ATA_A_RESET) &&
2094 et == AHCI_ERR_NONE) {
2095 ccb->ataio.cmd.control &= ~ATA_A_RESET;
2096 ahci_begin_transaction(ch, ccb);
2097 return;
2098 }
2099 /* If it was our READ LOG command - process it. */
2100 if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) {
2101 ahci_process_read_log(ch, ccb);
2102 /* If it was our REQUEST SENSE command - process it. */
2103 } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) {
2104 ahci_process_request_sense(ch, ccb);
2105 /* If it was NCQ or ATAPI command error, put result on hold. */
2106 } else if (et == AHCI_ERR_NCQ ||
2107 ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
2108 (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) {
2109 ch->hold[slot->slot] = ccb;
2110 ch->numhslots++;
2111 } else
2112 ahci_done(ch, ccb);
2113 /* If we have no other active commands, ... */
2114 if (ch->rslots == 0) {
2115 /* if there was a fatal error, reset the port. */
2116 if (ch->toslots != 0 || ch->fatalerr) {
2117 ahci_reset(ch);
2118 } else {
2119 /* if we have slots in error, we can reinit the port. */
2120 if (ch->eslots != 0) {
2121 ahci_stop(ch);
2122 ahci_clo(ch);
2123 ahci_start(ch, 1);
2124 }
2125 /* if there are commands on hold, we can do READ LOG. */
2126 if (!ch->recoverycmd && ch->numhslots)
2127 ahci_issue_recovery(ch);
2128 }
2129 /* If all the remaining commands are in timeout, give them a chance. */
2130 } else if ((ch->rslots & ~ch->toslots) == 0 &&
2131 et != AHCI_ERR_TIMEOUT)
2132 ahci_rearm_timeout(ch);
2133 /* Unfreeze frozen command. */
2134 if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) {
2135 union ccb *fccb = ch->frozen;
2136 ch->frozen = NULL;
2137 ahci_begin_transaction(ch, fccb);
2138 xpt_release_simq(ch->sim, TRUE);
2139 }
2140 /* Start PM timer. */
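/*
 * pm_level 4 presumably means driver-initiated PARTIAL (arm the timer
 * after ~1ms of idle time); higher levels mean SLUMBER (~125ms).
 */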
2141 if (ch->numrslots == 0 && ch->pm_level > 3 &&
2142 (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) {
2143 callout_schedule(&ch->pm_timer,
2144 (ch->pm_level == 4) ? hz / 1000 : hz / 8);
2145 }
2146 }
2147
2148 static void
2149 ahci_issue_recovery(struct ahci_channel *ch)
2150 {
2151 union ccb *ccb;
2152 struct ccb_ataio *ataio;
2153 struct ccb_scsiio *csio;
2154 int i;
2155
2156 /* Find some held command. */
2157 for (i = 0; i < ch->numslots; i++) {
2158 if (ch->hold[i])
2159 break;
2160 }
2161 ccb = xpt_alloc_ccb_nowait();
2162 if (ccb == NULL) {
2163 device_printf(ch->dev, "Unable to allocate recovery command\n");
2164 completeall:
2165 /* We can't do anything -- complete held commands. */
2166 for (i = 0; i < ch->numslots; i++) {
2167 if (ch->hold[i] == NULL)
2168 continue;
2169 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
2170 ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL;
2171 ahci_done(ch, ch->hold[i]);
2172 ch->hold[i] = NULL;
2173 ch->numhslots--;
2174 }
2175 ahci_reset(ch);
2176 return;
2177 }
2178 xpt_setup_ccb(&ccb->ccb_h, ch->hold[i]->ccb_h.path,
2179 ch->hold[i]->ccb_h.pinfo.priority);
2180 if (ch->hold[i]->ccb_h.func_code == XPT_ATA_IO) {
2181 /* READ LOG */
2182 ccb->ccb_h.recovery_type = RECOVERY_READ_LOG;
2183 ccb->ccb_h.func_code = XPT_ATA_IO;
2184 ccb->ccb_h.flags = CAM_DIR_IN;
2185 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */
2186 ataio = &ccb->ataio;
2187 ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT);
2188 if (ataio->data_ptr == NULL) {
2189 xpt_free_ccb(ccb);
2190 device_printf(ch->dev,
2191 "Unable to allocate memory for READ LOG command\n");
2192 goto completeall;
2193 }
2194 ataio->dxfer_len = 512;
2195 bzero(&ataio->cmd, sizeof(ataio->cmd));
2196 ataio->cmd.flags = CAM_ATAIO_48BIT;
2197 ataio->cmd.command = 0x2F; /* READ LOG EXT */
2198 ataio->cmd.sector_count = 1;
2199 ataio->cmd.sector_count_exp = 0;
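/* Log address 10h: the NCQ Command Error log (READ LOG EXT, one 512-byte page). */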
2200 ataio->cmd.lba_low = 0x10;
2201 ataio->cmd.lba_mid = 0;
2202 ataio->cmd.lba_mid_exp = 0;
2203 } else {
2204 /* REQUEST SENSE */
2205 ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE;
2206 ccb->ccb_h.recovery_slot = i;
2207 ccb->ccb_h.func_code = XPT_SCSI_IO;
2208 ccb->ccb_h.flags = CAM_DIR_IN;
2209 ccb->ccb_h.status = 0;
2210 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */
2211 csio = &ccb->csio;
2212 csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data;
2213 csio->dxfer_len = ch->hold[i]->csio.sense_len;
2214 csio->cdb_len = 6;
2215 bzero(&csio->cdb_io, sizeof(csio->cdb_io));
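/* Build a 6-byte REQUEST SENSE CDB (opcode 03h); byte 4 is the allocation length. */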
2216 csio->cdb_io.cdb_bytes[0] = 0x03;
2217 csio->cdb_io.cdb_bytes[4] = csio->dxfer_len;
2218 }
2219 /* Freeze SIM while doing recovery. */
2220 ch->recoverycmd = 1;
2221 xpt_freeze_simq(ch->sim, 1);
2222 ahci_begin_transaction(ch, ccb);
2223 }
2224
2225 static void
2226 ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb)
2227 {
2228 uint8_t *data;
2229 struct ata_res *res;
2230 int i;
2231
2232 ch->recoverycmd = 0;
2233
2234 data = ccb->ataio.data_ptr;
2235 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2236 (data[0] & 0x80) == 0) {
2237 for (i = 0; i < ch->numslots; i++) {
2238 if (!ch->hold[i])
2239 continue;
2240 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
2241 continue;
2242 if ((data[0] & 0x1F) == i) {
2243 res = &ch->hold[i]->ataio.res;
2244 res->status = data[2];
2245 res->error = data[3];
2246 res->lba_low = data[4];
2247 res->lba_mid = data[5];
2248 res->lba_high = data[6];
2249 res->device = data[7];
2250 res->lba_low_exp = data[8];
2251 res->lba_mid_exp = data[9];
2252 res->lba_high_exp = data[10];
2253 res->sector_count = data[12];
2254 res->sector_count_exp = data[13];
2255 } else {
2256 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
2257 ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ;
2258 }
2259 ahci_done(ch, ch->hold[i]);
2260 ch->hold[i] = NULL;
2261 ch->numhslots--;
2262 }
2263 } else {
2264 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
2265 device_printf(ch->dev, "Error while READ LOG EXT\n");
2266 else if ((data[0] & 0x80) == 0) {
2267 device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n");
2268 }
2269 for (i = 0; i < ch->numslots; i++) {
2270 if (!ch->hold[i])
2271 continue;
2272 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
2273 continue;
2274 ahci_done(ch, ch->hold[i]);
2275 ch->hold[i] = NULL;
2276 ch->numhslots--;
2277 }
2278 }
2279 free(ccb->ataio.data_ptr, M_AHCI);
2280 xpt_free_ccb(ccb);
2281 xpt_release_simq(ch->sim, TRUE);
2282 }
2283
2284 static void
2285 ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb)
2286 {
2287 int i;
2288
2289 ch->recoverycmd = 0;
2290
2291 i = ccb->ccb_h.recovery_slot;
2292 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
2293 ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID;
2294 } else {
2295 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
2296 ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2297 }
2298 ahci_done(ch, ch->hold[i]);
2299 ch->hold[i] = NULL;
2300 ch->numhslots--;
2301 xpt_free_ccb(ccb);
2302 xpt_release_simq(ch->sim, TRUE);
2303 }
2304
2305 static void
2306 ahci_start(struct ahci_channel *ch, int fbs)
2307 {
2308 u_int32_t cmd;
2309
2310 /* Run the channel start callback, if any. */
2311 if (ch->start)
2312 ch->start(ch);
2313
2314 /* Clear SATA error register */
2315 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF);
2316 /* Clear any interrupts pending on this channel */
2317 ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF);
2318 /* Configure FIS-based switching if supported. */
2319 if (ch->chcaps & AHCI_P_CMD_FBSCP) {
2320 ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0;
2321 ATA_OUTL(ch->r_mem, AHCI_P_FBS,
2322 ch->fbs_enabled ? AHCI_P_FBS_EN : 0);
2323 }
2324 /* Start operations on this channel */
2325 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2326 cmd &= ~AHCI_P_CMD_PMA;
2327 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST |
2328 (ch->pm_present ? AHCI_P_CMD_PMA : 0));
2329 }
2330
2331 static void
2332 ahci_stop(struct ahci_channel *ch)
2333 {
2334 u_int32_t cmd;
2335 int timeout;
2336
2337 /* Kill all activity on this channel */
2338 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2339 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST);
2340 /* Wait for activity stop. */
2341 timeout = 0;
2342 do {
2343 DELAY(10);
2344 if (timeout++ > 50000) {
2345 device_printf(ch->dev, "stopping AHCI engine failed\n");
2346 break;
2347 }
2348 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR);
2349 ch->eslots = 0;
2350 }
2351
2352 static void
2353 ahci_clo(struct ahci_channel *ch)
2354 {
2355 u_int32_t cmd;
2356 int timeout;
2357
2358 /* Issue Command List Override if supported */
2359 if (ch->caps & AHCI_CAP_SCLO) {
2360 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2361 cmd |= AHCI_P_CMD_CLO;
2362 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd);
2363 timeout = 0;
2364 do {
2365 DELAY(10);
2366 if (timeout++ > 50000) {
2367 device_printf(ch->dev, "executing CLO failed\n");
2368 break;
2369 }
2370 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO);
2371 }
2372 }
2373
2374 static void
2375 ahci_stop_fr(struct ahci_channel *ch)
2376 {
2377 u_int32_t cmd;
2378 int timeout;
2379
2380 /* Kill all FIS reception on this channel */
2381 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2382 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE);
2383 /* Wait for FIS reception stop. */
2384 timeout = 0;
2385 do {
2386 DELAY(10);
2387 if (timeout++ > 50000) {
2388 device_printf(ch->dev, "stopping AHCI FR engine failed\n");
2389 break;
2390 }
2391 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR);
2392 }
2393
2394 static void
2395 ahci_start_fr(struct ahci_channel *ch)
2396 {
2397 u_int32_t cmd;
2398
2399 /* Start FIS reception on this channel */
2400 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2401 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE);
2402 }
2403
2404 static int
2405 ahci_wait_ready(struct ahci_channel *ch, int t, int t0)
2406 {
2407 int timeout = 0;
2408 uint32_t val;
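/*
 * Poll the task file once per millisecond; 't' is the limit in ms and
 * 't0' is only added to the time reported in messages.
 */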
2409
2410 while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) &
2411 (ATA_S_BUSY | ATA_S_DRQ)) {
2412 if (timeout > t) {
2413 if (t != 0) {
2414 device_printf(ch->dev,
2415 "AHCI reset: device not ready after %dms "
2416 "(tfd = %08x)\n",
2417 MAX(t, 0) + t0, val);
2418 }
2419 return (EBUSY);
2420 }
2421 DELAY(1000);
2422 timeout++;
2423 }
2424 if (bootverbose)
2425 device_printf(ch->dev, "AHCI reset: device ready after %dms\n",
2426 timeout + t0);
2427 return (0);
2428 }
2429
2430 static void
2431 ahci_reset_to(void *arg)
2432 {
2433 struct ahci_channel *ch = arg;
2434
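/*
 * ch->resetting counts down in 100ms callout steps, started at 310 by
 * ahci_reset(), i.e. roughly a 31s budget for the device to clear BSY.
 */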
2435 if (ch->resetting == 0)
2436 return;
2437 ch->resetting--;
2438 if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0,
2439 (310 - ch->resetting) * 100) == 0) {
2440 ch->resetting = 0;
2441 ahci_start(ch, 1);
2442 xpt_release_simq(ch->sim, TRUE);
2443 return;
2444 }
2445 if (ch->resetting == 0) {
2446 ahci_clo(ch);
2447 ahci_start(ch, 1);
2448 xpt_release_simq(ch->sim, TRUE);
2449 return;
2450 }
2451 callout_schedule(&ch->reset_timer, hz / 10);
2452 }
2453
2454 static void
2455 ahci_reset(struct ahci_channel *ch)
2456 {
2457 struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev));
2458 int i;
2459
2460 xpt_freeze_simq(ch->sim, 1);
2461 if (bootverbose)
2462 device_printf(ch->dev, "AHCI reset...\n");
2463 /* Forget about previous reset. */
2464 if (ch->resetting) {
2465 ch->resetting = 0;
2466 callout_stop(&ch->reset_timer);
2467 xpt_release_simq(ch->sim, TRUE);
2468 }
2469 /* Requeue frozen command. */
2470 if (ch->frozen) {
2471 union ccb *fccb = ch->frozen;
2472 ch->frozen = NULL;
2473 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
2474 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
2475 xpt_freeze_devq(fccb->ccb_h.path, 1);
2476 fccb->ccb_h.status |= CAM_DEV_QFRZN;
2477 }
2478 ahci_done(ch, fccb);
2479 }
2480 /* Kill the engine and requeue all running commands. */
2481 ahci_stop(ch);
2482 for (i = 0; i < ch->numslots; i++) {
2483 /* Do we have a running request on slot? */
2484 if (ch->slot[i].state < AHCI_SLOT_RUNNING)
2485 continue;
2486 /* XXX; Commands in loading state. */
2487 ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
2488 }
2489 for (i = 0; i < ch->numslots; i++) {
2490 if (!ch->hold[i])
2491 continue;
2492 ahci_done(ch, ch->hold[i]);
2493 ch->hold[i] = NULL;
2494 ch->numhslots--;
2495 }
2496 if (ch->toslots != 0)
2497 xpt_release_simq(ch->sim, TRUE);
2498 ch->eslots = 0;
2499 ch->toslots = 0;
2500 ch->wrongccs = 0;
2501 ch->fatalerr = 0;
2502 /* Tell the XPT about the event */
2503 xpt_async(AC_BUS_RESET, ch->path, NULL);
2504 /* Disable port interrupts */
2505 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
2506 /* Reset and reconnect PHY. */
2507 if (!ahci_sata_phy_reset(ch)) {
2508 if (bootverbose)
2509 device_printf(ch->dev,
2510 "AHCI reset: device not found\n");
2511 ch->devices = 0;
2512 /* Enable wanted port interrupts */
2513 ATA_OUTL(ch->r_mem, AHCI_P_IE,
2514 (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
2515 AHCI_P_IX_PRC | AHCI_P_IX_PC));
2516 xpt_release_simq(ch->sim, TRUE);
2517 return;
2518 }
2519 if (bootverbose)
2520 device_printf(ch->dev, "AHCI reset: device found\n");
2521 /* Wait for clearing busy status. */
2522 if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) {
2523 if (dumping)
2524 ahci_clo(ch);
2525 else
2526 ch->resetting = 310;
2527 }
2528 ch->devices = 1;
2529 /* Enable wanted port interrupts */
2530 ATA_OUTL(ch->r_mem, AHCI_P_IE,
2531 (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
2532 AHCI_P_IX_TFE | AHCI_P_IX_HBF |
2533 AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF |
2534 ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC |
2535 AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) |
2536 AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR)));
2537 if (ch->resetting)
2538 callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch);
2539 else {
2540 ahci_start(ch, 1);
2541 xpt_release_simq(ch->sim, TRUE);
2542 }
2543 }
2544
2545 static int
2546 ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag)
2547 {
2548 u_int8_t *fis = &ctp->cfis[0];
2549
2550 bzero(fis, 20);
2551 fis[0] = 0x27; /* host to device */
2552 fis[1] = (ccb->ccb_h.target_id & 0x0f);
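/* Low nibble of byte 1 selects the PMP port; bit 7 (the C bit, set below
 * for command FISes) distinguishes a Command from a Device Control update. */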
2553 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2554 fis[1] |= 0x80;
2555 fis[2] = ATA_PACKET_CMD;
2556 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
2557 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
2558 fis[3] = ATA_F_DMA;
2559 else {
2560 fis[5] = ccb->csio.dxfer_len;
2561 fis[6] = ccb->csio.dxfer_len >> 8;
2562 }
2563 fis[7] = ATA_D_LBA;
2564 fis[15] = ATA_A_4BIT;
2565 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
2566 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
2567 ctp->acmd, ccb->csio.cdb_len);
2568 bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len);
2569 } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) {
2570 fis[1] |= 0x80;
2571 fis[2] = ccb->ataio.cmd.command;
2572 fis[3] = ccb->ataio.cmd.features;
2573 fis[4] = ccb->ataio.cmd.lba_low;
2574 fis[5] = ccb->ataio.cmd.lba_mid;
2575 fis[6] = ccb->ataio.cmd.lba_high;
2576 fis[7] = ccb->ataio.cmd.device;
2577 fis[8] = ccb->ataio.cmd.lba_low_exp;
2578 fis[9] = ccb->ataio.cmd.lba_mid_exp;
2579 fis[10] = ccb->ataio.cmd.lba_high_exp;
2580 fis[11] = ccb->ataio.cmd.features_exp;
2581 fis[12] = ccb->ataio.cmd.sector_count;
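/* For NCQ (FPDMA) the command tag lives in bits 7:3 of the sector count. */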
2582 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
2583 fis[12] &= 0x07;
2584 fis[12] |= tag << 3;
2585 }
2586 fis[13] = ccb->ataio.cmd.sector_count_exp;
2587 if (ccb->ataio.ata_flags & ATA_FLAG_ICC)
2588 fis[14] = ccb->ataio.icc;
2589 fis[15] = ATA_A_4BIT;
2590 if (ccb->ataio.ata_flags & ATA_FLAG_AUX) {
2591 fis[16] = ccb->ataio.aux & 0xff;
2592 fis[17] = (ccb->ataio.aux >> 8) & 0xff;
2593 fis[18] = (ccb->ataio.aux >> 16) & 0xff;
2594 fis[19] = (ccb->ataio.aux >> 24) & 0xff;
2595 }
2596 } else {
2597 fis[15] = ccb->ataio.cmd.control;
2598 }
2599 return (20);
2600 }
2601
2602 static int
2603 ahci_sata_connect(struct ahci_channel *ch)
2604 {
2605 u_int32_t status;
2606 int timeout, timeoutslot, found = 0;
2607
2608 /*
2609 * Wait for "connect well", up to 100ms by default and
2610 * up to 500ms for devices with the SLOWDEV quirk.
2611 */
2612 timeoutslot = ((ch->quirks & AHCI_Q_SLOWDEV) ? 5000 : 1000);
2613 for (timeout = 0; timeout < timeoutslot; timeout++) {
2614 status = ATA_INL(ch->r_mem, AHCI_P_SSTS);
2615 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE)
2616 found = 1;
2617 if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) &&
2618 ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) &&
2619 ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE))
2620 break;
2621 if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) {
2622 if (bootverbose) {
2623 device_printf(ch->dev, "SATA offline status=%08x\n",
2624 status);
2625 }
2626 return (0);
2627 }
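/* Nothing detected at all within the first 10ms (100 x 100us): give up early. */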
2628 if (found == 0 && timeout >= 100)
2629 break;
2630 DELAY(100);
2631 }
2632 if (timeout >= timeoutslot || !found) {
2633 if (bootverbose) {
2634 device_printf(ch->dev,
2635 "SATA connect timeout time=%dus status=%08x\n",
2636 timeout * 100, status);
2637 }
2638 return (0);
2639 }
2640 if (bootverbose) {
2641 device_printf(ch->dev, "SATA connect time=%dus status=%08x\n",
2642 timeout * 100, status);
2643 }
2644 /* Clear SATA error register */
2645 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff);
2646 return (1);
2647 }
2648
2649 static int
2650 ahci_sata_phy_reset(struct ahci_channel *ch)
2651 {
2652 int sata_rev;
2653 uint32_t val, detval;
2654
2655 if (ch->listening) {
2656 val = ATA_INL(ch->r_mem, AHCI_P_CMD);
2657 val |= AHCI_P_CMD_SUD;
2658 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val);
2659 ch->listening = 0;
2660 }
2661 sata_rev = ch->user[ch->pm_present ? 15 : 0].revision;
2662 if (sata_rev == 1)
2663 val = ATA_SC_SPD_SPEED_GEN1;
2664 else if (sata_rev == 2)
2665 val = ATA_SC_SPD_SPEED_GEN2;
2666 else if (sata_rev == 3)
2667 val = ATA_SC_SPD_SPEED_GEN3;
2668 else
2669 val = 0;
2670 detval = ahci_ch_detval(ch, ATA_SC_DET_RESET);
2671 ATA_OUTL(ch->r_mem, AHCI_P_SCTL,
2672 detval | val |
2673 ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER);
2674 DELAY(1000);
2675 detval = ahci_ch_detval(ch, ATA_SC_DET_IDLE);
2676 ATA_OUTL(ch->r_mem, AHCI_P_SCTL,
2677 detval | val | ((ch->pm_level > 0) ? 0 :
2678 (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER)));
2679 if (!ahci_sata_connect(ch)) {
2680 if (ch->caps & AHCI_CAP_SSS) {
2681 val = ATA_INL(ch->r_mem, AHCI_P_CMD);
2682 val &= ~AHCI_P_CMD_SUD;
2683 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val);
2684 ch->listening = 1;
2685 } else if (ch->pm_level > 0)
2686 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE);
2687 return (0);
2688 }
2689 return (1);
2690 }
2691
2692 static int
2693 ahci_check_ids(struct ahci_channel *ch, union ccb *ccb)
2694 {
2695
2696 if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 15 : 0)) {
2697 ccb->ccb_h.status = CAM_TID_INVALID;
2698 ahci_done(ch, ccb);
2699 return (-1);
2700 }
2701 if (ccb->ccb_h.target_lun != 0) {
2702 ccb->ccb_h.status = CAM_LUN_INVALID;
2703 ahci_done(ch, ccb);
2704 return (-1);
2705 }
2706 return (0);
2707 }
2708
2709 static void
2710 ahciaction(struct cam_sim *sim, union ccb *ccb)
2711 {
2712 struct ahci_channel *ch;
2713
2714 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n",
2715 ccb->ccb_h.func_code));
2716
2717 ch = (struct ahci_channel *)cam_sim_softc(sim);
2718 switch (ccb->ccb_h.func_code) {
2719 /* Common cases first */
2720 case XPT_ATA_IO: /* Execute the requested I/O operation */
2721 case XPT_SCSI_IO:
2722 if (ahci_check_ids(ch, ccb))
2723 return;
2724 if (ch->devices == 0 ||
2725 (ch->pm_present == 0 &&
2726 ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) {
2727 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2728 break;
2729 }
2730 ccb->ccb_h.recovery_type = RECOVERY_NONE;
2731 /* Check for command collision. */
2732 if (ahci_check_collision(ch, ccb)) {
2733 /* Freeze command. */
2734 ch->frozen = ccb;
2735 /* We have only one frozen slot, so freeze simq also. */
2736 xpt_freeze_simq(ch->sim, 1);
2737 return;
2738 }
2739 ahci_begin_transaction(ch, ccb);
2740 return;
2741 case XPT_ABORT: /* Abort the specified CCB */
2742 /* XXX Implement */
2743 ccb->ccb_h.status = CAM_REQ_INVALID;
2744 break;
2745 case XPT_SET_TRAN_SETTINGS:
2746 {
2747 struct ccb_trans_settings *cts = &ccb->cts;
2748 struct ahci_device *d;
2749
2750 if (ahci_check_ids(ch, ccb))
2751 return;
2752 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
2753 d = &ch->curr[ccb->ccb_h.target_id];
2754 else
2755 d = &ch->user[ccb->ccb_h.target_id];
2756 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
2757 d->revision = cts->xport_specific.sata.revision;
2758 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE)
2759 d->mode = cts->xport_specific.sata.mode;
2760 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
2761 d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
2762 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS)
2763 d->tags = min(ch->numslots, cts->xport_specific.sata.tags);
2764 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM)
2765 ch->pm_present = cts->xport_specific.sata.pm_present;
2766 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
2767 d->atapi = cts->xport_specific.sata.atapi;
2768 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
2769 d->caps = cts->xport_specific.sata.caps;
2770 ccb->ccb_h.status = CAM_REQ_CMP;
2771 break;
2772 }
2773 case XPT_GET_TRAN_SETTINGS:
2774 /* Get default/user set transfer settings for the target */
2775 {
2776 struct ccb_trans_settings *cts = &ccb->cts;
2777 struct ahci_device *d;
2778 uint32_t status;
2779
2780 if (ahci_check_ids(ch, ccb))
2781 return;
2782 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
2783 d = &ch->curr[ccb->ccb_h.target_id];
2784 else
2785 d = &ch->user[ccb->ccb_h.target_id];
2786 cts->protocol = PROTO_UNSPECIFIED;
2787 cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
2788 cts->transport = XPORT_SATA;
2789 cts->transport_version = XPORT_VERSION_UNSPECIFIED;
2790 cts->proto_specific.valid = 0;
2791 cts->xport_specific.sata.valid = 0;
2792 if (cts->type == CTS_TYPE_CURRENT_SETTINGS &&
2793 (ccb->ccb_h.target_id == 15 ||
2794 (ccb->ccb_h.target_id == 0 && !ch->pm_present))) {
2795 status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK;
2796 if (status & 0x0f0) {
2797 cts->xport_specific.sata.revision =
2798 (status & 0x0f0) >> 4;
2799 cts->xport_specific.sata.valid |=
2800 CTS_SATA_VALID_REVISION;
2801 }
2802 cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D;
2803 if (ch->pm_level) {
2804 if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC))
2805 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ;
2806 if (ch->caps2 & AHCI_CAP2_APST)
2807 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST;
2808 }
2809 if ((ch->caps & AHCI_CAP_SNCQ) &&
2810 (ch->quirks & AHCI_Q_NOAA) == 0)
2811 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA;
2812 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN;
2813 cts->xport_specific.sata.caps &=
2814 ch->user[ccb->ccb_h.target_id].caps;
2815 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
2816 } else {
2817 cts->xport_specific.sata.revision = d->revision;
2818 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
2819 cts->xport_specific.sata.caps = d->caps;
2820 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
2821 }
2822 cts->xport_specific.sata.mode = d->mode;
2823 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
2824 cts->xport_specific.sata.bytecount = d->bytecount;
2825 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
2826 cts->xport_specific.sata.pm_present = ch->pm_present;
2827 cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM;
2828 cts->xport_specific.sata.tags = d->tags;
2829 cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS;
2830 cts->xport_specific.sata.atapi = d->atapi;
2831 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
2832 ccb->ccb_h.status = CAM_REQ_CMP;
2833 break;
2834 }
2835 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
2836 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
2837 ahci_reset(ch);
2838 ccb->ccb_h.status = CAM_REQ_CMP;
2839 break;
2840 case XPT_TERM_IO: /* Terminate the I/O process */
2841 /* XXX Implement */
2842 ccb->ccb_h.status = CAM_REQ_INVALID;
2843 break;
2844 case XPT_PATH_INQ: /* Path routing inquiry */
2845 {
2846 struct ccb_pathinq *cpi = &ccb->cpi;
2847
2848 cpi->version_num = 1; /* XXX??? */
2849 cpi->hba_inquiry = PI_SDTR_ABLE;
2850 if (ch->caps & AHCI_CAP_SNCQ)
2851 cpi->hba_inquiry |= PI_TAG_ABLE;
2852 if (ch->caps & AHCI_CAP_SPM)
2853 cpi->hba_inquiry |= PI_SATAPM;
2854 cpi->target_sprt = 0;
2855 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
2856 if ((ch->quirks & AHCI_Q_NOAUX) == 0)
2857 cpi->hba_misc |= PIM_ATA_EXT;
2858 cpi->hba_eng_cnt = 0;
2859 if (ch->caps & AHCI_CAP_SPM)
2860 cpi->max_target = 15;
2861 else
2862 cpi->max_target = 0;
2863 cpi->max_lun = 0;
2864 cpi->initiator_id = 0;
2865 cpi->bus_id = cam_sim_bus(sim);
2866 cpi->base_transfer_speed = 150000;
2867 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2868 strlcpy(cpi->hba_vid, "AHCI", HBA_IDLEN);
2869 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2870 cpi->unit_number = cam_sim_unit(sim);
2871 cpi->transport = XPORT_SATA;
2872 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
2873 cpi->protocol = PROTO_ATA;
2874 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
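/*
 * Assumption: each S/G entry maps roughly one page of a possibly
 * unaligned buffer, so reserving one entry gives a safe maxio bound.
 */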
2875 cpi->maxio = ctob(AHCI_SG_ENTRIES - 1);
2876 /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */
2877 if (ch->quirks & AHCI_Q_MAXIO_64K)
2878 cpi->maxio = min(cpi->maxio, 128 * 512);
2879 cpi->hba_vendor = ch->vendorid;
2880 cpi->hba_device = ch->deviceid;
2881 cpi->hba_subvendor = ch->subvendorid;
2882 cpi->hba_subdevice = ch->subdeviceid;
2883 cpi->ccb_h.status = CAM_REQ_CMP;
2884 break;
2885 }
2886 default:
2887 ccb->ccb_h.status = CAM_REQ_INVALID;
2888 break;
2889 }
2890 ahci_done(ch, ccb);
2891 }
2892
2893 static void
2894 ahcipoll(struct cam_sim *sim)
2895 {
2896 struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim);
2897 uint32_t istatus;
2898
2899 /* Read interrupt statuses and process if any. */
2900 istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
2901 if (istatus != 0)
2902 ahci_ch_intr_main(ch, istatus);
2903 if (ch->resetting != 0 &&
2904 (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) {
2905 ch->resetpolldiv = 1000;
2906 ahci_reset_to(ch);
2907 }
2908 }
2909
2910 MODULE_VERSION(ahci, 1);
2911 MODULE_DEPEND(ahci, cam, 1, 1, 1);
2912