/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ata.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/ata/ata-all.h>

/* prototypes */
static void ata_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
static void ata_dmaalloc(device_t dev);
static void ata_dmafree(device_t dev);
static void ata_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
static int ata_dmaload(struct ata_request *request, void *addr, int *nsegs);
static int ata_dmaunload(struct ata_request *request);

/* local vars */
static MALLOC_DEFINE(M_ATADMA, "ata_dma", "ATA driver DMA");

/* misc defines */
#define MAXTABSZ	PAGE_SIZE
#define MAXWSPCSZ	PAGE_SIZE*2

struct ata_dc_cb_args {
    bus_addr_t maddr;
    int error;
};

/*
 * Set up the channel's DMA methods and limits, falling back to the defaults
 * below where the controller driver left them unset, and create the DMA tags
 * plus the shared work area.
 */
void
ata_dmainit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_dc_cb_args dcba;

    if (ch->dma.alloc == NULL)
        ch->dma.alloc = ata_dmaalloc;
    if (ch->dma.free == NULL)
        ch->dma.free = ata_dmafree;
    if (ch->dma.setprd == NULL)
        ch->dma.setprd = ata_dmasetprd;
    if (ch->dma.load == NULL)
        ch->dma.load = ata_dmaload;
    if (ch->dma.unload == NULL)
        ch->dma.unload = ata_dmaunload;
    if (ch->dma.alignment == 0)
        ch->dma.alignment = 2;
    if (ch->dma.boundary == 0)
        ch->dma.boundary = 65536;
    if (ch->dma.segsize == 0)
        ch->dma.segsize = 65536;
    if (ch->dma.max_iosize == 0)
        ch->dma.max_iosize = (ATA_DMA_ENTRIES - 1) * PAGE_SIZE;
    if (ch->dma.max_address == 0)
        ch->dma.max_address = BUS_SPACE_MAXADDR_32BIT;
    if (ch->dma.dma_slots == 0)
        ch->dma.dma_slots = 1;

    /* parent tag from which the work area and per-slot tags are derived */
    if (bus_dma_tag_create(bus_get_dma_tag(dev), ch->dma.alignment, 0,
            ch->dma.max_address, BUS_SPACE_MAXADDR,
            NULL, NULL,
            ch->dma.max_iosize,
            ATA_DMA_ENTRIES, ch->dma.segsize,
            0, NULL, NULL, &ch->dma.dmatag))
        goto error;

    /* tag and memory for the channel work area */
    if (bus_dma_tag_create(ch->dma.dmatag, PAGE_SIZE, 64 * 1024,
            ch->dma.max_address, BUS_SPACE_MAXADDR,
            NULL, NULL, MAXWSPCSZ, 1, MAXWSPCSZ,
            0, NULL, NULL, &ch->dma.work_tag))
        goto error;

    if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT,
            &ch->dma.work_map))
        goto error;

    if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
            MAXWSPCSZ, ata_dmasetupc_cb, &dcba, 0) ||
        dcba.error) {
        bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
        goto error;
    }
    ch->dma.work_bus = dcba.maddr;
    return;

error:
    device_printf(dev, "WARNING - DMA initialization failed, disabling DMA\n");
    ata_dmafini(dev);
}

/*
 * Tear down what ata_dmainit() set up: unload and free the work area and
 * destroy the channel DMA tags.
 */
void
ata_dmafini(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ch->dma.work_bus) {
        bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map);
        bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
        ch->dma.work_bus = 0;
        ch->dma.work = NULL;
    }
    if (ch->dma.work_tag) {
        bus_dma_tag_destroy(ch->dma.work_tag);
        ch->dma.work_tag = NULL;
    }
    if (ch->dma.dmatag) {
        bus_dma_tag_destroy(ch->dma.dmatag);
        ch->dma.dmatag = NULL;
    }
}

/* bus_dma callback: record the bus address of a single-segment mapping */
static void
ata_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
    struct ata_dc_cb_args *dcba = (struct ata_dc_cb_args *)xsc;

    if (!(dcba->error = error))
        dcba->maddr = segs[0].ds_addr;
}

static void
ata_dmaalloc(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_dc_cb_args dcba;
    int i;

    /* alloc and setup needed dma slots */
    bzero(ch->dma.slot, sizeof(struct ata_dmaslot) * ATA_DMA_SLOTS);
    for (i = 0; i < ch->dma.dma_slots; i++) {
        struct ata_dmaslot *slot = &ch->dma.slot[i];

        if (bus_dma_tag_create(ch->dma.dmatag, PAGE_SIZE, PAGE_SIZE,
                ch->dma.max_address, BUS_SPACE_MAXADDR,
                NULL, NULL, PAGE_SIZE, 1, PAGE_SIZE,
                0, NULL, NULL, &slot->sg_tag)) {
            device_printf(ch->dev, "FAILURE - create sg_tag\n");
            goto error;
        }

        if (bus_dmamem_alloc(slot->sg_tag, (void **)&slot->sg, BUS_DMA_WAITOK,
                &slot->sg_map)) {
            device_printf(ch->dev, "FAILURE - alloc sg_map\n");
            goto error;
        }

        if (bus_dmamap_load(slot->sg_tag, slot->sg_map, slot->sg, MAXTABSZ,
                ata_dmasetupc_cb, &dcba, 0) || dcba.error) {
            device_printf(ch->dev, "FAILURE - load sg\n");
            goto error;
        }
        slot->sg_bus = dcba.maddr;

        if (bus_dma_tag_create(ch->dma.dmatag,
                ch->dma.alignment, ch->dma.boundary,
                ch->dma.max_address, BUS_SPACE_MAXADDR,
                NULL, NULL, ch->dma.max_iosize,
                ATA_DMA_ENTRIES, ch->dma.segsize,
                BUS_DMA_ALLOCNOW, NULL, NULL, &slot->data_tag)) {
            device_printf(ch->dev, "FAILURE - create data_tag\n");
            goto error;
        }

        if (bus_dmamap_create(slot->data_tag, 0, &slot->data_map)) {
            device_printf(ch->dev, "FAILURE - create data_map\n");
            goto error;
        }
    }

    return;

error:
    device_printf(dev, "WARNING - DMA allocation failed, disabling DMA\n");
    ata_dmafree(dev);
}

static void
ata_dmafree(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int i;

    /* free all dma slots */
    for (i = 0; i < ATA_DMA_SLOTS; i++) {
        struct ata_dmaslot *slot =
            &ch->dma.slot[i];

        if (slot->sg_bus) {
            bus_dmamap_unload(slot->sg_tag, slot->sg_map);
            slot->sg_bus = 0;
        }
        if (slot->sg) {
            bus_dmamem_free(slot->sg_tag, slot->sg, slot->sg_map);
            slot->sg = NULL;
        }
        if (slot->data_map) {
            bus_dmamap_destroy(slot->data_tag, slot->data_map);
            slot->data_map = NULL;
        }
        if (slot->sg_tag) {
            bus_dma_tag_destroy(slot->sg_tag);
            slot->sg_tag = NULL;
        }
        if (slot->data_tag) {
            bus_dma_tag_destroy(slot->data_tag);
            slot->data_tag = NULL;
        }
    }
}

/* bus_dma callback: build the PRD (physical region descriptor) table */
static void
ata_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
    struct ata_dmasetprd_args *args = xsc;
    struct ata_dma_prdentry *prd = args->dmatab;
    int i;

    if ((args->error = error))
        return;

    for (i = 0; i < nsegs; i++) {
        prd[i].addr = htole32(segs[i].ds_addr);
        prd[i].count = htole32(segs[i].ds_len);
    }
    prd[i - 1].count |= htole32(ATA_DMA_EOT);
    KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
    args->nsegs = nsegs;
}

/*
 * Map a request's data buffer (or CCB) for DMA, build the PRD table through
 * the channel's setprd method and sync the maps for the upcoming transfer.
 */
static int
ata_dmaload(struct ata_request *request, void *addr, int *entries)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct ata_dmasetprd_args dspa;
    int error;

    ATA_DEBUG_RQ(request, "dmaload");

    if (request->dma) {
        device_printf(request->parent,
            "FAILURE - already active DMA on this device\n");
        return EIO;
    }
    if (!request->bytecount) {
        device_printf(request->parent,
            "FAILURE - zero length DMA transfer attempted\n");
        return EIO;
    }
    if (request->bytecount & (ch->dma.alignment - 1)) {
        device_printf(request->parent,
            "FAILURE - odd-sized DMA transfer attempt %d %% %d\n",
            request->bytecount, ch->dma.alignment);
        return EIO;
    }
    if (request->bytecount > ch->dma.max_iosize) {
        device_printf(request->parent,
            "FAILURE - oversized DMA transfer attempt %d > %d\n",
            request->bytecount, ch->dma.max_iosize);
        return EIO;
    }

    /* set our slot. XXX SOS NCQ will change that */
    request->dma = &ch->dma.slot[0];

    if (addr)
        dspa.dmatab = addr;
    else
        dspa.dmatab = request->dma->sg;

    if (request->flags & ATA_R_DATA_IN_CCB)
        error = bus_dmamap_load_ccb(request->dma->data_tag,
            request->dma->data_map, request->ccb,
            ch->dma.setprd, &dspa, BUS_DMA_NOWAIT);
    else
        error = bus_dmamap_load(request->dma->data_tag, request->dma->data_map,
            request->data, request->bytecount,
            ch->dma.setprd, &dspa, BUS_DMA_NOWAIT);
    if (error || (error = dspa.error)) {
        device_printf(request->parent, "FAILURE - load data\n");
        goto error;
    }

    if (entries)
        *entries = dspa.nsegs;

    bus_dmamap_sync(request->dma->sg_tag, request->dma->sg_map,
        BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(request->dma->data_tag, request->dma->data_map,
        (request->flags & ATA_R_READ) ?
        BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
    return 0;

error:
    ata_dmaunload(request);
    return EIO;
}

/* sync and unload the maps of a completed DMA transfer */
int
ata_dmaunload(struct ata_request *request)
{
    ATA_DEBUG_RQ(request, "dmaunload");

    if (request->dma) {
        bus_dmamap_sync(request->dma->sg_tag, request->dma->sg_map,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_sync(request->dma->data_tag, request->dma->data_map,
            (request->flags & ATA_R_READ) ?
            BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

        bus_dmamap_unload(request->dma->data_tag, request->dma->data_map);
        request->dma = NULL;
    }
    return 0;
}