1 /* 2 * drivers/ata/sata_dwc_460ex.c 3 * 4 * Synopsys DesignWare Cores (DWC) SATA host driver 5 * 6 * Author: Mark Miesfeld <mmiesfeld@amcc.com> 7 * 8 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de> 9 * Copyright 2008 DENX Software Engineering 10 * 11 * Based on versions provided by AMCC and Synopsys which are: 12 * Copyright 2006 Applied Micro Circuits Corporation 13 * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED 14 * 15 * This program is free software; you can redistribute it and/or modify it 16 * under the terms of the GNU General Public License as published by the 17 * Free Software Foundation; either version 2 of the License, or (at your 18 * option) any later version. 19 */ 20 21 #ifdef CONFIG_SATA_DWC_DEBUG 22 #define DEBUG 23 #endif 24 25 #ifdef CONFIG_SATA_DWC_VDEBUG 26 #define VERBOSE_DEBUG 27 #define DEBUG_NCQ 28 #endif 29 30 #include <linux/kernel.h> 31 #include <linux/module.h> 32 #include <linux/init.h> 33 #include <linux/device.h> 34 #include <linux/of_platform.h> 35 #include <linux/platform_device.h> 36 #include <linux/libata.h> 37 #include <linux/slab.h> 38 #include "libata.h" 39 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi_cmnd.h> 42 43 /* These two are defined in "libata.h" */ 44 #undef DRV_NAME 45 #undef DRV_VERSION 46 #define DRV_NAME "sata-dwc" 47 #define DRV_VERSION "1.3" 48 49 /* SATA DMA driver Globals */ 50 #define DMA_NUM_CHANS 1 51 #define DMA_NUM_CHAN_REGS 8 52 53 /* SATA DMA Register definitions */ 54 #define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/ 55 56 struct dmareg { 57 u32 low; /* Low bits 0-31 */ 58 u32 high; /* High bits 32-63 */ 59 }; 60 61 /* DMA Per Channel registers */ 62 struct dma_chan_regs { 63 struct dmareg sar; /* Source Address */ 64 struct dmareg dar; /* Destination address */ 65 struct dmareg llp; /* Linked List Pointer */ 66 struct dmareg ctl; /* Control */ 67 struct dmareg sstat; /* Source Status not implemented in core */ 68 struct dmareg dstat; /* Destination Status not implemented in core*/ 69 struct dmareg sstatar; /* Source Status Address not impl in core */ 70 struct dmareg dstatar; /* Destination Status Address not implemente */ 71 struct dmareg cfg; /* Config */ 72 struct dmareg sgr; /* Source Gather */ 73 struct dmareg dsr; /* Destination Scatter */ 74 }; 75 76 /* Generic Interrupt Registers */ 77 struct dma_interrupt_regs { 78 struct dmareg tfr; /* Transfer Interrupt */ 79 struct dmareg block; /* Block Interrupt */ 80 struct dmareg srctran; /* Source Transfer Interrupt */ 81 struct dmareg dsttran; /* Dest Transfer Interrupt */ 82 struct dmareg error; /* Error */ 83 }; 84 85 struct ahb_dma_regs { 86 struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS]; 87 struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */ 88 struct dma_interrupt_regs interrupt_status; /* Interrupt Status */ 89 struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */ 90 struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */ 91 struct dmareg statusInt; /* Interrupt combined*/ 92 struct dmareg rq_srcreg; /* Src Trans Req */ 93 struct dmareg rq_dstreg; /* Dst Trans Req */ 94 struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req*/ 95 struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req*/ 96 struct dmareg rq_lst_srcreg; /* Last Src Trans Req*/ 97 struct dmareg rq_lst_dstreg; /* Last Dst Trans Req*/ 98 struct dmareg dma_cfg; /* DMA Config */ 99 struct dmareg dma_chan_en; /* DMA Channel Enable*/ 100 struct dmareg dma_id; /* DMA ID */ 101 struct dmareg dma_test; /* DMA Test */ 102 struct dmareg res1; /* reserved */ 103 struct dmareg res2; /* reserved */ 104 /* 105 * DMA Comp Params 106 * Param 6 = dma_param[0], Param 5 = dma_param[1], 107 * Param 4 = dma_param[2] ... 108 */ 109 struct dmareg dma_params[6]; 110 }; 111 112 /* Data structure for linked list item */ 113 struct lli { 114 u32 sar; /* Source Address */ 115 u32 dar; /* Destination address */ 116 u32 llp; /* Linked List Pointer */ 117 struct dmareg ctl; /* Control */ 118 struct dmareg dstat; /* Destination Status */ 119 }; 120 121 enum { 122 SATA_DWC_DMAC_LLI_SZ = (sizeof(struct lli)), 123 SATA_DWC_DMAC_LLI_NUM = 256, 124 SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \ 125 SATA_DWC_DMAC_LLI_NUM), 126 SATA_DWC_DMAC_TWIDTH_BYTES = 4, 127 SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \ 128 SATA_DWC_DMAC_TWIDTH_BYTES), 129 }; 130 131 /* DMA Register Operation Bits */ 132 enum { 133 DMA_EN = 0x00000001, /* Enable AHB DMA */ 134 DMA_CTL_LLP_SRCEN = 0x10000000, /* Blk chain enable Src */ 135 DMA_CTL_LLP_DSTEN = 0x08000000, /* Blk chain enable Dst */ 136 }; 137 138 #define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */ 139 #define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */ 140 /* Enable channel */ 141 #define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \ 142 ((0x000000001 << (ch)) << 8)) 143 /* Disable channel */ 144 #define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8)) 145 /* Transfer Type & Flow Controller */ 146 #define DMA_CTL_TTFC(type) (((type) & 0x7) << 20) 147 #define DMA_CTL_SMS(num) (((num) & 0x3) << 25) /* Src Master Select */ 148 #define DMA_CTL_DMS(num) (((num) & 0x3) << 23)/* Dst Master Select */ 149 /* Src Burst Transaction Length */ 150 #define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14) 151 /* Dst Burst Transaction Length */ 152 #define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11) 153 /* Source Transfer Width */ 154 #define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4) 155 /* Destination Transfer Width */ 156 #define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1) 157 158 /* Assign HW handshaking interface (x) to destination / source peripheral */ 159 #define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11) 160 #define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7) 161 #define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master)) 162 163 /* 164 * This define is used to set block chaining disabled in the control low 165 * register. It is already in little endian format so it can be &'d dirctly. 166 * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN)) 167 */ 168 enum { 169 DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7, 170 DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */ 171 DMA_CTL_TTFC_M2P_PER = 0x00000003, /* Mem to per, peripheral cntr */ 172 DMA_CTL_SINC_INC = 0x00000000, /* Source Address Increment */ 173 DMA_CTL_SINC_DEC = 0x00000200, 174 DMA_CTL_SINC_NOCHANGE = 0x00000400, 175 DMA_CTL_DINC_INC = 0x00000000, /* Destination Address Increment */ 176 DMA_CTL_DINC_DEC = 0x00000080, 177 DMA_CTL_DINC_NOCHANGE = 0x00000100, 178 DMA_CTL_INT_EN = 0x00000001, /* Interrupt Enable */ 179 180 /* Channel Configuration Register high bits */ 181 DMA_CFG_FCMOD_REQ = 0x00000001, /* Flow Control - request based */ 182 DMA_CFG_PROTCTL = (0x00000003 << 2),/* Protection Control */ 183 184 /* Channel Configuration Register low bits */ 185 DMA_CFG_RELD_DST = 0x80000000, /* Reload Dest / Src Addr */ 186 DMA_CFG_RELD_SRC = 0x40000000, 187 DMA_CFG_HS_SELSRC = 0x00000800, /* Software handshake Src/ Dest */ 188 DMA_CFG_HS_SELDST = 0x00000400, 189 DMA_CFG_FIFOEMPTY = (0x00000001 << 9), /* FIFO Empty bit */ 190 191 /* Channel Linked List Pointer Register */ 192 DMA_LLP_AHBMASTER1 = 0, /* List Master Select */ 193 DMA_LLP_AHBMASTER2 = 1, 194 195 SATA_DWC_MAX_PORTS = 1, 196 197 SATA_DWC_SCR_OFFSET = 0x24, 198 SATA_DWC_REG_OFFSET = 0x64, 199 }; 200 201 /* DWC SATA Registers */ 202 struct sata_dwc_regs { 203 u32 fptagr; /* 1st party DMA tag */ 204 u32 fpbor; /* 1st party DMA buffer offset */ 205 u32 fptcr; /* 1st party DMA Xfr count */ 206 u32 dmacr; /* DMA Control */ 207 u32 dbtsr; /* DMA Burst Transac size */ 208 u32 intpr; /* Interrupt Pending */ 209 u32 intmr; /* Interrupt Mask */ 210 u32 errmr; /* Error Mask */ 211 u32 llcr; /* Link Layer Control */ 212 u32 phycr; /* PHY Control */ 213 u32 physr; /* PHY Status */ 214 u32 rxbistpd; /* Recvd BIST pattern def register */ 215 u32 rxbistpd1; /* Recvd BIST data dword1 */ 216 u32 rxbistpd2; /* Recvd BIST pattern data dword2 */ 217 u32 txbistpd; /* Trans BIST pattern def register */ 218 u32 txbistpd1; /* Trans BIST data dword1 */ 219 u32 txbistpd2; /* Trans BIST data dword2 */ 220 u32 bistcr; /* BIST Control Register */ 221 u32 bistfctr; /* BIST FIS Count Register */ 222 u32 bistsr; /* BIST Status Register */ 223 u32 bistdecr; /* BIST Dword Error count register */ 224 u32 res[15]; /* Reserved locations */ 225 u32 testr; /* Test Register */ 226 u32 versionr; /* Version Register */ 227 u32 idr; /* ID Register */ 228 u32 unimpl[192]; /* Unimplemented */ 229 u32 dmadr[256]; /* FIFO Locations in DMA Mode */ 230 }; 231 232 enum { 233 SCR_SCONTROL_DET_ENABLE = 0x00000001, 234 SCR_SSTATUS_DET_PRESENT = 0x00000001, 235 SCR_SERROR_DIAG_X = 0x04000000, 236 /* DWC SATA Register Operations */ 237 SATA_DWC_TXFIFO_DEPTH = 0x01FF, 238 SATA_DWC_RXFIFO_DEPTH = 0x01FF, 239 SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004, 240 SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN), 241 SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN), 242 SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN, 243 SATA_DWC_INTPR_DMAT = 0x00000001, 244 SATA_DWC_INTPR_NEWFP = 0x00000002, 245 SATA_DWC_INTPR_PMABRT = 0x00000004, 246 SATA_DWC_INTPR_ERR = 0x00000008, 247 SATA_DWC_INTPR_NEWBIST = 0x00000010, 248 SATA_DWC_INTPR_IPF = 0x10000000, 249 SATA_DWC_INTMR_DMATM = 0x00000001, 250 SATA_DWC_INTMR_NEWFPM = 0x00000002, 251 SATA_DWC_INTMR_PMABRTM = 0x00000004, 252 SATA_DWC_INTMR_ERRM = 0x00000008, 253 SATA_DWC_INTMR_NEWBISTM = 0x00000010, 254 SATA_DWC_LLCR_SCRAMEN = 0x00000001, 255 SATA_DWC_LLCR_DESCRAMEN = 0x00000002, 256 SATA_DWC_LLCR_RPDEN = 0x00000004, 257 /* This is all error bits, zero's are reserved fields. */ 258 SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03 259 }; 260 261 #define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F) 262 #define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\ 263 SATA_DWC_DMACR_TMOD_TXCHEN) 264 #define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\ 265 SATA_DWC_DMACR_TMOD_TXCHEN) 266 #define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH) 267 #define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\ 268 << 16) 269 struct sata_dwc_device { 270 struct device *dev; /* generic device struct */ 271 struct ata_probe_ent *pe; /* ptr to probe-ent */ 272 struct ata_host *host; 273 u8 *reg_base; 274 struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ 275 int irq_dma; 276 }; 277 278 #define SATA_DWC_QCMD_MAX 32 279 280 struct sata_dwc_device_port { 281 struct sata_dwc_device *hsdev; 282 int cmd_issued[SATA_DWC_QCMD_MAX]; 283 struct lli *llit[SATA_DWC_QCMD_MAX]; /* DMA LLI table */ 284 dma_addr_t llit_dma[SATA_DWC_QCMD_MAX]; 285 u32 dma_chan[SATA_DWC_QCMD_MAX]; 286 int dma_pending[SATA_DWC_QCMD_MAX]; 287 }; 288 289 /* 290 * Commonly used DWC SATA driver Macros 291 */ 292 #define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\ 293 (host)->private_data) 294 #define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\ 295 (ap)->host->private_data) 296 #define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\ 297 (ap)->private_data) 298 #define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\ 299 (qc)->ap->host->private_data) 300 #define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\ 301 (hsdevp)->hsdev) 302 303 enum { 304 SATA_DWC_CMD_ISSUED_NOT = 0, 305 SATA_DWC_CMD_ISSUED_PEND = 1, 306 SATA_DWC_CMD_ISSUED_EXEC = 2, 307 SATA_DWC_CMD_ISSUED_NODATA = 3, 308 309 SATA_DWC_DMA_PENDING_NONE = 0, 310 SATA_DWC_DMA_PENDING_TX = 1, 311 SATA_DWC_DMA_PENDING_RX = 2, 312 }; 313 314 struct sata_dwc_host_priv { 315 void __iomem *scr_addr_sstatus; 316 u32 sata_dwc_sactive_issued ; 317 u32 sata_dwc_sactive_queued ; 318 u32 dma_interrupt_count; 319 struct ahb_dma_regs *sata_dma_regs; 320 struct device *dwc_dev; 321 }; 322 struct sata_dwc_host_priv host_pvt; 323 /* 324 * Prototypes 325 */ 326 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag); 327 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, 328 u32 check_status); 329 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); 330 static void sata_dwc_port_stop(struct ata_port *ap); 331 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); 332 static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq); 333 static void dma_dwc_exit(struct sata_dwc_device *hsdev); 334 static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems, 335 struct lli *lli, dma_addr_t dma_lli, 336 void __iomem *addr, int dir); 337 static void dma_dwc_xfer_start(int dma_ch); 338 339 static const char *get_prot_descript(u8 protocol) 340 { 341 switch ((enum ata_tf_protocols)protocol) { 342 case ATA_PROT_NODATA: 343 return "ATA no data"; 344 case ATA_PROT_PIO: 345 return "ATA PIO"; 346 case ATA_PROT_DMA: 347 return "ATA DMA"; 348 case ATA_PROT_NCQ: 349 return "ATA NCQ"; 350 case ATAPI_PROT_NODATA: 351 return "ATAPI no data"; 352 case ATAPI_PROT_PIO: 353 return "ATAPI PIO"; 354 case ATAPI_PROT_DMA: 355 return "ATAPI DMA"; 356 default: 357 return "unknown"; 358 } 359 } 360 361 static const char *get_dma_dir_descript(int dma_dir) 362 { 363 switch ((enum dma_data_direction)dma_dir) { 364 case DMA_BIDIRECTIONAL: 365 return "bidirectional"; 366 case DMA_TO_DEVICE: 367 return "to device"; 368 case DMA_FROM_DEVICE: 369 return "from device"; 370 default: 371 return "none"; 372 } 373 } 374 375 static void sata_dwc_tf_dump(struct ata_taskfile *tf) 376 { 377 dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:" 378 "0x%lx device: %x\n", tf->command, 379 get_prot_descript(tf->protocol), tf->flags, tf->device); 380 dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x " 381 "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal, 382 tf->lbam, tf->lbah); 383 dev_vdbg(host_pvt.dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x " 384 "hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n", 385 tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, 386 tf->hob_lbah); 387 } 388 389 /* 390 * Function: get_burst_length_encode 391 * arguments: datalength: length in bytes of data 392 * returns value to be programmed in register corresponding to data length 393 * This value is effectively the log(base 2) of the length 394 */ 395 static int get_burst_length_encode(int datalength) 396 { 397 int items = datalength >> 2; /* div by 4 to get lword count */ 398 399 if (items >= 64) 400 return 5; 401 402 if (items >= 32) 403 return 4; 404 405 if (items >= 16) 406 return 3; 407 408 if (items >= 8) 409 return 2; 410 411 if (items >= 4) 412 return 1; 413 414 return 0; 415 } 416 417 static void clear_chan_interrupts(int c) 418 { 419 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low), 420 DMA_CHANNEL(c)); 421 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low), 422 DMA_CHANNEL(c)); 423 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low), 424 DMA_CHANNEL(c)); 425 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low), 426 DMA_CHANNEL(c)); 427 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low), 428 DMA_CHANNEL(c)); 429 } 430 431 /* 432 * Function: dma_request_channel 433 * arguments: None 434 * returns channel number if available else -1 435 * This function assigns the next available DMA channel from the list to the 436 * requester 437 */ 438 static int dma_request_channel(void) 439 { 440 int i; 441 442 for (i = 0; i < DMA_NUM_CHANS; i++) { 443 if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &\ 444 DMA_CHANNEL(i))) 445 return i; 446 } 447 dev_err(host_pvt.dwc_dev, "%s NO channel chan_en: 0x%08x\n", __func__, 448 in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low))); 449 return -1; 450 } 451 452 /* 453 * Function: dma_dwc_interrupt 454 * arguments: irq, dev_id, pt_regs 455 * returns channel number if available else -1 456 * Interrupt Handler for DW AHB SATA DMA 457 */ 458 static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance) 459 { 460 int chan; 461 u32 tfr_reg, err_reg; 462 unsigned long flags; 463 struct sata_dwc_device *hsdev = 464 (struct sata_dwc_device *)hsdev_instance; 465 struct ata_host *host = (struct ata_host *)hsdev->host; 466 struct ata_port *ap; 467 struct sata_dwc_device_port *hsdevp; 468 u8 tag = 0; 469 unsigned int port = 0; 470 471 spin_lock_irqsave(&host->lock, flags); 472 ap = host->ports[port]; 473 hsdevp = HSDEVP_FROM_AP(ap); 474 tag = ap->link.active_tag; 475 476 tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\ 477 .low)); 478 err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\ 479 .low)); 480 481 dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n", 482 tfr_reg, err_reg, hsdevp->dma_pending[tag], port); 483 484 for (chan = 0; chan < DMA_NUM_CHANS; chan++) { 485 /* Check for end-of-transfer interrupt. */ 486 if (tfr_reg & DMA_CHANNEL(chan)) { 487 /* 488 * Each DMA command produces 2 interrupts. Only 489 * complete the command after both interrupts have been 490 * seen. (See sata_dwc_isr()) 491 */ 492 host_pvt.dma_interrupt_count++; 493 sata_dwc_clear_dmacr(hsdevp, tag); 494 495 if (hsdevp->dma_pending[tag] == 496 SATA_DWC_DMA_PENDING_NONE) { 497 dev_err(ap->dev, "DMA not pending eot=0x%08x " 498 "err=0x%08x tag=0x%02x pending=%d\n", 499 tfr_reg, err_reg, tag, 500 hsdevp->dma_pending[tag]); 501 } 502 503 if ((host_pvt.dma_interrupt_count % 2) == 0) 504 sata_dwc_dma_xfer_complete(ap, 1); 505 506 /* Clear the interrupt */ 507 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\ 508 .tfr.low), 509 DMA_CHANNEL(chan)); 510 } 511 512 /* Check for error interrupt. */ 513 if (err_reg & DMA_CHANNEL(chan)) { 514 /* TODO Need error handler ! */ 515 dev_err(ap->dev, "error interrupt err_reg=0x%08x\n", 516 err_reg); 517 518 /* Clear the interrupt. */ 519 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\ 520 .error.low), 521 DMA_CHANNEL(chan)); 522 } 523 } 524 spin_unlock_irqrestore(&host->lock, flags); 525 return IRQ_HANDLED; 526 } 527 528 /* 529 * Function: dma_request_interrupts 530 * arguments: hsdev 531 * returns status 532 * This function registers ISR for a particular DMA channel interrupt 533 */ 534 static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq) 535 { 536 int retval = 0; 537 int chan; 538 539 for (chan = 0; chan < DMA_NUM_CHANS; chan++) { 540 /* Unmask error interrupt */ 541 out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low, 542 DMA_ENABLE_CHAN(chan)); 543 544 /* Unmask end-of-transfer interrupt */ 545 out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low, 546 DMA_ENABLE_CHAN(chan)); 547 } 548 549 retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev); 550 if (retval) { 551 dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n", 552 __func__, irq); 553 return -ENODEV; 554 } 555 556 /* Mark this interrupt as requested */ 557 hsdev->irq_dma = irq; 558 return 0; 559 } 560 561 /* 562 * Function: map_sg_to_lli 563 * The Synopsis driver has a comment proposing that better performance 564 * is possible by only enabling interrupts on the last item in the linked list. 565 * However, it seems that could be a problem if an error happened on one of the 566 * first items. The transfer would halt, but no error interrupt would occur. 567 * Currently this function sets interrupts enabled for each linked list item: 568 * DMA_CTL_INT_EN. 569 */ 570 static int map_sg_to_lli(struct scatterlist *sg, int num_elems, 571 struct lli *lli, dma_addr_t dma_lli, 572 void __iomem *dmadr_addr, int dir) 573 { 574 int i, idx = 0; 575 int fis_len = 0; 576 dma_addr_t next_llp; 577 int bl; 578 579 dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x" 580 " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli, 581 (u32)dmadr_addr); 582 583 bl = get_burst_length_encode(AHB_DMA_BRST_DFLT); 584 585 for (i = 0; i < num_elems; i++, sg++) { 586 u32 addr, offset; 587 u32 sg_len, len; 588 589 addr = (u32) sg_dma_address(sg); 590 sg_len = sg_dma_len(sg); 591 592 dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len" 593 "=%d\n", __func__, i, addr, sg_len); 594 595 while (sg_len) { 596 if (idx >= SATA_DWC_DMAC_LLI_NUM) { 597 /* The LLI table is not large enough. */ 598 dev_err(host_pvt.dwc_dev, "LLI table overrun " 599 "(idx=%d)\n", idx); 600 break; 601 } 602 len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ? 603 SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len; 604 605 offset = addr & 0xffff; 606 if ((offset + sg_len) > 0x10000) 607 len = 0x10000 - offset; 608 609 /* 610 * Make sure a LLI block is not created that will span 611 * 8K max FIS boundary. If the block spans such a FIS 612 * boundary, there is a chance that a DMA burst will 613 * cross that boundary -- this results in an error in 614 * the host controller. 615 */ 616 if (fis_len + len > 8192) { 617 dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len=" 618 "%d(0x%x) len=%d(0x%x)\n", fis_len, 619 fis_len, len, len); 620 len = 8192 - fis_len; 621 fis_len = 0; 622 } else { 623 fis_len += len; 624 } 625 if (fis_len == 8192) 626 fis_len = 0; 627 628 /* 629 * Set DMA addresses and lower half of control register 630 * based on direction. 631 */ 632 if (dir == DMA_FROM_DEVICE) { 633 lli[idx].dar = cpu_to_le32(addr); 634 lli[idx].sar = cpu_to_le32((u32)dmadr_addr); 635 636 lli[idx].ctl.low = cpu_to_le32( 637 DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | 638 DMA_CTL_SMS(0) | 639 DMA_CTL_DMS(1) | 640 DMA_CTL_SRC_MSIZE(bl) | 641 DMA_CTL_DST_MSIZE(bl) | 642 DMA_CTL_SINC_NOCHANGE | 643 DMA_CTL_SRC_TRWID(2) | 644 DMA_CTL_DST_TRWID(2) | 645 DMA_CTL_INT_EN | 646 DMA_CTL_LLP_SRCEN | 647 DMA_CTL_LLP_DSTEN); 648 } else { /* DMA_TO_DEVICE */ 649 lli[idx].sar = cpu_to_le32(addr); 650 lli[idx].dar = cpu_to_le32((u32)dmadr_addr); 651 652 lli[idx].ctl.low = cpu_to_le32( 653 DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | 654 DMA_CTL_SMS(1) | 655 DMA_CTL_DMS(0) | 656 DMA_CTL_SRC_MSIZE(bl) | 657 DMA_CTL_DST_MSIZE(bl) | 658 DMA_CTL_DINC_NOCHANGE | 659 DMA_CTL_SRC_TRWID(2) | 660 DMA_CTL_DST_TRWID(2) | 661 DMA_CTL_INT_EN | 662 DMA_CTL_LLP_SRCEN | 663 DMA_CTL_LLP_DSTEN); 664 } 665 666 dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: " 667 "0x%08x val: 0x%08x\n", __func__, 668 len, DMA_CTL_BLK_TS(len / 4)); 669 670 /* Program the LLI CTL high register */ 671 lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\ 672 (len / 4)); 673 674 /* Program the next pointer. The next pointer must be 675 * the physical address, not the virtual address. 676 */ 677 next_llp = (dma_lli + ((idx + 1) * sizeof(struct \ 678 lli))); 679 680 /* The last 2 bits encode the list master select. */ 681 next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2); 682 683 lli[idx].llp = cpu_to_le32(next_llp); 684 idx++; 685 sg_len -= len; 686 addr += len; 687 } 688 } 689 690 /* 691 * The last next ptr has to be zero and the last control low register 692 * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source 693 * and destination enable) set back to 0 (disabled.) This is what tells 694 * the core that this is the last item in the linked list. 695 */ 696 if (idx) { 697 lli[idx-1].llp = 0x00000000; 698 lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32; 699 700 /* Flush cache to memory */ 701 dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx), 702 DMA_BIDIRECTIONAL); 703 } 704 705 return idx; 706 } 707 708 /* 709 * Function: dma_dwc_xfer_start 710 * arguments: Channel number 711 * Return : None 712 * Enables the DMA channel 713 */ 714 static void dma_dwc_xfer_start(int dma_ch) 715 { 716 /* Enable the DMA channel */ 717 out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low), 718 in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) | 719 DMA_ENABLE_CHAN(dma_ch)); 720 } 721 722 static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems, 723 struct lli *lli, dma_addr_t dma_lli, 724 void __iomem *addr, int dir) 725 { 726 int dma_ch; 727 int num_lli; 728 /* Acquire DMA channel */ 729 dma_ch = dma_request_channel(); 730 if (dma_ch == -1) { 731 dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n", 732 __func__); 733 return -EAGAIN; 734 } 735 736 /* Convert SG list to linked list of items (LLIs) for AHB DMA */ 737 num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir); 738 739 dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:" 740 " 0x%0xlx addr: %p lli count: %d\n", __func__, sg, num_elems, 741 lli, (u32)dma_lli, addr, num_lli); 742 743 clear_chan_interrupts(dma_ch); 744 745 /* Program the CFG register. */ 746 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high), 747 DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ); 748 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low), 0); 749 750 /* Program the address of the linked list */ 751 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low), 752 DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2)); 753 754 /* Program the CTL register with src enable / dst enable */ 755 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low), 756 DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN); 757 return dma_ch; 758 } 759 760 /* 761 * Function: dma_dwc_exit 762 * arguments: None 763 * returns status 764 * This function exits the SATA DMA driver 765 */ 766 static void dma_dwc_exit(struct sata_dwc_device *hsdev) 767 { 768 dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__); 769 if (host_pvt.sata_dma_regs) { 770 iounmap(host_pvt.sata_dma_regs); 771 host_pvt.sata_dma_regs = NULL; 772 } 773 774 if (hsdev->irq_dma) { 775 free_irq(hsdev->irq_dma, hsdev); 776 hsdev->irq_dma = 0; 777 } 778 } 779 780 /* 781 * Function: dma_dwc_init 782 * arguments: hsdev 783 * returns status 784 * This function initializes the SATA DMA driver 785 */ 786 static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) 787 { 788 int err; 789 790 err = dma_request_interrupts(hsdev, irq); 791 if (err) { 792 dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns" 793 " %d\n", __func__, err); 794 goto error_out; 795 } 796 797 /* Enabe DMA */ 798 out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN); 799 800 dev_notice(host_pvt.dwc_dev, "DMA initialized\n"); 801 dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n", host_pvt.\ 802 sata_dma_regs); 803 804 return 0; 805 806 error_out: 807 dma_dwc_exit(hsdev); 808 809 return err; 810 } 811 812 static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) 813 { 814 if (scr > SCR_NOTIFICATION) { 815 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", 816 __func__, scr); 817 return -EINVAL; 818 } 819 820 *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4)); 821 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", 822 __func__, link->ap->print_id, scr, *val); 823 824 return 0; 825 } 826 827 static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val) 828 { 829 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", 830 __func__, link->ap->print_id, scr, val); 831 if (scr > SCR_NOTIFICATION) { 832 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", 833 __func__, scr); 834 return -EINVAL; 835 } 836 out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val); 837 838 return 0; 839 } 840 841 static u32 core_scr_read(unsigned int scr) 842 { 843 return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +\ 844 (scr * 4)); 845 } 846 847 static void core_scr_write(unsigned int scr, u32 val) 848 { 849 out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4), 850 val); 851 } 852 853 static void clear_serror(void) 854 { 855 u32 val; 856 val = core_scr_read(SCR_ERROR); 857 core_scr_write(SCR_ERROR, val); 858 859 } 860 861 static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit) 862 { 863 out_le32(&hsdev->sata_dwc_regs->intpr, 864 in_le32(&hsdev->sata_dwc_regs->intpr)); 865 } 866 867 static u32 qcmd_tag_to_mask(u8 tag) 868 { 869 return 0x00000001 << (tag & 0x1f); 870 } 871 872 /* See ahci.c */ 873 static void sata_dwc_error_intr(struct ata_port *ap, 874 struct sata_dwc_device *hsdev, uint intpr) 875 { 876 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 877 struct ata_eh_info *ehi = &ap->link.eh_info; 878 unsigned int err_mask = 0, action = 0; 879 struct ata_queued_cmd *qc; 880 u32 serror; 881 u8 status, tag; 882 u32 err_reg; 883 884 ata_ehi_clear_desc(ehi); 885 886 serror = core_scr_read(SCR_ERROR); 887 status = ap->ops->sff_check_status(ap); 888 889 err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.\ 890 low)); 891 tag = ap->link.active_tag; 892 893 dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x " 894 "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n", 895 __func__, serror, intpr, status, host_pvt.dma_interrupt_count, 896 hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg); 897 898 /* Clear error register and interrupt bit */ 899 clear_serror(); 900 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR); 901 902 /* This is the only error happening now. TODO check for exact error */ 903 904 err_mask |= AC_ERR_HOST_BUS; 905 action |= ATA_EH_RESET; 906 907 /* Pass this on to EH */ 908 ehi->serror |= serror; 909 ehi->action |= action; 910 911 qc = ata_qc_from_tag(ap, tag); 912 if (qc) 913 qc->err_mask |= err_mask; 914 else 915 ehi->err_mask |= err_mask; 916 917 ata_port_abort(ap); 918 } 919 920 /* 921 * Function : sata_dwc_isr 922 * arguments : irq, void *dev_instance, struct pt_regs *regs 923 * Return value : irqreturn_t - status of IRQ 924 * This Interrupt handler called via port ops registered function. 925 * .irq_handler = sata_dwc_isr 926 */ 927 static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) 928 { 929 struct ata_host *host = (struct ata_host *)dev_instance; 930 struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host); 931 struct ata_port *ap; 932 struct ata_queued_cmd *qc; 933 unsigned long flags; 934 u8 status, tag; 935 int handled, num_processed, port = 0; 936 uint intpr, sactive, sactive2, tag_mask; 937 struct sata_dwc_device_port *hsdevp; 938 host_pvt.sata_dwc_sactive_issued = 0; 939 940 spin_lock_irqsave(&host->lock, flags); 941 942 /* Read the interrupt register */ 943 intpr = in_le32(&hsdev->sata_dwc_regs->intpr); 944 945 ap = host->ports[port]; 946 hsdevp = HSDEVP_FROM_AP(ap); 947 948 dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr, 949 ap->link.active_tag); 950 951 /* Check for error interrupt */ 952 if (intpr & SATA_DWC_INTPR_ERR) { 953 sata_dwc_error_intr(ap, hsdev, intpr); 954 handled = 1; 955 goto DONE; 956 } 957 958 /* Check for DMA SETUP FIS (FP DMA) interrupt */ 959 if (intpr & SATA_DWC_INTPR_NEWFP) { 960 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP); 961 962 tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr)); 963 dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag); 964 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND) 965 dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag); 966 967 host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag); 968 969 qc = ata_qc_from_tag(ap, tag); 970 /* 971 * Start FP DMA for NCQ command. At this point the tag is the 972 * active tag. It is the tag that matches the command about to 973 * be completed. 974 */ 975 qc->ap->link.active_tag = tag; 976 sata_dwc_bmdma_start_by_tag(qc, tag); 977 978 handled = 1; 979 goto DONE; 980 } 981 sactive = core_scr_read(SCR_ACTIVE); 982 tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive; 983 984 /* If no sactive issued and tag_mask is zero then this is not NCQ */ 985 if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) { 986 if (ap->link.active_tag == ATA_TAG_POISON) 987 tag = 0; 988 else 989 tag = ap->link.active_tag; 990 qc = ata_qc_from_tag(ap, tag); 991 992 /* DEV interrupt w/ no active qc? */ 993 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { 994 dev_err(ap->dev, "%s interrupt with no active qc " 995 "qc=%p\n", __func__, qc); 996 ap->ops->sff_check_status(ap); 997 handled = 1; 998 goto DONE; 999 } 1000 status = ap->ops->sff_check_status(ap); 1001 1002 qc->ap->link.active_tag = tag; 1003 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; 1004 1005 if (status & ATA_ERR) { 1006 dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status); 1007 sata_dwc_qc_complete(ap, qc, 1); 1008 handled = 1; 1009 goto DONE; 1010 } 1011 1012 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n", 1013 __func__, get_prot_descript(qc->tf.protocol)); 1014 DRVSTILLBUSY: 1015 if (ata_is_dma(qc->tf.protocol)) { 1016 /* 1017 * Each DMA transaction produces 2 interrupts. The DMAC 1018 * transfer complete interrupt and the SATA controller 1019 * operation done interrupt. The command should be 1020 * completed only after both interrupts are seen. 1021 */ 1022 host_pvt.dma_interrupt_count++; 1023 if (hsdevp->dma_pending[tag] == \ 1024 SATA_DWC_DMA_PENDING_NONE) { 1025 dev_err(ap->dev, "%s: DMA not pending " 1026 "intpr=0x%08x status=0x%08x pending" 1027 "=%d\n", __func__, intpr, status, 1028 hsdevp->dma_pending[tag]); 1029 } 1030 1031 if ((host_pvt.dma_interrupt_count % 2) == 0) 1032 sata_dwc_dma_xfer_complete(ap, 1); 1033 } else if (ata_is_pio(qc->tf.protocol)) { 1034 ata_sff_hsm_move(ap, qc, status, 0); 1035 handled = 1; 1036 goto DONE; 1037 } else { 1038 if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) 1039 goto DRVSTILLBUSY; 1040 } 1041 1042 handled = 1; 1043 goto DONE; 1044 } 1045 1046 /* 1047 * This is a NCQ command. At this point we need to figure out for which 1048 * tags we have gotten a completion interrupt. One interrupt may serve 1049 * as completion for more than one operation when commands are queued 1050 * (NCQ). We need to process each completed command. 1051 */ 1052 1053 /* process completed commands */ 1054 sactive = core_scr_read(SCR_ACTIVE); 1055 tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive; 1056 1057 if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \ 1058 tag_mask > 1) { 1059 dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x sactive_issued=0x%08x" 1060 "tag_mask=0x%08x\n", __func__, sactive, 1061 host_pvt.sata_dwc_sactive_issued, tag_mask); 1062 } 1063 1064 if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \ 1065 (host_pvt.sata_dwc_sactive_issued)) { 1066 dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x " 1067 "(host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask" 1068 "=0x%08x\n", sactive, host_pvt.sata_dwc_sactive_issued, 1069 tag_mask); 1070 } 1071 1072 /* read just to clear ... not bad if currently still busy */ 1073 status = ap->ops->sff_check_status(ap); 1074 dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status); 1075 1076 tag = 0; 1077 num_processed = 0; 1078 while (tag_mask) { 1079 num_processed++; 1080 while (!(tag_mask & 0x00000001)) { 1081 tag++; 1082 tag_mask <<= 1; 1083 } 1084 1085 tag_mask &= (~0x00000001); 1086 qc = ata_qc_from_tag(ap, tag); 1087 1088 /* To be picked up by completion functions */ 1089 qc->ap->link.active_tag = tag; 1090 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; 1091 1092 /* Let libata/scsi layers handle error */ 1093 if (status & ATA_ERR) { 1094 dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__, 1095 status); 1096 sata_dwc_qc_complete(ap, qc, 1); 1097 handled = 1; 1098 goto DONE; 1099 } 1100 1101 /* Process completed command */ 1102 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, 1103 get_prot_descript(qc->tf.protocol)); 1104 if (ata_is_dma(qc->tf.protocol)) { 1105 host_pvt.dma_interrupt_count++; 1106 if (hsdevp->dma_pending[tag] == \ 1107 SATA_DWC_DMA_PENDING_NONE) 1108 dev_warn(ap->dev, "%s: DMA not pending?\n", 1109 __func__); 1110 if ((host_pvt.dma_interrupt_count % 2) == 0) 1111 sata_dwc_dma_xfer_complete(ap, 1); 1112 } else { 1113 if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) 1114 goto STILLBUSY; 1115 } 1116 continue; 1117 1118 STILLBUSY: 1119 ap->stats.idle_irq++; 1120 dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n", 1121 ap->print_id); 1122 } /* while tag_mask */ 1123 1124 /* 1125 * Check to see if any commands completed while we were processing our 1126 * initial set of completed commands (read status clears interrupts, 1127 * so we might miss a completed command interrupt if one came in while 1128 * we were processing --we read status as part of processing a completed 1129 * command). 1130 */ 1131 sactive2 = core_scr_read(SCR_ACTIVE); 1132 if (sactive2 != sactive) { 1133 dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2" 1134 "=0x%x\n", sactive, sactive2); 1135 } 1136 handled = 1; 1137 1138 DONE: 1139 spin_unlock_irqrestore(&host->lock, flags); 1140 return IRQ_RETVAL(handled); 1141 } 1142 1143 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) 1144 { 1145 struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp); 1146 1147 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) { 1148 out_le32(&(hsdev->sata_dwc_regs->dmacr), 1149 SATA_DWC_DMACR_RX_CLEAR( 1150 in_le32(&(hsdev->sata_dwc_regs->dmacr)))); 1151 } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) { 1152 out_le32(&(hsdev->sata_dwc_regs->dmacr), 1153 SATA_DWC_DMACR_TX_CLEAR( 1154 in_le32(&(hsdev->sata_dwc_regs->dmacr)))); 1155 } else { 1156 /* 1157 * This should not happen, it indicates the driver is out of 1158 * sync. If it does happen, clear dmacr anyway. 1159 */ 1160 dev_err(host_pvt.dwc_dev, "%s DMA protocol RX and" 1161 "TX DMA not pending tag=0x%02x pending=%d" 1162 " dmacr: 0x%08x\n", __func__, tag, 1163 hsdevp->dma_pending[tag], 1164 in_le32(&(hsdev->sata_dwc_regs->dmacr))); 1165 out_le32(&(hsdev->sata_dwc_regs->dmacr), 1166 SATA_DWC_DMACR_TXRXCH_CLEAR); 1167 } 1168 } 1169 1170 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) 1171 { 1172 struct ata_queued_cmd *qc; 1173 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1174 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 1175 u8 tag = 0; 1176 1177 tag = ap->link.active_tag; 1178 qc = ata_qc_from_tag(ap, tag); 1179 if (!qc) { 1180 dev_err(ap->dev, "failed to get qc"); 1181 return; 1182 } 1183 1184 #ifdef DEBUG_NCQ 1185 if (tag > 0) { 1186 dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s " 1187 "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command, 1188 get_dma_dir_descript(qc->dma_dir), 1189 get_prot_descript(qc->tf.protocol), 1190 in_le32(&(hsdev->sata_dwc_regs->dmacr))); 1191 } 1192 #endif 1193 1194 if (ata_is_dma(qc->tf.protocol)) { 1195 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { 1196 dev_err(ap->dev, "%s DMA protocol RX and TX DMA not " 1197 "pending dmacr: 0x%08x\n", __func__, 1198 in_le32(&(hsdev->sata_dwc_regs->dmacr))); 1199 } 1200 1201 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; 1202 sata_dwc_qc_complete(ap, qc, check_status); 1203 ap->link.active_tag = ATA_TAG_POISON; 1204 } else { 1205 sata_dwc_qc_complete(ap, qc, check_status); 1206 } 1207 } 1208 1209 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, 1210 u32 check_status) 1211 { 1212 u8 status = 0; 1213 u32 mask = 0x0; 1214 u8 tag = qc->tag; 1215 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1216 host_pvt.sata_dwc_sactive_queued = 0; 1217 dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status); 1218 1219 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) 1220 dev_err(ap->dev, "TX DMA PENDING\n"); 1221 else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) 1222 dev_err(ap->dev, "RX DMA PENDING\n"); 1223 dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u:" 1224 " protocol=%d\n", qc->tf.command, status, ap->print_id, 1225 qc->tf.protocol); 1226 1227 /* clear active bit */ 1228 mask = (~(qcmd_tag_to_mask(tag))); 1229 host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \ 1230 & mask; 1231 host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \ 1232 & mask; 1233 ata_qc_complete(qc); 1234 return 0; 1235 } 1236 1237 static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev) 1238 { 1239 /* Enable selective interrupts by setting the interrupt maskregister*/ 1240 out_le32(&hsdev->sata_dwc_regs->intmr, 1241 SATA_DWC_INTMR_ERRM | 1242 SATA_DWC_INTMR_NEWFPM | 1243 SATA_DWC_INTMR_PMABRTM | 1244 SATA_DWC_INTMR_DMATM); 1245 /* 1246 * Unmask the error bits that should trigger an error interrupt by 1247 * setting the error mask register. 1248 */ 1249 out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS); 1250 1251 dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", 1252 __func__, in_le32(&hsdev->sata_dwc_regs->intmr), 1253 in_le32(&hsdev->sata_dwc_regs->errmr)); 1254 } 1255 1256 static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) 1257 { 1258 port->cmd_addr = (void *)base + 0x00; 1259 port->data_addr = (void *)base + 0x00; 1260 1261 port->error_addr = (void *)base + 0x04; 1262 port->feature_addr = (void *)base + 0x04; 1263 1264 port->nsect_addr = (void *)base + 0x08; 1265 1266 port->lbal_addr = (void *)base + 0x0c; 1267 port->lbam_addr = (void *)base + 0x10; 1268 port->lbah_addr = (void *)base + 0x14; 1269 1270 port->device_addr = (void *)base + 0x18; 1271 port->command_addr = (void *)base + 0x1c; 1272 port->status_addr = (void *)base + 0x1c; 1273 1274 port->altstatus_addr = (void *)base + 0x20; 1275 port->ctl_addr = (void *)base + 0x20; 1276 } 1277 1278 /* 1279 * Function : sata_dwc_port_start 1280 * arguments : struct ata_ioports *port 1281 * Return value : returns 0 if success, error code otherwise 1282 * This function allocates the scatter gather LLI table for AHB DMA 1283 */ 1284 static int sata_dwc_port_start(struct ata_port *ap) 1285 { 1286 int err = 0; 1287 struct sata_dwc_device *hsdev; 1288 struct sata_dwc_device_port *hsdevp = NULL; 1289 struct device *pdev; 1290 int i; 1291 1292 hsdev = HSDEV_FROM_AP(ap); 1293 1294 dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no); 1295 1296 hsdev->host = ap->host; 1297 pdev = ap->host->dev; 1298 if (!pdev) { 1299 dev_err(ap->dev, "%s: no ap->host->dev\n", __func__); 1300 err = -ENODEV; 1301 goto CLEANUP; 1302 } 1303 1304 /* Allocate Port Struct */ 1305 hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL); 1306 if (!hsdevp) { 1307 dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__); 1308 err = -ENOMEM; 1309 goto CLEANUP; 1310 } 1311 hsdevp->hsdev = hsdev; 1312 1313 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) 1314 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; 1315 1316 ap->bmdma_prd = 0; /* set these so libata doesn't use them */ 1317 ap->bmdma_prd_dma = 0; 1318 1319 /* 1320 * DMA - Assign scatter gather LLI table. We can't use the libata 1321 * version since it's PRD is IDE PCI specific. 1322 */ 1323 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { 1324 hsdevp->llit[i] = dma_alloc_coherent(pdev, 1325 SATA_DWC_DMAC_LLI_TBL_SZ, 1326 &(hsdevp->llit_dma[i]), 1327 GFP_ATOMIC); 1328 if (!hsdevp->llit[i]) { 1329 dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", 1330 __func__); 1331 err = -ENOMEM; 1332 goto CLEANUP_ALLOC; 1333 } 1334 } 1335 1336 if (ap->port_no == 0) { 1337 dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", 1338 __func__); 1339 out_le32(&hsdev->sata_dwc_regs->dmacr, 1340 SATA_DWC_DMACR_TXRXCH_CLEAR); 1341 1342 dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", 1343 __func__); 1344 out_le32(&hsdev->sata_dwc_regs->dbtsr, 1345 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | 1346 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); 1347 } 1348 1349 /* Clear any error bits before libata starts issuing commands */ 1350 clear_serror(); 1351 ap->private_data = hsdevp; 1352 dev_dbg(ap->dev, "%s: done\n", __func__); 1353 return 0; 1354 1355 CLEANUP_ALLOC: 1356 kfree(hsdevp); 1357 CLEANUP: 1358 dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id); 1359 return err; 1360 } 1361 1362 static void sata_dwc_port_stop(struct ata_port *ap) 1363 { 1364 int i; 1365 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 1366 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1367 1368 dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id); 1369 1370 if (hsdevp && hsdev) { 1371 /* deallocate LLI table */ 1372 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { 1373 dma_free_coherent(ap->host->dev, 1374 SATA_DWC_DMAC_LLI_TBL_SZ, 1375 hsdevp->llit[i], hsdevp->llit_dma[i]); 1376 } 1377 1378 kfree(hsdevp); 1379 } 1380 ap->private_data = NULL; 1381 } 1382 1383 /* 1384 * Function : sata_dwc_exec_command_by_tag 1385 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued 1386 * Return value : None 1387 * This function keeps track of individual command tag ids and calls 1388 * ata_exec_command in libata 1389 */ 1390 static void sata_dwc_exec_command_by_tag(struct ata_port *ap, 1391 struct ata_taskfile *tf, 1392 u8 tag, u32 cmd_issued) 1393 { 1394 unsigned long flags; 1395 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1396 1397 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, 1398 ata_get_cmd_descript(tf->command), tag); 1399 1400 spin_lock_irqsave(&ap->host->lock, flags); 1401 hsdevp->cmd_issued[tag] = cmd_issued; 1402 spin_unlock_irqrestore(&ap->host->lock, flags); 1403 /* 1404 * Clear SError before executing a new command. 1405 * sata_dwc_scr_write and read can not be used here. Clearing the PM 1406 * managed SError register for the disk needs to be done before the 1407 * task file is loaded. 1408 */ 1409 clear_serror(); 1410 ata_sff_exec_command(ap, tf); 1411 } 1412 1413 static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag) 1414 { 1415 sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag, 1416 SATA_DWC_CMD_ISSUED_PEND); 1417 } 1418 1419 static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc) 1420 { 1421 u8 tag = qc->tag; 1422 1423 if (ata_is_ncq(qc->tf.protocol)) { 1424 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", 1425 __func__, qc->ap->link.sactive, tag); 1426 } else { 1427 tag = 0; 1428 } 1429 sata_dwc_bmdma_setup_by_tag(qc, tag); 1430 } 1431 1432 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) 1433 { 1434 int start_dma; 1435 u32 reg, dma_chan; 1436 struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc); 1437 struct ata_port *ap = qc->ap; 1438 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1439 int dir = qc->dma_dir; 1440 dma_chan = hsdevp->dma_chan[tag]; 1441 1442 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) { 1443 start_dma = 1; 1444 if (dir == DMA_TO_DEVICE) 1445 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX; 1446 else 1447 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX; 1448 } else { 1449 dev_err(ap->dev, "%s: Command not pending cmd_issued=%d " 1450 "(tag=%d) DMA NOT started\n", __func__, 1451 hsdevp->cmd_issued[tag], tag); 1452 start_dma = 0; 1453 } 1454 1455 dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s " 1456 "start_dma? %x\n", __func__, qc, tag, qc->tf.command, 1457 get_dma_dir_descript(qc->dma_dir), start_dma); 1458 sata_dwc_tf_dump(&(qc->tf)); 1459 1460 if (start_dma) { 1461 reg = core_scr_read(SCR_ERROR); 1462 if (reg & SATA_DWC_SERROR_ERR_BITS) { 1463 dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n", 1464 __func__, reg); 1465 } 1466 1467 if (dir == DMA_TO_DEVICE) 1468 out_le32(&hsdev->sata_dwc_regs->dmacr, 1469 SATA_DWC_DMACR_TXCHEN); 1470 else 1471 out_le32(&hsdev->sata_dwc_regs->dmacr, 1472 SATA_DWC_DMACR_RXCHEN); 1473 1474 /* Enable AHB DMA transfer on the specified channel */ 1475 dma_dwc_xfer_start(dma_chan); 1476 } 1477 } 1478 1479 static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) 1480 { 1481 u8 tag = qc->tag; 1482 1483 if (ata_is_ncq(qc->tf.protocol)) { 1484 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", 1485 __func__, qc->ap->link.sactive, tag); 1486 } else { 1487 tag = 0; 1488 } 1489 dev_dbg(qc->ap->dev, "%s\n", __func__); 1490 sata_dwc_bmdma_start_by_tag(qc, tag); 1491 } 1492 1493 /* 1494 * Function : sata_dwc_qc_prep_by_tag 1495 * arguments : ata_queued_cmd *qc, u8 tag 1496 * Return value : None 1497 * qc_prep for a particular queued command based on tag 1498 */ 1499 static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) 1500 { 1501 struct scatterlist *sg = qc->sg; 1502 struct ata_port *ap = qc->ap; 1503 int dma_chan; 1504 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 1505 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1506 1507 dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", 1508 __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir), 1509 qc->n_elem); 1510 1511 dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag], 1512 hsdevp->llit_dma[tag], 1513 (void *__iomem)(&hsdev->sata_dwc_regs->\ 1514 dmadr), qc->dma_dir); 1515 if (dma_chan < 0) { 1516 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", 1517 __func__, dma_chan); 1518 return; 1519 } 1520 hsdevp->dma_chan[tag] = dma_chan; 1521 } 1522 1523 static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) 1524 { 1525 u32 sactive; 1526 u8 tag = qc->tag; 1527 struct ata_port *ap = qc->ap; 1528 1529 #ifdef DEBUG_NCQ 1530 if (qc->tag > 0 || ap->link.sactive > 1) 1531 dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d " 1532 "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", 1533 __func__, ap->print_id, qc->tf.command, 1534 ata_get_cmd_descript(qc->tf.command), 1535 qc->tag, get_prot_descript(qc->tf.protocol), 1536 ap->link.active_tag, ap->link.sactive); 1537 #endif 1538 1539 if (!ata_is_ncq(qc->tf.protocol)) 1540 tag = 0; 1541 sata_dwc_qc_prep_by_tag(qc, tag); 1542 1543 if (ata_is_ncq(qc->tf.protocol)) { 1544 sactive = core_scr_read(SCR_ACTIVE); 1545 sactive |= (0x00000001 << tag); 1546 core_scr_write(SCR_ACTIVE, sactive); 1547 1548 dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x " 1549 "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive, 1550 sactive); 1551 1552 ap->ops->sff_tf_load(ap, &qc->tf); 1553 sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag, 1554 SATA_DWC_CMD_ISSUED_PEND); 1555 } else { 1556 ata_sff_qc_issue(qc); 1557 } 1558 return 0; 1559 } 1560 1561 /* 1562 * Function : sata_dwc_qc_prep 1563 * arguments : ata_queued_cmd *qc 1564 * Return value : None 1565 * qc_prep for a particular queued command 1566 */ 1567 1568 static void sata_dwc_qc_prep(struct ata_queued_cmd *qc) 1569 { 1570 if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO)) 1571 return; 1572 1573 #ifdef DEBUG_NCQ 1574 if (qc->tag > 0) 1575 dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n", 1576 __func__, qc->tag, qc->ap->link.active_tag); 1577 1578 return ; 1579 #endif 1580 } 1581 1582 static void sata_dwc_error_handler(struct ata_port *ap) 1583 { 1584 ap->link.flags |= ATA_LFLAG_NO_HRST; 1585 ata_sff_error_handler(ap); 1586 } 1587 1588 /* 1589 * scsi mid-layer and libata interface structures 1590 */ 1591 static struct scsi_host_template sata_dwc_sht = { 1592 ATA_NCQ_SHT(DRV_NAME), 1593 /* 1594 * test-only: Currently this driver doesn't handle NCQ 1595 * correctly. We enable NCQ but set the queue depth to a 1596 * max of 1. This will get fixed in in a future release. 1597 */ 1598 .sg_tablesize = LIBATA_MAX_PRD, 1599 .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */ 1600 .dma_boundary = ATA_DMA_BOUNDARY, 1601 }; 1602 1603 static struct ata_port_operations sata_dwc_ops = { 1604 .inherits = &ata_sff_port_ops, 1605 1606 .error_handler = sata_dwc_error_handler, 1607 1608 .qc_prep = sata_dwc_qc_prep, 1609 .qc_issue = sata_dwc_qc_issue, 1610 1611 .scr_read = sata_dwc_scr_read, 1612 .scr_write = sata_dwc_scr_write, 1613 1614 .port_start = sata_dwc_port_start, 1615 .port_stop = sata_dwc_port_stop, 1616 1617 .bmdma_setup = sata_dwc_bmdma_setup, 1618 .bmdma_start = sata_dwc_bmdma_start, 1619 }; 1620 1621 static const struct ata_port_info sata_dwc_port_info[] = { 1622 { 1623 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ, 1624 .pio_mask = ATA_PIO4, 1625 .udma_mask = ATA_UDMA6, 1626 .port_ops = &sata_dwc_ops, 1627 }, 1628 }; 1629 1630 static int sata_dwc_probe(struct platform_device *ofdev) 1631 { 1632 struct sata_dwc_device *hsdev; 1633 u32 idr, versionr; 1634 char *ver = (char *)&versionr; 1635 u8 *base = NULL; 1636 int err = 0; 1637 int irq, rc; 1638 struct ata_host *host; 1639 struct ata_port_info pi = sata_dwc_port_info[0]; 1640 const struct ata_port_info *ppi[] = { &pi, NULL }; 1641 1642 /* Allocate DWC SATA device */ 1643 hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL); 1644 if (hsdev == NULL) { 1645 dev_err(&ofdev->dev, "kmalloc failed for hsdev\n"); 1646 err = -ENOMEM; 1647 goto error; 1648 } 1649 1650 /* Ioremap SATA registers */ 1651 base = of_iomap(ofdev->dev.of_node, 0); 1652 if (!base) { 1653 dev_err(&ofdev->dev, "ioremap failed for SATA register" 1654 " address\n"); 1655 err = -ENODEV; 1656 goto error_kmalloc; 1657 } 1658 hsdev->reg_base = base; 1659 dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); 1660 1661 /* Synopsys DWC SATA specific Registers */ 1662 hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET); 1663 1664 /* Allocate and fill host */ 1665 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); 1666 if (!host) { 1667 dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n"); 1668 err = -ENOMEM; 1669 goto error_iomap; 1670 } 1671 1672 host->private_data = hsdev; 1673 1674 /* Setup port */ 1675 host->ports[0]->ioaddr.cmd_addr = base; 1676 host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; 1677 host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET; 1678 sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base); 1679 1680 /* Read the ID and Version Registers */ 1681 idr = in_le32(&hsdev->sata_dwc_regs->idr); 1682 versionr = in_le32(&hsdev->sata_dwc_regs->versionr); 1683 dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n", 1684 idr, ver[0], ver[1], ver[2]); 1685 1686 /* Get SATA DMA interrupt number */ 1687 irq = irq_of_parse_and_map(ofdev->dev.of_node, 1); 1688 if (irq == NO_IRQ) { 1689 dev_err(&ofdev->dev, "no SATA DMA irq\n"); 1690 err = -ENODEV; 1691 goto error_out; 1692 } 1693 1694 /* Get physical SATA DMA register base address */ 1695 host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1); 1696 if (!(host_pvt.sata_dma_regs)) { 1697 dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" 1698 " address\n"); 1699 err = -ENODEV; 1700 goto error_out; 1701 } 1702 1703 /* Save dev for later use in dev_xxx() routines */ 1704 host_pvt.dwc_dev = &ofdev->dev; 1705 1706 /* Initialize AHB DMAC */ 1707 dma_dwc_init(hsdev, irq); 1708 1709 /* Enable SATA Interrupts */ 1710 sata_dwc_enable_interrupts(hsdev); 1711 1712 /* Get SATA interrupt number */ 1713 irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); 1714 if (irq == NO_IRQ) { 1715 dev_err(&ofdev->dev, "no SATA DMA irq\n"); 1716 err = -ENODEV; 1717 goto error_out; 1718 } 1719 1720 /* 1721 * Now, register with libATA core, this will also initiate the 1722 * device discovery process, invoking our port_start() handler & 1723 * error_handler() to execute a dummy Softreset EH session 1724 */ 1725 rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); 1726 1727 if (rc != 0) 1728 dev_err(&ofdev->dev, "failed to activate host"); 1729 1730 dev_set_drvdata(&ofdev->dev, host); 1731 return 0; 1732 1733 error_out: 1734 /* Free SATA DMA resources */ 1735 dma_dwc_exit(hsdev); 1736 1737 error_iomap: 1738 iounmap(base); 1739 error_kmalloc: 1740 kfree(hsdev); 1741 error: 1742 return err; 1743 } 1744 1745 static int sata_dwc_remove(struct platform_device *ofdev) 1746 { 1747 struct device *dev = &ofdev->dev; 1748 struct ata_host *host = dev_get_drvdata(dev); 1749 struct sata_dwc_device *hsdev = host->private_data; 1750 1751 ata_host_detach(host); 1752 dev_set_drvdata(dev, NULL); 1753 1754 /* Free SATA DMA resources */ 1755 dma_dwc_exit(hsdev); 1756 1757 iounmap(hsdev->reg_base); 1758 kfree(hsdev); 1759 kfree(host); 1760 dev_dbg(&ofdev->dev, "done\n"); 1761 return 0; 1762 } 1763 1764 static const struct of_device_id sata_dwc_match[] = { 1765 { .compatible = "amcc,sata-460ex", }, 1766 {} 1767 }; 1768 MODULE_DEVICE_TABLE(of, sata_dwc_match); 1769 1770 static struct platform_driver sata_dwc_driver = { 1771 .driver = { 1772 .name = DRV_NAME, 1773 .owner = THIS_MODULE, 1774 .of_match_table = sata_dwc_match, 1775 }, 1776 .probe = sata_dwc_probe, 1777 .remove = sata_dwc_remove, 1778 }; 1779 1780 static int __init sata_dwc_init(void) 1781 { 1782 return platform_driver_register(&sata_dwc_driver); 1783 } 1784 1785 static void __exit sata_dwc_exit(void) 1786 { 1787 platform_driver_unregister(&sata_dwc_driver); 1788 } 1789 1790 module_init(sata_dwc_init); 1791 module_exit(sata_dwc_exit); 1792 1793 MODULE_LICENSE("GPL"); 1794 MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>"); 1795 MODULE_DESCRIPTION("DesignWare Cores SATA controller low lever driver"); 1796 MODULE_VERSION(DRV_VERSION); 1797