/*
 *  ahci.c - AHCI SATA support
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"ahci"
#define DRV_VERSION	"2.0"


enum {
	AHCI_PCI_BAR		= 5,
	AHCI_MAX_PORTS		= 32,
	AHCI_MAX_SG		= 168, /* hardware max is 64K */
	AHCI_DMA_BOUNDARY	= 0xffffffff,
	AHCI_USE_CLUSTERING	= 0,
	AHCI_MAX_CMDS		= 32,
	AHCI_CMD_SZ		= 32,
	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
	AHCI_RX_FIS_SZ		= 256,
	AHCI_CMD_TBL_CDB	= 0x40,
	AHCI_CMD_TBL_HDR_SZ	= 0x80,
	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
				  AHCI_RX_FIS_SZ,
	AHCI_IRQ_ON_SG		= (1 << 31),
	AHCI_CMD_ATAPI		= (1 << 5),
	AHCI_CMD_WRITE		= (1 << 6),
	AHCI_CMD_PREFETCH	= (1 << 7),
	AHCI_CMD_RESET		= (1 << 8),
	AHCI_CMD_CLR_BUSY	= (1 << 10),

	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */

	board_ahci		= 0,
	board_ahci_pi		= 1,
	board_ahci_vt8251	= 2,
	board_ahci_ign_iferr	= 3,

	/* global controller registers */
	HOST_CAP		= 0x00, /* host capabilities */
	HOST_CTL		= 0x04, /* global host control */
	HOST_IRQ_STAT		= 0x08, /* interrupt status */
	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */

	/* HOST_CTL bits */
	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */

	/* HOST_CAP bits */
	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */

	/* registers for each SATA port */
	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
	PORT_IRQ_STAT		= 0x10, /* interrupt status */
	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
	PORT_CMD		= 0x18, /* port command */
	PORT_TFDATA		= 0x20,	/* taskfile data */
	PORT_SIG		= 0x24, /* device TF signature */
	PORT_CMD_ISSUE		= 0x38, /* command issue */
	PORT_SCR		= 0x28, /* SATA phy register block */
	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */

	/* PORT_IRQ_{STAT,MASK} bits */
	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */

	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
	PORT_IRQ_DEV_ILCK	= (1 << 7), /* device interlock */
	PORT_IRQ_CONNECT	= (1 << 6), /* port connect change status */
	PORT_IRQ_SG_DONE	= (1 << 5), /* descriptor processed */
	PORT_IRQ_UNK_FIS	= (1 << 4), /* unknown FIS rx'd */
	PORT_IRQ_SDB_FIS	= (1 << 3), /* Set Device Bits FIS rx'd */
	PORT_IRQ_DMAS_FIS	= (1 << 2), /* DMA Setup FIS rx'd */
	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */

	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
				  PORT_IRQ_IF_ERR |
				  PORT_IRQ_CONNECT |
				  PORT_IRQ_PHYRDY |
				  PORT_IRQ_UNK_FIS,
	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
				  PORT_IRQ_TF_ERR |
				  PORT_IRQ_HBUS_DATA_ERR,
	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

	/* PORT_CMD bits */
	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
	PORT_CMD_CLO		= (1 << 3), /* Command list override */
	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */

	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */

	/* ap->flags bits */
	AHCI_FLAG_NO_NCQ		= (1 << 24),
	AHCI_FLAG_IGN_IRQ_IF_ERR	= (1 << 25), /* ignore IRQ_IF_ERR */
	AHCI_FLAG_HONOR_PI		= (1 << 26), /* honor PORTS_IMPL */
};

struct ahci_cmd_hdr {
	u32			opts;
	u32			status;
	u32			tbl_addr;
	u32			tbl_addr_hi;
	u32			reserved[4];
};

struct ahci_sg {
	u32			addr;
	u32			addr_hi;
	u32			reserved;
	u32			flags_size;
};

struct ahci_host_priv {
	u32			cap;	/* cache of HOST_CAP register */
	u32			port_map; /* cache of HOST_PORTS_IMPL reg */
};

struct ahci_port_priv {
	struct ahci_cmd_hdr	*cmd_slot;
	dma_addr_t		cmd_slot_dma;
	void			*cmd_tbl;
	dma_addr_t		cmd_tbl_dma;
	void			*rx_fis;
	dma_addr_t		rx_fis_dma;
	/* for NCQ spurious interrupt analysis */
	int			ncq_saw_spurious_sdb_cnt;
	unsigned int		ncq_saw_d2h:1;
	unsigned int		ncq_saw_dmas:1;
};

static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t ahci_interrupt (int irq, void *dev_instance);
static void ahci_irq_clear(struct ata_port *ap);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static u8 ahci_check_status(struct ata_port *ap);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_vt8251_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_port_resume(struct ata_port *ap);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);

static struct scsi_host_template ahci_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= AHCI_MAX_CMDS - 1,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= AHCI_MAX_SG,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= AHCI_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= AHCI_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};

static const struct ata_port_operations ahci_ops = {
	.port_disable		= ata_port_disable,

	.check_status		= ahci_check_status,
	.check_altstatus	= ahci_check_status,
	.dev_select		= ata_noop_dev_select,

	.tf_read		= ahci_tf_read,

	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,

	.irq_handler		= ahci_interrupt,
	.irq_clear		= ahci_irq_clear,
	.irq_on			= ata_dummy_irq_on,
	.irq_ack		= ata_dummy_irq_ack,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,

	.error_handler		= ahci_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,

	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,

	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};

static const struct ata_port_operations ahci_vt8251_ops = {
	.port_disable		= ata_port_disable,

	.check_status		= ahci_check_status,
	.check_altstatus	= ahci_check_status,
	.dev_select		= ata_noop_dev_select,

	.tf_read		= ahci_tf_read,

	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,

	.irq_handler		= ahci_interrupt,
	.irq_clear		= ahci_irq_clear,
	.irq_on			= ata_dummy_irq_on,
	.irq_ack		= ata_dummy_irq_ack,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,

	.error_handler		= ahci_vt8251_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,

	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,

	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};

static const struct ata_port_info ahci_port_info[] = {
	/* board_ahci */
	{
		.sht		= &ahci_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_SKIP_D2H_BSY,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &ahci_ops,
	},
	/* board_ahci_pi */
	{
		.sht		= &ahci_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_SKIP_D2H_BSY | AHCI_FLAG_HONOR_PI,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &ahci_ops,
	},
	/* board_ahci_vt8251 */
	{
		.sht		= &ahci_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_SKIP_D2H_BSY |
				  ATA_FLAG_HRST_TO_RESUME | AHCI_FLAG_NO_NCQ,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &ahci_vt8251_ops,
	},
	/* board_ahci_ign_iferr */
	{
		.sht		= &ahci_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_SKIP_D2H_BSY |
				  AHCI_FLAG_IGN_IRQ_IF_ERR,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &ahci_ops,
	},
};

static const struct pci_device_id ahci_pci_tbl[] = {
	/* Intel */
	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci_pi }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_pi }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci_pi }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci_pi }, /* ICH8M */
	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci_pi }, /* ICH8M */
	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci_pi }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci_pi }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci_pi }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci_pi }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci_pi }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */

	/* JMicron */
	{ PCI_VDEVICE(JMICRON, 0x2360), board_ahci_ign_iferr }, /* JMB360 */
	{ PCI_VDEVICE(JMICRON, 0x2361), board_ahci_ign_iferr }, /* JMB361 */
	{ PCI_VDEVICE(JMICRON, 0x2363), board_ahci_ign_iferr }, /* JMB363 */
	{ PCI_VDEVICE(JMICRON, 0x2365), board_ahci_ign_iferr }, /* JMB365 */
	{ PCI_VDEVICE(JMICRON, 0x2366), board_ahci_ign_iferr }, /* JMB366 */

	/* ATI */
	{ PCI_VDEVICE(ATI, 0x4380), board_ahci }, /* ATI SB600 non-raid */
	{ PCI_VDEVICE(ATI, 0x4381), board_ahci }, /* ATI SB600 raid */

	/* VIA */
	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */

	/* NVIDIA */
	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci },	/* MCP67 */

	/* SiS */
	{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */

	/* Generic, PCI class code for AHCI */
	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },

	{ }	/* terminate list */
};


static struct pci_driver ahci_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= ahci_pci_tbl,
	.probe			= ahci_init_one,
	.remove			= ata_pci_remove_one,
	.suspend		= ahci_pci_device_suspend,
	.resume			= ahci_pci_device_resume,
};


static inline int ahci_nr_ports(u32 cap)
{
	return (cap & 0x1f) + 1;
}

static inline void __iomem *ahci_port_base(void __iomem *base,
					   unsigned int port)
{
	return base + 0x100 + (port * 0x80);
}

static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int sc_reg;

	switch (sc_reg_in) {
	case SCR_STATUS:	sc_reg = 0; break;
	case SCR_CONTROL:	sc_reg = 1; break;
	case SCR_ERROR:		sc_reg = 2; break;
	case SCR_ACTIVE:	sc_reg = 3; break;
	default:
		return 0xffffffffU;
	}

	return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
}


static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
			    u32 val)
{
	unsigned int sc_reg;

	switch (sc_reg_in) {
	case SCR_STATUS:	sc_reg = 0; break;
	case SCR_CONTROL:	sc_reg = 1; break;
	case SCR_ERROR:		sc_reg = 2; break;
	case SCR_ACTIVE:	sc_reg = 3; break;
	default:
		return;
	}

	writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void ahci_start_engine(void __iomem *port_mmio)
{
	u32 tmp;

	/* start DMA */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */
}

static int ahci_stop_engine(void __iomem *port_mmio)
{
	u32 tmp;

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop. This could be as long as 500 msec */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}

static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
			      dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
{
	u32 tmp;

	/* set FIS registers */
	if (cap & HOST_CAP_64)
		writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
	writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (cap & HOST_CAP_64)
		writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
	writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}

static int ahci_stop_fis_rx(void __iomem *port_mmio)
{
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}

static void ahci_power_up(void __iomem *port_mmio, u32 cap)
{
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}

static void ahci_power_down(void __iomem *port_mmio, u32 cap)
{
	u32 cmd, scontrol;

	if (!(cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}

static void ahci_init_port(void __iomem *port_mmio, u32 cap,
			   dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
{
	/* enable FIS reception */
	ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);

	/* enable DMA */
	ahci_start_engine(port_mmio);
}

static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
{
	int rc;

	/* disable DMA */
	rc = ahci_stop_engine(port_mmio);
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	/* disable FIS reception */
	rc = ahci_stop_fis_rx(port_mmio);
	if (rc) {
		*emsg = "failed to stop FIS RX";
		return rc;
	}

	return 0;
}

static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
{
	u32 cap_save, impl_save, tmp;

	cap_save = readl(mmio + HOST_CAP);
	impl_save = readl(mmio + HOST_PORTS_IMPL);

	/* global controller reset */
	tmp = readl(mmio + HOST_CTL);
	if ((tmp & HOST_RESET) == 0) {
		writel(tmp | HOST_RESET, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	/* reset must complete within 1 second, or
	 * the hardware should be considered fried.
	 */
	ssleep(1);

	tmp = readl(mmio + HOST_CTL);
	if (tmp & HOST_RESET) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "controller reset failed (0x%x)\n", tmp);
		return -EIO;
	}

	/* turn on AHCI mode */
	writel(HOST_AHCI_EN, mmio + HOST_CTL);
	(void) readl(mmio + HOST_CTL);	/* flush */

	/* These write-once registers are normally cleared on reset.
	 * Restore BIOS values... which we HOPE were present before
	 * reset.
	 */
	if (!impl_save) {
		impl_save = (1 << ahci_nr_ports(cap_save)) - 1;
		dev_printk(KERN_WARNING, &pdev->dev,
			   "PORTS_IMPL is zero, forcing 0x%x\n", impl_save);
	}
	writel(cap_save, mmio + HOST_CAP);
	writel(impl_save, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */

	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
		u16 tmp16;

		/* configure PCS */
		pci_read_config_word(pdev, 0x92, &tmp16);
		tmp16 |= 0xf;
		pci_write_config_word(pdev, 0x92, tmp16);
	}

	return 0;
}

static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
				 int n_ports, unsigned int port_flags,
				 struct ahci_host_priv *hpriv)
{
	int i, rc;
	u32 tmp;

	for (i = 0; i < n_ports; i++) {
		void __iomem *port_mmio = ahci_port_base(mmio, i);
		const char *emsg = NULL;

		if ((port_flags & AHCI_FLAG_HONOR_PI) &&
		    !(hpriv->port_map & (1 << i)))
			continue;

		/* make sure port is not active */
		rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
		if (rc)
			dev_printk(KERN_WARNING, &pdev->dev,
				   "%s (%d)\n", emsg, rc);

		/* clear SError */
		tmp = readl(port_mmio + PORT_SCR_ERR);
		VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
		writel(tmp, port_mmio + PORT_SCR_ERR);

		/* clear port IRQ */
		tmp = readl(port_mmio + PORT_IRQ_STAT);
		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
		if (tmp)
			writel(tmp, port_mmio + PORT_IRQ_STAT);

		writel(1 << i, mmio + HOST_IRQ_STAT);
	}

	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
}

static unsigned int ahci_dev_classify(struct ata_port *ap)
{
	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
	struct ata_taskfile tf;
	u32 tmp;

	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah		= (tmp >> 24)	& 0xff;
	tf.lbam		= (tmp >> 16)	& 0xff;
	tf.lbal		= (tmp >> 8)	& 0xff;
	tf.nsect	= (tmp)		& 0xff;

	return ata_dev_classify(&tf);
}

static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts)
{
	dma_addr_t cmd_tbl_dma;

	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;

	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
	pp->cmd_slot[tag].status = 0;
	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
	/* (>> 16) >> 16 instead of >> 32 avoids an invalid-shift warning
	 * when dma_addr_t is only 32 bits wide */
	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}

static int ahci_clo(struct ata_port *ap)
{
	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u32 tmp;

	if (!(hpriv->cap & HOST_CAP_CLO))
		return -EOPNOTSUPP;

	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		return -EIO;

	return 0;
}

static int ahci_softreset(struct ata_port *ap, unsigned int *class)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	const u32 cmd_fis_len = 5; /* five dwords */
	const char *reason = NULL;
	struct ata_taskfile tf;
	u32 tmp;
	u8 *fis;
	int rc;

	DPRINTK("ENTER\n");

	if (ata_port_offline(ap)) {
		DPRINTK("PHY reports no device\n");
		*class = ATA_DEV_NONE;
		return 0;
	}

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_stop_engine(port_mmio);
	if (rc) {
		reason = "failed to stop engine";
		goto fail_restart;
	}

	/* check BUSY/DRQ, perform Command List Override if necessary */
	if (ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ)) {
		rc = ahci_clo(ap);

		if (rc == -EOPNOTSUPP) {
			reason = "port busy but CLO unavailable";
			goto fail_restart;
		} else if (rc) {
			reason = "port busy but CLO failed";
			goto fail_restart;
		}
	}

	/* restart engine */
	ahci_start_engine(port_mmio);

	ata_tf_init(ap->device, &tf);
	fis = pp->cmd_tbl;

	/* issue the first H2D Register FIS */
	ahci_fill_cmd_slot(pp, 0,
			   cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);

	tf.ctl |= ATA_SRST;
	ata_tf_to_fis(&tf, fis, 0);
	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */

	writel(1, port_mmio + PORT_CMD_ISSUE);

	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
	if (tmp & 0x1) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second H2D Register FIS */
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len);

	tf.ctl &= ~ATA_SRST;
	ata_tf_to_fis(&tf, fis, 0);
	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */

	writel(1, port_mmio + PORT_CMD_ISSUE);
	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	msleep(150);

	*class = ATA_DEV_NONE;
	if (ata_port_online(ap)) {
		if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
			rc = -EIO;
			reason = "device not ready";
			goto fail;
		}
		*class = ahci_dev_classify(ap);
	}

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail_restart:
	ahci_start_engine(port_mmio);
 fail:
	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}

static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
{
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(port_mmio);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(ap->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, d2h_fis, 0);

	rc = sata_std_hardreset(ap, class);

	ahci_start_engine(port_mmio);

	if (rc == 0 && ata_port_online(ap))
		*class = ahci_dev_classify(ap);
	if (*class == ATA_DEV_UNKNOWN)
		*class = ATA_DEV_NONE;

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}

static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(port_mmio);

	rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context));

	/* vt8251 needs SError cleared for the port to operate */
	ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR));

	ahci_start_engine(port_mmio);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);

	/* vt8251 doesn't clear BSY on signature FIS reception,
	 * request follow-up softreset.
	 */
	return rc ?: -EAGAIN;
}

static void ahci_postreset(struct ata_port *ap, unsigned int *class)
{
	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
	u32 new_tmp, tmp;

	ata_std_postreset(ap, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}

static u8 ahci_check_status(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;

	return readl(mmio + PORT_TFDATA) & 0xFF;
}

static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

	ata_tf_from_fis(d2h_fis, tf);
}

static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg;
	unsigned int n_sg = 0;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg->flags_size = cpu_to_le32(sg_len - 1);

		ahci_sg++;
		n_sg++;
	}

	return n_sg;
}

static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = is_atapi_taskfile(&qc->tf);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}

static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	serror = ahci_scr_read(ap, SCR_ERROR);
	ahci_scr_write(ap, SCR_ERROR, serror);

	/* analyze @irq_stat */
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (ap->flags & AHCI_FLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR)
		err_mask |= AC_ERR_DEV;

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		err_mask |= AC_ERR_HOST_BUS;
		action |= ATA_EH_SOFTRESET;
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(ehi, ", interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
			"connection status changed" : "PHY RDY changed");
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
				  unk[0], unk[1], unk[2], unk[3]);
	}

	/* okay, let's hand over to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void ahci_host_intr(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	struct ata_eh_info *ehi = &ap->eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	u32 status, qc_active;
	int rc, known_irq = 0;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (ap->sactive)
		qc_active = readl(port_mmio + PORT_SCR_ACT);
	else
		qc_active = readl(port_mmio + PORT_CMD_ISSUE);

	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
	if (rc > 0)
		return;
	if (rc < 0) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_SOFTRESET;
		ata_port_freeze(ap);
		return;
	}

	/* hmmm... a spurious interrupt */

	/* if !NCQ, ignore.  No modern ATA device has broken HSM
	 * implementation for non-NCQ commands.
	 */
	if (!ap->sactive)
		return;

	if (status & PORT_IRQ_D2H_REG_FIS) {
		if (!pp->ncq_saw_d2h)
			ata_port_printk(ap, KERN_INFO,
				"D2H reg with I during NCQ, "
				"this message won't be printed again\n");
		pp->ncq_saw_d2h = 1;
		known_irq = 1;
	}

	if (status & PORT_IRQ_DMAS_FIS) {
		if (!pp->ncq_saw_dmas)
			ata_port_printk(ap, KERN_INFO,
				"DMAS FIS during NCQ, "
				"this message won't be printed again\n");
		pp->ncq_saw_dmas = 1;
		known_irq = 1;
	}

	if (status & PORT_IRQ_SDB_FIS &&
		   pp->ncq_saw_spurious_sdb_cnt < 10) {
		/* SDB FIS containing spurious completions might be
		 * dangerous, we need to know more about them.  Print
		 * more of it.
		 */
		const __le32 *f = pp->rx_fis + RX_FIS_SDB;

		ata_port_printk(ap, KERN_INFO, "Spurious SDB FIS during NCQ "
				"issue=0x%x SAct=0x%x FIS=%08x:%08x%s\n",
				readl(port_mmio + PORT_CMD_ISSUE),
				readl(port_mmio + PORT_SCR_ACT),
				le32_to_cpu(f[0]), le32_to_cpu(f[1]),
				pp->ncq_saw_spurious_sdb_cnt < 10 ?
					"" : ", shutting up");

		pp->ncq_saw_spurious_sdb_cnt++;
		known_irq = 1;
	}

	if (!known_irq)
		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
				"(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n",
				status, ap->active_tag, ap->sactive);
}

static void ahci_irq_clear(struct ata_port *ap)
{
	/* TODO */
}

static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_ack = 0;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = host->iomap[AHCI_PCI_BAR];

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	irq_stat &= hpriv->port_map;
	if (!irq_stat)
		return IRQ_NONE;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_stat & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_host_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		irq_ack |= (1 << i);
	}

	if (irq_ack) {
		writel(irq_ack, mmio + HOST_IRQ_STAT);
		handled = 1;
	}

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ap->ioaddr.cmd_addr;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}

static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}

static void ahci_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	u32 tmp;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
}

static void ahci_error_handler(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);

	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(port_mmio);
		ahci_start_engine(port_mmio);
	}

	/* perform recovery */
	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_hardreset,
		  ahci_postreset);
}

static void ahci_vt8251_error_handler(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);

	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(port_mmio);
		ahci_start_engine(port_mmio);
	}

	/* perform recovery */
	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
		  ahci_postreset);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);

	if (qc->flags & ATA_QCFLAG_FAILED)
		qc->err_mask |= AC_ERR_OTHER;

	if (qc->err_mask) {
		/* make DMA engine forget about the failed command */
		ahci_stop_engine(port_mmio);
		ahci_start_engine(port_mmio);
	}
}

static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
	if (rc == 0)
		ahci_power_down(port_mmio, hpriv->cap);
	else {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_init_port(port_mmio, hpriv->cap,
			       pp->cmd_slot_dma, pp->rx_fis_dma);
	}

	return rc;
}

static int ahci_port_resume(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);

	ahci_power_up(port_mmio, hpriv->cap);
	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);

	return 0;
}

static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 ctl;

	if (mesg.event == PM_EVENT_SUSPEND) {
		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL);
		ctl &= ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}

static int ahci_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(mmio, pdev);
		if (rc)
			return rc;

		ahci_init_controller(mmio, pdev, host->n_ports,
				     host->ports[0]->flags, hpriv);
	}

	ata_host_resume(host);

	return 0;
}

static int ahci_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp;
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += AHCI_RX_FIS_SZ;
	mem_dma += AHCI_RX_FIS_SZ;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	ap->private_data = pp;

	/* power up port */
	ahci_power_up(port_mmio, hpriv->cap);

	/* initialize port */
	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);

	return 0;
}

static void ahci_port_stop(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}

static void ahci_setup_port(struct ata_ioports *port, void __iomem *base,
			    unsigned int port_idx)
{
	VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
	base = ahci_port_base(base, port_idx);
	VPRINTK("base now==0x%lx\n", base);

	port->cmd_addr		= base;
	port->scr_addr		= base + PORT_SCR;

	VPRINTK("EXIT\n");
}

static int ahci_host_init(struct ata_probe_ent *probe_ent)
{
	struct ahci_host_priv *hpriv = probe_ent->private_data;
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	void __iomem *mmio = probe_ent->iomap[AHCI_PCI_BAR];
	unsigned int i, cap_n_ports, using_dac;
	int rc;

	rc = ahci_reset_controller(mmio, pdev);
	if (rc)
		return rc;

	hpriv->cap = readl(mmio + HOST_CAP);
	hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
	cap_n_ports = ahci_nr_ports(hpriv->cap);

	VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
		hpriv->cap, hpriv->port_map, cap_n_ports);

	if (probe_ent->port_flags & AHCI_FLAG_HONOR_PI) {
		unsigned int n_ports = cap_n_ports;
		u32 port_map = hpriv->port_map;
		int max_port = 0;

		for (i = 0; i < AHCI_MAX_PORTS && n_ports; i++) {
			if (port_map & (1 << i)) {
				n_ports--;
				port_map &= ~(1 << i);
				max_port = i;
			} else
				probe_ent->dummy_port_mask |= 1 << i;
		}

		if (n_ports || port_map)
			dev_printk(KERN_WARNING, &pdev->dev,
				   "nr_ports (%u) and implemented port map "
				   "(0x%x) don't match\n",
				   cap_n_ports, hpriv->port_map);

		probe_ent->n_ports = max_port + 1;
	} else
		probe_ent->n_ports = cap_n_ports;

	using_dac = hpriv->cap & HOST_CAP_64;
	if (using_dac &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	for (i = 0; i < probe_ent->n_ports; i++)
		ahci_setup_port(&probe_ent->port[i], mmio, i);

	ahci_init_controller(mmio, pdev, probe_ent->n_ports,
			     probe_ent->port_flags, hpriv);

	pci_set_master(pdev);

	return 0;
}

static void ahci_print_info(struct ata_probe_ent *probe_ent)
{
	struct ahci_host_priv *hpriv = probe_ent->private_data;
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	void __iomem *mmio = probe_ent->iomap[AHCI_PCI_BAR];
	u32 vers, cap, impl, speed;
	const char *speed_s;
	u16 cc;
	const char *scc_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	impl = hpriv->port_map;

	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else
		speed_s = "?";

	pci_read_config_word(pdev, 0x0a, &cc);
	if (cc == PCI_CLASS_STORAGE_IDE)
		scc_s = "IDE";
	else if (cc == PCI_CLASS_STORAGE_SATA)
		scc_s = "SATA";
	else if (cc == PCI_CLASS_STORAGE_RAID)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		"AHCI %02x%02x.%02x%02x "
		"%u slots %u ports %s Gbps 0x%x impl %s mode\n",

		(vers >> 24) & 0xff,
		(vers >> 16) & 0xff,
		(vers >> 8) & 0xff,
		vers & 0xff,

		((cap >> 8) & 0x1f) + 1,
		(cap & 0x1f) + 1,
		speed_s,
		impl,
		scc_s);

	dev_printk(KERN_INFO, &pdev->dev,
		"flags: "
		"%s%s%s%s%s%s"
		"%s%s%s%s%s%s%s\n",

		cap & (1 << 31) ? "64bit " : "",
		cap & (1 << 30) ? "ncq " : "",
		cap & (1 << 28) ? "ilck " : "",
		cap & (1 << 27) ? "stag " : "",
		cap & (1 << 26) ? "pm " : "",
		cap & (1 << 25) ? "led " : "",

		cap & (1 << 24) ? "clo " : "",
		cap & (1 << 19) ? "nz " : "",
		cap & (1 << 18) ? "only " : "",
		cap & (1 << 17) ? "pmp " : "",
		cap & (1 << 15) ? "pio " : "",
		cap & (1 << 14) ? "slum " : "",
		cap & (1 << 13) ? "part " : ""
		);
}

static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	struct device *dev = &pdev->dev;
	struct ata_probe_ent *probe_ent;
	struct ahci_host_priv *hpriv;
	int rc;

	VPRINTK("ENTER\n");

	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
		/* Function 1 is the PATA controller except on the 368, where
		   we are not AHCI anyway */
		if (PCI_FUNC(pdev->devfn))
			return -ENODEV;
	}

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	if (pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL)
		return -ENOMEM;

	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	probe_ent->sht		= ahci_port_info[board_idx].sht;
	probe_ent->port_flags	= ahci_port_info[board_idx].flags;
	probe_ent->pio_mask	= ahci_port_info[board_idx].pio_mask;
	probe_ent->udma_mask	= ahci_port_info[board_idx].udma_mask;
	probe_ent->port_ops	= ahci_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->iomap = pcim_iomap_table(pdev);
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = ahci_host_init(probe_ent);
	if (rc)
		return rc;

	if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
	    (hpriv->cap & HOST_CAP_NCQ))
		probe_ent->port_flags |= ATA_FLAG_NCQ;

	ahci_print_info(probe_ent);

	if (!ata_device_add(probe_ent))
		return -ENODEV;

	devm_kfree(dev, probe_ent);
	return 0;
}

static int __init ahci_init(void)
{
	return pci_register_driver(&ahci_pci_driver);
}

static void __exit ahci_exit(void)
{
	pci_unregister_driver(&ahci_pci_driver);
}


MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);