1 /* 2 * Driver for the Micron P320 SSD 3 * Copyright (C) 2011 Micron Technology, Inc. 4 * 5 * Portions of this code were derived from works subjected to the 6 * following copyright: 7 * Copyright (C) 2009 Integrated Device Technology, Inc. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; either version 2 of the License, or 12 * (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 */ 20 21 #include <linux/pci.h> 22 #include <linux/interrupt.h> 23 #include <linux/ata.h> 24 #include <linux/delay.h> 25 #include <linux/hdreg.h> 26 #include <linux/uaccess.h> 27 #include <linux/random.h> 28 #include <linux/smp.h> 29 #include <linux/compat.h> 30 #include <linux/fs.h> 31 #include <linux/module.h> 32 #include <linux/genhd.h> 33 #include <linux/blkdev.h> 34 #include <linux/blk-mq.h> 35 #include <linux/bio.h> 36 #include <linux/dma-mapping.h> 37 #include <linux/idr.h> 38 #include <linux/kthread.h> 39 #include <../drivers/ata/ahci.h> 40 #include <linux/export.h> 41 #include <linux/debugfs.h> 42 #include <linux/prefetch.h> 43 #include "mtip32xx.h" 44 45 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32) 46 47 /* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */ 48 #define AHCI_RX_FIS_SZ 0x100 49 #define AHCI_RX_FIS_OFFSET 0x0 50 #define AHCI_IDFY_SZ ATA_SECT_SIZE 51 #define AHCI_IDFY_OFFSET 0x400 52 #define AHCI_SECTBUF_SZ ATA_SECT_SIZE 53 #define AHCI_SECTBUF_OFFSET 0x800 54 #define AHCI_SMARTBUF_SZ ATA_SECT_SIZE 55 #define AHCI_SMARTBUF_OFFSET 0xC00 56 /* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */ 57 #define BLOCK_DMA_ALLOC_SZ 4096 58 59 /* DMA region 
containing command table (should be 8192 bytes) */ 60 #define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr) 61 #define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ) 62 #define AHCI_CMD_TBL_OFFSET 0x0 63 64 /* DMA region per command (contains header and SGL) */ 65 #define AHCI_CMD_TBL_HDR_SZ 0x80 66 #define AHCI_CMD_TBL_HDR_OFFSET 0x0 67 #define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg)) 68 #define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ 69 #define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ) 70 71 72 #define HOST_CAP_NZDMA (1 << 19) 73 #define HOST_HSORG 0xFC 74 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24) 75 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16) 76 #define HSORG_HWREV 0xFF00 77 #define HSORG_STYLE 0x8 78 #define HSORG_SLOTGROUPS 0x7 79 80 #define PORT_COMMAND_ISSUE 0x38 81 #define PORT_SDBV 0x7C 82 83 #define PORT_OFFSET 0x100 84 #define PORT_MEM_SIZE 0x80 85 86 #define PORT_IRQ_ERR \ 87 (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \ 88 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \ 89 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \ 90 PORT_IRQ_OVERFLOW) 91 #define PORT_IRQ_LEGACY \ 92 (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS) 93 #define PORT_IRQ_HANDLED \ 94 (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \ 95 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \ 96 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY) 97 #define DEF_PORT_IRQ \ 98 (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS) 99 100 /* product numbers */ 101 #define MTIP_PRODUCT_UNKNOWN 0x00 102 #define MTIP_PRODUCT_ASICFPGA 0x11 103 104 /* Device instance number, incremented each time a device is probed. */ 105 static int instance; 106 107 static struct list_head online_list; 108 static struct list_head removing_list; 109 static spinlock_t dev_lock; 110 111 /* 112 * Global variable used to hold the major block device number 113 * allocated in mtip_init(). 
 */
static int mtip_major;
static struct dentry *dfs_parent;
static struct dentry *dfs_device_status;

/* Per-CPU usage counts, used when binding IRQ workers to CPUs. */
static u32 cpu_use[NR_CPUS];

/* Allocator for the per-device RSSD index (used in the disk name). */
static DEFINE_IDA(rssd_index_ida);

static int mtip_block_initialize(struct driver_data *dd);

#ifdef CONFIG_COMPAT
/* 32-bit compat layout of the IDE task request, for the compat ioctl path. */
struct mtip_compat_ide_task_request_s {
	__u8 io_ports[8];
	__u8 hob_ports[8];
	ide_reg_valid_t out_flags;
	ide_reg_valid_t in_flags;
	int data_phase;
	int req_cmd;
	compat_ulong_t out_size;
	compat_ulong_t in_size;
};
#endif

/*
 * This function check_for_surprise_removal is called
 * while card is removed from the system and it will
 * read the vendor id from the configuration space
 *
 * @pdev Pointer to the pci_dev structure.
 *
 * return value
 *	true if device removed, else false
 */
static bool mtip_check_surprise_removal(struct pci_dev *pdev)
{
	u16 vendor_id = 0;
	struct driver_data *dd = pci_get_drvdata(pdev);

	/* Surprise removal was already detected earlier. */
	if (dd->sr)
		return true;

	/* Read the vendorID from the configuration space */
	pci_read_config_word(pdev, 0x00, &vendor_id);
	/* An all-ones config read means the device is gone from the bus. */
	if (vendor_id == 0xFFFF) {
		dd->sr = true;
		if (dd->queue)
			blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);
		else
			dev_warn(&dd->pdev->dev,
				"%s: dd->queue is NULL\n", __func__);
		return true; /* device removed */
	}

	return false; /* device present */
}

/*
 * Map a command tag back to its driver-private mtip_cmd.
 *
 * NOTE(review): assumes blk_mq_tag_to_rq() never returns NULL for the
 * tags this driver passes in -- verify against callers.
 */
static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
					  unsigned int tag)
{
	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];

	return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));
}

/*
 * Reset the HBA (without sleeping)
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	The reset was successful.
 *	-1	The HBA Reset bit did not clear.
 */
static int mtip_hba_reset(struct driver_data *dd)
{
	unsigned long timeout;

	/* Set the reset bit */
	writel(HOST_RESET, dd->mmio + HOST_CTL);

	/* Flush */
	readl(dd->mmio + HOST_CTL);

	/*
	 * Spin for up to 10 seconds waiting for reset acknowledgement. Spec
	 * is 1 sec but in LUN failure conditions, up to 10 secs are required
	 */
	timeout = jiffies + msecs_to_jiffies(10000);
	do {
		mdelay(10);
		/* Bail out early if a device removal is in progress. */
		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
			return -1;

	} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
		 && time_before(jiffies, timeout));

	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
		return -1;

	return 0;
}

/*
 * Issue a command to the hardware.
 *
 * Set the appropriate bit in the s_active and Command Issue hardware
 * registers, causing hardware command processing to begin.
 *
 * @port	Pointer to the port structure.
 * @tag	The tag of the command to be issued.
 *
 * return value
 *	None
 */
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
	int group = tag >> 5;	/* 32 tags per slot group */

	/* guard SACT and CI registers */
	spin_lock(&port->cmd_issue_lock[group]);
	/* Mark the slot active first, then issue it. */
	writel((1 << MTIP_TAG_BIT(tag)),
			port->s_active[MTIP_TAG_INDEX(tag)]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->cmd_issue[MTIP_TAG_INDEX(tag)]);
	spin_unlock(&port->cmd_issue_lock[group]);
}

/*
 * Enable/disable the reception of FIS
 *
 * @port	Pointer to the port data structure
 * @enable	1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled
 */
static int mtip_enable_fis(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable FIS reception */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);

	/* Flush */
	readl(port->mmio + PORT_CMD);

	/* Report the state as it was before this call. */
	return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
}

/*
 * Enable/disable the DMA engine
 *
 * @port	Pointer to the port data structure
 * @enable	1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled.
 */
static int mtip_enable_engine(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable FIS reception */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);

	/* Flush */
	readl(port->mmio + PORT_CMD);
	/* Report the state as it was before this call. */
	return (((tmp & PORT_CMD_START) == PORT_CMD_START));
}

/*
 * Enables the port DMA engine and FIS reception.
 *
 * return value
 *	None
 */
static inline void mtip_start_port(struct mtip_port *port)
{
	/* Enable FIS reception */
	mtip_enable_fis(port, 1);

	/* Enable the DMA engine */
	mtip_enable_engine(port, 1);
}

/*
 * Deinitialize a port by disabling port interrupts, the DMA engine,
 * and FIS reception.
 *
 * @port Pointer to the port structure
 *
 * return value
 *	None
 */
static inline void mtip_deinit_port(struct mtip_port *port)
{
	/* Disable interrupts on this port */
	writel(0, port->mmio + PORT_IRQ_MASK);

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Disable FIS reception */
	mtip_enable_fis(port, 0);
}

/*
 * Initialize a port.
 *
 * This function deinitializes the port by calling mtip_deinit_port() and
 * then initializes it by setting the command header and RX FIS addresses,
 * clearing the SError register and any pending port interrupts before
 * re-enabling the default set of port interrupts.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	None
 */
static void mtip_init_port(struct mtip_port *port)
{
	int i;
	mtip_deinit_port(port);

	/* Program the command list base and FIS base addresses */
	if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
		/* 64-bit capable HBA: program the upper halves as well. */
		writel((port->command_list_dma >> 16) >> 16,
			 port->mmio + PORT_LST_ADDR_HI);
		writel((port->rxfis_dma >> 16) >> 16,
			 port->mmio + PORT_FIS_ADDR_HI);
		set_bit(MTIP_PF_HOST_CAP_64, &port->flags);
	}

	writel(port->command_list_dma & 0xFFFFFFFF,
			port->mmio + PORT_LST_ADDR);
	writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);

	/* Clear SError (write-1-to-clear) */
	writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);

	/* reset the completed registers.*/
	for (i = 0; i < port->dd->slot_groups; i++)
		writel(0xFFFFFFFF, port->completed[i]);

	/* Clear any pending interrupts for this port */
	writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);

	/* Clear any pending interrupts on the HBA. */
	writel(readl(port->dd->mmio + HOST_IRQ_STAT),
					port->dd->mmio + HOST_IRQ_STAT);

	/* Enable port interrupts */
	writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
}

/*
 * Restart a port
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	None
 */
static void mtip_restart_port(struct mtip_port *port)
{
	unsigned long timeout;

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
	timeout = jiffies + msecs_to_jiffies(500);
	while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
		 && time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/*
	 * Chip quirk: escalate to hba reset if
	 * PxCMD.CR not clear after 500 ms
	 */
	if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
		dev_warn(&port->dd->pdev->dev,
			"PxCMD.CR not clear, escalating reset\n");

		if (mtip_hba_reset(port->dd))
			dev_err(&port->dd->pdev->dev,
				"HBA reset escalation failed.\n");

		/* 30 ms delay before com reset to quiesce chip */
		mdelay(30);
	}

	dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");

	/* Set PxSCTL.DET (bit 0) to start device detection/COMRESET */
	writel(readl(port->mmio + PORT_SCR_CTL) |
			 1, port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 1 ms to quiesce chip function */
	timeout = jiffies + msecs_to_jiffies(1);
	while (time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/* Clear PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
			 port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
	timeout = jiffies + msecs_to_jiffies(500);
	while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
			 && time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
		dev_warn(&port->dd->pdev->dev,
			"COM reset failed\n");

	/* Bring the port back up regardless; the caller handles retries. */
	mtip_init_port(port);
	mtip_start_port(port);

}

/*
 * Reset the HBA, then re-initialize and restart the port.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	Success.
 *	-EFAULT	The HBA reset did not complete.
 */
static int mtip_device_reset(struct driver_data *dd)
{
	int rv = 0;

	if (mtip_check_surprise_removal(dd->pdev))
		return 0;

	if (mtip_hba_reset(dd) < 0)
		rv = -EFAULT;

	mdelay(1);
	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Enable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
					dd->mmio + HOST_CTL);
	return rv;
}

/*
 * Helper function for tag logging
 */
static void print_tags(struct driver_data *dd,
			char *msg,
			unsigned long *tagbits,
			int cnt)
{
	unsigned char tagmap[128];
	int group, tagmap_len = 0;

	memset(tagmap, 0, sizeof(tagmap));
	/* Highest slot group first, one 64-bit hex word per group. */
	for (group = SLOTBITS_IN_LONGS; group > 0; group--)
		tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
						tagbits[group-1]);
	dev_warn(&dd->pdev->dev,
			"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}

static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors);
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
				struct smart_attr *attrib);

/* Record the completion status and hand the request back to blk-mq. */
static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	cmd->status = status;
	blk_mq_complete_request(req);
}

/*
 * Handle an error.
 *
 * @dd Pointer to the DRIVER_DATA structure.
 *
 * return value
 *	None
 */
static void mtip_handle_tfe(struct driver_data *dd)
{
	int group, tag, bit, reissue, rv;
	struct mtip_port *port;
	struct mtip_cmd *cmd;
	u32 completed;
	struct host_to_dev_fis *fis;
	unsigned long tagaccum[SLOTBITS_IN_LONGS];
	unsigned int cmd_cnt = 0;
	unsigned char *buf;
	char *fail_reason = NULL;
	int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;

	dev_warn(&dd->pdev->dev, "Taskfile error\n");

	port = dd->port;

	/* A TFE during an internal command fails only that command. */
	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
		mtip_complete_command(cmd, BLK_STS_IOERR);
		return;
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		completed = readl(port->completed[group]);

		dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);

		/* clear completed status register in the hardware.*/
		writel(completed, port->completed[group]);

		/* Process successfully completed commands */
		for (bit = 0; bit < 32 && completed; bit++) {
			if (!(completed & (1<<bit)))
				continue;
			tag = (group << 5) + bit;

			/* Skip the internal command slot */
			if (tag == MTIP_TAG_INTERNAL)
				continue;

			cmd = mtip_cmd_from_tag(dd, tag);
			mtip_complete_command(cmd, 0);
			set_bit(tag, tagaccum);
			cmd_cnt++;
		}
	}

	print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);

	/* Restart the port */
	mdelay(20);
	mtip_restart_port(port);

	/* Trying to determine the cause of the error */
	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
				dd->port->log_buf,
				dd->port->log_buf_dma, 1);
	if (rv) {
		dev_warn(&dd->pdev->dev,
			"Error in READ LOG EXT (10h) command\n");
		/* non-critical error, don't fail the load */
	} else {
		buf = (unsigned char *)dd->port->log_buf;
		/* Byte 259 bit 0: device is write protected. */
		if (buf[259] & 0x1) {
			dev_info(&dd->pdev->dev,
				"Write protect bit is set.\n");
			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
			fail_all_ncq_write = 1;
			fail_reason = "write protect";
		}
		/* Byte 288: vendor-specific drive condition code. */
		if (buf[288] == 0xF7) {
			dev_info(&dd->pdev->dev,
				"Exceeded Tmax, drive in thermal shutdown.\n");
			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
			fail_all_ncq_cmds = 1;
			fail_reason = "thermal shutdown";
		}
		if (buf[288] == 0xBF) {
			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
			dev_info(&dd->pdev->dev,
				"Drive indicates rebuild has failed. Secure erase required.\n");
			fail_all_ncq_cmds = 1;
			fail_reason = "rebuild failed";
		}
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		for (bit = 0; bit < 32; bit++) {
			reissue = 1;
			tag = (group << 5) + bit;
			cmd = mtip_cmd_from_tag(dd, tag);

			fis = (struct host_to_dev_fis *)cmd->command;

			/* Should re-issue? */
			if (tag == MTIP_TAG_INTERNAL ||
			    fis->command == ATA_CMD_SET_FEATURES)
				reissue = 0;
			else {
				if (fail_all_ncq_cmds ||
					(fail_all_ncq_write &&
					fis->command == ATA_CMD_FPDMA_WRITE)) {
					dev_warn(&dd->pdev->dev,
						" Fail: %s w/tag %d [%s].\n",
						fis->command == ATA_CMD_FPDMA_WRITE ?
							"write" : "read",
						tag,
						fail_reason != NULL ?
							fail_reason : "unknown");
					mtip_complete_command(cmd, BLK_STS_MEDIUM);
					continue;
				}
			}

			/*
			 * First check if this command has
			 * exceeded its retries.
			 */
			if (reissue && (cmd->retries-- > 0)) {

				set_bit(tag, tagaccum);

				/* Re-issue the command. */
				mtip_issue_ncq_command(port, tag);

				continue;
			}

			/* Retire a command that will not be reissued */
			dev_warn(&port->dd->pdev->dev,
				"retiring tag %d\n", tag);

			mtip_complete_command(cmd, BLK_STS_IOERR);
		}
	}
	/*
	 * NOTE(review): cmd_cnt here is still the count from the
	 * "completed" pass above, not the number reissued -- confirm
	 * whether this is intentional.
	 */
	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
}

/*
 * Handle a set device bits interrupt
 */
static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
							u32 completed)
{
	struct driver_data *dd = port->dd;
	int tag, bit;
	struct mtip_cmd *command;

	if (!completed) {
		WARN_ON_ONCE(!completed);
		return;
	}
	/* clear completed status register in the hardware.*/
	writel(completed, port->completed[group]);

	/* Process completed commands. */
	for (bit = 0; (bit < 32) && completed; bit++) {
		if (completed & 0x01) {
			tag = (group << 5) | bit;

			/*
			 * skip internal command slot.
			 * NOTE(review): this continue skips the
			 * "completed >>= 1" below, so subsequent bits in
			 * this group would be mis-aligned if the internal
			 * tag ever signalled via SDB -- verify it cannot.
			 */
			if (unlikely(tag == MTIP_TAG_INTERNAL))
				continue;

			command = mtip_cmd_from_tag(dd, tag);
			mtip_complete_command(command, 0);
		}
		completed >>= 1;
	}

	/* If last, re-enable interrupts */
	if (atomic_dec_return(&dd->irq_workers_active) == 0)
		writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
}

/*
 * Process legacy pio and d2h interrupts
 */
static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
{
	struct mtip_port *port = dd->port;
	struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && cmd) {
		int group = MTIP_TAG_INDEX(MTIP_TAG_INTERNAL);
		int status = readl(port->cmd_issue[group]);

		/* The internal command is done once its CI bit clears. */
		if (!(status & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))))
			mtip_complete_command(cmd, 0);
	}
}

/*
 * Demux and handle errors
 */
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{
	if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.x\n");
		writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.n\n");
		writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
		dev_warn(&dd->pdev->dev,
			"Port stat errors %x unhandled\n",
			(port_stat & ~PORT_IRQ_HANDLED));
		if (mtip_check_surprise_removal(dd->pdev))
			return;
	}
	if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
		/* Defer taskfile/interface error recovery to the svc thread. */
		set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
		wake_up_interruptible(&dd->port->svc_wait);
	}
}

static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
{
	struct driver_data *dd = (struct driver_data *) data;
	struct mtip_port *port = dd->port;
	u32 hba_stat, port_stat;
	int rv = IRQ_NONE;
	int do_irq_enable = 1, i, workers;
	struct mtip_work *twork;

	hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
	if (hba_stat) {
		rv = IRQ_HANDLED;

		/* Acknowledge the interrupt status on the port.*/
		port_stat = readl(port->mmio + PORT_IRQ_STAT);
		if (unlikely(port_stat == 0xFFFFFFFF)) {
			/* All-ones read: the device has dropped off the bus. */
			mtip_check_surprise_removal(dd->pdev);
			return IRQ_HANDLED;
		}
		writel(port_stat, port->mmio + PORT_IRQ_STAT);

		/* Demux port status */
		if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
			do_irq_enable = 0;
			WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);

			/*
			 * Snapshot the completed registers for every slot
			 * group and count how many groups need servicing.
			 * (Group 0 is handled locally below; groups >= 1
			 * are pushed to workqueue workers.)
			 */
			for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
									i++) {
				twork = &dd->work[i];
				twork->completed = readl(port->completed[i]);
				if (twork->completed)
					workers++;
			}

			atomic_set(&dd->irq_workers_active, workers);
			if (workers) {
				for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
					twork = &dd->work[i];
					if (twork->completed)
						queue_work_on(
							twork->cpu_binding,
							dd->isr_workq,
							&twork->work);
				}

				if (likely(dd->work[0].completed))
					mtip_workq_sdbfx(port, 0,
							dd->work[0].completed);

			} else {
				/*
				 * Chip quirk: SDB interrupt but nothing
				 * to complete
				 */
				do_irq_enable = 1;
			}
		}

		if (unlikely(port_stat & PORT_IRQ_ERR)) {
			if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
				/* don't proceed further */
				return IRQ_HANDLED;
			}
			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
							&dd->dd_flag))
				return rv;

			mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
		}

		if (unlikely(port_stat & PORT_IRQ_LEGACY))
			mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
	}

	/* acknowledge interrupt */
	if (unlikely(do_irq_enable))
		writel(hba_stat, dd->mmio + HOST_IRQ_STAT);

	return rv;
}

/*
 * HBA interrupt subroutine.
 *
 * @irq		IRQ number.
 * @instance	Pointer to the driver data structure.
 *
 * return value
 *	IRQ_HANDLED	A HBA interrupt was pending and handled.
 *	IRQ_NONE	This interrupt was not for the HBA.
 */
static irqreturn_t mtip_irq_handler(int irq, void *instance)
{
	struct driver_data *dd = instance;

	return mtip_handle_irq(dd);
}

/* Issue a non-NCQ command by writing its tag bit to the CI register only. */
static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
{
	writel(1 << MTIP_TAG_BIT(tag), port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}

/*
 * Decide whether NCQ must stay paused after an internal command.
 *
 * Returns true when the command (secure erase prep, firmware download)
 * requires the port to stay quiesced; starts the pause timer in that
 * case. After a completed secure erase or low-level format the port is
 * restarted instead and false is returned.
 */
static bool mtip_pause_ncq(struct mtip_port *port,
				struct host_to_dev_fis *fis)
{
	unsigned long task_file_data;

	/* If the device still reports busy/error (bit 0), do not pause. */
	task_file_data = readl(port->mmio+PORT_TFDATA);
	if ((task_file_data & 1))
		return false;

	if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
					(fis->features == 0x03)) {
		set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
		((fis->command == 0xFC) &&
			(fis->features == 0x27 || fis->features == 0x72 ||
			 fis->features == 0x62 || fis->features == 0x26))) {
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
		clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
		/* Com reset after secure erase or lowlevel format */
		mtip_restart_port(port);
		clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
		return false;
	}

	return false;
}

/* Return true while any NCQ command (other than slot 0 bit 0) is active. */
static bool mtip_commands_active(struct mtip_port *port)
{
	unsigned int active;
	unsigned int n;

	/*
	 * Ignore s_active bit 0 of array element 0.
	 * This bit will always be set
	 */
	active = readl(port->s_active[0]) & 0xFFFFFFFE;
	for (n = 1; n < port->dd->slot_groups; n++)
		active |= readl(port->s_active[n]);

	return active != 0;
}

/*
 * Wait for port to quiesce
 *
 * @port	Pointer to port data structure
 * @timeout	Max duration to wait (ms)
 *
 * return value
 *	0	Success
 *	-EBUSY	Commands still active
 *	-EFAULT	Device was surprise-removed while waiting
 */
static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
{
	unsigned long to;
	bool active = true;

	blk_mq_quiesce_queue(port->dd->queue);

	to = jiffies + msecs_to_jiffies(timeout);
	do {
		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
			msleep(20);
			continue; /* svc thd is actively issuing commands */
		}

		msleep(100);

		if (mtip_check_surprise_removal(port->dd->pdev))
			goto err_fault;

		active = mtip_commands_active(port);
		if (!active)
			break;
	} while (time_before(jiffies, to));

	blk_mq_unquiesce_queue(port->dd->queue);
	return active ? -EBUSY : 0;
err_fault:
	blk_mq_unquiesce_queue(port->dd->queue);
	return -EFAULT;
}

/* Parameters of a driver-internal command, attached to its request pdu. */
struct mtip_int_cmd {
	int fis_len;
	dma_addr_t buffer;
	int buf_len;
	u32 opts;
};

/*
 * Execute an internal command and wait for the completion.
 *
 * @port	Pointer to the port data structure.
 * @fis	Pointer to the FIS that describes the command.
 * @fis_len	Length in WORDS of the FIS.
 * @buffer	DMA accessible for command data.
 * @buf_len	Length, in bytes, of the data buffer.
 * @opts	Command header options, excluding the FIS length
 *		and the number of PRD entries.
 * @timeout	Time in ms to wait for the command to complete.
 *
 * return value
 *	0	Command completed successfully.
 *	-EFAULT	The buffer address is not correctly aligned.
 *	-EBUSY	Internal command or other IO in progress.
 *	-EAGAIN	Time out waiting for command to complete.
 */
static int mtip_exec_internal_command(struct mtip_port *port,
					struct host_to_dev_fis *fis,
					int fis_len,
					dma_addr_t buffer,
					int buf_len,
					u32 opts,
					unsigned long timeout)
{
	struct mtip_cmd *int_cmd;
	struct driver_data *dd = port->dd;
	struct request *rq;
	struct mtip_int_cmd icmd = {
		.fis_len = fis_len,
		.buffer = buffer,
		.buf_len = buf_len,
		.opts = opts
	};
	int rv = 0;

	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
	if (buffer & 0x00000007) {
		dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
		return -EFAULT;
	}

	if (mtip_check_surprise_removal(dd->pdev))
		return -EFAULT;

	/* Internal commands use the reserved blk-mq tag. */
	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq)) {
		dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
		return -EFAULT;
	}

	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);

	if (fis->command == ATA_CMD_SEC_ERASE_PREP)
		set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);

	clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);

	if (fis->command != ATA_CMD_STANDBYNOW1) {
		/* wait for io to complete if non atomic */
		if (mtip_quiesce_io(port, MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
			dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n");
			blk_mq_free_request(rq);
			clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
			wake_up_interruptible(&port->svc_wait);
			return -EBUSY;
		}
	}

	/* Copy the command to the command table */
	int_cmd = blk_mq_rq_to_pdu(rq);
	/* icmd lives on this stack frame; valid only for the synchronous
	 * blk_execute_rq() below. */
	int_cmd->icmd = &icmd;
	memcpy(int_cmd->command, fis, fis_len*4);

	rq->timeout = timeout;

	/* insert request and run queue */
	blk_execute_rq(rq->q, NULL, rq, true);

	if (int_cmd->status) {
		dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
				fis->command, int_cmd->status);
		rv = -EIO;

		if (mtip_check_surprise_removal(dd->pdev) ||
			test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
					&dd->dd_flag)) {
			dev_err(&dd->pdev->dev,
				"Internal command [%02X] wait returned due to SR\n",
				fis->command);
			rv = -ENXIO;
			goto exec_ic_exit;
		}
		mtip_device_reset(dd); /* recover from timeout issue */
		rv = -EAGAIN;
		goto exec_ic_exit;
	}

	/* CI bit still set for the internal slot means the command hung. */
	if (readl(port->cmd_issue[MTIP_TAG_INDEX(MTIP_TAG_INTERNAL)])
			& (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))) {
		rv = -ENXIO;
		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
			mtip_device_reset(dd);
			rv = -EAGAIN;
		}
	}
exec_ic_exit:
	/* Clear the allocated and active bits for the internal command. */
	blk_mq_free_request(rq);
	clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
	if (rv >= 0 && mtip_pause_ncq(port, fis)) {
		/* NCQ paused */
		return rv;
	}
	wake_up_interruptible(&port->svc_wait);

	return rv;
}

/*
 * Byte-swap ATA ID strings.
 *
 * ATA identify data contains strings in byte-swapped 16-bit words.
 * They must be swapped (on all architectures) to be usable as C strings.
 * This function swaps bytes in-place.
1074 * 1075 * @buf The buffer location of the string 1076 * @len The number of bytes to swap 1077 * 1078 * return value 1079 * None 1080 */ 1081 static inline void ata_swap_string(u16 *buf, unsigned int len) 1082 { 1083 int i; 1084 for (i = 0; i < (len/2); i++) 1085 be16_to_cpus(&buf[i]); 1086 } 1087 1088 static void mtip_set_timeout(struct driver_data *dd, 1089 struct host_to_dev_fis *fis, 1090 unsigned int *timeout, u8 erasemode) 1091 { 1092 switch (fis->command) { 1093 case ATA_CMD_DOWNLOAD_MICRO: 1094 *timeout = 120000; /* 2 minutes */ 1095 break; 1096 case ATA_CMD_SEC_ERASE_UNIT: 1097 case 0xFC: 1098 if (erasemode) 1099 *timeout = ((*(dd->port->identify + 90) * 2) * 60000); 1100 else 1101 *timeout = ((*(dd->port->identify + 89) * 2) * 60000); 1102 break; 1103 case ATA_CMD_STANDBYNOW1: 1104 *timeout = 120000; /* 2 minutes */ 1105 break; 1106 case 0xF7: 1107 case 0xFA: 1108 *timeout = 60000; /* 60 seconds */ 1109 break; 1110 case ATA_CMD_SMART: 1111 *timeout = 15000; /* 15 seconds */ 1112 break; 1113 default: 1114 *timeout = MTIP_IOCTL_CMD_TIMEOUT_MS; 1115 break; 1116 } 1117 } 1118 1119 /* 1120 * Request the device identity information. 1121 * 1122 * If a user space buffer is not specified, i.e. is NULL, the 1123 * identify information is still read from the drive and placed 1124 * into the identify data buffer (@e port->identify) in the 1125 * port data structure. 1126 * When the identify buffer contains valid identify information @e 1127 * port->identify_valid is non-zero. 1128 * 1129 * @port Pointer to the port structure. 1130 * @user_buffer A user space buffer where the identify data should be 1131 * copied. 1132 * 1133 * return value 1134 * 0 Command completed successfully. 1135 * -EFAULT An error occurred while coping data to the user buffer. 1136 * -1 Command failed. 
 */
static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
{
	int rv = 0;
	struct host_to_dev_fis fis;

	/* Bail out early if the device is being surprise-removed. */
	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return -EFAULT;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;		/* Register - Host to Device FIS */
	fis.opts	= 1 << 7;	/* Command bit */
	fis.command	= ATA_CMD_ID_ATA;

	/* Set the identify information as invalid. */
	port->identify_valid = 0;

	/* Clear the identify information. */
	memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				port->identify_dma,
				sizeof(u16) * ATA_ID_WORDS,
				0,
				MTIP_INT_CMD_TIMEOUT_MS)
				< 0) {
		rv = -1;
		goto out;
	}

	/*
	 * Perform any necessary byte-swapping.  Yes, the kernel does in fact
	 * perform field-sensitive swapping on the string fields.
	 * See the kernel use of ata_id_string() for proof of this.
	 */
#ifdef __LITTLE_ENDIAN
	ata_swap_string(port->identify + 27, 40);  /* model string*/
	ata_swap_string(port->identify + 23, 8);   /* firmware string*/
	ata_swap_string(port->identify + 10, 20);  /* serial# string*/
#else
	{
		int i;
		/* Big-endian: swap every identify word, not just strings. */
		for (i = 0; i < ATA_ID_WORDS; i++)
			port->identify[i] = le16_to_cpu(port->identify[i]);
	}
#endif

	/* Check security locked state (identify word 128, bit 2). */
	if (port->identify[128] & 0x4)
		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
	else
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);

#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
	/* Demux ID.DRAT & ID.RZAT to determine trim support */
	if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
		port->dd->trim_supp = true;
	else
#endif
		port->dd->trim_supp = false;

	/* Set the identify buffer as valid. */
	port->identify_valid = 1;

	if (user_buffer) {
		if (copy_to_user(
			user_buffer,
			port->identify,
			ATA_ID_WORDS * sizeof(u16))) {
			rv = -EFAULT;
			goto out;
		}
	}

out:
	return rv;
}

/*
 * Issue a standby immediate command to the device.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	0	Command was executed successfully.
 *	-1	An error occurred while executing the command.
 */
static int mtip_standby_immediate(struct mtip_port *port)
{
	int rv;
	struct host_to_dev_fis	fis;
	unsigned long start;
	unsigned int timeout;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_STANDBYNOW1;

	mtip_set_timeout(port->dd, &fis, &timeout, 0);

	start = jiffies;
	rv = mtip_exec_internal_command(port,
					&fis,
					5,
					0,
					0,
					0,
					timeout);
	dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
			jiffies_to_msecs(jiffies - start));
	if (rv)
		dev_warn(&port->dd->pdev->dev,
			"STANDBY IMMEDIATE command failed.\n");

	return rv;
}

/*
 * Issue a READ LOG EXT command to the device.
 *
 * @port	pointer to the port structure.
 * @page	page number to fetch
 * @buffer	pointer to buffer
 * @buffer_dma	dma address corresponding to @buffer
 * @sectors	page length to fetch, in sectors
 *
 * return value
 *	@rv	return value from mtip_exec_internal_command()
 */
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors)
{
	struct host_to_dev_fis fis;

	/*
	 * Build the READ LOG EXT FIS; the sector count is split across the
	 * low and extended count bytes.
	 */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_READ_LOG_EXT;
	fis.sect_count	= sectors & 0xFF;
	fis.sect_cnt_ex	= (sectors >> 8) & 0xFF;
	fis.lba_low	= page;
	fis.lba_mid	= 0;
	fis.device	= ATA_DEVICE_OBS;

	/* Clear the destination before the device DMA-writes into it. */
	memset(buffer, 0, sectors * ATA_SECT_SIZE);

	return mtip_exec_internal_command(port,
					&fis,
					5,
					buffer_dma,
					sectors * ATA_SECT_SIZE,
					0,
					MTIP_INT_CMD_TIMEOUT_MS);
}

/*
 * Issue a SMART READ DATA command to the device.
 *
 * @port	pointer to the port structure.
 * @buffer	pointer to buffer
 * @buffer_dma	dma address corresponding to @buffer
 *
 * return value
 *	@rv	return value from mtip_exec_internal_command()
 */
static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
					dma_addr_t buffer_dma)
{
	struct host_to_dev_fis fis;

	/*
	 * SMART READ DATA: feature 0xD0 with the mandatory 0x4F/0xC2
	 * signature in the LBA mid/high registers.
	 */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_SMART;
	fis.features	= 0xD0;
	fis.sect_count	= 1;
	fis.lba_mid	= 0x4F;
	fis.lba_hi	= 0xC2;
	fis.device	= ATA_DEVICE_OBS;

	return mtip_exec_internal_command(port,
					&fis,
					5,
					buffer_dma,
					ATA_SECT_SIZE,
					0,
					15000);
}

/*
 * Get the value of a smart attribute
 *
 * @port	pointer to the port structure
 * @id		attribute number
 * @attrib	pointer to return attrib information corresponding to @id
 *
 * return value
 *	-EINVAL	NULL buffer passed or unsupported attribute @id.
1341 * -EPERM Identify data not valid, SMART not supported or not enabled 1342 */ 1343 static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, 1344 struct smart_attr *attrib) 1345 { 1346 int rv, i; 1347 struct smart_attr *pattr; 1348 1349 if (!attrib) 1350 return -EINVAL; 1351 1352 if (!port->identify_valid) { 1353 dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n"); 1354 return -EPERM; 1355 } 1356 if (!(port->identify[82] & 0x1)) { 1357 dev_warn(&port->dd->pdev->dev, "SMART not supported\n"); 1358 return -EPERM; 1359 } 1360 if (!(port->identify[85] & 0x1)) { 1361 dev_warn(&port->dd->pdev->dev, "SMART not enabled\n"); 1362 return -EPERM; 1363 } 1364 1365 memset(port->smart_buf, 0, ATA_SECT_SIZE); 1366 rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma); 1367 if (rv) { 1368 dev_warn(&port->dd->pdev->dev, "Failed to ge SMART data\n"); 1369 return rv; 1370 } 1371 1372 pattr = (struct smart_attr *)(port->smart_buf + 2); 1373 for (i = 0; i < 29; i++, pattr++) 1374 if (pattr->attr_id == id) { 1375 memcpy(attrib, pattr, sizeof(struct smart_attr)); 1376 break; 1377 } 1378 1379 if (i == 29) { 1380 dev_warn(&port->dd->pdev->dev, 1381 "Query for invalid SMART attribute ID\n"); 1382 rv = -EINVAL; 1383 } 1384 1385 return rv; 1386 } 1387 1388 /* 1389 * Trim unused sectors 1390 * 1391 * @dd pointer to driver_data structure 1392 * @lba starting lba 1393 * @len # of 512b sectors to trim 1394 */ 1395 static blk_status_t mtip_send_trim(struct driver_data *dd, unsigned int lba, 1396 unsigned int len) 1397 { 1398 u64 tlba, tlen, sect_left; 1399 struct mtip_trim_entry *buf; 1400 dma_addr_t dma_addr; 1401 struct host_to_dev_fis fis; 1402 blk_status_t ret = BLK_STS_OK; 1403 int i; 1404 1405 if (!len || dd->trim_supp == false) 1406 return BLK_STS_IOERR; 1407 1408 /* Trim request too big */ 1409 WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES)); 1410 1411 /* Trim request not aligned on 4k boundary */ 1412 WARN_ON(len % 8 != 0); 1413 
1414 /* Warn if vu_trim structure is too big */ 1415 WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE); 1416 1417 /* Allocate a DMA buffer for the trim structure */ 1418 buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr, 1419 GFP_KERNEL); 1420 if (!buf) 1421 return BLK_STS_RESOURCE; 1422 memset(buf, 0, ATA_SECT_SIZE); 1423 1424 for (i = 0, sect_left = len, tlba = lba; 1425 i < MTIP_MAX_TRIM_ENTRIES && sect_left; 1426 i++) { 1427 tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ? 1428 MTIP_MAX_TRIM_ENTRY_LEN : 1429 sect_left); 1430 buf[i].lba = cpu_to_le32(tlba); 1431 buf[i].range = cpu_to_le16(tlen); 1432 tlba += tlen; 1433 sect_left -= tlen; 1434 } 1435 WARN_ON(sect_left != 0); 1436 1437 /* Build the fis */ 1438 memset(&fis, 0, sizeof(struct host_to_dev_fis)); 1439 fis.type = 0x27; 1440 fis.opts = 1 << 7; 1441 fis.command = 0xfb; 1442 fis.features = 0x60; 1443 fis.sect_count = 1; 1444 fis.device = ATA_DEVICE_OBS; 1445 1446 if (mtip_exec_internal_command(dd->port, 1447 &fis, 1448 5, 1449 dma_addr, 1450 ATA_SECT_SIZE, 1451 0, 1452 MTIP_TRIM_TIMEOUT_MS) < 0) 1453 ret = BLK_STS_IOERR; 1454 1455 dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr); 1456 return ret; 1457 } 1458 1459 /* 1460 * Get the drive capacity. 1461 * 1462 * @dd Pointer to the device data structure. 1463 * @sectors Pointer to the variable that will receive the sector count. 1464 * 1465 * return value 1466 * 1 Capacity was returned successfully. 1467 * 0 The identify information is invalid. 1468 */ 1469 static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) 1470 { 1471 struct mtip_port *port = dd->port; 1472 u64 total, raw0, raw1, raw2, raw3; 1473 raw0 = port->identify[100]; 1474 raw1 = port->identify[101]; 1475 raw2 = port->identify[102]; 1476 raw3 = port->identify[103]; 1477 total = raw0 | raw1<<16 | raw2<<32 | raw3<<48; 1478 *sectors = total; 1479 return (bool) !!port->identify_valid; 1480 } 1481 1482 /* 1483 * Display the identify command data. 
1484 * 1485 * @port Pointer to the port data structure. 1486 * 1487 * return value 1488 * None 1489 */ 1490 static void mtip_dump_identify(struct mtip_port *port) 1491 { 1492 sector_t sectors; 1493 unsigned short revid; 1494 char cbuf[42]; 1495 1496 if (!port->identify_valid) 1497 return; 1498 1499 strlcpy(cbuf, (char *)(port->identify+10), 21); 1500 dev_info(&port->dd->pdev->dev, 1501 "Serial No.: %s\n", cbuf); 1502 1503 strlcpy(cbuf, (char *)(port->identify+23), 9); 1504 dev_info(&port->dd->pdev->dev, 1505 "Firmware Ver.: %s\n", cbuf); 1506 1507 strlcpy(cbuf, (char *)(port->identify+27), 41); 1508 dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf); 1509 1510 dev_info(&port->dd->pdev->dev, "Security: %04x %s\n", 1511 port->identify[128], 1512 port->identify[128] & 0x4 ? "(LOCKED)" : ""); 1513 1514 if (mtip_hw_get_capacity(port->dd, §ors)) 1515 dev_info(&port->dd->pdev->dev, 1516 "Capacity: %llu sectors (%llu MB)\n", 1517 (u64)sectors, 1518 ((u64)sectors) * ATA_SECT_SIZE >> 20); 1519 1520 pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid); 1521 switch (revid & 0xFF) { 1522 case 0x1: 1523 strlcpy(cbuf, "A0", 3); 1524 break; 1525 case 0x3: 1526 strlcpy(cbuf, "A2", 3); 1527 break; 1528 default: 1529 strlcpy(cbuf, "?", 2); 1530 break; 1531 } 1532 dev_info(&port->dd->pdev->dev, 1533 "Card Type: %s\n", cbuf); 1534 } 1535 1536 /* 1537 * Map the commands scatter list into the command table. 1538 * 1539 * @command Pointer to the command. 1540 * @nents Number of scatter list entries. 
 *
 * return value
 *	None
 */
static inline void fill_command_sg(struct driver_data *dd,
				struct mtip_cmd *command,
				int nents)
{
	int n;
	unsigned int dma_len;
	struct mtip_cmd_sg *command_sg;
	struct scatterlist *sg;

	/* The SG entries follow the command-table header. */
	command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;

	for_each_sg(command->sg, sg, nents, n) {
		dma_len = sg_dma_len(sg);
		/* The hardware DMA-length field is 22 bits wide (4 MB max). */
		if (dma_len > 0x400000)
			dev_err(&dd->pdev->dev,
				"DMA segment length truncated\n");
		/* info holds (byte count - 1), masked to 22 bits. */
		command_sg->info = cpu_to_le32((dma_len-1) & 0x3FFFFF);
		command_sg->dba	= cpu_to_le32(sg_dma_address(sg));
		command_sg->dba_upper =
			cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
		command_sg++;
	}
}

/*
 * @brief Execute a drive command (non-data taskfile).
 *
 * @param port Pointer to the port data structure.
 * @param command Taskfile register values in/out: cmd, feat, nsect,
 *                sect, lcyl, hcyl, select.
 *
 * return value 0 The command completed successfully.
 * return value -1 An error occurred while executing the command.
 */
static int exec_drive_task(struct mtip_port *port, u8 *command)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
	unsigned int to;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= command[0];
	fis.features	= command[1];
	fis.sect_count	= command[2];
	fis.sector	= command[3];
	fis.cyl_low	= command[4];
	fis.cyl_hi	= command[5];
	fis.device	= command[6] & ~0x10; /* Clear the dev bit*/

	mtip_set_timeout(port->dd, &fis, &to, 0);

	dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
		__func__,
		command[0],
		command[1],
		command[2],
		command[3],
		command[4],
		command[5],
		command[6]);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				 &fis,
				 5,
				 0,
				 0,
				 0,
				 to) < 0) {
		return -1;
	}

	/* Collect the completion status from the D2H reply FIS. */
	command[0] = reply->command; /* Status*/
	command[1] = reply->features; /* Error*/
	command[4] = reply->cyl_low;
	command[5] = reply->cyl_hi;

	dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
		__func__,
		command[0],
		command[1],
		command[4],
		command[5]);

	return 0;
}

/*
 * @brief Execute a drive command.
 *
 * @param port Pointer to the port data structure.
 * @param command Pointer to the user specified command parameters.
 * @param user_buffer Pointer to the user space buffer where read sector
 *                   data should be copied.
 *
 * return value 0 The command completed successfully.
 * return value -EFAULT An error occurred while copying the completion
 *                 data to the user space buffer.
 * return value -1 An error occurred while executing the command.
 */
static int exec_drive_command(struct mtip_port *port, u8 *command,
				void __user *user_buffer)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply;
	u8 *buf = NULL;
	dma_addr_t dma_addr = 0;
	/* command[3] is the transfer size in sectors (HDIO_DRIVE_CMD ABI). */
	int rv = 0, xfer_sz = command[3];
	unsigned int to;

	if (xfer_sz) {
		if (!user_buffer)
			return -EFAULT;

		buf = dmam_alloc_coherent(&port->dd->pdev->dev,
				ATA_SECT_SIZE * xfer_sz,
				&dma_addr,
				GFP_KERNEL);
		if (!buf) {
			dev_err(&port->dd->pdev->dev,
				"Memory allocation failed (%d bytes)\n",
				ATA_SECT_SIZE * xfer_sz);
			return -ENOMEM;
		}
		memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
	}

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= command[0];
	fis.features	= command[2];
	fis.sect_count	= command[3];
	if (fis.command == ATA_CMD_SMART) {
		/* SMART needs the 0x4F/0xC2 signature in cyl low/high. */
		fis.sector	= command[1];
		fis.cyl_low	= 0x4F;
		fis.cyl_hi	= 0xC2;
	}

	mtip_set_timeout(port->dd, &fis, &to, 0);

	/* PIO data-in commands complete with a PIO Setup FIS. */
	if (xfer_sz)
		reply = (port->rxfis + RX_FIS_PIO_SETUP);
	else
		reply = (port->rxfis + RX_FIS_D2H_REG);

	dbg_printk(MTIP_DRV_NAME
		" %s: User Command: cmd %x, sect %x, "
		"feat %x, sectcnt %x\n",
		__func__,
		command[0],
		command[1],
		command[2],
		command[3]);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				 5,
				 (xfer_sz ? dma_addr : 0),
				 (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
				 0,
				 to)
				 < 0) {
		rv = -EFAULT;
		goto exit_drive_command;
	}

	/* Collect the completion status. */
	command[0] = reply->command; /* Status*/
	command[1] = reply->features; /* Error*/
	command[2] = reply->sect_count;

	dbg_printk(MTIP_DRV_NAME
		" %s: Completion Status: stat %x, "
		"err %x, nsect %x\n",
		__func__,
		command[0],
		command[1],
		command[2]);

	if (xfer_sz) {
		if (copy_to_user(user_buffer,
				 buf,
				 ATA_SECT_SIZE * command[3])) {
			rv = -EFAULT;
			goto exit_drive_command;
		}
	}
exit_drive_command:
	if (buf)
		dmam_free_coherent(&port->dd->pdev->dev,
				ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
	return rv;
}

/*
 * Indicates whether a command has a single sector payload.
 *
 * @command passed to the device to perform the certain event.
 * @features passed to the device to perform the certain event.
 *
 * return value
 *	1	command is one that always has a single sector payload,
 *		regardless of the value in the Sector Count field.
 *	0	otherwise
 *
 */
static unsigned int implicit_sector(unsigned char command,
				    unsigned char features)
{
	unsigned int rv = 0;

	/* list of commands that have an implicit sector count of 1 */
	switch (command) {
	case ATA_CMD_SEC_SET_PASS:
	case ATA_CMD_SEC_UNLOCK:
	case ATA_CMD_SEC_ERASE_PREP:
	case ATA_CMD_SEC_ERASE_UNIT:
	case ATA_CMD_SEC_FREEZE_LOCK:
	case ATA_CMD_SEC_DISABLE_PASS:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_PMP_WRITE:
		rv = 1;
		break;
	case ATA_CMD_SET_MAX:
		if (features == ATA_SET_MAX_UNLOCK)
			rv = 1;
		break;
	case ATA_CMD_SMART:
		if ((features == ATA_SMART_READ_VALUES) ||
				(features == ATA_SMART_READ_THRESHOLDS))
			rv = 1;
		break;
	case ATA_CMD_CONF_OVERLAY:
		if ((features == ATA_DCO_IDENTIFY) ||
				(features == ATA_DCO_SET))
			rv = 1;
		break;
	}
	return rv;
}

/*
 * Executes a taskfile
 * See ide_taskfile_ioctl() for derivation
 */
static int exec_drive_taskfile(struct driver_data *dd,
			       void __user *buf,
			       ide_task_request_t *req_task,
			       int outtotal)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply;
	u8 *outbuf = NULL;
	u8 *inbuf = NULL;
	dma_addr_t outbuf_dma = 0;
	dma_addr_t inbuf_dma = 0;
	dma_addr_t dma_buffer = 0;
	int err = 0;
	unsigned int taskin = 0;
	unsigned int taskout = 0;
	u8 nsect = 0;
	unsigned int timeout;
	unsigned int force_single_sector;
	unsigned int transfer_size;
	unsigned long task_file_data;
	/* in-data follows out-data in the user buffer. */
	int intotal = outtotal + req_task->out_size;
	int erasemode = 0;

	taskout = req_task->out_size;
	taskin = req_task->in_size;
	/* 130560 = 512 * 0xFF*/
	if (taskin > 130560 || taskout > 130560)
		return -EINVAL;

	if (taskout) {
		outbuf = memdup_user(buf + outtotal, taskout);
		if (IS_ERR(outbuf))
			return PTR_ERR(outbuf);

		outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf,
					    taskout, DMA_TO_DEVICE);
		if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) {
			err = -ENOMEM;
			goto abort;
		}
		dma_buffer = outbuf_dma;
	}

	if (taskin) {
		inbuf = memdup_user(buf + intotal, taskin);
		if (IS_ERR(inbuf)) {
			err = PTR_ERR(inbuf);
			inbuf = NULL;
			goto abort;
		}
		inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf,
					   taskin, DMA_FROM_DEVICE);
		if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) {
			err = -ENOMEM;
			goto abort;
		}
		dma_buffer = inbuf_dma;
	}

	/* only supports PIO and non-data commands from this ioctl. */
	switch (req_task->data_phase) {
	case TASKFILE_OUT:
		nsect = taskout / ATA_SECT_SIZE;
		reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
		break;
	case TASKFILE_IN:
		reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
		break;
	case TASKFILE_NO_DATA:
		reply = (dd->port->rxfis + RX_FIS_D2H_REG);
		break;
	default:
		err = -EINVAL;
		goto abort;
	}

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));

	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= req_task->io_ports[7];
	fis.features	= req_task->io_ports[1];
	fis.sect_count	= req_task->io_ports[2];
	fis.lba_low	= req_task->io_ports[3];
	fis.lba_mid	= req_task->io_ports[4];
	fis.lba_hi	= req_task->io_ports[5];
	 /* Clear the dev bit*/
	fis.device	= req_task->io_ports[6] & ~0x10;

	if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
		/* 48-bit (HOB) registers were supplied by the caller. */
		req_task->in_flags.all	=
			IDE_TASKFILE_STD_IN_FLAGS |
			(IDE_HOB_STD_IN_FLAGS << 8);
		fis.lba_low_ex		= req_task->hob_ports[3];
		fis.lba_mid_ex		= req_task->hob_ports[4];
		fis.lba_hi_ex		= req_task->hob_ports[5];
		fis.features_ex		= req_task->hob_ports[1];
		fis.sect_cnt_ex		= req_task->hob_ports[2];

	} else {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
	}

	force_single_sector = implicit_sector(fis.command, fis.features);

	if ((taskin || taskout) && (!fis.sect_count)) {
		if (nsect)
			fis.sect_count = nsect;
		else {
			if (!force_single_sector) {
				dev_warn(&dd->pdev->dev,
					"data movement but "
					"sect_count is 0\n");
				err = -EINVAL;
				goto abort;
			}
		}
	}

	dbg_printk(MTIP_DRV_NAME
		" %s: cmd %x, feat %x, nsect %x,"
		" sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
		" head/dev %x\n",
		__func__,
		fis.command,
		fis.features,
		fis.sect_count,
		fis.lba_low,
		fis.lba_mid,
		fis.lba_hi,
		fis.device);

	/* check for erase mode support during secure erase.*/
	if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
					(outbuf[0] & MTIP_SEC_ERASE_MODE)) {
		erasemode = 1;
	}

	mtip_set_timeout(dd, &fis, &timeout, erasemode);

	/* Determine the correct transfer size.*/
	if (force_single_sector)
		transfer_size = ATA_SECT_SIZE;
	else
		transfer_size = ATA_SECT_SIZE * fis.sect_count;

	/* Execute the command.*/
	if (mtip_exec_internal_command(dd->port,
				 &fis,
				 5,
				 dma_buffer,
				 transfer_size,
				 0,
				 timeout) < 0) {
		err = -EIO;
		goto abort;
	}

	task_file_data = readl(dd->port->mmio+PORT_TFDATA);

	/* Pick the reply FIS: PIO Setup when data-in completed cleanly. */
	if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
		reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
		req_task->io_ports[7] = reply->control;
	} else {
		reply = dd->port->rxfis + RX_FIS_D2H_REG;
		req_task->io_ports[7] = reply->command;
	}

	/* reclaim the DMA buffers.*/
	if (inbuf_dma)
		dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
				 DMA_FROM_DEVICE);
	if (outbuf_dma)
		dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
				 DMA_TO_DEVICE);
	inbuf_dma  = 0;
	outbuf_dma = 0;

	/* return the ATA registers to the caller.*/
	req_task->io_ports[1] = reply->features;
	req_task->io_ports[2] = reply->sect_count;
	req_task->io_ports[3] = reply->lba_low;
	req_task->io_ports[4] = reply->lba_mid;
	req_task->io_ports[5] = reply->lba_hi;
	req_task->io_ports[6] = reply->device;

	if (req_task->out_flags.all & 1)  {

		req_task->hob_ports[3] = reply->lba_low_ex;
		req_task->hob_ports[4] = reply->lba_mid_ex;
		req_task->hob_ports[5] = reply->lba_hi_ex;
		req_task->hob_ports[1] = reply->features_ex;
		req_task->hob_ports[2] = reply->sect_cnt_ex;
	}
	dbg_printk(MTIP_DRV_NAME
		" %s: Completion: stat %x,"
		"err %x, sect_cnt %x, lbalo %x,"
		"lbamid %x, lbahi %x, dev %x\n",
		__func__,
		req_task->io_ports[7],
		req_task->io_ports[1],
		req_task->io_ports[2],
		req_task->io_ports[3],
		req_task->io_ports[4],
		req_task->io_ports[5],
		req_task->io_ports[6]);

	if (taskout) {
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	if (inbuf_dma)
		dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
				 DMA_FROM_DEVICE);
	if (outbuf_dma)
		dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
				 DMA_TO_DEVICE);
	kfree(outbuf);
	kfree(inbuf);

	return err;
}

/*
 * Handle IOCTL calls from the Block Layer.
 *
 * This function is called by the Block Layer when it receives an IOCTL
 * command that it does not understand. If the IOCTL command is not supported
 * this function returns -ENOTTY.
 *
 * @dd  Pointer to the driver data structure.
 * @cmd IOCTL command passed from the Block Layer.
 * @arg IOCTL argument passed from the Block Layer.
 *
 * return value
 *	0	The IOCTL completed successfully.
 *	-ENOTTY The specified command is not supported.
 *	-EFAULT An error occurred copying data to a user space buffer.
 *	-EIO	An error occurred while executing the command.
 */
static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
			 unsigned long arg)
{
	switch (cmd) {
	case HDIO_GET_IDENTITY:
	{
		if (copy_to_user((void __user *)arg, dd->port->identify,
						sizeof(u16) * ATA_ID_WORDS))
			return -EFAULT;
		break;
	}
	case HDIO_DRIVE_CMD:
	{
		u8 drive_command[4];

		/* Copy the user command info to our buffer. */
		if (copy_from_user(drive_command,
					 (void __user *) arg,
					 sizeof(drive_command)))
			return -EFAULT;

		/* Execute the drive command. */
		if (exec_drive_command(dd->port,
					 drive_command,
					 (void __user *) (arg+4)))
			return -EIO;

		/* Copy the status back to the users buffer. */
		if (copy_to_user((void __user *) arg,
					 drive_command,
					 sizeof(drive_command)))
			return -EFAULT;

		break;
	}
	case HDIO_DRIVE_TASK:
	{
		u8 drive_command[7];

		/* Copy the user command info to our buffer. */
		if (copy_from_user(drive_command,
					 (void __user *) arg,
					 sizeof(drive_command)))
			return -EFAULT;

		/* Execute the drive command. */
		if (exec_drive_task(dd->port, drive_command))
			return -EIO;

		/* Copy the status back to the users buffer. */
		if (copy_to_user((void __user *) arg,
					 drive_command,
					 sizeof(drive_command)))
			return -EFAULT;

		break;
	}
	case HDIO_DRIVE_TASKFILE: {
		ide_task_request_t req_task;
		int ret, outtotal;

		if (copy_from_user(&req_task, (void __user *) arg,
					sizeof(req_task)))
			return -EFAULT;

		outtotal = sizeof(req_task);

		ret = exec_drive_taskfile(dd, (void __user *) arg,
						&req_task, outtotal);

		if (copy_to_user((void __user *) arg, &req_task,
							sizeof(req_task)))
			return -EFAULT;

		return ret;
	}

	default:
		/*
		 * NOTE(review): the header comment says -ENOTTY for
		 * unsupported commands, but -EINVAL is returned here;
		 * confirm which is intended before changing either.
		 */
		return -EINVAL;
	}
	return 0;
}

/*
 * Submit an IO to the hw
 *
 * This function is called by the block layer to issue an io
 * to the device. Upon completion, the callback function will
 * be called with the data parameter passed as the callback data.
 *
 * @dd       Pointer to the driver data structure.
 * @start    First sector to read.
 * @nsect    Number of sectors to read.
 * @tag      The tag of this read command.
 * @callback Pointer to the function that should be called
 *	     when the read completes.
 * @data     Callback data passed to the callback function
 *	     when the read completes.
 * @dir      Direction (read or write)
 *
 * return value
 *	None
 */
static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
			      struct mtip_cmd *command,
			      struct blk_mq_hw_ctx *hctx)
{
	struct mtip_cmd_hdr *hdr =
		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
	struct host_to_dev_fis *fis;
	struct mtip_port *port = dd->port;
	int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	u64 start = blk_rq_pos(rq);
	unsigned int nsect = blk_rq_sectors(rq);
	unsigned int nents;

	/* Map the scatter list for DMA access */
	nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);

	prefetch(&port->flags);

	command->scatter_ents = nents;

	/*
	 * The number of retries for this command before it is
	 * reported as a failure to the upper layers.
	 */
	command->retries = MTIP_MAX_RETRIES;

	/* Fill out fis */
	fis = command->command;
	fis->type        = 0x27;
	fis->opts        = 1 << 7;
	if (dma_dir == DMA_FROM_DEVICE)
		fis->command = ATA_CMD_FPDMA_READ;
	else
		fis->command = ATA_CMD_FPDMA_WRITE;
	/* 48-bit LBA split across the standard and extended fields. */
	fis->lba_low     = start & 0xFF;
	fis->lba_mid     = (start >> 8) & 0xFF;
	fis->lba_hi      = (start >> 16) & 0xFF;
	fis->lba_low_ex  = (start >> 24) & 0xFF;
	fis->lba_mid_ex  = (start >> 32) & 0xFF;
	fis->lba_hi_ex   = (start >> 40) & 0xFF;
	fis->device      = 1 << 6;
	/* For FPDMA the sector count carries the transfer length ... */
	fis->features    = nsect & 0xFF;
	fis->features_ex = (nsect >> 8) & 0xFF;
	/* ... and the NCQ tag goes in sect_count bits 7:3. */
	fis->sect_count  = ((rq->tag << 3) | (rq->tag >> 5));
	fis->sect_cnt_ex = 0;
	fis->control     = 0;
	fis->res2        = 0;
	fis->res3        = 0;
	fill_command_sg(dd, command, nents);

	if (unlikely(command->unaligned))
		fis->device |= 1 << 7;

	/* Populate the command header */
	hdr->ctba = cpu_to_le32(command->command_dma & 0xFFFFFFFF);
	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
		hdr->ctbau = cpu_to_le32((command->command_dma >> 16) >> 16);
	hdr->opts = cpu_to_le32((nents << 16) | 5 | AHCI_CMD_PREFETCH);
	hdr->byte_count = 0;

	command->direction = dma_dir;

	/*
	 * To prevent this command from being issued
	 * if an internal command is in progress or error handling is active.
	 */
	if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
		set_bit(rq->tag, port->cmds_to_issue);
		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
		return;
	}

	/* Issue the command to the hardware */
	mtip_issue_ncq_command(port, rq->tag);
}

/*
 * Sysfs status dump.
 *
 * @dev  Pointer to the device structure, passed by the kernel.
 * @attr Pointer to the device_attribute structure passed by the kernel.
 * @buf  Pointer to the char buffer that will receive the stats info.
 *
 * return value
 *	The size, in bytes, of the data copied into buf.
 */
static ssize_t mtip_hw_show_status(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct driver_data *dd = dev_to_disk(dev)->private_data;
	int size = 0;

	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
		size += sprintf(buf, "%s", "thermal_shutdown\n");
	else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
		size += sprintf(buf, "%s", "write_protect\n");
	else
		size += sprintf(buf, "%s", "online\n");

	return size;
}

/* Read-only "status" sysfs attribute. */
static DEVICE_ATTR(status, 0444, mtip_hw_show_status, NULL);

/* debugfs entries */

/*
 * Format a listing of all known devices (present and being removed)
 * into @buf, returning the number of bytes written.  The caller must
 * supply a buffer large enough for the whole listing.
 */
static ssize_t show_device_status(struct device_driver *drv, char *buf)
{
	int size = 0;
	struct driver_data *dd, *tmp;
	unsigned long flags;
	char id_buf[42];
	u16 status = 0;

	spin_lock_irqsave(&dev_lock, flags);
	size += sprintf(&buf[size], "Devices Present:\n");
	list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
		if (dd->pdev) {
			if (dd->port &&
			    dd->port->identify &&
			    dd->port->identify_valid) {
				/* Serial number: identify words 10-19. */
				strlcpy(id_buf,
					(char *) (dd->port->identify + 10), 21);
				status = *(dd->port->identify + 141);
			} else {
				memset(id_buf, 0, 42);
				status = 0;
			}

			if (dd->port &&
			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
				size += sprintf(&buf[size],
					" device %s %s (ftl rebuild %d %%)\n",
					dev_name(&dd->pdev->dev),
					id_buf,
					status);
			} else {
				size += sprintf(&buf[size],
					" device %s %s\n",
					dev_name(&dd->pdev->dev),
					id_buf);
			}
		}
	}

	size += sprintf(&buf[size], "Devices Being Removed:\n");
	list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
		if (dd->pdev) {
			if (dd->port &&
			    dd->port->identify &&
			    dd->port->identify_valid) {
				strlcpy(id_buf,
					(char *) (dd->port->identify+10), 21);
				status = *(dd->port->identify + 141);
			} else {
				memset(id_buf, 0, 42);
				status = 0;
			}

			if (dd->port &&
			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
				size += sprintf(&buf[size],
					" device %s %s (ftl rebuild %d %%)\n",
					dev_name(&dd->pdev->dev),
					id_buf,
					status);
			} else {
				size += sprintf(&buf[size],
					" device %s %s\n",
					dev_name(&dd->pdev->dev),
					id_buf);
			}
		}
	}
	spin_unlock_irqrestore(&dev_lock, flags);

	return size;
}

/*
 * debugfs read for the device-status file: formats the device listing
 * into a temporary buffer and copies at most @len bytes to user space.
 * Only a single read from offset 0 is supported.
 */
static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
						size_t len, loff_t *offset)
{
	struct driver_data *dd = (struct driver_data *)f->private_data;
	int size = *offset;
	char *buf;
	int rv = 0;

	if (!len || *offset)
		return 0;

	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
	if (!buf) {
		dev_err(&dd->pdev->dev,
			"Memory allocation: status buffer\n");
		return -ENOMEM;
	}

	size += show_device_status(NULL, buf);

	*offset = size <= len ? size : len;
	size = copy_to_user(ubuf, buf, *offset);
	if (size)
		rv = -EFAULT;

	kfree(buf);
	return rv ?
rv : *offset; 2353 } 2354 2355 static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, 2356 size_t len, loff_t *offset) 2357 { 2358 struct driver_data *dd = (struct driver_data *)f->private_data; 2359 char *buf; 2360 u32 group_allocated; 2361 int size = *offset; 2362 int n, rv = 0; 2363 2364 if (!len || size) 2365 return 0; 2366 2367 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); 2368 if (!buf) { 2369 dev_err(&dd->pdev->dev, 2370 "Memory allocation: register buffer\n"); 2371 return -ENOMEM; 2372 } 2373 2374 size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); 2375 2376 for (n = dd->slot_groups-1; n >= 0; n--) 2377 size += sprintf(&buf[size], "%08X ", 2378 readl(dd->port->s_active[n])); 2379 2380 size += sprintf(&buf[size], "]\n"); 2381 size += sprintf(&buf[size], "H/ Command Issue : [ 0x"); 2382 2383 for (n = dd->slot_groups-1; n >= 0; n--) 2384 size += sprintf(&buf[size], "%08X ", 2385 readl(dd->port->cmd_issue[n])); 2386 2387 size += sprintf(&buf[size], "]\n"); 2388 size += sprintf(&buf[size], "H/ Completed : [ 0x"); 2389 2390 for (n = dd->slot_groups-1; n >= 0; n--) 2391 size += sprintf(&buf[size], "%08X ", 2392 readl(dd->port->completed[n])); 2393 2394 size += sprintf(&buf[size], "]\n"); 2395 size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n", 2396 readl(dd->port->mmio + PORT_IRQ_STAT)); 2397 size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n", 2398 readl(dd->mmio + HOST_IRQ_STAT)); 2399 size += sprintf(&buf[size], "\n"); 2400 2401 size += sprintf(&buf[size], "L/ Commands in Q : [ 0x"); 2402 2403 for (n = dd->slot_groups-1; n >= 0; n--) { 2404 if (sizeof(long) > sizeof(u32)) 2405 group_allocated = 2406 dd->port->cmds_to_issue[n/2] >> (32*(n&1)); 2407 else 2408 group_allocated = dd->port->cmds_to_issue[n]; 2409 size += sprintf(&buf[size], "%08X ", group_allocated); 2410 } 2411 size += sprintf(&buf[size], "]\n"); 2412 2413 *offset = size <= len ? 
size : len; 2414 size = copy_to_user(ubuf, buf, *offset); 2415 if (size) 2416 rv = -EFAULT; 2417 2418 kfree(buf); 2419 return rv ? rv : *offset; 2420 } 2421 2422 static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, 2423 size_t len, loff_t *offset) 2424 { 2425 struct driver_data *dd = (struct driver_data *)f->private_data; 2426 char *buf; 2427 int size = *offset; 2428 int rv = 0; 2429 2430 if (!len || size) 2431 return 0; 2432 2433 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); 2434 if (!buf) { 2435 dev_err(&dd->pdev->dev, 2436 "Memory allocation: flag buffer\n"); 2437 return -ENOMEM; 2438 } 2439 2440 size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", 2441 dd->port->flags); 2442 size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", 2443 dd->dd_flag); 2444 2445 *offset = size <= len ? size : len; 2446 size = copy_to_user(ubuf, buf, *offset); 2447 if (size) 2448 rv = -EFAULT; 2449 2450 kfree(buf); 2451 return rv ? rv : *offset; 2452 } 2453 2454 static const struct file_operations mtip_device_status_fops = { 2455 .owner = THIS_MODULE, 2456 .open = simple_open, 2457 .read = mtip_hw_read_device_status, 2458 .llseek = no_llseek, 2459 }; 2460 2461 static const struct file_operations mtip_regs_fops = { 2462 .owner = THIS_MODULE, 2463 .open = simple_open, 2464 .read = mtip_hw_read_registers, 2465 .llseek = no_llseek, 2466 }; 2467 2468 static const struct file_operations mtip_flags_fops = { 2469 .owner = THIS_MODULE, 2470 .open = simple_open, 2471 .read = mtip_hw_read_flags, 2472 .llseek = no_llseek, 2473 }; 2474 2475 /* 2476 * Create the sysfs related attributes. 2477 * 2478 * @dd Pointer to the driver data structure. 2479 * @kobj Pointer to the kobj for the block device. 2480 * 2481 * return value 2482 * 0 Operation completed successfully. 2483 * -EINVAL Invalid parameter. 
 */
static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
{
	if (!kobj || !dd)
		return -EINVAL;

	/* Creation failure is only warned about; the device still works */
	if (sysfs_create_file(kobj, &dev_attr_status.attr))
		dev_warn(&dd->pdev->dev,
			"Error creating 'status' sysfs entry\n");
	return 0;
}

/*
 * Remove the sysfs related attributes.
 *
 * @dd   Pointer to the driver data structure.
 * @kobj Pointer to the kobj for the block device.
 *
 * return value
 *	0	Operation completed successfully.
 *	-EINVAL Invalid parameter.
 */
static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
{
	if (!kobj || !dd)
		return -EINVAL;

	sysfs_remove_file(kobj, &dev_attr_status.attr);

	return 0;
}

/*
 * Create the per-disk debugfs directory and its "flags" and
 * "registers" files under the driver's dfs_parent directory.
 *
 * return value
 *	0 on success, -1 if the directory could not be created.
 */
static int mtip_hw_debugfs_init(struct driver_data *dd)
{
	if (!dfs_parent)
		return -1;

	dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
	if (IS_ERR_OR_NULL(dd->dfs_node)) {
		dev_warn(&dd->pdev->dev,
			"Error creating node %s under debugfs\n",
						dd->disk->disk_name);
		dd->dfs_node = NULL;
		return -1;
	}

	debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops);
	debugfs_create_file("registers", 0444, dd->dfs_node, dd,
							&mtip_regs_fops);

	return 0;
}

/* Tear down the per-disk debugfs directory created above. */
static void mtip_hw_debugfs_exit(struct driver_data *dd)
{
	debugfs_remove_recursive(dd->dfs_node);
}

/*
 * Perform any init/resume time hardware setup
 *
 * @dd	Pointer to the driver data structure.
 *
 * return value
 *	None
 */
static inline void hba_setup(struct driver_data *dd)
{
	u32 hwdata;
	hwdata = readl(dd->mmio + HOST_HSORG);

	/* interrupt bug workaround: use only 1 IS bit.*/
	writel(hwdata |
		HSORG_DISABLE_SLOTGRP_INTR |
		HSORG_DISABLE_SLOTGRP_PXIS,
		dd->mmio + HOST_HSORG);
}

/* True only for the P420M device, which needs unaligned-IO limiting */
static int mtip_device_unaligned_constrained(struct driver_data *dd)
{
	return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
}

/*
 * Detect the details of the product, and store anything needed
 * into the driver data structure.  This includes product type and
 * version and number of slot groups.
 *
 * @dd	Pointer to the driver data structure.
 *
 * return value
 *	None
 */
static void mtip_detect_product(struct driver_data *dd)
{
	u32 hwdata;
	unsigned int rev, slotgroups;

	/*
	 * HBA base + 0xFC [15:0] - vendor-specific hardware interface
	 * info register:
	 * [15:8] hardware/software interface rev#
	 * [   3] asic-style interface
	 * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
	 */
	hwdata = readl(dd->mmio + HOST_HSORG);

	dd->product_type = MTIP_PRODUCT_UNKNOWN;
	dd->slot_groups = 1;

	if (hwdata & 0x8) {
		dd->product_type = MTIP_PRODUCT_ASICFPGA;
		rev = (hwdata & HSORG_HWREV) >> 8;
		slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
		dev_info(&dd->pdev->dev,
			"ASIC-FPGA design, HS rev 0x%x, "
			"%i slot groups [%i slots]\n",
			rev,
			slotgroups,
			slotgroups * 32);

		if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
			dev_warn(&dd->pdev->dev,
				"Warning: driver only supports "
				"%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
			slotgroups = MTIP_MAX_SLOT_GROUPS;
		}
		dd->slot_groups = slotgroups;
		return;
	}

	dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
}

/*
 * Blocking wait for FTL rebuild to complete
 *
 * @dd	Pointer to the DRIVER_DATA structure.
 *
 * return value
 *	0	FTL rebuild completed successfully
 *	-EFAULT FTL rebuild error/timeout/interruption
 */
static int mtip_ftl_rebuild_poll(struct driver_data *dd)
{
	unsigned long timeout, cnt = 0, start;

	dev_warn(&dd->pdev->dev,
		"FTL rebuild in progress. Polling for completion.\n");

	start = jiffies;
	timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);

	do {
		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
				&dd->dd_flag)))
			return -EFAULT;
		if (mtip_check_surprise_removal(dd->pdev))
			return -EFAULT;

		/* Re-read IDENTIFY; the rebuild-magic word clears when done */
		if (mtip_get_identify(dd->port, NULL) < 0)
			return -EFAULT;

		if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
			MTIP_FTL_REBUILD_MAGIC) {
			ssleep(1);
			/* Print message every 3 minutes */
			if (cnt++ >= 180) {
				dev_warn(&dd->pdev->dev,
				"FTL rebuild in progress (%d secs).\n",
				jiffies_to_msecs(jiffies - start) / 1000);
				cnt = 0;
			}
		} else {
			dev_warn(&dd->pdev->dev,
				"FTL rebuild complete (%d secs).\n",
			jiffies_to_msecs(jiffies - start) / 1000);
			mtip_block_initialize(dd);
			return 0;
		}
	} while (time_before(jiffies, timeout));

	/* Check for timeout */
	dev_err(&dd->pdev->dev,
		"Timed out waiting for FTL rebuild to complete (%d secs).\n",
		jiffies_to_msecs(jiffies - start) / 1000);
	return -EFAULT;
}

/*
 * blk-mq softirq completion: unmap the command's scatter list,
 * return the unaligned-slot credit if one was consumed, and finish
 * the request with the status recorded at completion time.
 */
static void mtip_softirq_done_fn(struct request *rq)
{
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct driver_data *dd = rq->q->queuedata;

	/* Unmap the DMA scatter list entries */
	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
							cmd->direction);

	if (unlikely(cmd->unaligned))
		atomic_inc(&dd->port->cmd_slot_unal);

	blk_mq_end_request(rq, cmd->status);
}

/*
 * blk_mq_tagset_busy_iter() callback: fail one outstanding request
 * with an I/O error and complete it immediately.
 */
static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
{
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct driver_data *dd = data;

	dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);

	clear_bit(req->tag, dd->port->cmds_to_issue);
	cmd->status = BLK_STS_IOERR;
	mtip_softirq_done_fn(req);
	return true;
}
/*
 * blk_mq_tagset_busy_iter() callback used during a timeout reset:
 * remember the request's tag in cmds_to_issue so it can be reissued
 * after the device reset, then abort it at the block layer.
 */
static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
{
	struct driver_data *dd = data;

	set_bit(req->tag, dd->port->cmds_to_issue);
	blk_abort_request(req);
	return true;
}

/*
 * service thread to issue queued commands
 *
 * @data	Pointer to the driver data structure.
 *
 * return value
 *	0
 */

static int mtip_service_thread(void *data)
{
	struct driver_data *dd = (struct driver_data *)data;
	unsigned long slot, slot_start, slot_wrap, to;
	unsigned int num_cmd_slots = dd->slot_groups * 32;
	struct mtip_port *port = dd->port;

	while (1) {
		if (kthread_should_stop() ||
			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
			goto st_out;
		clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);

		/*
		 * the condition is to check neither an internal command is
		 * in progress nor error handling is active
		 */
		wait_event_interruptible(port->svc_wait, (port->flags) &&
			(port->flags & MTIP_PF_SVC_THD_WORK));

		if (kthread_should_stop() ||
			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
			goto st_out;

		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
				&dd->dd_flag)))
			goto st_out;

		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);

restart_eh:
		/* Demux bits: start with error handling */
		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
			mtip_handle_tfe(dd);
			clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
		}

		/* Re-run if error handling was re-triggered meanwhile */
		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
			goto restart_eh;

		if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
			/* Give the completion workers up to 5s to drain */
			to = jiffies + msecs_to_jiffies(5000);

			do {
				mdelay(100);
			} while (atomic_read(&dd->irq_workers_active) != 0 &&
				time_before(jiffies, to));

			if (atomic_read(&dd->irq_workers_active) != 0)
				dev_warn(&dd->pdev->dev,
					"Completion workers still active!");

			blk_mq_quiesce_queue(dd->queue);

			/* Park every in-flight request in cmds_to_issue */
			blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);

			set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);

			if (mtip_device_reset(dd))
				blk_mq_tagset_busy_iter(&dd->tags,
							mtip_abort_cmd, dd);

			clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);

			blk_mq_unquiesce_queue(dd->queue);
		}

		if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
			/* Slot 0 is reserved; scan starts from slot 1 and
			 * wraps once so all pending tags are issued. */
			slot = 1;
			/* used to restrict the loop to one iteration */
			slot_start = num_cmd_slots;
			slot_wrap = 0;
			while (1) {
				slot = find_next_bit(port->cmds_to_issue,
						num_cmd_slots, slot);
				if (slot_wrap == 1) {
					if ((slot_start >= slot) ||
						(slot >= num_cmd_slots))
						break;
				}
				if (unlikely(slot_start == num_cmd_slots))
					slot_start = slot;

				if (unlikely(slot == num_cmd_slots)) {
					slot = 1;
					slot_wrap = 1;
					continue;
				}

				/* Issue the command to the hardware */
				mtip_issue_ncq_command(port, slot);

				clear_bit(slot, port->cmds_to_issue);
			}

			clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
		}

		if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
			if (mtip_ftl_rebuild_poll(dd) == 0)
				clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
		}
	}

st_out:
	return 0;
}

/*
 * DMA region teardown
 *
 * @dd	Pointer to driver_data structure
 *
 * return value
 *	None
 */
static void mtip_dma_free(struct driver_data *dd)
{
	struct mtip_port *port = dd->port;

	if (port->block1)
		dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
					port->block1, port->block1_dma);

	if (port->command_list) {
		dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
				port->command_list, port->command_list_dma);
	}
}

/*
 * DMA region setup
 *
 * @dd	Pointer to driver_data
 *	structure
 *
 * return value
 *	-ENOMEM	Not enough free DMA region space to initialize driver
 */
static int mtip_dma_alloc(struct driver_data *dd)
{
	struct mtip_port *port = dd->port;

	/* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
	port->block1 =
		dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
					&port->block1_dma, GFP_KERNEL);
	if (!port->block1)
		return -ENOMEM;
	memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);

	/* Allocate dma memory for command list */
	port->command_list =
		dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
					&port->command_list_dma, GFP_KERNEL);
	if (!port->command_list) {
		/* Undo the first allocation so we fail cleanly */
		dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
					port->block1, port->block1_dma);
		port->block1 = NULL;
		port->block1_dma = 0;
		return -ENOMEM;
	}
	memset(port->command_list, 0, AHCI_CMD_TBL_SZ);

	/* Setup all pointers into first DMA region (see AHCI_*_OFFSET) */
	port->rxfis = port->block1 + AHCI_RX_FIS_OFFSET;
	port->rxfis_dma = port->block1_dma + AHCI_RX_FIS_OFFSET;
	port->identify = port->block1 + AHCI_IDFY_OFFSET;
	port->identify_dma = port->block1_dma + AHCI_IDFY_OFFSET;
	port->log_buf = port->block1 + AHCI_SECTBUF_OFFSET;
	port->log_buf_dma = port->block1_dma + AHCI_SECTBUF_OFFSET;
	port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET;
	port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;

	return 0;
}

/*
 * Read and validate the IDENTIFY data, then derive the initial
 * write-protect / over-temp / rebuild state from the drive's
 * NCQ log page and SMART attribute 242.
 *
 * return value
 *	0 (or the non-fatal log-page read error) on success,
 *	MTIP_FTL_REBUILD_MAGIC if an FTL rebuild is in progress,
 *	-EFAULT if the IDENTIFY command itself failed.
 */
static int mtip_hw_get_identify(struct driver_data *dd)
{
	struct smart_attr attr242;
	unsigned char *buf;
	int rv;

	if (mtip_get_identify(dd->port, NULL) < 0)
		return -EFAULT;

	if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
		MTIP_FTL_REBUILD_MAGIC) {
		set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
		return MTIP_FTL_REBUILD_MAGIC;
	}
	mtip_dump_identify(dd->port);

	/* check write protect, over temp and rebuild statuses */
	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
				dd->port->log_buf,
				dd->port->log_buf_dma, 1);
	if (rv) {
		dev_warn(&dd->pdev->dev,
			"Error in READ LOG EXT (10h) command\n");
		/* non-critical error, don't fail the load */
	} else {
		buf = (unsigned char *)dd->port->log_buf;
		/* Byte offsets/values below are vendor-specific fields in
		 * the NCQ log page -- confirm against Micron docs. */
		if (buf[259] & 0x1) {
			dev_info(&dd->pdev->dev,
				"Write protect bit is set.\n");
			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
		}
		if (buf[288] == 0xF7) {
			dev_info(&dd->pdev->dev,
				"Exceeded Tmax, drive in thermal shutdown.\n");
			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
		}
		if (buf[288] == 0xBF) {
			dev_info(&dd->pdev->dev,
				"Drive indicates rebuild has failed.\n");
			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
		}
	}

	/* get write protect progress */
	memset(&attr242, 0, sizeof(struct smart_attr));
	if (mtip_get_smart_attr(dd->port, 242, &attr242))
		dev_warn(&dd->pdev->dev,
				"Unable to check write protect progress\n");
	else
		dev_info(&dd->pdev->dev,
				"Write protect progress: %u%% (%u blocks)\n",
				attr242.cur, le32_to_cpu(attr242.data));

	return rv;
}

/*
 * Called once for each card.
 *
 * @dd	Pointer to the driver data structure.
 *
 * return value
 *	0 on success, else an error code.
 */
static int mtip_hw_init(struct driver_data *dd)
{
	int i;
	int rv;
	unsigned long timeout, timetaken;

	dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];

	mtip_detect_product(dd);
	if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
		rv = -EIO;
		goto out1;
	}

	hba_setup(dd);

	dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
				dd->numa_node);
	if (!dd->port) {
		dev_err(&dd->pdev->dev,
			"Memory allocation: port structure\n");
		return -ENOMEM;
	}

	/* Continue workqueue setup */
	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
		dd->work[i].port = dd->port;

	/* Enable unaligned IO constraints for some devices */
	if (mtip_device_unaligned_constrained(dd))
		dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
	else
		dd->unal_qdepth = 0;

	atomic_set(&dd->port->cmd_slot_unal, dd->unal_qdepth);

	/* Spinlock to prevent concurrent issue */
	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
		spin_lock_init(&dd->port->cmd_issue_lock[i]);

	/* Set the port mmio base address. */
	dd->port->mmio = dd->mmio + PORT_OFFSET;
	dd->port->dd = dd;

	/* DMA allocations */
	rv = mtip_dma_alloc(dd);
	if (rv < 0)
		goto out1;

	/* Setup the pointers to the extended s_active and CI registers. */
	for (i = 0; i < dd->slot_groups; i++) {
		dd->port->s_active[i] =
			dd->port->mmio + i*0x80 + PORT_SCR_ACT;
		dd->port->cmd_issue[i] =
			dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
		dd->port->completed[i] =
			dd->port->mmio + i*0x80 + PORT_SDBV;
	}

	timetaken = jiffies;
	timeout = jiffies + msecs_to_jiffies(30000);
	/* Wait up to 30s for the link to come up (SCR status 0x3) */
	while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
		 time_before(jiffies, timeout)) {
		mdelay(100);
	}
	if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
		timetaken = jiffies - timetaken;
		dev_warn(&dd->pdev->dev,
			"Surprise removal detected at %u ms\n",
			jiffies_to_msecs(timetaken));
		rv = -ENODEV;
		goto out2;
	}
	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
		timetaken = jiffies - timetaken;
		dev_warn(&dd->pdev->dev,
			"Removal detected at %u ms\n",
			jiffies_to_msecs(timetaken));
		rv = -EFAULT;
		goto out2;
	}

	/* Conditionally reset the HBA. */
	if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
		if (mtip_hba_reset(dd) < 0) {
			dev_err(&dd->pdev->dev,
				"Card did not reset within timeout\n");
			rv = -EIO;
			goto out2;
		}
	} else {
		/* Clear any pending interrupts on the HBA */
		writel(readl(dd->mmio + HOST_IRQ_STAT),
			dd->mmio + HOST_IRQ_STAT);
	}

	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Setup the ISR and enable interrupts. */
	rv = devm_request_irq(&dd->pdev->dev,
				dd->pdev->irq,
				mtip_irq_handler,
				IRQF_SHARED,
				dev_driver_string(&dd->pdev->dev),
				dd);

	if (rv) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate IRQ %d\n", dd->pdev->irq);
		goto out2;
	}
	irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));

	/* Enable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
					dd->mmio + HOST_CTL);

	init_waitqueue_head(&dd->port->svc_wait);

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
		rv = -EFAULT;
		goto out3;
	}

	return rv;

out3:
	/* Disable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
			dd->mmio + HOST_CTL);

	/* Release the IRQ. */
	irq_set_affinity_hint(dd->pdev->irq, NULL);
	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);

out2:
	mtip_deinit_port(dd->port);
	mtip_dma_free(dd);

out1:
	/* Free the memory allocated for the port structure. */
	kfree(dd->port);

	return rv;
}

/*
 * Send STANDBY IMMEDIATE (E0h) to the drive so it can save state,
 * unless a rebuild is running/failed or security is locked.
 *
 * return value
 *	0 on success, -ENODEV if the device is gone, or the
 *	command's error status.
 */
static int mtip_standby_drive(struct driver_data *dd)
{
	int rv = 0;

	if (dd->sr || !dd->port)
		return -ENODEV;
	/*
	 * Send standby immediate (E0h) to the drive so that it
	 * saves its state.
	 */
	if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
	    !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
		rv = mtip_standby_immediate(dd->port);
		if (rv)
			dev_warn(&dd->pdev->dev,
				"STANDBY IMMEDIATE failed\n");
	}
	return rv;
}

/*
 * Called to deinitialize an interface.
 *
 * @dd	Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_hw_exit(struct driver_data *dd)
{
	if (!dd->sr) {
		/* de-initialize the port. */
		mtip_deinit_port(dd->port);

		/* Disable interrupts on the HBA. */
		writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
				dd->mmio + HOST_CTL);
	}

	/* Release the IRQ.
 */
	irq_set_affinity_hint(dd->pdev->irq, NULL);
	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
	/* Let any in-flight interrupt work settle before freeing DMA */
	msleep(1000);

	/* Free dma regions */
	mtip_dma_free(dd);

	/* Free the memory allocated for the port structure. */
	kfree(dd->port);
	dd->port = NULL;

	return 0;
}

/*
 * Issue a Standby Immediate command to the device.
 *
 * This function is called by the Block Layer just before the
 * system powers off during a shutdown.
 *
 * @dd	Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_hw_shutdown(struct driver_data *dd)
{
	/*
	 * Send standby immediate (E0h) to the drive so that it
	 * saves its state.
	 */
	mtip_standby_drive(dd);

	return 0;
}

/*
 * Suspend function
 *
 * This function is called by the Block Layer just before the
 * system hibernates.
 *
 * @dd	Pointer to the driver data structure.
 *
 * return value
 *	0	Suspend was successful
 *	-EFAULT Suspend was not successful
 */
static int mtip_hw_suspend(struct driver_data *dd)
{
	/*
	 * Send standby immediate (E0h) to the drive
	 * so that it saves its state.
	 */
	if (mtip_standby_drive(dd) != 0) {
		dev_err(&dd->pdev->dev,
			"Failed standby-immediate command\n");
		return -EFAULT;
	}

	/* Disable interrupts on the HBA.*/
	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
			dd->mmio + HOST_CTL);
	mtip_deinit_port(dd->port);

	return 0;
}

/*
 * Resume function
 *
 * This function is called by the Block Layer as the
 * system resumes.
 *
 * @dd	Pointer to the driver data structure.
 *
 * return value
 *	0	Resume was successful
 *	-EFAULT Resume was not successful
 */
static int mtip_hw_resume(struct driver_data *dd)
{
	/* Perform any needed hardware setup steps */
	hba_setup(dd);

	/* Reset the HBA */
	if (mtip_hba_reset(dd) != 0) {
		dev_err(&dd->pdev->dev,
			"Unable to reset the HBA\n");
		return -EFAULT;
	}

	/*
	 * Enable the port, DMA engine, and FIS reception specific
	 * h/w in controller.
	 */
	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Enable interrupts on the HBA.*/
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
					dd->mmio + HOST_CTL);

	return 0;
}

/*
 * Helper function for reusing disk name
 * upon hot insertion.
 *
 * Encodes @index as a base-26 letter suffix ("a".."z", then "aa"...)
 * appended to @prefix inside @buf (at most @buflen bytes).
 *
 * return value
 *	0 on success, -EINVAL if @buf is too small for the name.
 */
static int rssd_disk_name_format(char *prefix,
				 int index,
				 char *buf,
				 int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	/* Build the suffix right-to-left at the end of the buffer,
	 * then slide it up to just after the prefix. */
	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

/*
 * Block layer IOCTL handler.
 *
 * @dev	Pointer to the block_device structure.
 * @mode	ignored
 * @cmd	IOCTL command passed from the user application.
 * @arg	Argument passed from the user application.
 *
 * return value
 *	0	IOCTL completed successfully.
 *	-ENOTTY	IOCTL not supported or invalid driver data
 *		structure pointer.
3296 */ 3297 static int mtip_block_ioctl(struct block_device *dev, 3298 fmode_t mode, 3299 unsigned cmd, 3300 unsigned long arg) 3301 { 3302 struct driver_data *dd = dev->bd_disk->private_data; 3303 3304 if (!capable(CAP_SYS_ADMIN)) 3305 return -EACCES; 3306 3307 if (!dd) 3308 return -ENOTTY; 3309 3310 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) 3311 return -ENOTTY; 3312 3313 switch (cmd) { 3314 case BLKFLSBUF: 3315 return -ENOTTY; 3316 default: 3317 return mtip_hw_ioctl(dd, cmd, arg); 3318 } 3319 } 3320 3321 #ifdef CONFIG_COMPAT 3322 /* 3323 * Block layer compat IOCTL handler. 3324 * 3325 * @dev Pointer to the block_device structure. 3326 * @mode ignored 3327 * @cmd IOCTL command passed from the user application. 3328 * @arg Argument passed from the user application. 3329 * 3330 * return value 3331 * 0 IOCTL completed successfully. 3332 * -ENOTTY IOCTL not supported or invalid driver data 3333 * structure pointer. 3334 */ 3335 static int mtip_block_compat_ioctl(struct block_device *dev, 3336 fmode_t mode, 3337 unsigned cmd, 3338 unsigned long arg) 3339 { 3340 struct driver_data *dd = dev->bd_disk->private_data; 3341 3342 if (!capable(CAP_SYS_ADMIN)) 3343 return -EACCES; 3344 3345 if (!dd) 3346 return -ENOTTY; 3347 3348 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) 3349 return -ENOTTY; 3350 3351 switch (cmd) { 3352 case BLKFLSBUF: 3353 return -ENOTTY; 3354 case HDIO_DRIVE_TASKFILE: { 3355 struct mtip_compat_ide_task_request_s __user *compat_req_task; 3356 ide_task_request_t req_task; 3357 int compat_tasksize, outtotal, ret; 3358 3359 compat_tasksize = 3360 sizeof(struct mtip_compat_ide_task_request_s); 3361 3362 compat_req_task = 3363 (struct mtip_compat_ide_task_request_s __user *) arg; 3364 3365 if (copy_from_user(&req_task, (void __user *) arg, 3366 compat_tasksize - (2 * sizeof(compat_long_t)))) 3367 return -EFAULT; 3368 3369 if (get_user(req_task.out_size, &compat_req_task->out_size)) 3370 return -EFAULT; 3371 3372 
if (get_user(req_task.in_size, &compat_req_task->in_size)) 3373 return -EFAULT; 3374 3375 outtotal = sizeof(struct mtip_compat_ide_task_request_s); 3376 3377 ret = exec_drive_taskfile(dd, (void __user *) arg, 3378 &req_task, outtotal); 3379 3380 if (copy_to_user((void __user *) arg, &req_task, 3381 compat_tasksize - 3382 (2 * sizeof(compat_long_t)))) 3383 return -EFAULT; 3384 3385 if (put_user(req_task.out_size, &compat_req_task->out_size)) 3386 return -EFAULT; 3387 3388 if (put_user(req_task.in_size, &compat_req_task->in_size)) 3389 return -EFAULT; 3390 3391 return ret; 3392 } 3393 default: 3394 return mtip_hw_ioctl(dd, cmd, arg); 3395 } 3396 } 3397 #endif 3398 3399 /* 3400 * Obtain the geometry of the device. 3401 * 3402 * You may think that this function is obsolete, but some applications, 3403 * fdisk for example still used CHS values. This function describes the 3404 * device as having 224 heads and 56 sectors per cylinder. These values are 3405 * chosen so that each cylinder is aligned on a 4KB boundary. Since a 3406 * partition is described in terms of a start and end cylinder this means 3407 * that each partition is also 4KB aligned. Non-aligned partitions adversely 3408 * affects performance. 3409 * 3410 * @dev Pointer to the block_device strucutre. 3411 * @geo Pointer to a hd_geometry structure. 3412 * 3413 * return value 3414 * 0 Operation completed successfully. 3415 * -ENOTTY An error occurred while reading the drive capacity. 
3416 */ 3417 static int mtip_block_getgeo(struct block_device *dev, 3418 struct hd_geometry *geo) 3419 { 3420 struct driver_data *dd = dev->bd_disk->private_data; 3421 sector_t capacity; 3422 3423 if (!dd) 3424 return -ENOTTY; 3425 3426 if (!(mtip_hw_get_capacity(dd, &capacity))) { 3427 dev_warn(&dd->pdev->dev, 3428 "Could not get drive capacity.\n"); 3429 return -ENOTTY; 3430 } 3431 3432 geo->heads = 224; 3433 geo->sectors = 56; 3434 sector_div(capacity, (geo->heads * geo->sectors)); 3435 geo->cylinders = capacity; 3436 return 0; 3437 } 3438 3439 static int mtip_block_open(struct block_device *dev, fmode_t mode) 3440 { 3441 struct driver_data *dd; 3442 3443 if (dev && dev->bd_disk) { 3444 dd = (struct driver_data *) dev->bd_disk->private_data; 3445 3446 if (dd) { 3447 if (test_bit(MTIP_DDF_REMOVAL_BIT, 3448 &dd->dd_flag)) { 3449 return -ENODEV; 3450 } 3451 return 0; 3452 } 3453 } 3454 return -ENODEV; 3455 } 3456 3457 static void mtip_block_release(struct gendisk *disk, fmode_t mode) 3458 { 3459 } 3460 3461 /* 3462 * Block device operation function. 3463 * 3464 * This structure contains pointers to the functions required by the block 3465 * layer. 
3466 */ 3467 static const struct block_device_operations mtip_block_ops = { 3468 .open = mtip_block_open, 3469 .release = mtip_block_release, 3470 .ioctl = mtip_block_ioctl, 3471 #ifdef CONFIG_COMPAT 3472 .compat_ioctl = mtip_block_compat_ioctl, 3473 #endif 3474 .getgeo = mtip_block_getgeo, 3475 .owner = THIS_MODULE 3476 }; 3477 3478 static inline bool is_se_active(struct driver_data *dd) 3479 { 3480 if (unlikely(test_bit(MTIP_PF_SE_ACTIVE_BIT, &dd->port->flags))) { 3481 if (dd->port->ic_pause_timer) { 3482 unsigned long to = dd->port->ic_pause_timer + 3483 msecs_to_jiffies(1000); 3484 if (time_after(jiffies, to)) { 3485 clear_bit(MTIP_PF_SE_ACTIVE_BIT, 3486 &dd->port->flags); 3487 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag); 3488 dd->port->ic_pause_timer = 0; 3489 wake_up_interruptible(&dd->port->svc_wait); 3490 return false; 3491 } 3492 } 3493 return true; 3494 } 3495 return false; 3496 } 3497 3498 static inline bool is_stopped(struct driver_data *dd, struct request *rq) 3499 { 3500 if (likely(!(dd->dd_flag & MTIP_DDF_STOP_IO))) 3501 return false; 3502 3503 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) 3504 return true; 3505 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag)) 3506 return true; 3507 if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) && 3508 rq_data_dir(rq)) 3509 return true; 3510 if (test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) 3511 return true; 3512 if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) 3513 return true; 3514 3515 return false; 3516 } 3517 3518 static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, 3519 struct request *rq) 3520 { 3521 struct driver_data *dd = hctx->queue->queuedata; 3522 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3523 3524 if (rq_data_dir(rq) == READ || !dd->unal_qdepth) 3525 return false; 3526 3527 /* 3528 * If unaligned depth must be limited on this controller, mark it 3529 * as unaligned if the IO isn't on a 4k boundary (start of length). 
3530 */ 3531 if (blk_rq_sectors(rq) <= 64) { 3532 if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7)) 3533 cmd->unaligned = 1; 3534 } 3535 3536 if (cmd->unaligned && atomic_dec_if_positive(&dd->port->cmd_slot_unal) >= 0) 3537 return true; 3538 3539 return false; 3540 } 3541 3542 static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx, 3543 struct request *rq) 3544 { 3545 struct driver_data *dd = hctx->queue->queuedata; 3546 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3547 struct mtip_int_cmd *icmd = cmd->icmd; 3548 struct mtip_cmd_hdr *hdr = 3549 dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag; 3550 struct mtip_cmd_sg *command_sg; 3551 3552 if (mtip_commands_active(dd->port)) 3553 return BLK_STS_DEV_RESOURCE; 3554 3555 hdr->ctba = cpu_to_le32(cmd->command_dma & 0xFFFFFFFF); 3556 if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags)) 3557 hdr->ctbau = cpu_to_le32((cmd->command_dma >> 16) >> 16); 3558 /* Populate the SG list */ 3559 hdr->opts = cpu_to_le32(icmd->opts | icmd->fis_len); 3560 if (icmd->buf_len) { 3561 command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ; 3562 3563 command_sg->info = cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF); 3564 command_sg->dba = cpu_to_le32(icmd->buffer & 0xFFFFFFFF); 3565 command_sg->dba_upper = 3566 cpu_to_le32((icmd->buffer >> 16) >> 16); 3567 3568 hdr->opts |= cpu_to_le32((1 << 16)); 3569 } 3570 3571 /* Populate the command header */ 3572 hdr->byte_count = 0; 3573 3574 blk_mq_start_request(rq); 3575 mtip_issue_non_ncq_command(dd->port, rq->tag); 3576 return 0; 3577 } 3578 3579 static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx, 3580 const struct blk_mq_queue_data *bd) 3581 { 3582 struct driver_data *dd = hctx->queue->queuedata; 3583 struct request *rq = bd->rq; 3584 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3585 3586 if (blk_rq_is_passthrough(rq)) 3587 return mtip_issue_reserved_cmd(hctx, rq); 3588 3589 if (unlikely(mtip_check_unal_depth(hctx, rq))) 3590 return BLK_STS_DEV_RESOURCE; 3591 3592 
if (is_se_active(dd) || is_stopped(dd, rq)) 3593 return BLK_STS_IOERR; 3594 3595 blk_mq_start_request(rq); 3596 3597 if (req_op(rq) == REQ_OP_DISCARD) 3598 return mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); 3599 mtip_hw_submit_io(dd, rq, cmd, hctx); 3600 return BLK_STS_OK; 3601 } 3602 3603 static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq, 3604 unsigned int hctx_idx) 3605 { 3606 struct driver_data *dd = set->driver_data; 3607 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3608 3609 if (!cmd->command) 3610 return; 3611 3612 dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, 3613 cmd->command, cmd->command_dma); 3614 } 3615 3616 static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq, 3617 unsigned int hctx_idx, unsigned int numa_node) 3618 { 3619 struct driver_data *dd = set->driver_data; 3620 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3621 3622 cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, 3623 &cmd->command_dma, GFP_KERNEL); 3624 if (!cmd->command) 3625 return -ENOMEM; 3626 3627 memset(cmd->command, 0, CMD_DMA_ALLOC_SZ); 3628 3629 sg_init_table(cmd->sg, MTIP_MAX_SG); 3630 return 0; 3631 } 3632 3633 static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req, 3634 bool reserved) 3635 { 3636 struct driver_data *dd = req->q->queuedata; 3637 3638 if (reserved) { 3639 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); 3640 3641 cmd->status = BLK_STS_TIMEOUT; 3642 blk_mq_complete_request(req); 3643 return BLK_EH_DONE; 3644 } 3645 3646 if (test_bit(req->tag, dd->port->cmds_to_issue)) 3647 goto exit_handler; 3648 3649 if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags)) 3650 goto exit_handler; 3651 3652 wake_up_interruptible(&dd->port->svc_wait); 3653 exit_handler: 3654 return BLK_EH_RESET_TIMER; 3655 } 3656 3657 static const struct blk_mq_ops mtip_mq_ops = { 3658 .queue_rq = mtip_queue_rq, 3659 .init_request = mtip_init_cmd, 3660 .exit_request = mtip_free_cmd, 3661 .complete = 
mtip_softirq_done_fn,
	.timeout	= mtip_cmd_timeout,
};

/*
 * Block layer initialization function.
 *
 * This function is called once by the PCI layer for each P320
 * device that is connected to the system.
 *
 * On re-entry after an FTL rebuild (dd->disk already set) the hardware
 * and queue setup is skipped and only the identify/rebuild handling
 * below runs.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0 on success else an error code.
 */
static int mtip_block_initialize(struct driver_data *dd)
{
	int rv = 0, wait_for_rebuild = 0;
	sector_t capacity;
	unsigned int index = 0;
	struct kobject *kobj;

	if (dd->disk)
		goto skip_create_disk; /* hw init done, before rebuild */

	if (mtip_hw_init(dd)) {
		rv = -EINVAL;
		goto protocol_init_error;
	}

	dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
	if (dd->disk == NULL) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate gendisk structure\n");
		rv = -EINVAL;
		goto alloc_disk_error;
	}

	/* Reserve a global rssd index for the device name. */
	rv = ida_alloc(&rssd_index_ida, GFP_KERNEL);
	if (rv < 0)
		goto ida_get_error;
	index = rv;

	rv = rssd_disk_name_format("rssd",
				index,
				dd->disk->disk_name,
				DISK_NAME_LEN);
	if (rv)
		goto disk_index_error;

	dd->disk->major		= dd->major;
	dd->disk->first_minor	= index * MTIP_MAX_MINORS;
	dd->disk->minors	= MTIP_MAX_MINORS;
	dd->disk->fops		= &mtip_block_ops;
	dd->disk->private_data	= dd;
	dd->index		= index;

	mtip_hw_debugfs_init(dd);

	/* One hardware queue; one tag reserved for internal commands. */
	memset(&dd->tags, 0, sizeof(dd->tags));
	dd->tags.ops = &mtip_mq_ops;
	dd->tags.nr_hw_queues = 1;
	dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
	dd->tags.reserved_tags = 1;
	dd->tags.cmd_size = sizeof(struct mtip_cmd);
	dd->tags.numa_node = dd->numa_node;
	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
	dd->tags.driver_data = dd;
	dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;

	rv = blk_mq_alloc_tag_set(&dd->tags);
	if (rv) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate request queue\n");
		goto block_queue_alloc_tag_error;
	}

	/* Allocate the request queue. */
	dd->queue = blk_mq_init_queue(&dd->tags);
	if (IS_ERR(dd->queue)) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate request queue\n");
		rv = -ENOMEM;
		goto block_queue_alloc_init_error;
	}

	dd->disk->queue		= dd->queue;
	dd->queue->queuedata	= dd;

skip_create_disk:
	/* Initialize the protocol layer. */
	wait_for_rebuild = mtip_hw_get_identify(dd);
	if (wait_for_rebuild < 0) {
		dev_err(&dd->pdev->dev,
			"Protocol layer initialization failed\n");
		rv = -EINVAL;
		goto init_hw_cmds_error;
	}

	/*
	 * if rebuild pending, start the service thread, and delay the block
	 * queue creation and device_add_disk()
	 */
	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
		goto start_service_thread;

	/* Set device limits. */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
	blk_queue_physical_block_size(dd->queue, 4096);
	blk_queue_max_hw_sectors(dd->queue, 0xffff);
	blk_queue_max_segment_size(dd->queue, 0x400000);
	blk_queue_io_min(dd->queue, 4096);

	/* Signal trim support */
	if (dd->trim_supp == true) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, dd->queue);
		dd->queue->limits.discard_granularity = 4096;
		blk_queue_max_discard_sectors(dd->queue,
			MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
	}

	/* Set the capacity of the device in 512 byte sectors. */
	if (!(mtip_hw_get_capacity(dd, &capacity))) {
		dev_warn(&dd->pdev->dev,
			"Could not read drive capacity\n");
		rv = -EIO;
		goto read_capacity_error;
	}
	set_capacity(dd->disk, capacity);

	/* Enable the block device and add it to /dev */
	device_add_disk(&dd->pdev->dev, dd->disk, NULL);

	dd->bdev = bdget_disk(dd->disk, 0);
	/*
	 * Now that the disk is active, initialize any sysfs attributes
	 * managed by the protocol layer.
	 */
	kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
	if (kobj) {
		mtip_hw_sysfs_init(dd, kobj);
		kobject_put(kobj);
	}

	if (dd->mtip_svc_handler) {
		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
		return rv; /* service thread created for handling rebuild */
	}

start_service_thread:
	dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
						dd, dd->numa_node,
						"mtip_svc_thd_%02d", index);

	if (IS_ERR(dd->mtip_svc_handler)) {
		dev_err(&dd->pdev->dev, "service thread failed to start\n");
		dd->mtip_svc_handler = NULL;
		rv = -EFAULT;
		goto kthread_run_error;
	}
	wake_up_process(dd->mtip_svc_handler);
	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
		rv = wait_for_rebuild;

	return rv;

	/* Error paths unwind in strict reverse order of construction. */
kthread_run_error:
	bdput(dd->bdev);
	dd->bdev = NULL;

	/* Delete our gendisk. This also removes the device from /dev */
	del_gendisk(dd->disk);

read_capacity_error:
init_hw_cmds_error:
	blk_cleanup_queue(dd->queue);
block_queue_alloc_init_error:
	blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_tag_error:
	mtip_hw_debugfs_exit(dd);
disk_index_error:
	ida_free(&rssd_index_ida, index);

ida_get_error:
	put_disk(dd->disk);

alloc_disk_error:
	mtip_hw_exit(dd); /* De-initialize the protocol layer. */

protocol_init_error:
	return rv;
}

/*
 * blk_mq_tagset_busy_iter() callback used on device removal: fail every
 * in-flight request with an I/O error.
 */
static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
{
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(rq);
	return true;
}

/*
 * Block layer deinitialization function.
 *
 * Called by the PCI layer as each P320 device is removed.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_block_remove(struct driver_data *dd)
{
	struct kobject *kobj;

	mtip_hw_debugfs_exit(dd);

	/* Stop the service thread before tearing anything else down. */
	if (dd->mtip_svc_handler) {
		set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
		wake_up_interruptible(&dd->port->svc_wait);
		kthread_stop(dd->mtip_svc_handler);
	}

	/* Clean up the sysfs attributes, if created */
	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
		kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
		if (kobj) {
			mtip_hw_sysfs_exit(dd, kobj);
			kobject_put(kobj);
		}
	}

	if (!dd->sr) {
		/*
		 * Explicitly wait here for IOs to quiesce,
		 * as mtip_standby_drive usually won't wait for IOs.
		 */
		if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
			mtip_standby_drive(dd);
	}
	else
		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
						dd->disk->disk_name);

	/* Fail all outstanding requests while the queue is quiesced. */
	blk_freeze_queue_start(dd->queue);
	blk_mq_quiesce_queue(dd->queue);
	blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
	blk_mq_unquiesce_queue(dd->queue);

	/*
	 * Delete our gendisk structure.
This also removes the device
	 * from /dev
	 */
	if (dd->bdev) {
		bdput(dd->bdev);
		dd->bdev = NULL;
	}
	if (dd->disk) {
		/* Only disks that completed init were ever added to /dev. */
		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
			del_gendisk(dd->disk);
		if (dd->disk->queue) {
			blk_cleanup_queue(dd->queue);
			blk_mq_free_tag_set(&dd->tags);
			dd->queue = NULL;
		}
		put_disk(dd->disk);
	}
	dd->disk = NULL;

	/* Return the rssd name index reserved in mtip_block_initialize(). */
	ida_free(&rssd_index_ida, dd->index);

	/* De-initialize the protocol layer. */
	mtip_hw_exit(dd);

	return 0;
}

/*
 * Function called by the PCI layer when just before the
 * machine shuts down.
 *
 * If a protocol layer shutdown function is present it will be called
 * by this function.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_block_shutdown(struct driver_data *dd)
{
	mtip_hw_shutdown(dd);

	/* Delete our gendisk structure, and cleanup the blk queue. */
	if (dd->disk) {
		dev_info(&dd->pdev->dev,
			"Shutting down %s ...\n", dd->disk->disk_name);

		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
			del_gendisk(dd->disk);
		if (dd->disk->queue) {
			blk_cleanup_queue(dd->queue);
			blk_mq_free_tag_set(&dd->tags);
		}
		put_disk(dd->disk);
		dd->disk = NULL;
		dd->queue = NULL;
	}

	ida_free(&rssd_index_ida, dd->index);
	return 0;
}

/* Suspend wrapper: logs and delegates to the protocol layer. */
static int mtip_block_suspend(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev,
		"Suspending %s ...\n", dd->disk->disk_name);
	mtip_hw_suspend(dd);
	return 0;
}

/* Resume wrapper: logs and delegates to the protocol layer. */
static int mtip_block_resume(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev, "Resuming %s ...\n",
		dd->disk->disk_name);
	mtip_hw_resume(dd);
	return 0;
}

/* Release one usage count on a CPU taken by get_least_used_cpu_on_node(). */
static void drop_cpu(int cpu)
{
	cpu_use[cpu]--;
}

/*
 * Pick the CPU on @node with the lowest cpu_use[] count and charge one
 * usage to it. Used to spread worker/ISR bindings across a NUMA node.
 */
static int get_least_used_cpu_on_node(int node)
{
	int cpu, least_used_cpu, least_cnt;
	const struct cpumask *node_mask;

	node_mask = cpumask_of_node(node);
	least_used_cpu = cpumask_first(node_mask);
	least_cnt = cpu_use[least_used_cpu];
	cpu = least_used_cpu;

	for_each_cpu(cpu, node_mask) {
		if (cpu_use[cpu] < least_cnt) {
			least_used_cpu = cpu;
			least_cnt = cpu_use[cpu];
		}
	}
	cpu_use[least_used_cpu]++;
	return least_used_cpu;
}

/* Helper for selecting a node in round robin mode */
static inline int mtip_get_next_rr_node(void)
{
	/* NOTE(review): static state without locking — assumes probes are
	 * serialized by the PCI core; confirm if that ever changes. */
	static int next_node = -1;

	if (next_node == -1) {
		next_node = first_online_node;
		return next_node;
	}

	next_node = next_online_node(next_node);
	if (next_node == MAX_NUMNODES)
		next_node = first_online_node;
	return next_node;
}

static DEFINE_HANDLER(0);
static DEFINE_HANDLER(1);
static DEFINE_HANDLER(2);
static DEFINE_HANDLER(3);
static
DEFINE_HANDLER(4); 4039 static DEFINE_HANDLER(5); 4040 static DEFINE_HANDLER(6); 4041 static DEFINE_HANDLER(7); 4042 4043 static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev) 4044 { 4045 int pos; 4046 unsigned short pcie_dev_ctrl; 4047 4048 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 4049 if (pos) { 4050 pci_read_config_word(pdev, 4051 pos + PCI_EXP_DEVCTL, 4052 &pcie_dev_ctrl); 4053 if (pcie_dev_ctrl & (1 << 11) || 4054 pcie_dev_ctrl & (1 << 4)) { 4055 dev_info(&dd->pdev->dev, 4056 "Disabling ERO/No-Snoop on bridge device %04x:%04x\n", 4057 pdev->vendor, pdev->device); 4058 pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN | 4059 PCI_EXP_DEVCTL_RELAX_EN); 4060 pci_write_config_word(pdev, 4061 pos + PCI_EXP_DEVCTL, 4062 pcie_dev_ctrl); 4063 } 4064 } 4065 } 4066 4067 static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev) 4068 { 4069 /* 4070 * This workaround is specific to AMD/ATI chipset with a PCI upstream 4071 * device with device id 0x5aXX 4072 */ 4073 if (pdev->bus && pdev->bus->self) { 4074 if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI && 4075 ((pdev->bus->self->device & 0xff00) == 0x5a00)) { 4076 mtip_disable_link_opts(dd, pdev->bus->self); 4077 } else { 4078 /* Check further up the topology */ 4079 struct pci_dev *parent_dev = pdev->bus->self; 4080 if (parent_dev->bus && 4081 parent_dev->bus->parent && 4082 parent_dev->bus->parent->self && 4083 parent_dev->bus->parent->self->vendor == 4084 PCI_VENDOR_ID_ATI && 4085 (parent_dev->bus->parent->self->device & 4086 0xff00) == 0x5a00) { 4087 mtip_disable_link_opts(dd, 4088 parent_dev->bus->parent->self); 4089 } 4090 } 4091 } 4092 } 4093 4094 /* 4095 * Called for each supported PCI device detected. 4096 * 4097 * This function allocates the private data structure, enables the 4098 * PCI device and then calls the block layer initialization function. 4099 * 4100 * return value 4101 * 0 on success else an error code. 
4102 */ 4103 static int mtip_pci_probe(struct pci_dev *pdev, 4104 const struct pci_device_id *ent) 4105 { 4106 int rv = 0; 4107 struct driver_data *dd = NULL; 4108 char cpu_list[256]; 4109 const struct cpumask *node_mask; 4110 int cpu, i = 0, j = 0; 4111 int my_node = NUMA_NO_NODE; 4112 unsigned long flags; 4113 4114 /* Allocate memory for this devices private data. */ 4115 my_node = pcibus_to_node(pdev->bus); 4116 if (my_node != NUMA_NO_NODE) { 4117 if (!node_online(my_node)) 4118 my_node = mtip_get_next_rr_node(); 4119 } else { 4120 dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n"); 4121 my_node = mtip_get_next_rr_node(); 4122 } 4123 dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n", 4124 my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev), 4125 cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id()); 4126 4127 dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node); 4128 if (dd == NULL) { 4129 dev_err(&pdev->dev, 4130 "Unable to allocate memory for driver data\n"); 4131 return -ENOMEM; 4132 } 4133 4134 /* Attach the private data to this PCI device. */ 4135 pci_set_drvdata(pdev, dd); 4136 4137 rv = pcim_enable_device(pdev); 4138 if (rv < 0) { 4139 dev_err(&pdev->dev, "Unable to enable device\n"); 4140 goto iomap_err; 4141 } 4142 4143 /* Map BAR5 to memory. */ 4144 rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME); 4145 if (rv < 0) { 4146 dev_err(&pdev->dev, "Unable to map regions\n"); 4147 goto iomap_err; 4148 } 4149 4150 rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 4151 if (rv) { 4152 dev_warn(&pdev->dev, "64-bit DMA enable failed\n"); 4153 goto setmask_err; 4154 } 4155 4156 /* Copy the info we may need later into the private data structure. 
*/ 4157 dd->major = mtip_major; 4158 dd->instance = instance; 4159 dd->pdev = pdev; 4160 dd->numa_node = my_node; 4161 4162 INIT_LIST_HEAD(&dd->online_list); 4163 INIT_LIST_HEAD(&dd->remove_list); 4164 4165 memset(dd->workq_name, 0, 32); 4166 snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); 4167 4168 dd->isr_workq = create_workqueue(dd->workq_name); 4169 if (!dd->isr_workq) { 4170 dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance); 4171 rv = -ENOMEM; 4172 goto setmask_err; 4173 } 4174 4175 memset(cpu_list, 0, sizeof(cpu_list)); 4176 4177 node_mask = cpumask_of_node(dd->numa_node); 4178 if (!cpumask_empty(node_mask)) { 4179 for_each_cpu(cpu, node_mask) 4180 { 4181 snprintf(&cpu_list[j], 256 - j, "%d ", cpu); 4182 j = strlen(cpu_list); 4183 } 4184 4185 dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n", 4186 dd->numa_node, 4187 topology_physical_package_id(cpumask_first(node_mask)), 4188 nr_cpus_node(dd->numa_node), 4189 cpu_list); 4190 } else 4191 dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n"); 4192 4193 dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node); 4194 dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n", 4195 cpu_to_node(dd->isr_binding), dd->isr_binding); 4196 4197 /* first worker context always runs in ISR */ 4198 dd->work[0].cpu_binding = dd->isr_binding; 4199 dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node); 4200 dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node); 4201 dd->work[3].cpu_binding = dd->work[0].cpu_binding; 4202 dd->work[4].cpu_binding = dd->work[1].cpu_binding; 4203 dd->work[5].cpu_binding = dd->work[2].cpu_binding; 4204 dd->work[6].cpu_binding = dd->work[2].cpu_binding; 4205 dd->work[7].cpu_binding = dd->work[1].cpu_binding; 4206 4207 /* Log the bindings */ 4208 for_each_present_cpu(cpu) { 4209 memset(cpu_list, 0, sizeof(cpu_list)); 4210 for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) { 4211 if (dd->work[i].cpu_binding == cpu) { 4212 
snprintf(&cpu_list[j], 256 - j, "%d ", i); 4213 j = strlen(cpu_list); 4214 } 4215 } 4216 if (j) 4217 dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list); 4218 } 4219 4220 INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0); 4221 INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1); 4222 INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2); 4223 INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3); 4224 INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4); 4225 INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5); 4226 INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6); 4227 INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7); 4228 4229 pci_set_master(pdev); 4230 rv = pci_enable_msi(pdev); 4231 if (rv) { 4232 dev_warn(&pdev->dev, 4233 "Unable to enable MSI interrupt.\n"); 4234 goto msi_initialize_err; 4235 } 4236 4237 mtip_fix_ero_nosnoop(dd, pdev); 4238 4239 /* Initialize the block layer. */ 4240 rv = mtip_block_initialize(dd); 4241 if (rv < 0) { 4242 dev_err(&pdev->dev, 4243 "Unable to initialize block layer\n"); 4244 goto block_initialize_err; 4245 } 4246 4247 /* 4248 * Increment the instance count so that each device has a unique 4249 * instance number. 
4250 */ 4251 instance++; 4252 if (rv != MTIP_FTL_REBUILD_MAGIC) 4253 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); 4254 else 4255 rv = 0; /* device in rebuild state, return 0 from probe */ 4256 4257 /* Add to online list even if in ftl rebuild */ 4258 spin_lock_irqsave(&dev_lock, flags); 4259 list_add(&dd->online_list, &online_list); 4260 spin_unlock_irqrestore(&dev_lock, flags); 4261 4262 goto done; 4263 4264 block_initialize_err: 4265 pci_disable_msi(pdev); 4266 4267 msi_initialize_err: 4268 if (dd->isr_workq) { 4269 flush_workqueue(dd->isr_workq); 4270 destroy_workqueue(dd->isr_workq); 4271 drop_cpu(dd->work[0].cpu_binding); 4272 drop_cpu(dd->work[1].cpu_binding); 4273 drop_cpu(dd->work[2].cpu_binding); 4274 } 4275 setmask_err: 4276 pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); 4277 4278 iomap_err: 4279 kfree(dd); 4280 pci_set_drvdata(pdev, NULL); 4281 return rv; 4282 done: 4283 return rv; 4284 } 4285 4286 /* 4287 * Called for each probed device when the device is removed or the 4288 * driver is unloaded. 
 *
 * return value
 *	None
 */
static void mtip_pci_remove(struct pci_dev *pdev)
{
	struct driver_data *dd = pci_get_drvdata(pdev);
	unsigned long flags, to;

	/* Block new opens before tearing anything down. */
	set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);

	spin_lock_irqsave(&dev_lock, flags);
	list_del_init(&dd->online_list);
	list_add(&dd->remove_list, &removing_list);
	spin_unlock_irqrestore(&dev_lock, flags);

	/* Detect surprise removal (sets dd->sr) and drain the ISR. */
	mtip_check_surprise_removal(pdev);
	synchronize_irq(dd->pdev->irq);

	/* Spin until workers are done */
	to = jiffies + msecs_to_jiffies(4000);
	do {
		msleep(20);
	} while (atomic_read(&dd->irq_workers_active) != 0 &&
		time_before(jiffies, to));

	/* Only flush to a still-present device. */
	if (!dd->sr)
		fsync_bdev(dd->bdev);

	if (atomic_read(&dd->irq_workers_active) != 0) {
		dev_warn(&dd->pdev->dev,
			"Completion workers still active!\n");
	}

	blk_set_queue_dying(dd->queue);
	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);

	/* Clean up the block layer. */
	mtip_block_remove(dd);

	if (dd->isr_workq) {
		flush_workqueue(dd->isr_workq);
		destroy_workqueue(dd->isr_workq);
		drop_cpu(dd->work[0].cpu_binding);
		drop_cpu(dd->work[1].cpu_binding);
		drop_cpu(dd->work[2].cpu_binding);
	}

	pci_disable_msi(pdev);

	spin_lock_irqsave(&dev_lock, flags);
	list_del_init(&dd->remove_list);
	spin_unlock_irqrestore(&dev_lock, flags);

	kfree(dd);

	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
	pci_set_drvdata(pdev, NULL);
}

/*
 * Called for each probed device when the device is suspended.
4351 * 4352 * return value 4353 * 0 Success 4354 * <0 Error 4355 */ 4356 static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) 4357 { 4358 int rv = 0; 4359 struct driver_data *dd = pci_get_drvdata(pdev); 4360 4361 if (!dd) { 4362 dev_err(&pdev->dev, 4363 "Driver private datastructure is NULL\n"); 4364 return -EFAULT; 4365 } 4366 4367 set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag); 4368 4369 /* Disable ports & interrupts then send standby immediate */ 4370 rv = mtip_block_suspend(dd); 4371 if (rv < 0) { 4372 dev_err(&pdev->dev, 4373 "Failed to suspend controller\n"); 4374 return rv; 4375 } 4376 4377 /* 4378 * Save the pci config space to pdev structure & 4379 * disable the device 4380 */ 4381 pci_save_state(pdev); 4382 pci_disable_device(pdev); 4383 4384 /* Move to Low power state*/ 4385 pci_set_power_state(pdev, PCI_D3hot); 4386 4387 return rv; 4388 } 4389 4390 /* 4391 * Called for each probed device when the device is resumed. 4392 * 4393 * return value 4394 * 0 Success 4395 * <0 Error 4396 */ 4397 static int mtip_pci_resume(struct pci_dev *pdev) 4398 { 4399 int rv = 0; 4400 struct driver_data *dd; 4401 4402 dd = pci_get_drvdata(pdev); 4403 if (!dd) { 4404 dev_err(&pdev->dev, 4405 "Driver private datastructure is NULL\n"); 4406 return -EFAULT; 4407 } 4408 4409 /* Move the device to active State */ 4410 pci_set_power_state(pdev, PCI_D0); 4411 4412 /* Restore PCI configuration space */ 4413 pci_restore_state(pdev); 4414 4415 /* Enable the PCI device*/ 4416 rv = pcim_enable_device(pdev); 4417 if (rv < 0) { 4418 dev_err(&pdev->dev, 4419 "Failed to enable card during resume\n"); 4420 goto err; 4421 } 4422 pci_set_master(pdev); 4423 4424 /* 4425 * Calls hbaReset, initPort, & startPort function 4426 * then enables interrupts 4427 */ 4428 rv = mtip_block_resume(dd); 4429 if (rv < 0) 4430 dev_err(&pdev->dev, "Unable to resume\n"); 4431 4432 err: 4433 clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag); 4434 4435 return rv; 4436 } 4437 4438 /* 4439 * Shutdown routine 
4440 * 4441 * return value 4442 * None 4443 */ 4444 static void mtip_pci_shutdown(struct pci_dev *pdev) 4445 { 4446 struct driver_data *dd = pci_get_drvdata(pdev); 4447 if (dd) 4448 mtip_block_shutdown(dd); 4449 } 4450 4451 /* Table of device ids supported by this driver. */ 4452 static const struct pci_device_id mtip_pci_tbl[] = { 4453 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) }, 4454 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) }, 4455 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) }, 4456 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) }, 4457 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) }, 4458 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) }, 4459 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) }, 4460 { 0 } 4461 }; 4462 4463 /* Structure that describes the PCI driver functions. */ 4464 static struct pci_driver mtip_pci_driver = { 4465 .name = MTIP_DRV_NAME, 4466 .id_table = mtip_pci_tbl, 4467 .probe = mtip_pci_probe, 4468 .remove = mtip_pci_remove, 4469 .suspend = mtip_pci_suspend, 4470 .resume = mtip_pci_resume, 4471 .shutdown = mtip_pci_shutdown, 4472 }; 4473 4474 MODULE_DEVICE_TABLE(pci, mtip_pci_tbl); 4475 4476 /* 4477 * Module initialization function. 4478 * 4479 * Called once when the module is loaded. This function allocates a major 4480 * block device number to the Cyclone devices and registers the PCI layer 4481 * of the driver. 4482 * 4483 * Return value 4484 * 0 on success else error code. 4485 */ 4486 static int __init mtip_init(void) 4487 { 4488 int error; 4489 4490 pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); 4491 4492 spin_lock_init(&dev_lock); 4493 4494 INIT_LIST_HEAD(&online_list); 4495 INIT_LIST_HEAD(&removing_list); 4496 4497 /* Allocate a major block device number to use with this driver. 
*/ 4498 error = register_blkdev(0, MTIP_DRV_NAME); 4499 if (error <= 0) { 4500 pr_err("Unable to register block device (%d)\n", 4501 error); 4502 return -EBUSY; 4503 } 4504 mtip_major = error; 4505 4506 dfs_parent = debugfs_create_dir("rssd", NULL); 4507 if (IS_ERR_OR_NULL(dfs_parent)) { 4508 pr_warn("Error creating debugfs parent\n"); 4509 dfs_parent = NULL; 4510 } 4511 if (dfs_parent) { 4512 dfs_device_status = debugfs_create_file("device_status", 4513 0444, dfs_parent, NULL, 4514 &mtip_device_status_fops); 4515 if (IS_ERR_OR_NULL(dfs_device_status)) { 4516 pr_err("Error creating device_status node\n"); 4517 dfs_device_status = NULL; 4518 } 4519 } 4520 4521 /* Register our PCI operations. */ 4522 error = pci_register_driver(&mtip_pci_driver); 4523 if (error) { 4524 debugfs_remove(dfs_parent); 4525 unregister_blkdev(mtip_major, MTIP_DRV_NAME); 4526 } 4527 4528 return error; 4529 } 4530 4531 /* 4532 * Module de-initialization function. 4533 * 4534 * Called once when the module is unloaded. This function deallocates 4535 * the major block device number allocated by mtip_init() and 4536 * unregisters the PCI layer of the driver. 4537 * 4538 * Return value 4539 * none 4540 */ 4541 static void __exit mtip_exit(void) 4542 { 4543 /* Release the allocated major block device number. */ 4544 unregister_blkdev(mtip_major, MTIP_DRV_NAME); 4545 4546 /* Unregister the PCI driver. */ 4547 pci_unregister_driver(&mtip_pci_driver); 4548 4549 debugfs_remove_recursive(dfs_parent); 4550 } 4551 4552 MODULE_AUTHOR("Micron Technology, Inc"); 4553 MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver"); 4554 MODULE_LICENSE("GPL"); 4555 MODULE_VERSION(MTIP_DRV_VERSION); 4556 4557 module_init(mtip_init); 4558 module_exit(mtip_exit); 4559