/*-
 * Implementation of SCSI Direct Access Peripheral driver for CAM.
 *
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_message.h>

#ifndef _KERNEL
#include <cam/scsi/scsi_da.h>
#endif /* !_KERNEL */

#ifdef _KERNEL
typedef enum {
	DA_STATE_PROBE_RC,
	DA_STATE_PROBE_RC16,
	DA_STATE_PROBE_LBP,
	DA_STATE_PROBE_BLK_LIMITS,
	DA_STATE_PROBE_BDC,
	DA_STATE_PROBE_ATA,
	DA_STATE_NORMAL
} da_state;

typedef enum {
	DA_FLAG_PACK_INVALID	= 0x001,
	DA_FLAG_NEW_PACK	= 0x002,
	DA_FLAG_PACK_LOCKED	= 0x004,
	DA_FLAG_PACK_REMOVABLE	= 0x008,
	DA_FLAG_NEED_OTAG	= 0x020,
	DA_FLAG_WAS_OTAG	= 0x040,
	DA_FLAG_RETRY_UA	= 0x080,
	DA_FLAG_OPEN		= 0x100,
	DA_FLAG_SCTX_INIT	= 0x200,
	DA_FLAG_CAN_RC16	= 0x400,
	DA_FLAG_PROBED		= 0x800,
	DA_FLAG_DIRTY		= 0x1000,
	DA_FLAG_ANNOUNCED	= 0x2000
} da_flags;

typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02,
	DA_Q_NO_PREVENT		= 0x04,
	DA_Q_4K			= 0x08,
	DA_Q_NO_RC16		= 0x10,
	DA_Q_NO_UNMAP		= 0x20,
	DA_Q_RETRY_BUSY		= 0x40
} da_quirks;

#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"

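/*
 * CCB states kept in the ccb_state private field: the low nibble
 * (DA_CCB_TYPE_MASK) identifies what kind of command the CCB carries and
 * DA_CCB_RETRY_UA marks a command being retried after a unit attention.
 */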
typedef enum {
	DA_CCB_PROBE_RC		= 0x01,
	DA_CCB_PROBE_RC16	= 0x02,
	DA_CCB_PROBE_LBP	= 0x03,
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
	DA_CCB_PROBE_BDC	= 0x05,
	DA_CCB_PROBE_ATA	= 0x06,
	DA_CCB_BUFFER_IO	= 0x07,
	DA_CCB_DUMP		= 0x0A,
	DA_CCB_DELETE		= 0x0B,
	DA_CCB_TUR		= 0x0C,
	DA_CCB_TYPE_MASK	= 0x0F,
	DA_CCB_RETRY_UA		= 0x10
} da_ccb_state;

/*
 * Order here is important for method choice.
 *
 * We prefer ATA_TRIM as tests run against a SandForce 2281 SSD attached to
 * an LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker
 * deletes using ATA_TRIM than the corresponding UNMAP results for a real
 * world mysql import taking 5 minutes.
 */
typedef enum {
	DA_DELETE_NONE,
	DA_DELETE_DISABLE,
	DA_DELETE_ATA_TRIM,
	DA_DELETE_UNMAP,
	DA_DELETE_WS16,
	DA_DELETE_WS10,
	DA_DELETE_ZERO,
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
	DA_DELETE_MAX = DA_DELETE_ZERO
} da_delete_methods;

typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
			       struct bio *bp);
static da_delete_func_t da_delete_trim;
static da_delete_func_t da_delete_unmap;
static da_delete_func_t da_delete_ws;

static const void * da_delete_functions[] = {
	NULL,
	NULL,
	da_delete_trim,
	da_delete_unmap,
	da_delete_ws,
	da_delete_ws,
	da_delete_ws
};

static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int32_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number sectors */
	u_int     stripesize;
	u_int     stripeoffset;
};

#define UNMAP_RANGE_MAX		0xffffffff
#define UNMAP_HEAD_SIZE		8
#define UNMAP_RANGE_SIZE	16
#define UNMAP_MAX_RANGES	2048	/* Protocol Max is 4095 */
#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
				 UNMAP_HEAD_SIZE)

#define WS10_MAX_BLKS		0xffff
#define WS16_MAX_BLKS		0xffffffff
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)

struct da_softc {
	struct bio_queue_head bio_queue;
	struct bio_queue_head delete_queue;
	struct bio_queue_head delete_run_queue;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	int	 tur;			/* TEST UNIT READY should be sent */
	int	 refcount;		/* Active xpt_action() calls */
	da_state state;
	da_flags flags;
	da_quirks quirks;
	int	 sort_io_queue;
	int	 minimum_cmd_size;
	int	 error_inject;
	int	 trim_max_ranges;
	int	 delete_running;
	int	 delete_available;	/* Delete methods possibly available */
	u_int	 maxio;
	uint32_t unmap_max_ranges;
	uint32_t unmap_max_lba;		/* Max LBAs in UNMAP req */
	uint64_t ws_max_blks;
	da_delete_methods delete_method;
	da_delete_func_t *delete_func;
	struct disk_params params;
	struct disk *disk;
	union ccb saved_ccb;
	struct task sysctl_task;
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
	struct callout sendordered_c;
	uint64_t wwpn;
	uint8_t	 unmap_buf[UNMAP_BUF_SIZE];
	struct scsi_read_capacity_data_long rcaplong;
	struct callout mediapoll_c;
};
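
/*
 * Set or clear the bit for a delete method in the softc's delete_available
 * bitmask (one bit per da_delete_methods value).
 */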
#define dadeleteflag(softc, delete_method, enable)			\
	if (enable) {							\
		softc->delete_available |= (1 << delete_method);	\
	} else {							\
		softc->delete_available &= ~(1 << delete_method);	\
	}

struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	da_quirks quirks;
};

static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";

static struct da_quirk_entry da_quirk_table[] =
{
	/* SPI, FC devices */
	{
		/*
		 * Fujitsu M2513A MO drives.
		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
		 * Reported by: W.Scholten <whs@xs4all.nl>
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/* See above. */
		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This particular Fujitsu drive doesn't like the
		 * synchronize cache command.
		 * Reported by: Tom Jackson <toj@gorilla.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
		 * in NetBSD PR kern/6027, August 24, 1998.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
		 * (PR 8882).
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: walter@pelissero.de
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't work correctly with 6 byte reads/writes.
		 * Returns illegal request, and points to byte 9 of the
		 * 6-byte CDB.
		 * Reported by: Adam McDougall <bsdx@spawnet.com>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/* See above. */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: walter@pelissero.de
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * The CISS RAID controllers do not support SYNC_CACHE
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * The STEC SSDs sometimes hang on UNMAP.
361 */ 362 {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, 363 /*quirks*/ DA_Q_NO_UNMAP 364 }, 365 { 366 /* 367 * VMware returns BUSY status when storage has transient 368 * connectivity problems, so better wait. 369 */ 370 {T_DIRECT, SIP_MEDIA_FIXED, "VMware", "Virtual disk", "*"}, 371 /*quirks*/ DA_Q_RETRY_BUSY 372 }, 373 /* USB mass storage devices supported by umass(4) */ 374 { 375 /* 376 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player 377 * PR: kern/51675 378 */ 379 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, 380 /*quirks*/ DA_Q_NO_SYNC_CACHE 381 }, 382 { 383 /* 384 * Power Quotient Int. (PQI) USB flash key 385 * PR: kern/53067 386 */ 387 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", 388 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 389 }, 390 { 391 /* 392 * Creative Nomad MUVO mp3 player (USB) 393 * PR: kern/53094 394 */ 395 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, 396 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 397 }, 398 { 399 /* 400 * Jungsoft NEXDISK USB flash key 401 * PR: kern/54737 402 */ 403 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, 404 /*quirks*/ DA_Q_NO_SYNC_CACHE 405 }, 406 { 407 /* 408 * FreeDik USB Mini Data Drive 409 * PR: kern/54786 410 */ 411 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", 412 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 413 }, 414 { 415 /* 416 * Sigmatel USB Flash MP3 Player 417 * PR: kern/57046 418 */ 419 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, 420 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 421 }, 422 { 423 /* 424 * Neuros USB Digital Audio Computer 425 * PR: kern/63645 426 */ 427 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.", 428 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 429 }, 430 { 431 /* 432 * SEAGRAND NP-900 MP3 Player 433 * PR: kern/64563 434 */ 435 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, 436 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 437 }, 438 { 439 /* 440 * iRiver iFP MP3 player (with UMS Firmware) 441 * PR: kern/54881, i386/63941, kern/66124 442 */ 443 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, 444 /*quirks*/ DA_Q_NO_SYNC_CACHE 445 }, 446 { 447 /* 448 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 449 * PR: kern/70158 450 */ 451 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, 452 /*quirks*/ DA_Q_NO_SYNC_CACHE 453 }, 454 { 455 /* 456 * ZICPlay USB MP3 Player with FM 457 * PR: kern/75057 458 */ 459 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, 460 /*quirks*/ DA_Q_NO_SYNC_CACHE 461 }, 462 { 463 /* 464 * TEAC USB floppy mechanisms 465 */ 466 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, 467 /*quirks*/ DA_Q_NO_SYNC_CACHE 468 }, 469 { 470 /* 471 * Kingston DataTraveler II+ USB Pen-Drive. 472 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org> 473 */ 474 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", 475 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 476 }, 477 { 478 /* 479 * USB DISK Pro PMAP 480 * Reported by: jhs 481 * PR: usb/96381 482 */ 483 {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, 484 /*quirks*/ DA_Q_NO_SYNC_CACHE 485 }, 486 { 487 /* 488 * Motorola E398 Mobile Phone (TransFlash memory card). 489 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl> 490 * PR: usb/89889 491 */ 492 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", 493 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 494 }, 495 { 496 /* 497 * Qware BeatZkey! 
		 * PR: usb/79164
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Time DPA20B 1GB MP3 Player
		 * PR: usb/81846
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Samsung USB key 128Mb
		 * PR: usb/90081
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Kingston DataTraveler 2.0 USB Flash memory.
		 * PR: usb/89196
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Creative MUVO Slim mp3 player (USB)
		 * PR: usb/86131
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
	},
	{
		/*
		 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
		 * PR: usb/80487
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * SanDisk Micro Cruzer 128MB
		 * PR: usb/75970
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * TOSHIBA TransMemory USB sticks
		 * PR: kern/94660
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * PNY USB 3.0 Flash Drives
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
	},
	{
		/*
		 * PNY USB Flash keys
		 * PR: usb/75578, usb/72344, usb/65436
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Genesys 6-in-1 Card Reader
		 * PR: usb/94647
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Rekam Digital CAMERA
		 * PR: usb/98713
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * iRiver H10 MP3 player
		 * PR: usb/102547
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * iRiver U10 MP3 player
		 * PR: usb/92306
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * X-Micro Flash Disk
		 * PR: usb/96901
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
		 * PR: usb/96546
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
		 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Denver MP3 player
		 * PR: usb/107101
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Philips USB Key Audio KEY013
		 * PR: usb/68412
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
	},
	{
		/*
		 * JNC MP3 Player
		 * PR: usb/94439
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * SAMSUNG MP0402H
		 * PR: usb/108427
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, 652 /*quirks*/ DA_Q_NO_SYNC_CACHE 653 }, 654 { 655 /* 656 * I/O Magic USB flash - Giga Bank 657 * PR: usb/108810 658 */ 659 {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, 660 /*quirks*/ DA_Q_NO_SYNC_CACHE 661 }, 662 { 663 /* 664 * JoyFly 128mb USB Flash Drive 665 * PR: 96133 666 */ 667 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", 668 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 669 }, 670 { 671 /* 672 * ChipsBnk usb stick 673 * PR: 103702 674 */ 675 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", 676 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 677 }, 678 { 679 /* 680 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A 681 * PR: 129858 682 */ 683 {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", 684 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 685 }, 686 { 687 /* 688 * Samsung YP-U3 mp3-player 689 * PR: 125398 690 */ 691 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", 692 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 693 }, 694 { 695 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", 696 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 697 }, 698 { 699 /* 700 * Sony Cyber-Shot DSC cameras 701 * PR: usb/137035 702 */ 703 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, 704 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 705 }, 706 { 707 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", 708 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT 709 }, 710 { 711 /* At least several Transcent USB sticks lie on RC16. */ 712 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", 713 "*"}, /*quirks*/ DA_Q_NO_RC16 714 }, 715 /* ATA/SATA devices over SAS/USB/... */ 716 { 717 /* Hitachi Advanced Format (4k) drives */ 718 { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, 719 /*quirks*/DA_Q_4K 720 }, 721 { 722 /* Samsung Advanced Format (4k) drives */ 723 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, 724 /*quirks*/DA_Q_4K 725 }, 726 { 727 /* Samsung Advanced Format (4k) drives */ 728 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, 729 /*quirks*/DA_Q_4K 730 }, 731 { 732 /* Samsung Advanced Format (4k) drives */ 733 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, 734 /*quirks*/DA_Q_4K 735 }, 736 { 737 /* Samsung Advanced Format (4k) drives */ 738 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, 739 /*quirks*/DA_Q_4K 740 }, 741 { 742 /* Seagate Barracuda Green Advanced Format (4k) drives */ 743 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, 744 /*quirks*/DA_Q_4K 745 }, 746 { 747 /* Seagate Barracuda Green Advanced Format (4k) drives */ 748 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, 749 /*quirks*/DA_Q_4K 750 }, 751 { 752 /* Seagate Barracuda Green Advanced Format (4k) drives */ 753 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, 754 /*quirks*/DA_Q_4K 755 }, 756 { 757 /* Seagate Barracuda Green Advanced Format (4k) drives */ 758 { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, 759 /*quirks*/DA_Q_4K 760 }, 761 { 762 /* Seagate Barracuda Green Advanced Format (4k) drives */ 763 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, 764 /*quirks*/DA_Q_4K 765 }, 766 { 767 /* Seagate Barracuda Green Advanced Format (4k) drives */ 768 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, 769 /*quirks*/DA_Q_4K 770 }, 771 { 772 /* Seagate Momentus Advanced Format (4k) drives */ 773 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, 774 /*quirks*/DA_Q_4K 775 }, 776 { 777 /* Seagate Momentus Advanced Format (4k) drives */ 778 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", 
"*" }, 779 /*quirks*/DA_Q_4K 780 }, 781 { 782 /* Seagate Momentus Advanced Format (4k) drives */ 783 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, 784 /*quirks*/DA_Q_4K 785 }, 786 { 787 /* Seagate Momentus Advanced Format (4k) drives */ 788 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, 789 /*quirks*/DA_Q_4K 790 }, 791 { 792 /* Seagate Momentus Advanced Format (4k) drives */ 793 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, 794 /*quirks*/DA_Q_4K 795 }, 796 { 797 /* Seagate Momentus Advanced Format (4k) drives */ 798 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, 799 /*quirks*/DA_Q_4K 800 }, 801 { 802 /* Seagate Momentus Advanced Format (4k) drives */ 803 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, 804 /*quirks*/DA_Q_4K 805 }, 806 { 807 /* Seagate Momentus Advanced Format (4k) drives */ 808 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, 809 /*quirks*/DA_Q_4K 810 }, 811 { 812 /* Seagate Momentus Advanced Format (4k) drives */ 813 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, 814 /*quirks*/DA_Q_4K 815 }, 816 { 817 /* Seagate Momentus Advanced Format (4k) drives */ 818 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, 819 /*quirks*/DA_Q_4K 820 }, 821 { 822 /* Seagate Momentus Advanced Format (4k) drives */ 823 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, 824 /*quirks*/DA_Q_4K 825 }, 826 { 827 /* Seagate Momentus Advanced Format (4k) drives */ 828 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, 829 /*quirks*/DA_Q_4K 830 }, 831 { 832 /* Seagate Momentus Advanced Format (4k) drives */ 833 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, 834 /*quirks*/DA_Q_4K 835 }, 836 { 837 /* Seagate Momentus Advanced Format (4k) drives */ 838 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, 839 /*quirks*/DA_Q_4K 840 }, 841 { 842 /* Seagate Momentus Thin Advanced Format (4k) drives */ 843 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, 844 /*quirks*/DA_Q_4K 845 }, 846 { 847 /* Seagate Momentus Thin Advanced Format (4k) drives */ 848 { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, 849 /*quirks*/DA_Q_4K 850 }, 851 { 852 /* WDC Caviar Green Advanced Format (4k) drives */ 853 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, 854 /*quirks*/DA_Q_4K 855 }, 856 { 857 /* WDC Caviar Green Advanced Format (4k) drives */ 858 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, 859 /*quirks*/DA_Q_4K 860 }, 861 { 862 /* WDC Caviar Green Advanced Format (4k) drives */ 863 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, 864 /*quirks*/DA_Q_4K 865 }, 866 { 867 /* WDC Caviar Green Advanced Format (4k) drives */ 868 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, 869 /*quirks*/DA_Q_4K 870 }, 871 { 872 /* WDC Caviar Green Advanced Format (4k) drives */ 873 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, 874 /*quirks*/DA_Q_4K 875 }, 876 { 877 /* WDC Caviar Green Advanced Format (4k) drives */ 878 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, 879 /*quirks*/DA_Q_4K 880 }, 881 { 882 /* WDC Caviar Green Advanced Format (4k) drives */ 883 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, 884 /*quirks*/DA_Q_4K 885 }, 886 { 887 /* WDC Caviar Green Advanced Format (4k) drives */ 888 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, 889 /*quirks*/DA_Q_4K 890 }, 891 { 892 /* WDC Scorpio Black Advanced Format (4k) drives */ 893 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, 894 /*quirks*/DA_Q_4K 895 }, 896 { 897 
		/* WDC Scorpio Black Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/* WDC Scorpio Black Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/* WDC Scorpio Black Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/* WDC Scorpio Blue Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/* WDC Scorpio Blue Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/* WDC Scorpio Blue Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/* WDC Scorpio Blue Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Olympus FE-210 camera
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * LG UP3S MP3 player
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Laser MP3-2GA13 MP3 player
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * LaCie external 250GB Hard drive designed by Porsche
		 * Submitted by: Ben Stuyts <ben@altesco.nl>
		 * PR: 121474
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	/* SATA SSDs */
	{
		/*
		 * Corsair Force 2 SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Corsair Force 3 SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Corsair Neutron GTX SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Corsair Force GT & GS SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Crucial M4 SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Crucial RealSSD C300 SSDs
		 * 4k optimised
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
		 "*" }, /*quirks*/DA_Q_4K
	},
	{
		/*
		 * Intel 320 Series SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Intel 330 Series SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Intel 510 Series SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Intel 520 Series SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Intel X25-M Series SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Kingston E100 Series SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Kingston HyperX 3k SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Marvell SSDs (entry taken from OpenSolaris)
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * OCZ Agility 2 SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * OCZ Agility 3 SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * OCZ Deneva R Series SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * OCZ Vertex 2 SSDs (inc pro series)
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * OCZ Vertex 3 SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * OCZ Vertex 4 SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Samsung 830 Series SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Samsung 840 SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Samsung 843T Series SSDs
		 * 4k optimised
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7WD*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Samsung 850 SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * Samsung PM853T Series SSDs
		 * 4k optimised
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7GE*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * SuperTalent TeraDrive CT SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
		/*quirks*/DA_Q_4K
	},
	{
		/*
		 * XceedIOPS SATA SSDs
		 * 4k optimised
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
		/*quirks*/DA_Q_4K
	},
};

static	disk_strategy_t	dastrategy;
static	dumper_t	dadump;
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		dasysctlinit(void *context, int pending);
static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
static	int		dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
static	int		dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
static	void		dadeletemethodset(struct da_softc *softc,
					  da_delete_methods delete_method);
static	off_t		dadeletemaxsize(struct da_softc *softc,
					da_delete_methods delete_method);
static	void		dadeletemethodchoose(struct da_softc *softc,
					     da_delete_methods default_method);
static	void		daprobedone(struct cam_periph *periph, union ccb *ccb);

static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static	int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		daprevent(struct cam_periph *periph, int action);
static	void		dareprobe(struct cam_periph *periph);
static	void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
				  uint64_t maxsector,
				  struct scsi_read_capacity_data_long *rcaplong,
				  size_t rcap_size);
static	timeout_t	dasendorderedtag;
static	void		dashutdown(void *arg, int howto);
static	timeout_t	damediapoll;

#ifndef	DA_DEFAULT_POLL_PERIOD
#define	DA_DEFAULT_POLL_PERIOD	3
#endif

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

#ifndef	DA_DEFAULT_RETRY
#define	DA_DEFAULT_RETRY	4
#endif

#ifndef	DA_DEFAULT_SEND_ORDERED
#define	DA_DEFAULT_SEND_ORDERED	1
#endif

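/*
 * Effective sort-I/O-queue setting for a device: a per-device value of -1
 * defers to the global cam_sort_io_queues tunable.
 */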
#define DA_SIO (softc->sort_io_queue >= 0 ? \
    softc->sort_io_queue : cam_sort_io_queues)

static	int da_poll_period = DA_DEFAULT_POLL_PERIOD;
static	int da_retry_count = DA_DEFAULT_RETRY;
static	int da_default_timeout = DA_DEFAULT_TIMEOUT;
static	int da_send_ordered = DA_DEFAULT_SEND_ORDERED;

static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
    "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
    &da_poll_period, 0, "Media polling period in seconds");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
    &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
    &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
    &da_send_ordered, 0, "Send Ordered Tags");

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * passes the "don't send an ordered tag" test, so it takes us two
 * intervals to determine that a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");

static int
daopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return (ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daopen\n"));

	softc = (struct da_softc *)periph->softc;
	dareprobe(periph);

	/* Wait for the disk size update. */
	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
	    "dareprobe", 0);
	if (error != 0)
		xpt_print(periph->path, "unable to retrieve capacity data\n");

	if (periph->flags & CAM_PERIPH_INVALID)
		error = ENXIO;

	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
		daprevent(periph, PR_PREVENT);

	if (error == 0) {
		softc->flags &= ~DA_FLAG_PACK_INVALID;
		softc->flags |= DA_FLAG_OPEN;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);

	if (error != 0)
		cam_periph_release(periph);

	return (error);
}

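/*
 * Close the device: flush the write cache if the media was written to and
 * the device supports it, re-allow medium removal, and drop the open state
 * and reference.
 */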
static int
daclose(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daclose\n"));

	if (cam_periph_hold(periph, PRIBIO) == 0) {

		/* Flush disk cache. */
		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
			    /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG,
			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
			    5 * 60 * 1000);
			error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
			    softc->disk->d_devstat);
			if (error == 0)
				softc->flags &= ~DA_FLAG_DIRTY;
			xpt_release_ccb(ccb);
		}

		/* Allow medium removal. */
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);

		cam_periph_unhold(periph);
	}

	/*
	 * If we've got removable media, mark the blocksize as
	 * unavailable, since it could change when new media is
	 * inserted.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;

	softc->flags &= ~DA_FLAG_OPEN;
	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

static void
daschedule(struct cam_periph *periph)
{
	struct da_softc *softc = (struct da_softc *)periph->softc;

	if (softc->state != DA_STATE_NORMAL)
		return;

	/* Check if we have more work to do. */
	if (bioq_first(&softc->bio_queue) ||
	    (!softc->delete_running && bioq_first(&softc->delete_queue)) ||
	    softc->tur) {
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	}
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a bio and will include
 * only one physical transfer.
 */
static void
dastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	softc = (struct da_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	if (bp->bio_cmd == BIO_DELETE) {
		bioq_disksort(&softc->delete_queue, bp);
	} else if (DA_SIO) {
		bioq_disksort(&softc->bio_queue, bp);
	} else {
		bioq_insert_tail(&softc->bio_queue, bp);
	}

	/*
	 * Schedule ourselves for performing the work.
	 */
	daschedule(periph);
	cam_periph_unlock(periph);

	return;
}

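/*
 * Kernel dump handler: write the supplied memory range to the disk using
 * polled CCBs, and synchronize the cache when called with a zero length
 * once the dump is complete.
 */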
static int
dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int secsize;
	struct ccb_scsiio csio;
	struct disk *dp;
	int error = 0;

	dp = arg;
	periph = dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	if (length > 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/0,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/SCSI_RW_WRITE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				offset / secsize,
				length / secsize,
				/*data_ptr*/(u_int8_t *) virtual,
				/*dxfer_len*/length,
				/*sense_len*/SSD_FULL_SIZE,
				da_default_timeout * 1000);
		xpt_polled_action((union ccb *)&csio);

		error = cam_periph_error((union ccb *)&csio,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		if (error != 0)
			printf("Aborting dump due to I/O error.\n");
		cam_periph_unlock(periph);
		return (error);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/0,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);
		xpt_polled_action((union ccb *)&csio);

		error = cam_periph_error((union ccb *)&csio,
		    0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL);
		if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
	}
	cam_periph_unlock(periph);
	return (error);
}

static int
dagetattr(struct bio *bp)
{
	int ret;
	struct cam_periph *periph;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	cam_periph_lock(periph);
	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
	    periph->path);
	cam_periph_unlock(periph);
	if (ret == 0)
		bp->bio_completed = bp->bio_length;
	return ret;
}

static void
dainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (da_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("dainit: shutdown event registration failed!\n");
	}
}

/*
 * Callback from GEOM, called when it has finished cleaning up its
 * resources.
 */
static void
dadiskgonecb(struct disk *dp)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)dp->d_drv1;
	cam_periph_release(periph);
}

static void
daoninvalidate(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, daasync, periph, periph->path);

	softc->flags |= DA_FLAG_PACK_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);
	bioq_flush(&softc->delete_queue, NULL, ENXIO);

	/*
	 * Tell GEOM that we've gone away, we'll get a callback when it is
	 * done cleaning up its resources.
	 */
	disk_gone(softc->disk);
}

static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_unlock(periph);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print(periph->path, "can't remove sysctl context\n");
	}

	callout_drain(&softc->mediapoll_c);
	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (cgd->protocol != PROTO_SCSI)
			break;

		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		    && SID_TYPE(&cgd->inq_data) != T_RBC
		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		return;
	}
	case AC_ADVINFO_CHANGED:
	{
		uintptr_t buftype;

		buftype = (uintptr_t)arg;
		if (buftype == CDAI_TYPE_PHYS_PATH) {
			struct da_softc *softc;

			softc = periph->softc;
			disk_attr_changed(softc->disk, "GEOM::physpath",
					  M_NOWAIT);
		}
		break;
	}
	case AC_UNIT_ATTENTION:
	{
		union ccb *ccb;
		int error_code, sense_key, asc, ascq;

		softc = (struct da_softc *)periph->softc;
		ccb = (union ccb *)arg;

		/*
		 * Handle all UNIT ATTENTIONs except our own,
		 * as they will be handled by daerror().
		 */
		if (xpt_path_periph(ccb->ccb_h.path) != periph &&
		    scsi_extract_sense_ccb(ccb,
		     &error_code, &sense_key, &asc, &ascq)) {
			if (asc == 0x2A && ascq == 0x09) {
				xpt_print(ccb->ccb_h.path,
				    "Capacity data has changed\n");
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
			} else if (asc == 0x28 && ascq == 0x00) {
				softc->flags &= ~DA_FLAG_PROBED;
				disk_media_changed(softc->disk, M_NOWAIT);
			} else if (asc == 0x3F && ascq == 0x03) {
				xpt_print(ccb->ccb_h.path,
				    "INQUIRY data has changed\n");
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
			}
		}
		cam_periph_async(periph, code, path, arg);
		break;
	}
	case AC_SCSI_AEN:
		softc = (struct da_softc *)periph->softc;
		if (!softc->tur) {
			if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
				softc->tur = 1;
				daschedule(periph);
			}
		}
		/* FALLTHROUGH */
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct ccb_hdr *ccbh;

		softc = (struct da_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		break;
	}
	default:
		break;
	}
	cam_periph_async(periph, code, path, arg);
}

static void
dasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	char tmpstr[80], tmpstr2[80];
	struct ccb_trans_settings cts;

	periph = (struct cam_periph *)context;
	/*
	 * periph was held for us when this task was enqueued
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		cam_periph_release(periph);
		return;
	}

	softc = (struct da_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= DA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("dasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	/*
	 * Now register the sysctl handler, so the user can change the value on
	 * the fly.
	 */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RW,
		softc, 0, dadeletemethodsysctl, "A",
		"BIO_DELETE execution method");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
		softc, 0, dadeletemaxsysctl, "Q",
		"Maximum BIO_DELETE size");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
		"Minimum CDB size");
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "sort_io_queue", CTLFLAG_RW, &softc->sort_io_queue, 0,
		"Sort IO queue to try and optimise disk access patterns");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "error_inject",
		       CTLFLAG_RW,
		       &softc->error_inject,
		       0,
		       "error_inject leaf");

	/*
	 * Add some addressing info.
	 */
	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cam_periph_lock(periph);
	xpt_action((union ccb *)&cts);
	cam_periph_unlock(periph);
	if (cts.ccb_h.status != CAM_REQ_CMP) {
		cam_periph_release(periph);
		return;
	}
	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWPN) {
			softc->wwpn = fc->wwpn;
			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
			    SYSCTL_CHILDREN(softc->sysctl_tree),
			    OID_AUTO, "wwpn", CTLFLAG_RD,
			    &softc->wwpn, "World Wide Port Name");
		}
	}
	cam_periph_release(periph);
}

static int
dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t value;
	struct da_softc *softc;

	softc = (struct da_softc *)arg1;

	value = softc->disk->d_delmaxsize;
	error = sysctl_handle_64(oidp, &value, 0, req);
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	/* only accept values smaller than the calculated value */
	if (value > dadeletemaxsize(softc, softc->delete_method)) {
		return (EINVAL);
	}
	softc->disk->d_delmaxsize = value;

	return (0);
}

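/*
 * Sysctl handler for minimum_cmd_size: normalize whatever the user writes
 * to one of the supported CDB sizes (6, 10, 12 or 16).
 */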
static int
dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;

	error = sysctl_handle_int(oidp, &value, 0, req);

	if ((error != 0)
	 || (req->newptr == NULL))
		return (error);

	/*
	 * Acceptable values here are 6, 10, 12 or 16.
	 */
	if (value < 6)
		value = 6;
	else if ((value > 6)
	      && (value <= 10))
		value = 10;
	else if ((value > 10)
	      && (value <= 12))
		value = 12;
	else if (value > 12)
		value = 16;

	*(int *)arg1 = value;

	return (0);
}

static void
dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
{

	softc->delete_method = delete_method;
	softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
	softc->delete_func = da_delete_functions[delete_method];

	if (softc->delete_method > DA_DELETE_DISABLE)
		softc->disk->d_flags |= DISKFLAG_CANDELETE;
	else
		softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
}

static off_t
dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
{
	off_t sectors;

	switch(delete_method) {
	case DA_DELETE_UNMAP:
		sectors = (off_t)softc->unmap_max_lba;
		break;
	case DA_DELETE_ATA_TRIM:
		sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
		break;
	case DA_DELETE_WS16:
		sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
		break;
	case DA_DELETE_ZERO:
	case DA_DELETE_WS10:
		sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
		break;
	default:
		return 0;
	}

	return (off_t)softc->params.secsize *
	    omin(sectors, softc->params.sectors);
}

static void
daprobedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	dadeletemethodchoose(softc, DA_DELETE_NONE);

	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		char buf[80];
		int i, sep;

		snprintf(buf, sizeof(buf), "Delete methods: <");
		sep = 0;
		for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
			if (softc->delete_available & (1 << i)) {
				if (sep) {
					strlcat(buf, ",", sizeof(buf));
				} else {
					sep = 1;
				}
				strlcat(buf, da_delete_method_names[i],
				    sizeof(buf));
				if (i == softc->delete_method) {
					strlcat(buf, "(*)", sizeof(buf));
				}
			}
		}
		if (sep == 0) {
			if (softc->delete_method == DA_DELETE_NONE)
				strlcat(buf, "NONE(*)", sizeof(buf));
			else
				strlcat(buf, "DISABLED(*)", sizeof(buf));
		}
		strlcat(buf, ">", sizeof(buf));
		printf("%s%d: %s\n", periph->periph_name,
		    periph->unit_number, buf);
	}

	/*
	 * Since our peripheral may be invalidated by an error
	 * above or an external event, we must release our CCB
	 * before releasing the probe lock on the peripheral.
	 * The peripheral will only go away once the last lock
	 * is removed, and we need it around for the CCB release
	 * operation.
	 */
	xpt_release_ccb(ccb);
	softc->state = DA_STATE_NORMAL;
	softc->flags |= DA_FLAG_PROBED;
	daschedule(periph);
	wakeup(&softc->disk->d_mediasize);
	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		softc->flags |= DA_FLAG_ANNOUNCED;
		cam_periph_unhold(periph);
	} else
		cam_periph_release_locked(periph);
}

static void
dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
{
	int i, delete_method;

	delete_method = default_method;

	/*
	 * Use the pre-defined order to choose the best
	 * performing delete.
	 */
	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
		if (softc->delete_available & (1 << i)) {
			dadeletemethodset(softc, i);
			return;
		}
	}
	dadeletemethodset(softc, delete_method);
}

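/*
 * Sysctl handler for delete_method: report the current method by name and
 * accept any method that is currently available (or DISABLE) as the new
 * setting.
 */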
static int
dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	const char *p;
	struct da_softc *softc;
	int i, error, methods, value;

	softc = (struct da_softc *)arg1;

	value = softc->delete_method;
	if (value < 0 || value > DA_DELETE_MAX)
		p = "UNKNOWN";
	else
		p = da_delete_method_names[value];
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
	for (i = 0; i <= DA_DELETE_MAX; i++) {
		if (!(methods & (1 << i)) ||
		    strcmp(buf, da_delete_method_names[i]) != 0)
			continue;
		dadeletemethodset(softc, i);
		return (0);
	}
	return (EINVAL);
}

static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	struct da_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE_RC;
	bioq_init(&softc->bio_queue);
	bioq_init(&softc->delete_queue);
	bioq_init(&softc->delete_run_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
	softc->unmap_max_lba = UNMAP_RANGE_MAX;
	softc->ws_max_blks = WS16_MAX_BLKS;
	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
	softc->sort_io_queue = -1;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);

	/*
	 * Take an exclusive refcount on the periph while dastart is called
	 * to finish the probe.  The reference will be dropped in dadone at
	 * the end of probe.
	 */
	(void)cam_periph_hold(periph, PRIBIO);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
2120 */ 2121 callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); 2122 callout_reset(&softc->sendordered_c, 2123 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, 2124 dasendorderedtag, softc); 2125 2126 cam_periph_unlock(periph); 2127 /* 2128 * RBC devices don't have to support READ(6), only READ(10). 2129 */ 2130 if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC) 2131 softc->minimum_cmd_size = 10; 2132 else 2133 softc->minimum_cmd_size = 6; 2134 2135 /* 2136 * Load the user's default, if any. 2137 */ 2138 snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size", 2139 periph->unit_number); 2140 TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size); 2141 2142 /* 2143 * 6, 10, 12 and 16 are the currently permissible values. 2144 */ 2145 if (softc->minimum_cmd_size < 6) 2146 softc->minimum_cmd_size = 6; 2147 else if ((softc->minimum_cmd_size > 6) 2148 && (softc->minimum_cmd_size <= 10)) 2149 softc->minimum_cmd_size = 10; 2150 else if ((softc->minimum_cmd_size > 10) 2151 && (softc->minimum_cmd_size <= 12)) 2152 softc->minimum_cmd_size = 12; 2153 else if (softc->minimum_cmd_size > 12) 2154 softc->minimum_cmd_size = 16; 2155 2156 /* Predict whether device may support READ CAPACITY(16). */ 2157 if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 && 2158 (softc->quirks & DA_Q_NO_RC16) == 0) { 2159 softc->flags |= DA_FLAG_CAN_RC16; 2160 softc->state = DA_STATE_PROBE_RC16; 2161 } 2162 2163 /* 2164 * Register this media as a disk. 2165 */ 2166 softc->disk = disk_alloc(); 2167 softc->disk->d_devstat = devstat_new_entry(periph->periph_name, 2168 periph->unit_number, 0, 2169 DEVSTAT_BS_UNAVAILABLE, 2170 SID_TYPE(&cgd->inq_data) | 2171 XPORT_DEVSTAT_TYPE(cpi.transport), 2172 DEVSTAT_PRIORITY_DISK); 2173 softc->disk->d_open = daopen; 2174 softc->disk->d_close = daclose; 2175 softc->disk->d_strategy = dastrategy; 2176 softc->disk->d_dump = dadump; 2177 softc->disk->d_getattr = dagetattr; 2178 softc->disk->d_gone = dadiskgonecb; 2179 softc->disk->d_name = "da"; 2180 softc->disk->d_drv1 = periph; 2181 if (cpi.maxio == 0) 2182 softc->maxio = DFLTPHYS; /* traditional default */ 2183 else if (cpi.maxio > MAXPHYS) 2184 softc->maxio = MAXPHYS; /* for safety */ 2185 else 2186 softc->maxio = cpi.maxio; 2187 softc->disk->d_maxsize = softc->maxio; 2188 softc->disk->d_unit = periph->unit_number; 2189 softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION; 2190 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) 2191 softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; 2192 if ((cpi.hba_misc & PIM_UNMAPPED) != 0) 2193 softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; 2194 cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, 2195 sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); 2196 strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); 2197 cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], 2198 cgd->inq_data.product, sizeof(cgd->inq_data.product), 2199 sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); 2200 softc->disk->d_hba_vendor = cpi.hba_vendor; 2201 softc->disk->d_hba_device = cpi.hba_device; 2202 softc->disk->d_hba_subvendor = cpi.hba_subvendor; 2203 softc->disk->d_hba_subdevice = cpi.hba_subdevice; 2204 2205 /* 2206 * Acquire a reference to the periph before we register with GEOM. 2207 * We'll release this reference once GEOM calls us back (via 2208 * dadiskgonecb()) telling us that our provider has been freed. 
2209 */ 2210 if (cam_periph_acquire(periph) != CAM_REQ_CMP) { 2211 xpt_print(periph->path, "%s: lost periph during " 2212 "registration!\n", __func__); 2213 cam_periph_lock(periph); 2214 return (CAM_REQ_CMP_ERR); 2215 } 2216 2217 disk_create(softc->disk, DISK_VERSION); 2218 cam_periph_lock(periph); 2219 2220 /* 2221 * Add async callbacks for events of interest. 2222 * I don't bother checking if this fails as, 2223 * in most cases, the system will function just 2224 * fine without them and the only alternative 2225 * would be to not attach the device on failure. 2226 */ 2227 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | 2228 AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION, 2229 daasync, periph, periph->path); 2230 2231 /* 2232 * Emit an attribute changed notification just in case 2233 * physical path information arrived before our async 2234 * event handler was registered, but after anyone attaching 2235 * to our disk device polled it. 2236 */ 2237 disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); 2238 2239 /* 2240 * Schedule a periodic media polling events. 2241 */ 2242 callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); 2243 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && 2244 (cgd->inq_flags & SID_AEN) == 0 && 2245 da_poll_period != 0) 2246 callout_reset(&softc->mediapoll_c, da_poll_period * hz, 2247 damediapoll, periph); 2248 2249 xpt_schedule(periph, CAM_PRIORITY_DEV); 2250 2251 return(CAM_REQ_CMP); 2252 } 2253 2254 static void 2255 dastart(struct cam_periph *periph, union ccb *start_ccb) 2256 { 2257 struct da_softc *softc; 2258 2259 softc = (struct da_softc *)periph->softc; 2260 2261 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); 2262 2263 skipstate: 2264 switch (softc->state) { 2265 case DA_STATE_NORMAL: 2266 { 2267 struct bio *bp; 2268 uint8_t tag_code; 2269 2270 /* Run BIO_DELETE if not running yet. */ 2271 if (!softc->delete_running && 2272 (bp = bioq_first(&softc->delete_queue)) != NULL) { 2273 if (softc->delete_func != NULL) { 2274 softc->delete_func(periph, start_ccb, bp); 2275 goto out; 2276 } else { 2277 bioq_flush(&softc->delete_queue, NULL, 0); 2278 /* FALLTHROUGH */ 2279 } 2280 } 2281 2282 /* Run regular command. */ 2283 bp = bioq_takefirst(&softc->bio_queue); 2284 if (bp == NULL) { 2285 if (softc->tur) { 2286 softc->tur = 0; 2287 scsi_test_unit_ready(&start_ccb->csio, 2288 /*retries*/ da_retry_count, 2289 dadone, 2290 MSG_SIMPLE_Q_TAG, 2291 SSD_FULL_SIZE, 2292 da_default_timeout * 1000); 2293 start_ccb->ccb_h.ccb_bp = NULL; 2294 start_ccb->ccb_h.ccb_state = DA_CCB_TUR; 2295 xpt_action(start_ccb); 2296 } else 2297 xpt_release_ccb(start_ccb); 2298 break; 2299 } 2300 if (softc->tur) { 2301 softc->tur = 0; 2302 cam_periph_release_locked(periph); 2303 } 2304 2305 if ((bp->bio_flags & BIO_ORDERED) != 0 || 2306 (softc->flags & DA_FLAG_NEED_OTAG) != 0) { 2307 softc->flags &= ~DA_FLAG_NEED_OTAG; 2308 softc->flags |= DA_FLAG_WAS_OTAG; 2309 tag_code = MSG_ORDERED_Q_TAG; 2310 } else { 2311 tag_code = MSG_SIMPLE_Q_TAG; 2312 } 2313 2314 switch (bp->bio_cmd) { 2315 case BIO_WRITE: 2316 softc->flags |= DA_FLAG_DIRTY; 2317 /* FALLTHROUGH */ 2318 case BIO_READ: 2319 scsi_read_write(&start_ccb->csio, 2320 /*retries*/da_retry_count, 2321 /*cbfcnp*/dadone, 2322 /*tag_action*/tag_code, 2323 /*read_op*/(bp->bio_cmd == BIO_READ ? 2324 SCSI_RW_READ : SCSI_RW_WRITE) | 2325 ((bp->bio_flags & BIO_UNMAPPED) != 0 ? 
2326 SCSI_RW_BIO : 0), 2327 /*byte2*/0, 2328 softc->minimum_cmd_size, 2329 /*lba*/bp->bio_pblkno, 2330 /*block_count*/bp->bio_bcount / 2331 softc->params.secsize, 2332 /*data_ptr*/ (bp->bio_flags & 2333 BIO_UNMAPPED) != 0 ? (void *)bp : 2334 bp->bio_data, 2335 /*dxfer_len*/ bp->bio_bcount, 2336 /*sense_len*/SSD_FULL_SIZE, 2337 da_default_timeout * 1000); 2338 break; 2339 case BIO_FLUSH: 2340 /* 2341 * BIO_FLUSH doesn't currently communicate 2342 * range data, so we synchronize the cache 2343 * over the whole disk. We also force 2344 * ordered tag semantics the flush applies 2345 * to all previously queued I/O. 2346 */ 2347 scsi_synchronize_cache(&start_ccb->csio, 2348 /*retries*/1, 2349 /*cbfcnp*/dadone, 2350 MSG_ORDERED_Q_TAG, 2351 /*begin_lba*/0, 2352 /*lb_count*/0, 2353 SSD_FULL_SIZE, 2354 da_default_timeout*1000); 2355 break; 2356 } 2357 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; 2358 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 2359 2360 out: 2361 LIST_INSERT_HEAD(&softc->pending_ccbs, 2362 &start_ccb->ccb_h, periph_links.le); 2363 2364 /* We expect a unit attention from this device */ 2365 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { 2366 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; 2367 softc->flags &= ~DA_FLAG_RETRY_UA; 2368 } 2369 2370 start_ccb->ccb_h.ccb_bp = bp; 2371 softc->refcount++; 2372 cam_periph_unlock(periph); 2373 xpt_action(start_ccb); 2374 cam_periph_lock(periph); 2375 softc->refcount--; 2376 2377 /* May have more work to do, so ensure we stay scheduled */ 2378 daschedule(periph); 2379 break; 2380 } 2381 case DA_STATE_PROBE_RC: 2382 { 2383 struct scsi_read_capacity_data *rcap; 2384 2385 rcap = (struct scsi_read_capacity_data *) 2386 malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); 2387 if (rcap == NULL) { 2388 printf("dastart: Couldn't malloc read_capacity data\n"); 2389 /* da_free_periph??? */ 2390 break; 2391 } 2392 scsi_read_capacity(&start_ccb->csio, 2393 /*retries*/da_retry_count, 2394 dadone, 2395 MSG_SIMPLE_Q_TAG, 2396 rcap, 2397 SSD_FULL_SIZE, 2398 /*timeout*/5000); 2399 start_ccb->ccb_h.ccb_bp = NULL; 2400 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; 2401 xpt_action(start_ccb); 2402 break; 2403 } 2404 case DA_STATE_PROBE_RC16: 2405 { 2406 struct scsi_read_capacity_data_long *rcaplong; 2407 2408 rcaplong = (struct scsi_read_capacity_data_long *) 2409 malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); 2410 if (rcaplong == NULL) { 2411 printf("dastart: Couldn't malloc read_capacity data\n"); 2412 /* da_free_periph??? */ 2413 break; 2414 } 2415 scsi_read_capacity_16(&start_ccb->csio, 2416 /*retries*/ da_retry_count, 2417 /*cbfcnp*/ dadone, 2418 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2419 /*lba*/ 0, 2420 /*reladr*/ 0, 2421 /*pmi*/ 0, 2422 /*rcap_buf*/ (uint8_t *)rcaplong, 2423 /*rcap_buf_len*/ sizeof(*rcaplong), 2424 /*sense_len*/ SSD_FULL_SIZE, 2425 /*timeout*/ da_default_timeout * 1000); 2426 start_ccb->ccb_h.ccb_bp = NULL; 2427 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; 2428 xpt_action(start_ccb); 2429 break; 2430 } 2431 case DA_STATE_PROBE_LBP: 2432 { 2433 struct scsi_vpd_logical_block_prov *lbp; 2434 2435 if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { 2436 /* 2437 * If we get here we don't support any SBC-3 delete 2438 * methods with UNMAP as the Logical Block Provisioning 2439 * VPD page support is required for devices which 2440 * support it according to T10/1799-D Revision 31 2441 * however older revisions of the spec don't mandate 2442 * this so we currently don't remove these methods 2443 * from the available set. 
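 *
 * When the page is present, the flags it carries (handled in the
 * DA_CCB_PROBE_LBP completion below) indicate which of WRITE SAME(16),
 * WRITE SAME(10) and UNMAP the device claims to support.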
2444 */ 2445 softc->state = DA_STATE_PROBE_BLK_LIMITS; 2446 goto skipstate; 2447 } 2448 2449 lbp = (struct scsi_vpd_logical_block_prov *) 2450 malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); 2451 2452 if (lbp == NULL) { 2453 printf("dastart: Couldn't malloc lbp data\n"); 2454 /* da_free_periph??? */ 2455 break; 2456 } 2457 2458 scsi_inquiry(&start_ccb->csio, 2459 /*retries*/da_retry_count, 2460 /*cbfcnp*/dadone, 2461 /*tag_action*/MSG_SIMPLE_Q_TAG, 2462 /*inq_buf*/(u_int8_t *)lbp, 2463 /*inq_len*/sizeof(*lbp), 2464 /*evpd*/TRUE, 2465 /*page_code*/SVPD_LBP, 2466 /*sense_len*/SSD_MIN_SIZE, 2467 /*timeout*/da_default_timeout * 1000); 2468 start_ccb->ccb_h.ccb_bp = NULL; 2469 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; 2470 xpt_action(start_ccb); 2471 break; 2472 } 2473 case DA_STATE_PROBE_BLK_LIMITS: 2474 { 2475 struct scsi_vpd_block_limits *block_limits; 2476 2477 if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { 2478 /* Not supported skip to next probe */ 2479 softc->state = DA_STATE_PROBE_BDC; 2480 goto skipstate; 2481 } 2482 2483 block_limits = (struct scsi_vpd_block_limits *) 2484 malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); 2485 2486 if (block_limits == NULL) { 2487 printf("dastart: Couldn't malloc block_limits data\n"); 2488 /* da_free_periph??? */ 2489 break; 2490 } 2491 2492 scsi_inquiry(&start_ccb->csio, 2493 /*retries*/da_retry_count, 2494 /*cbfcnp*/dadone, 2495 /*tag_action*/MSG_SIMPLE_Q_TAG, 2496 /*inq_buf*/(u_int8_t *)block_limits, 2497 /*inq_len*/sizeof(*block_limits), 2498 /*evpd*/TRUE, 2499 /*page_code*/SVPD_BLOCK_LIMITS, 2500 /*sense_len*/SSD_MIN_SIZE, 2501 /*timeout*/da_default_timeout * 1000); 2502 start_ccb->ccb_h.ccb_bp = NULL; 2503 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; 2504 xpt_action(start_ccb); 2505 break; 2506 } 2507 case DA_STATE_PROBE_BDC: 2508 { 2509 struct scsi_vpd_block_characteristics *bdc; 2510 2511 if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { 2512 softc->state = DA_STATE_PROBE_ATA; 2513 goto skipstate; 2514 } 2515 2516 bdc = (struct scsi_vpd_block_characteristics *) 2517 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); 2518 2519 if (bdc == NULL) { 2520 printf("dastart: Couldn't malloc bdc data\n"); 2521 /* da_free_periph??? */ 2522 break; 2523 } 2524 2525 scsi_inquiry(&start_ccb->csio, 2526 /*retries*/da_retry_count, 2527 /*cbfcnp*/dadone, 2528 /*tag_action*/MSG_SIMPLE_Q_TAG, 2529 /*inq_buf*/(u_int8_t *)bdc, 2530 /*inq_len*/sizeof(*bdc), 2531 /*evpd*/TRUE, 2532 /*page_code*/SVPD_BDC, 2533 /*sense_len*/SSD_MIN_SIZE, 2534 /*timeout*/da_default_timeout * 1000); 2535 start_ccb->ccb_h.ccb_bp = NULL; 2536 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; 2537 xpt_action(start_ccb); 2538 break; 2539 } 2540 case DA_STATE_PROBE_ATA: 2541 { 2542 struct ata_params *ata_params; 2543 2544 if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { 2545 daprobedone(periph, start_ccb); 2546 break; 2547 } 2548 2549 ata_params = (struct ata_params*) 2550 malloc(sizeof(*ata_params), M_SCSIDA, M_NOWAIT|M_ZERO); 2551 2552 if (ata_params == NULL) { 2553 printf("dastart: Couldn't malloc ata_params data\n"); 2554 /* da_free_periph??? 
*/
2555 break;
2556 }
2557
2558 scsi_ata_identify(&start_ccb->csio,
2559 /*retries*/da_retry_count,
2560 /*cbfcnp*/dadone,
2561 /*tag_action*/MSG_SIMPLE_Q_TAG,
2562 /*data_ptr*/(u_int8_t *)ata_params,
2563 /*dxfer_len*/sizeof(*ata_params),
2564 /*sense_len*/SSD_FULL_SIZE,
2565 /*timeout*/da_default_timeout * 1000);
2566 start_ccb->ccb_h.ccb_bp = NULL;
2567 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
2568 xpt_action(start_ccb);
2569 break;
2570 }
2571 }
2572 }
2573
2574 /*
2575 * In each of the methods below, while it's the caller's
2576 * responsibility to ensure the request will fit into a
2577 * single device request, we might have changed the delete
2578 * method due to the device incorrectly advertising either
2579 * its supported methods or limits.
2580 *
2581 * To prevent this from causing further issues, we validate the
2582 * request against the method's limits and warn, which would
2583 * otherwise be unnecessary.
2584 */
2585 static void
2586 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
2587 {
2588 struct da_softc *softc = (struct da_softc *)periph->softc;
2589 struct bio *bp1;
2590 uint8_t *buf = softc->unmap_buf;
2591 uint64_t lba, lastlba = (uint64_t)-1;
2592 uint64_t totalcount = 0;
2593 uint64_t count;
2594 uint32_t lastcount = 0, c;
2595 uint32_t off, ranges = 0;
2596
2597 /*
2598 * Currently this doesn't take the UNMAP
2599 * Granularity and Granularity Alignment
2600 * fields into account.
2601 *
2602 * This could result in both suboptimal unmap
2603 * requests as well as UNMAP calls unmapping
2604 * fewer LBAs than requested.
2605 */
2606
2607 softc->delete_running = 1;
2608 bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
2609 bp1 = bp;
2610 do {
2611 bioq_remove(&softc->delete_queue, bp1);
2612 if (bp1 != bp)
2613 bioq_insert_tail(&softc->delete_run_queue, bp1);
2614 lba = bp1->bio_pblkno;
2615 count = bp1->bio_bcount / softc->params.secsize;
2616
2617 /* Try to extend the previous range.
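 * Ranges are coalesced when a new bio starts exactly where the
 * previous one ended.  The buffer built here follows the SBC-3 UNMAP
 * parameter list layout: an 8 byte header followed by 16 byte block
 * descriptors, each carrying an 8 byte starting LBA, a 4 byte block
 * count and 4 reserved bytes, which is why the stores below land at
 * buf[off + 0] and buf[off + 8].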
*/
2618 if (lba == lastlba) {
2619 c = omin(count, UNMAP_RANGE_MAX - lastcount);
2620 lastcount += c;
2621 off = ((ranges - 1) * UNMAP_RANGE_SIZE) +
2622 UNMAP_HEAD_SIZE;
2623 scsi_ulto4b(lastcount, &buf[off + 8]);
2624 count -= c;
2625 lba += c;
2626 totalcount += c;
2627 }
2628
2629 while (count > 0) {
2630 c = omin(count, UNMAP_RANGE_MAX);
2631 if (totalcount + c > softc->unmap_max_lba ||
2632 ranges >= softc->unmap_max_ranges) {
2633 xpt_print(periph->path,
2634 "%s issuing short delete %ju > %ju "
2635 "|| %u >= %u\n",
2636 da_delete_method_desc[softc->delete_method],
2637 (uintmax_t)(totalcount + c), (uintmax_t)softc->unmap_max_lba,
2638 ranges, softc->unmap_max_ranges);
2639 break;
2640 }
2641 off = (ranges * UNMAP_RANGE_SIZE) + UNMAP_HEAD_SIZE;
2642 scsi_u64to8b(lba, &buf[off + 0]);
2643 scsi_ulto4b(c, &buf[off + 8]);
2644 lba += c;
2645 totalcount += c;
2646 ranges++;
2647 count -= c;
2648 lastcount = c;
2649 }
2650 lastlba = lba;
2651 bp1 = bioq_first(&softc->delete_queue);
2652 if (bp1 == NULL || ranges >= softc->unmap_max_ranges ||
2653 totalcount + bp1->bio_bcount /
2654 softc->params.secsize > softc->unmap_max_lba)
2655 break;
2656 } while (1);
2657 scsi_ulto2b(ranges * 16 + 6, &buf[0]);
2658 scsi_ulto2b(ranges * 16, &buf[2]);
2659
2660 scsi_unmap(&ccb->csio,
2661 /*retries*/da_retry_count,
2662 /*cbfcnp*/dadone,
2663 /*tag_action*/MSG_SIMPLE_Q_TAG,
2664 /*byte2*/0,
2665 /*data_ptr*/ buf,
2666 /*dxfer_len*/ ranges * 16 + 8,
2667 /*sense_len*/SSD_FULL_SIZE,
2668 da_default_timeout * 1000);
2669 ccb->ccb_h.ccb_state = DA_CCB_DELETE;
2670 ccb->ccb_h.flags |= CAM_UNLOCKED;
2671 }
2672
2673 static void
2674 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
2675 {
2676 struct da_softc *softc = (struct da_softc *)periph->softc;
2677 struct bio *bp1;
2678 uint8_t *buf = softc->unmap_buf;
2679 uint64_t lastlba = (uint64_t)-1;
2680 uint64_t count;
2681 uint64_t lba;
2682 uint32_t lastcount = 0, c, requestcount;
2683 int ranges = 0, off, block_count;
2684
2685 softc->delete_running = 1;
2686 bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
2687 bp1 = bp;
2688 do {
2689 bioq_remove(&softc->delete_queue, bp1);
2690 if (bp1 != bp)
2691 bioq_insert_tail(&softc->delete_run_queue, bp1);
2692 lba = bp1->bio_pblkno;
2693 count = bp1->bio_bcount / softc->params.secsize;
2694 requestcount = count;
2695
2696 /* Try to extend the previous range.
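 * Ranges are coalesced the same way as in da_delete_unmap() above.
 * Here the buffer holds ATA DSM TRIM entries instead: each entry is
 * 8 bytes, a 48 bit starting LBA in bytes 0-5 and a 16 bit range
 * length in bytes 6-7, stored little-endian by the byte assignments
 * below.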
*/
2697 if (lba == lastlba) {
2698 c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
2699 lastcount += c;
2700 off = (ranges - 1) * 8;
2701 buf[off + 6] = lastcount & 0xff;
2702 buf[off + 7] = (lastcount >> 8) & 0xff;
2703 count -= c;
2704 lba += c;
2705 }
2706
2707 while (count > 0) {
2708 c = omin(count, ATA_DSM_RANGE_MAX);
2709 off = ranges * 8;
2710
2711 buf[off + 0] = lba & 0xff;
2712 buf[off + 1] = (lba >> 8) & 0xff;
2713 buf[off + 2] = (lba >> 16) & 0xff;
2714 buf[off + 3] = (lba >> 24) & 0xff;
2715 buf[off + 4] = (lba >> 32) & 0xff;
2716 buf[off + 5] = (lba >> 40) & 0xff;
2717 buf[off + 6] = c & 0xff;
2718 buf[off + 7] = (c >> 8) & 0xff;
2719 lba += c;
2720 ranges++;
2721 count -= c;
2722 lastcount = c;
2723 if (count != 0 && ranges == softc->trim_max_ranges) {
2724 xpt_print(periph->path,
2725 "%s issuing short delete %ju > %ju\n",
2726 da_delete_method_desc[softc->delete_method],
2727 (uintmax_t)requestcount,
2728 (uintmax_t)((softc->trim_max_ranges - ranges) *
2729 ATA_DSM_RANGE_MAX));
2730 break;
2731 }
2732 }
2733 lastlba = lba;
2734 bp1 = bioq_first(&softc->delete_queue);
2735 if (bp1 == NULL || bp1->bio_bcount / softc->params.secsize >
2736 (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX)
2737 break;
2738 } while (1);
2739
2740 block_count = (ranges + ATA_DSM_BLK_RANGES - 1) / ATA_DSM_BLK_RANGES;
2741 scsi_ata_trim(&ccb->csio,
2742 /*retries*/da_retry_count,
2743 /*cbfcnp*/dadone,
2744 /*tag_action*/MSG_SIMPLE_Q_TAG,
2745 block_count,
2746 /*data_ptr*/buf,
2747 /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
2748 /*sense_len*/SSD_FULL_SIZE,
2749 da_default_timeout * 1000);
2750 ccb->ccb_h.ccb_state = DA_CCB_DELETE;
2751 ccb->ccb_h.flags |= CAM_UNLOCKED;
2752 }
2753
2754 /*
2755 * We calculate ws_max_blks here based on d_delmaxsize instead of
2756 * using softc->ws_max_blks, since the latter is the absolute maximum
2757 * for the device rather than the protocol maximum, which may well be lower.
2758 */
2759 static void
2760 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
2761 {
2762 struct da_softc *softc;
2763 struct bio *bp1;
2764 uint64_t ws_max_blks;
2765 uint64_t lba;
2766 uint64_t count; /* forward compat with WS32 */
2767
2768 softc = (struct da_softc *)periph->softc;
2769 ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
2770 softc->delete_running = 1;
2771 lba = bp->bio_pblkno;
2772 count = 0;
2773 bp1 = bp;
2774 do {
2775 bioq_remove(&softc->delete_queue, bp1);
2776 if (bp1 != bp)
2777 bioq_insert_tail(&softc->delete_run_queue, bp1);
2778 count += bp1->bio_bcount / softc->params.secsize;
2779 if (count > ws_max_blks) {
2780 xpt_print(periph->path,
2781 "%s issuing short delete %ju > %ju\n",
2782 da_delete_method_desc[softc->delete_method],
2783 (uintmax_t)count, (uintmax_t)ws_max_blks);
2784 count = omin(count, ws_max_blks);
2785 break;
2786 }
2787 bp1 = bioq_first(&softc->delete_queue);
2788 if (bp1 == NULL || lba + count != bp1->bio_pblkno ||
2789 count + bp1->bio_bcount /
2790 softc->params.secsize > ws_max_blks)
2791 break;
2792 } while (1);
2793
2794 scsi_write_same(&ccb->csio,
2795 /*retries*/da_retry_count,
2796 /*cbfcnp*/dadone,
2797 /*tag_action*/MSG_SIMPLE_Q_TAG,
2798 /*byte2*/softc->delete_method ==
2799 DA_DELETE_ZERO ? 0 : SWS_UNMAP,
2800 softc->delete_method == DA_DELETE_WS16 ?
16 : 10,
2801 /*lba*/lba,
2802 /*block_count*/count,
2803 /*data_ptr*/ __DECONST(void *, zero_region),
2804 /*dxfer_len*/ softc->params.secsize,
2805 /*sense_len*/SSD_FULL_SIZE,
2806 da_default_timeout * 1000);
2807 ccb->ccb_h.ccb_state = DA_CCB_DELETE;
2808 ccb->ccb_h.flags |= CAM_UNLOCKED;
2809 }
2810
2811 static int
2812 cmd6workaround(union ccb *ccb)
2813 {
2814 struct scsi_rw_6 cmd6;
2815 struct scsi_rw_10 *cmd10;
2816 struct da_softc *softc;
2817 u_int8_t *cdb;
2818 struct bio *bp;
2819 int frozen;
2820
2821 cdb = ccb->csio.cdb_io.cdb_bytes;
2822 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
2823
2824 if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
2825 da_delete_methods old_method = softc->delete_method;
2826
2827 /*
2828 * Typically there are two reasons for failure here:
2829 * 1. Delete method was detected as supported but isn't.
2830 * 2. Delete failed due to invalid params, e.g. too big.
2831 *
2832 * While we will attempt to choose an alternative delete method
2833 * this may result in short deletes if the existing delete
2834 * requests from geom are too big for the new method chosen.
2835 *
2836 * This function assumes that the error handling which triggered
2837 * this call will not retry the I/O; otherwise a panic will occur.
2838 */
2839 dadeleteflag(softc, old_method, 0);
2840 dadeletemethodchoose(softc, DA_DELETE_DISABLE);
2841 if (softc->delete_method == DA_DELETE_DISABLE)
2842 xpt_print(ccb->ccb_h.path,
2843 "%s failed, disabling BIO_DELETE\n",
2844 da_delete_method_desc[old_method]);
2845 else
2846 xpt_print(ccb->ccb_h.path,
2847 "%s failed, switching to %s BIO_DELETE\n",
2848 da_delete_method_desc[old_method],
2849 da_delete_method_desc[softc->delete_method]);
2850
2851 while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
2852 bioq_disksort(&softc->delete_queue, bp);
2853 bioq_disksort(&softc->delete_queue,
2854 (struct bio *)ccb->ccb_h.ccb_bp);
2855 ccb->ccb_h.ccb_bp = NULL;
2856 return (0);
2857 }
2858
2859 /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
2860 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
2861 (*cdb == PREVENT_ALLOW) &&
2862 (softc->quirks & DA_Q_NO_PREVENT) == 0) {
2863 if (bootverbose)
2864 xpt_print(ccb->ccb_h.path,
2865 "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
2866 softc->quirks |= DA_Q_NO_PREVENT;
2867 return (0);
2868 }
2869
2870 /* Detect unsupported SYNCHRONIZE CACHE(10). */
2871 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
2872 (*cdb == SYNCHRONIZE_CACHE) &&
2873 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
2874 if (bootverbose)
2875 xpt_print(ccb->ccb_h.path,
2876 "SYNCHRONIZE CACHE(10) not supported.\n");
2877 softc->quirks |= DA_Q_NO_SYNC_CACHE;
2878 softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
2879 return (0);
2880 }
2881
2882 /* Translation only possible if CDB is an array and cmd is R/W6 */
2883 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
2884 (*cdb != READ_6 && *cdb != WRITE_6))
2885 return (0);
2886
2887 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
2888 "increasing minimum_cmd_size to 10.\n");
2889 softc->minimum_cmd_size = 10;
2890
2891 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
2892 cmd10 = (struct scsi_rw_10 *)cdb;
2893 cmd10->opcode = (cmd6.opcode == READ_6) ?
READ_10 : WRITE_10; 2894 cmd10->byte2 = 0; 2895 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); 2896 cmd10->reserved = 0; 2897 scsi_ulto2b(cmd6.length, cmd10->length); 2898 cmd10->control = cmd6.control; 2899 ccb->csio.cdb_len = sizeof(*cmd10); 2900 2901 /* Requeue request, unfreezing queue if necessary */ 2902 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 2903 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2904 xpt_action(ccb); 2905 if (frozen) { 2906 cam_release_devq(ccb->ccb_h.path, 2907 /*relsim_flags*/0, 2908 /*reduction*/0, 2909 /*timeout*/0, 2910 /*getcount_only*/0); 2911 } 2912 return (ERESTART); 2913 } 2914 2915 static void 2916 dadone(struct cam_periph *periph, union ccb *done_ccb) 2917 { 2918 struct da_softc *softc; 2919 struct ccb_scsiio *csio; 2920 u_int32_t priority; 2921 da_ccb_state state; 2922 2923 softc = (struct da_softc *)periph->softc; 2924 priority = done_ccb->ccb_h.pinfo.priority; 2925 2926 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n")); 2927 2928 csio = &done_ccb->csio; 2929 state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; 2930 switch (state) { 2931 case DA_CCB_BUFFER_IO: 2932 case DA_CCB_DELETE: 2933 { 2934 struct bio *bp, *bp1; 2935 2936 cam_periph_lock(periph); 2937 bp = (struct bio *)done_ccb->ccb_h.ccb_bp; 2938 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2939 int error; 2940 int sf; 2941 2942 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0) 2943 sf = SF_RETRY_UA; 2944 else 2945 sf = 0; 2946 2947 error = daerror(done_ccb, CAM_RETRY_SELTO, sf); 2948 if (error == ERESTART) { 2949 /* 2950 * A retry was scheduled, so 2951 * just return. 2952 */ 2953 cam_periph_unlock(periph); 2954 return; 2955 } 2956 bp = (struct bio *)done_ccb->ccb_h.ccb_bp; 2957 if (error != 0) { 2958 int queued_error; 2959 2960 /* 2961 * return all queued I/O with EIO, so that 2962 * the client can retry these I/Os in the 2963 * proper order should it attempt to recover. 2964 */ 2965 queued_error = EIO; 2966 2967 if (error == ENXIO 2968 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) { 2969 /* 2970 * Catastrophic error. Mark our pack as 2971 * invalid. 2972 */ 2973 /* 2974 * XXX See if this is really a media 2975 * XXX change first? 
2976 */ 2977 xpt_print(periph->path, 2978 "Invalidating pack\n"); 2979 softc->flags |= DA_FLAG_PACK_INVALID; 2980 queued_error = ENXIO; 2981 } 2982 bioq_flush(&softc->bio_queue, NULL, 2983 queued_error); 2984 if (bp != NULL) { 2985 bp->bio_error = error; 2986 bp->bio_resid = bp->bio_bcount; 2987 bp->bio_flags |= BIO_ERROR; 2988 } 2989 } else if (bp != NULL) { 2990 if (state == DA_CCB_DELETE) 2991 bp->bio_resid = 0; 2992 else 2993 bp->bio_resid = csio->resid; 2994 bp->bio_error = 0; 2995 if (bp->bio_resid != 0) 2996 bp->bio_flags |= BIO_ERROR; 2997 } 2998 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 2999 cam_release_devq(done_ccb->ccb_h.path, 3000 /*relsim_flags*/0, 3001 /*reduction*/0, 3002 /*timeout*/0, 3003 /*getcount_only*/0); 3004 } else if (bp != NULL) { 3005 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 3006 panic("REQ_CMP with QFRZN"); 3007 if (state == DA_CCB_DELETE) 3008 bp->bio_resid = 0; 3009 else 3010 bp->bio_resid = csio->resid; 3011 if (csio->resid > 0) 3012 bp->bio_flags |= BIO_ERROR; 3013 if (softc->error_inject != 0) { 3014 bp->bio_error = softc->error_inject; 3015 bp->bio_resid = bp->bio_bcount; 3016 bp->bio_flags |= BIO_ERROR; 3017 softc->error_inject = 0; 3018 } 3019 } 3020 3021 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); 3022 if (LIST_EMPTY(&softc->pending_ccbs)) 3023 softc->flags |= DA_FLAG_WAS_OTAG; 3024 3025 xpt_release_ccb(done_ccb); 3026 if (state == DA_CCB_DELETE) { 3027 TAILQ_HEAD(, bio) queue; 3028 3029 TAILQ_INIT(&queue); 3030 TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); 3031 softc->delete_run_queue.insert_point = NULL; 3032 /* 3033 * Normally, the xpt_release_ccb() above would make sure 3034 * that when we have more work to do, that work would 3035 * get kicked off. However, we specifically keep 3036 * delete_running set to 0 before the call above to 3037 * allow other I/O to progress when many BIO_DELETE 3038 * requests are pushed down. We set delete_running to 0 3039 * and call daschedule again so that we don't stall if 3040 * there are no other I/Os pending apart from BIO_DELETEs. 3041 */ 3042 softc->delete_running = 0; 3043 daschedule(periph); 3044 cam_periph_unlock(periph); 3045 while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { 3046 TAILQ_REMOVE(&queue, bp1, bio_queue); 3047 bp1->bio_error = bp->bio_error; 3048 if (bp->bio_flags & BIO_ERROR) { 3049 bp1->bio_flags |= BIO_ERROR; 3050 bp1->bio_resid = bp1->bio_bcount; 3051 } else 3052 bp1->bio_resid = 0; 3053 biodone(bp1); 3054 } 3055 } else 3056 cam_periph_unlock(periph); 3057 if (bp != NULL) 3058 biodone(bp); 3059 return; 3060 } 3061 case DA_CCB_PROBE_RC: 3062 case DA_CCB_PROBE_RC16: 3063 { 3064 struct scsi_read_capacity_data *rdcap; 3065 struct scsi_read_capacity_data_long *rcaplong; 3066 char announce_buf[80]; 3067 int lbp; 3068 3069 lbp = 0; 3070 rdcap = NULL; 3071 rcaplong = NULL; 3072 if (state == DA_CCB_PROBE_RC) 3073 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr; 3074 else 3075 rcaplong = (struct scsi_read_capacity_data_long *) 3076 csio->data_ptr; 3077 3078 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 3079 struct disk_params *dp; 3080 uint32_t block_size; 3081 uint64_t maxsector; 3082 u_int lalba; /* Lowest aligned LBA. 
*/ 3083 3084 if (state == DA_CCB_PROBE_RC) { 3085 block_size = scsi_4btoul(rdcap->length); 3086 maxsector = scsi_4btoul(rdcap->addr); 3087 lalba = 0; 3088 3089 /* 3090 * According to SBC-2, if the standard 10 3091 * byte READ CAPACITY command returns 2^32, 3092 * we should issue the 16 byte version of 3093 * the command, since the device in question 3094 * has more sectors than can be represented 3095 * with the short version of the command. 3096 */ 3097 if (maxsector == 0xffffffff) { 3098 free(rdcap, M_SCSIDA); 3099 xpt_release_ccb(done_ccb); 3100 softc->state = DA_STATE_PROBE_RC16; 3101 xpt_schedule(periph, priority); 3102 return; 3103 } 3104 } else { 3105 block_size = scsi_4btoul(rcaplong->length); 3106 maxsector = scsi_8btou64(rcaplong->addr); 3107 lalba = scsi_2btoul(rcaplong->lalba_lbp); 3108 } 3109 3110 /* 3111 * Because GEOM code just will panic us if we 3112 * give them an 'illegal' value we'll avoid that 3113 * here. 3114 */ 3115 if (block_size == 0) { 3116 block_size = 512; 3117 if (maxsector == 0) 3118 maxsector = -1; 3119 } 3120 if (block_size >= MAXPHYS) { 3121 xpt_print(periph->path, 3122 "unsupportable block size %ju\n", 3123 (uintmax_t) block_size); 3124 announce_buf[0] = '\0'; 3125 cam_periph_invalidate(periph); 3126 } else { 3127 /* 3128 * We pass rcaplong into dasetgeom(), 3129 * because it will only use it if it is 3130 * non-NULL. 3131 */ 3132 dasetgeom(periph, block_size, maxsector, 3133 rcaplong, sizeof(*rcaplong)); 3134 lbp = (lalba & SRC16_LBPME_A); 3135 dp = &softc->params; 3136 snprintf(announce_buf, sizeof(announce_buf), 3137 "%juMB (%ju %u byte sectors: %dH %dS/T " 3138 "%dC)", (uintmax_t) 3139 (((uintmax_t)dp->secsize * 3140 dp->sectors) / (1024*1024)), 3141 (uintmax_t)dp->sectors, 3142 dp->secsize, dp->heads, 3143 dp->secs_per_track, dp->cylinders); 3144 } 3145 } else { 3146 int error; 3147 3148 announce_buf[0] = '\0'; 3149 3150 /* 3151 * Retry any UNIT ATTENTION type errors. They 3152 * are expected at boot. 3153 */ 3154 error = daerror(done_ccb, CAM_RETRY_SELTO, 3155 SF_RETRY_UA|SF_NO_PRINT); 3156 if (error == ERESTART) { 3157 /* 3158 * A retry was scheuled, so 3159 * just return. 3160 */ 3161 return; 3162 } else if (error != 0) { 3163 int asc, ascq; 3164 int sense_key, error_code; 3165 int have_sense; 3166 cam_status status; 3167 struct ccb_getdev cgd; 3168 3169 /* Don't wedge this device's queue */ 3170 status = done_ccb->ccb_h.status; 3171 if ((status & CAM_DEV_QFRZN) != 0) 3172 cam_release_devq(done_ccb->ccb_h.path, 3173 /*relsim_flags*/0, 3174 /*reduction*/0, 3175 /*timeout*/0, 3176 /*getcount_only*/0); 3177 3178 3179 xpt_setup_ccb(&cgd.ccb_h, 3180 done_ccb->ccb_h.path, 3181 CAM_PRIORITY_NORMAL); 3182 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 3183 xpt_action((union ccb *)&cgd); 3184 3185 if (scsi_extract_sense_ccb(done_ccb, 3186 &error_code, &sense_key, &asc, &ascq)) 3187 have_sense = TRUE; 3188 else 3189 have_sense = FALSE; 3190 3191 /* 3192 * If we tried READ CAPACITY(16) and failed, 3193 * fallback to READ CAPACITY(10). 
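 * The fallback below fires either on a CAM_REQ_INVALID status or on
 * an ILLEGAL REQUEST sense key, and it also clears DA_FLAG_CAN_RC16
 * so that later reprobes go straight to READ CAPACITY(10).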
3194 */ 3195 if ((state == DA_CCB_PROBE_RC16) && 3196 (softc->flags & DA_FLAG_CAN_RC16) && 3197 (((csio->ccb_h.status & CAM_STATUS_MASK) == 3198 CAM_REQ_INVALID) || 3199 ((have_sense) && 3200 (error_code == SSD_CURRENT_ERROR) && 3201 (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) { 3202 softc->flags &= ~DA_FLAG_CAN_RC16; 3203 free(rdcap, M_SCSIDA); 3204 xpt_release_ccb(done_ccb); 3205 softc->state = DA_STATE_PROBE_RC; 3206 xpt_schedule(periph, priority); 3207 return; 3208 } else 3209 /* 3210 * Attach to anything that claims to be a 3211 * direct access or optical disk device, 3212 * as long as it doesn't return a "Logical 3213 * unit not supported" (0x25) error. 3214 */ 3215 if ((have_sense) && (asc != 0x25) 3216 && (error_code == SSD_CURRENT_ERROR)) { 3217 const char *sense_key_desc; 3218 const char *asc_desc; 3219 3220 dasetgeom(periph, 512, -1, NULL, 0); 3221 scsi_sense_desc(sense_key, asc, ascq, 3222 &cgd.inq_data, 3223 &sense_key_desc, 3224 &asc_desc); 3225 snprintf(announce_buf, 3226 sizeof(announce_buf), 3227 "Attempt to query device " 3228 "size failed: %s, %s", 3229 sense_key_desc, 3230 asc_desc); 3231 } else { 3232 if (have_sense) 3233 scsi_sense_print( 3234 &done_ccb->csio); 3235 else { 3236 xpt_print(periph->path, 3237 "got CAM status %#x\n", 3238 done_ccb->ccb_h.status); 3239 } 3240 3241 xpt_print(periph->path, "fatal error, " 3242 "failed to attach to device\n"); 3243 3244 /* 3245 * Free up resources. 3246 */ 3247 cam_periph_invalidate(periph); 3248 } 3249 } 3250 } 3251 free(csio->data_ptr, M_SCSIDA); 3252 if (announce_buf[0] != '\0' && 3253 ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) { 3254 /* 3255 * Create our sysctl variables, now that we know 3256 * we have successfully attached. 3257 */ 3258 /* increase the refcount */ 3259 if (cam_periph_acquire(periph) == CAM_REQ_CMP) { 3260 taskqueue_enqueue(taskqueue_thread, 3261 &softc->sysctl_task); 3262 xpt_announce_periph(periph, announce_buf); 3263 xpt_announce_quirks(periph, softc->quirks, 3264 DA_Q_BIT_STRING); 3265 } else { 3266 xpt_print(periph->path, "fatal error, " 3267 "could not acquire reference count\n"); 3268 } 3269 } 3270 3271 /* We already probed the device. */ 3272 if (softc->flags & DA_FLAG_PROBED) { 3273 daprobedone(periph, done_ccb); 3274 return; 3275 } 3276 3277 /* Ensure re-probe doesn't see old delete. */ 3278 softc->delete_available = 0; 3279 if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) { 3280 /* 3281 * Based on older SBC-3 spec revisions 3282 * any of the UNMAP methods "may" be 3283 * available via LBP given this flag so 3284 * we flag all of them as availble and 3285 * then remove those which further 3286 * probes confirm aren't available 3287 * later. 
3288 * 3289 * We could also check readcap(16) p_type 3290 * flag to exclude one or more invalid 3291 * write same (X) types here 3292 */ 3293 dadeleteflag(softc, DA_DELETE_WS16, 1); 3294 dadeleteflag(softc, DA_DELETE_WS10, 1); 3295 dadeleteflag(softc, DA_DELETE_ZERO, 1); 3296 dadeleteflag(softc, DA_DELETE_UNMAP, 1); 3297 3298 xpt_release_ccb(done_ccb); 3299 softc->state = DA_STATE_PROBE_LBP; 3300 xpt_schedule(periph, priority); 3301 return; 3302 } 3303 3304 xpt_release_ccb(done_ccb); 3305 softc->state = DA_STATE_PROBE_BDC; 3306 xpt_schedule(periph, priority); 3307 return; 3308 } 3309 case DA_CCB_PROBE_LBP: 3310 { 3311 struct scsi_vpd_logical_block_prov *lbp; 3312 3313 lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; 3314 3315 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 3316 /* 3317 * T10/1799-D Revision 31 states at least one of these 3318 * must be supported but we don't currently enforce this. 3319 */ 3320 dadeleteflag(softc, DA_DELETE_WS16, 3321 (lbp->flags & SVPD_LBP_WS16)); 3322 dadeleteflag(softc, DA_DELETE_WS10, 3323 (lbp->flags & SVPD_LBP_WS10)); 3324 dadeleteflag(softc, DA_DELETE_ZERO, 3325 (lbp->flags & SVPD_LBP_WS10)); 3326 dadeleteflag(softc, DA_DELETE_UNMAP, 3327 (lbp->flags & SVPD_LBP_UNMAP)); 3328 } else { 3329 int error; 3330 error = daerror(done_ccb, CAM_RETRY_SELTO, 3331 SF_RETRY_UA|SF_NO_PRINT); 3332 if (error == ERESTART) 3333 return; 3334 else if (error != 0) { 3335 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 3336 /* Don't wedge this device's queue */ 3337 cam_release_devq(done_ccb->ccb_h.path, 3338 /*relsim_flags*/0, 3339 /*reduction*/0, 3340 /*timeout*/0, 3341 /*getcount_only*/0); 3342 } 3343 3344 /* 3345 * Failure indicates we don't support any SBC-3 3346 * delete methods with UNMAP 3347 */ 3348 } 3349 } 3350 3351 free(lbp, M_SCSIDA); 3352 xpt_release_ccb(done_ccb); 3353 softc->state = DA_STATE_PROBE_BLK_LIMITS; 3354 xpt_schedule(periph, priority); 3355 return; 3356 } 3357 case DA_CCB_PROBE_BLK_LIMITS: 3358 { 3359 struct scsi_vpd_block_limits *block_limits; 3360 3361 block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; 3362 3363 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 3364 uint32_t max_txfer_len = scsi_4btoul( 3365 block_limits->max_txfer_len); 3366 uint32_t max_unmap_lba_cnt = scsi_4btoul( 3367 block_limits->max_unmap_lba_cnt); 3368 uint32_t max_unmap_blk_cnt = scsi_4btoul( 3369 block_limits->max_unmap_blk_cnt); 3370 uint64_t ws_max_blks = scsi_8btou64( 3371 block_limits->max_write_same_length); 3372 3373 if (max_txfer_len != 0) { 3374 softc->disk->d_maxsize = MIN(softc->maxio, 3375 (off_t)max_txfer_len * softc->params.secsize); 3376 } 3377 3378 /* 3379 * We should already support UNMAP but we check lba 3380 * and block count to be sure 3381 */ 3382 if (max_unmap_lba_cnt != 0x00L && 3383 max_unmap_blk_cnt != 0x00L) { 3384 softc->unmap_max_lba = max_unmap_lba_cnt; 3385 softc->unmap_max_ranges = min(max_unmap_blk_cnt, 3386 UNMAP_MAX_RANGES); 3387 } else { 3388 /* 3389 * Unexpected UNMAP limits which means the 3390 * device doesn't actually support UNMAP 3391 */ 3392 dadeleteflag(softc, DA_DELETE_UNMAP, 0); 3393 } 3394 3395 if (ws_max_blks != 0x00L) 3396 softc->ws_max_blks = ws_max_blks; 3397 } else { 3398 int error; 3399 error = daerror(done_ccb, CAM_RETRY_SELTO, 3400 SF_RETRY_UA|SF_NO_PRINT); 3401 if (error == ERESTART) 3402 return; 3403 else if (error != 0) { 3404 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 3405 /* Don't wedge this device's queue */ 3406 
cam_release_devq(done_ccb->ccb_h.path, 3407 /*relsim_flags*/0, 3408 /*reduction*/0, 3409 /*timeout*/0, 3410 /*getcount_only*/0); 3411 } 3412 3413 /* 3414 * Failure here doesn't mean UNMAP is not 3415 * supported as this is an optional page. 3416 */ 3417 softc->unmap_max_lba = 1; 3418 softc->unmap_max_ranges = 1; 3419 } 3420 } 3421 3422 free(block_limits, M_SCSIDA); 3423 xpt_release_ccb(done_ccb); 3424 softc->state = DA_STATE_PROBE_BDC; 3425 xpt_schedule(periph, priority); 3426 return; 3427 } 3428 case DA_CCB_PROBE_BDC: 3429 { 3430 struct scsi_vpd_block_characteristics *bdc; 3431 3432 bdc = (struct scsi_vpd_block_characteristics *)csio->data_ptr; 3433 3434 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 3435 /* 3436 * Disable queue sorting for non-rotational media 3437 * by default. 3438 */ 3439 u_int16_t old_rate = softc->disk->d_rotation_rate; 3440 3441 softc->disk->d_rotation_rate = 3442 scsi_2btoul(bdc->medium_rotation_rate); 3443 if (softc->disk->d_rotation_rate == 3444 SVPD_BDC_RATE_NON_ROTATING) { 3445 softc->sort_io_queue = 0; 3446 } 3447 if (softc->disk->d_rotation_rate != old_rate) { 3448 disk_attr_changed(softc->disk, 3449 "GEOM::rotation_rate", M_NOWAIT); 3450 } 3451 } else { 3452 int error; 3453 error = daerror(done_ccb, CAM_RETRY_SELTO, 3454 SF_RETRY_UA|SF_NO_PRINT); 3455 if (error == ERESTART) 3456 return; 3457 else if (error != 0) { 3458 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 3459 /* Don't wedge this device's queue */ 3460 cam_release_devq(done_ccb->ccb_h.path, 3461 /*relsim_flags*/0, 3462 /*reduction*/0, 3463 /*timeout*/0, 3464 /*getcount_only*/0); 3465 } 3466 } 3467 } 3468 3469 free(bdc, M_SCSIDA); 3470 xpt_release_ccb(done_ccb); 3471 softc->state = DA_STATE_PROBE_ATA; 3472 xpt_schedule(periph, priority); 3473 return; 3474 } 3475 case DA_CCB_PROBE_ATA: 3476 { 3477 int i; 3478 struct ata_params *ata_params; 3479 int16_t *ptr; 3480 3481 ata_params = (struct ata_params *)csio->data_ptr; 3482 ptr = (uint16_t *)ata_params; 3483 3484 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 3485 uint16_t old_rate; 3486 3487 for (i = 0; i < sizeof(*ata_params) / 2; i++) 3488 ptr[i] = le16toh(ptr[i]); 3489 if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM && 3490 (softc->quirks & DA_Q_NO_UNMAP) == 0) { 3491 dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1); 3492 if (ata_params->max_dsm_blocks != 0) 3493 softc->trim_max_ranges = min( 3494 softc->trim_max_ranges, 3495 ata_params->max_dsm_blocks * 3496 ATA_DSM_BLK_RANGES); 3497 } 3498 /* 3499 * Disable queue sorting for non-rotational media 3500 * by default. 3501 */ 3502 old_rate = softc->disk->d_rotation_rate; 3503 softc->disk->d_rotation_rate = 3504 ata_params->media_rotation_rate; 3505 if (softc->disk->d_rotation_rate == 3506 ATA_RATE_NON_ROTATING) { 3507 softc->sort_io_queue = 0; 3508 } 3509 3510 if (softc->disk->d_rotation_rate != old_rate) { 3511 disk_attr_changed(softc->disk, 3512 "GEOM::rotation_rate", M_NOWAIT); 3513 } 3514 } else { 3515 int error; 3516 error = daerror(done_ccb, CAM_RETRY_SELTO, 3517 SF_RETRY_UA|SF_NO_PRINT); 3518 if (error == ERESTART) 3519 return; 3520 else if (error != 0) { 3521 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 3522 /* Don't wedge this device's queue */ 3523 cam_release_devq(done_ccb->ccb_h.path, 3524 /*relsim_flags*/0, 3525 /*reduction*/0, 3526 /*timeout*/0, 3527 /*getcount_only*/0); 3528 } 3529 } 3530 } 3531 3532 free(ata_params, M_SCSIDA); 3533 daprobedone(periph, done_ccb); 3534 return; 3535 } 3536 case DA_CCB_DUMP: 3537 /* No-op. 
We're polling */ 3538 return; 3539 case DA_CCB_TUR: 3540 { 3541 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3542 3543 if (daerror(done_ccb, CAM_RETRY_SELTO, 3544 SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == 3545 ERESTART) 3546 return; 3547 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 3548 cam_release_devq(done_ccb->ccb_h.path, 3549 /*relsim_flags*/0, 3550 /*reduction*/0, 3551 /*timeout*/0, 3552 /*getcount_only*/0); 3553 } 3554 xpt_release_ccb(done_ccb); 3555 cam_periph_release_locked(periph); 3556 return; 3557 } 3558 default: 3559 break; 3560 } 3561 xpt_release_ccb(done_ccb); 3562 } 3563 3564 static void 3565 dareprobe(struct cam_periph *periph) 3566 { 3567 struct da_softc *softc; 3568 cam_status status; 3569 3570 softc = (struct da_softc *)periph->softc; 3571 3572 /* Probe in progress; don't interfere. */ 3573 if (softc->state != DA_STATE_NORMAL) 3574 return; 3575 3576 status = cam_periph_acquire(periph); 3577 KASSERT(status == CAM_REQ_CMP, 3578 ("dareprobe: cam_periph_acquire failed")); 3579 3580 if (softc->flags & DA_FLAG_CAN_RC16) 3581 softc->state = DA_STATE_PROBE_RC16; 3582 else 3583 softc->state = DA_STATE_PROBE_RC; 3584 3585 xpt_schedule(periph, CAM_PRIORITY_DEV); 3586 } 3587 3588 static int 3589 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 3590 { 3591 struct da_softc *softc; 3592 struct cam_periph *periph; 3593 int error, error_code, sense_key, asc, ascq; 3594 3595 periph = xpt_path_periph(ccb->ccb_h.path); 3596 softc = (struct da_softc *)periph->softc; 3597 3598 /* 3599 * Automatically detect devices that do not support 3600 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs. 3601 */ 3602 error = 0; 3603 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { 3604 error = cmd6workaround(ccb); 3605 } else if (scsi_extract_sense_ccb(ccb, 3606 &error_code, &sense_key, &asc, &ascq)) { 3607 if (sense_key == SSD_KEY_ILLEGAL_REQUEST) 3608 error = cmd6workaround(ccb); 3609 /* 3610 * If the target replied with CAPACITY DATA HAS CHANGED UA, 3611 * query the capacity and notify upper layers. 3612 */ 3613 else if (sense_key == SSD_KEY_UNIT_ATTENTION && 3614 asc == 0x2A && ascq == 0x09) { 3615 xpt_print(periph->path, "Capacity data has changed\n"); 3616 softc->flags &= ~DA_FLAG_PROBED; 3617 dareprobe(periph); 3618 sense_flags |= SF_NO_PRINT; 3619 } else if (sense_key == SSD_KEY_UNIT_ATTENTION && 3620 asc == 0x28 && ascq == 0x00) { 3621 softc->flags &= ~DA_FLAG_PROBED; 3622 disk_media_changed(softc->disk, M_NOWAIT); 3623 } else if (sense_key == SSD_KEY_UNIT_ATTENTION && 3624 asc == 0x3F && ascq == 0x03) { 3625 xpt_print(periph->path, "INQUIRY data has changed\n"); 3626 softc->flags &= ~DA_FLAG_PROBED; 3627 dareprobe(periph); 3628 sense_flags |= SF_NO_PRINT; 3629 } else if (sense_key == SSD_KEY_NOT_READY && 3630 asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { 3631 softc->flags |= DA_FLAG_PACK_INVALID; 3632 disk_media_gone(softc->disk, M_NOWAIT); 3633 } 3634 } 3635 if (error == ERESTART) 3636 return (ERESTART); 3637 3638 /* 3639 * XXX 3640 * Until we have a better way of doing pack validation, 3641 * don't treat UAs as errors. 
3642 */ 3643 sense_flags |= SF_RETRY_UA; 3644 3645 if (softc->quirks & DA_Q_RETRY_BUSY) 3646 sense_flags |= SF_RETRY_BUSY; 3647 return(cam_periph_error(ccb, cam_flags, sense_flags, 3648 &softc->saved_ccb)); 3649 } 3650 3651 static void 3652 damediapoll(void *arg) 3653 { 3654 struct cam_periph *periph = arg; 3655 struct da_softc *softc = periph->softc; 3656 3657 if (!softc->tur && LIST_EMPTY(&softc->pending_ccbs)) { 3658 if (cam_periph_acquire(periph) == CAM_REQ_CMP) { 3659 softc->tur = 1; 3660 daschedule(periph); 3661 } 3662 } 3663 /* Queue us up again */ 3664 if (da_poll_period != 0) 3665 callout_schedule(&softc->mediapoll_c, da_poll_period * hz); 3666 } 3667 3668 static void 3669 daprevent(struct cam_periph *periph, int action) 3670 { 3671 struct da_softc *softc; 3672 union ccb *ccb; 3673 int error; 3674 3675 softc = (struct da_softc *)periph->softc; 3676 3677 if (((action == PR_ALLOW) 3678 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) 3679 || ((action == PR_PREVENT) 3680 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { 3681 return; 3682 } 3683 3684 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 3685 3686 scsi_prevent(&ccb->csio, 3687 /*retries*/1, 3688 /*cbcfp*/dadone, 3689 MSG_SIMPLE_Q_TAG, 3690 action, 3691 SSD_FULL_SIZE, 3692 5000); 3693 3694 error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO, 3695 SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat); 3696 3697 if (error == 0) { 3698 if (action == PR_ALLOW) 3699 softc->flags &= ~DA_FLAG_PACK_LOCKED; 3700 else 3701 softc->flags |= DA_FLAG_PACK_LOCKED; 3702 } 3703 3704 xpt_release_ccb(ccb); 3705 } 3706 3707 static void 3708 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, 3709 struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len) 3710 { 3711 struct ccb_calc_geometry ccg; 3712 struct da_softc *softc; 3713 struct disk_params *dp; 3714 u_int lbppbe, lalba; 3715 int error; 3716 3717 softc = (struct da_softc *)periph->softc; 3718 3719 dp = &softc->params; 3720 dp->secsize = block_len; 3721 dp->sectors = maxsector + 1; 3722 if (rcaplong != NULL) { 3723 lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; 3724 lalba = scsi_2btoul(rcaplong->lalba_lbp); 3725 lalba &= SRC16_LALBA_A; 3726 } else { 3727 lbppbe = 0; 3728 lalba = 0; 3729 } 3730 3731 if (lbppbe > 0) { 3732 dp->stripesize = block_len << lbppbe; 3733 dp->stripeoffset = (dp->stripesize - block_len * lalba) % 3734 dp->stripesize; 3735 } else if (softc->quirks & DA_Q_4K) { 3736 dp->stripesize = 4096; 3737 dp->stripeoffset = 0; 3738 } else { 3739 dp->stripesize = 0; 3740 dp->stripeoffset = 0; 3741 } 3742 /* 3743 * Have the controller provide us with a geometry 3744 * for this disk. The only time the geometry 3745 * matters is when we boot and the controller 3746 * is the only one knowledgeable enough to come 3747 * up with something that will make this a bootable 3748 * device. 3749 */ 3750 xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 3751 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; 3752 ccg.block_size = dp->secsize; 3753 ccg.volume_size = dp->sectors; 3754 ccg.heads = 0; 3755 ccg.secs_per_track = 0; 3756 ccg.cylinders = 0; 3757 xpt_action((union ccb*)&ccg); 3758 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3759 /* 3760 * We don't know what went wrong here- but just pick 3761 * a geometry so we don't have nasty things like divide 3762 * by zero. 
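 * The fabricated geometry below is 255 heads and 255 sectors per
 * track, with the cylinder count derived from the capacity and
 * clamped to a minimum of one.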
3763 */ 3764 dp->heads = 255; 3765 dp->secs_per_track = 255; 3766 dp->cylinders = dp->sectors / (255 * 255); 3767 if (dp->cylinders == 0) { 3768 dp->cylinders = 1; 3769 } 3770 } else { 3771 dp->heads = ccg.heads; 3772 dp->secs_per_track = ccg.secs_per_track; 3773 dp->cylinders = ccg.cylinders; 3774 } 3775 3776 /* 3777 * If the user supplied a read capacity buffer, and if it is 3778 * different than the previous buffer, update the data in the EDT. 3779 * If it's the same, we don't bother. This avoids sending an 3780 * update every time someone opens this device. 3781 */ 3782 if ((rcaplong != NULL) 3783 && (bcmp(rcaplong, &softc->rcaplong, 3784 min(sizeof(softc->rcaplong), rcap_len)) != 0)) { 3785 struct ccb_dev_advinfo cdai; 3786 3787 xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 3788 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 3789 cdai.buftype = CDAI_TYPE_RCAPLONG; 3790 cdai.flags |= CDAI_FLAG_STORE; 3791 cdai.bufsiz = rcap_len; 3792 cdai.buf = (uint8_t *)rcaplong; 3793 xpt_action((union ccb *)&cdai); 3794 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 3795 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 3796 if (cdai.ccb_h.status != CAM_REQ_CMP) { 3797 xpt_print(periph->path, "%s: failed to set read " 3798 "capacity advinfo\n", __func__); 3799 /* Use cam_error_print() to decode the status */ 3800 cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS, 3801 CAM_EPF_ALL); 3802 } else { 3803 bcopy(rcaplong, &softc->rcaplong, 3804 min(sizeof(softc->rcaplong), rcap_len)); 3805 } 3806 } 3807 3808 softc->disk->d_sectorsize = softc->params.secsize; 3809 softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors; 3810 softc->disk->d_stripesize = softc->params.stripesize; 3811 softc->disk->d_stripeoffset = softc->params.stripeoffset; 3812 /* XXX: these are not actually "firmware" values, so they may be wrong */ 3813 softc->disk->d_fwsectors = softc->params.secs_per_track; 3814 softc->disk->d_fwheads = softc->params.heads; 3815 softc->disk->d_devstat->block_size = softc->params.secsize; 3816 softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; 3817 3818 error = disk_resize(softc->disk, M_NOWAIT); 3819 if (error != 0) 3820 xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error); 3821 } 3822 3823 static void 3824 dasendorderedtag(void *arg) 3825 { 3826 struct da_softc *softc = arg; 3827 3828 if (da_send_ordered) { 3829 if (!LIST_EMPTY(&softc->pending_ccbs)) { 3830 if ((softc->flags & DA_FLAG_WAS_OTAG) == 0) 3831 softc->flags |= DA_FLAG_NEED_OTAG; 3832 softc->flags &= ~DA_FLAG_WAS_OTAG; 3833 } 3834 } 3835 /* Queue us up again */ 3836 callout_reset(&softc->sendordered_c, 3837 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, 3838 dasendorderedtag, softc); 3839 } 3840 3841 /* 3842 * Step through all DA peripheral drivers, and if the device is still open, 3843 * sync the disk cache to physical media. 3844 */ 3845 static void 3846 dashutdown(void * arg, int howto) 3847 { 3848 struct cam_periph *periph; 3849 struct da_softc *softc; 3850 union ccb *ccb; 3851 int error; 3852 3853 CAM_PERIPH_FOREACH(periph, &dadriver) { 3854 softc = (struct da_softc *)periph->softc; 3855 if (SCHEDULER_STOPPED()) { 3856 /* If we paniced with the lock held, do not recurse. */ 3857 if (!cam_periph_owned(periph) && 3858 (softc->flags & DA_FLAG_OPEN)) { 3859 dadump(softc->disk, NULL, 0, 0, 0); 3860 } 3861 continue; 3862 } 3863 cam_periph_lock(periph); 3864 3865 /* 3866 * We only sync the cache if the drive is still open, and 3867 * if the drive is capable of it.. 
3868 */ 3869 if (((softc->flags & DA_FLAG_OPEN) == 0) 3870 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) { 3871 cam_periph_unlock(periph); 3872 continue; 3873 } 3874 3875 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 3876 scsi_synchronize_cache(&ccb->csio, 3877 /*retries*/0, 3878 /*cbfcnp*/dadone, 3879 MSG_SIMPLE_Q_TAG, 3880 /*begin_lba*/0, /* whole disk */ 3881 /*lb_count*/0, 3882 SSD_FULL_SIZE, 3883 60 * 60 * 1000); 3884 3885 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, 3886 /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, 3887 softc->disk->d_devstat); 3888 if (error != 0) 3889 xpt_print(periph->path, "Synchronize cache failed\n"); 3890 xpt_release_ccb(ccb); 3891 cam_periph_unlock(periph); 3892 } 3893 } 3894 3895 #else /* !_KERNEL */ 3896 3897 /* 3898 * XXX These are only left out of the kernel build to silence warnings. If, 3899 * for some reason these functions are used in the kernel, the ifdefs should 3900 * be moved so they are included both in the kernel and userland. 3901 */ 3902 void 3903 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries, 3904 void (*cbfcnp)(struct cam_periph *, union ccb *), 3905 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave, 3906 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, 3907 u_int32_t timeout) 3908 { 3909 struct scsi_format_unit *scsi_cmd; 3910 3911 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes; 3912 scsi_cmd->opcode = FORMAT_UNIT; 3913 scsi_cmd->byte2 = byte2; 3914 scsi_ulto2b(ileave, scsi_cmd->interleave); 3915 3916 cam_fill_csio(csio, 3917 retries, 3918 cbfcnp, 3919 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, 3920 tag_action, 3921 data_ptr, 3922 dxfer_len, 3923 sense_len, 3924 sizeof(*scsi_cmd), 3925 timeout); 3926 } 3927 3928 void 3929 scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries, 3930 void (*cbfcnp)(struct cam_periph *, union ccb *), 3931 uint8_t tag_action, uint8_t list_format, 3932 uint32_t addr_desc_index, uint8_t *data_ptr, 3933 uint32_t dxfer_len, int minimum_cmd_size, 3934 uint8_t sense_len, uint32_t timeout) 3935 { 3936 uint8_t cdb_len; 3937 3938 /* 3939 * These conditions allow using the 10 byte command. Otherwise we 3940 * need to use the 12 byte command. 
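 * That is: the caller accepts a 10 byte CDB, wants the list from the
 * beginning (the 10 byte form cannot encode an address descriptor
 * index) and the allocation length fits the 16 bit field
 * (SRDD10_MAX_LENGTH).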
3941 */ 3942 if ((minimum_cmd_size <= 10) 3943 && (addr_desc_index == 0) 3944 && (dxfer_len <= SRDD10_MAX_LENGTH)) { 3945 struct scsi_read_defect_data_10 *cdb10; 3946 3947 cdb10 = (struct scsi_read_defect_data_10 *) 3948 &csio->cdb_io.cdb_bytes; 3949 3950 cdb_len = sizeof(*cdb10); 3951 bzero(cdb10, cdb_len); 3952 cdb10->opcode = READ_DEFECT_DATA_10; 3953 cdb10->format = list_format; 3954 scsi_ulto2b(dxfer_len, cdb10->alloc_length); 3955 } else { 3956 struct scsi_read_defect_data_12 *cdb12; 3957 3958 cdb12 = (struct scsi_read_defect_data_12 *) 3959 &csio->cdb_io.cdb_bytes; 3960 3961 cdb_len = sizeof(*cdb12); 3962 bzero(cdb12, cdb_len); 3963 cdb12->opcode = READ_DEFECT_DATA_12; 3964 cdb12->format = list_format; 3965 scsi_ulto4b(dxfer_len, cdb12->alloc_length); 3966 scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index); 3967 } 3968 3969 cam_fill_csio(csio, 3970 retries, 3971 cbfcnp, 3972 /*flags*/ CAM_DIR_IN, 3973 tag_action, 3974 data_ptr, 3975 dxfer_len, 3976 sense_len, 3977 cdb_len, 3978 timeout); 3979 } 3980 3981 void 3982 scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries, 3983 void (*cbfcnp)(struct cam_periph *, union ccb *), 3984 u_int8_t tag_action, u_int8_t byte2, u_int16_t control, 3985 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, 3986 u_int32_t timeout) 3987 { 3988 struct scsi_sanitize *scsi_cmd; 3989 3990 scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes; 3991 scsi_cmd->opcode = SANITIZE; 3992 scsi_cmd->byte2 = byte2; 3993 scsi_cmd->control = control; 3994 scsi_ulto2b(dxfer_len, scsi_cmd->length); 3995 3996 cam_fill_csio(csio, 3997 retries, 3998 cbfcnp, 3999 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, 4000 tag_action, 4001 data_ptr, 4002 dxfer_len, 4003 sense_len, 4004 sizeof(*scsi_cmd), 4005 timeout); 4006 } 4007 4008 #endif /* _KERNEL */ 4009
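
/*
 * Illustrative sketch, kept inside a comment so it is not compiled with
 * this file: one way a libcam consumer (a userland tool in the spirit of
 * camcontrol) might drive the scsi_read_defects() helper above.  The
 * function name read_defect_list, the list format of 0, the 10 byte
 * minimum CDB size and the 5000 ms timeout are arbitrary example choices,
 * not part of the driver.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <camlib.h>
 *	#include <cam/scsi/scsi_message.h>
 *	#include <cam/scsi/scsi_da.h>
 *
 *	static int
 *	read_defect_list(const char *path, uint8_t *buf, uint32_t len)
 *	{
 *		struct cam_device *dev;
 *		union ccb *ccb;
 *		int error = -1;
 *
 *		if ((dev = cam_open_device(path, O_RDWR)) == NULL)
 *			return (-1);
 *		if ((ccb = cam_getccb(dev)) == NULL)
 *			goto out;
 *		scsi_read_defects(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG,
 *		    0, 0, buf, len, 10, SSD_FULL_SIZE, 5000);
 *		if (cam_send_ccb(dev, ccb) >= 0 &&
 *		    (ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
 *			error = 0;
 *		cam_freeccb(ccb);
 *	out:
 *		cam_close_device(dev);
 *		return (error);
 *	}
 */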