1 /*- 2 * Implementation of SCSI Direct Access Peripheral driver for CAM. 3 * 4 * Copyright (c) 1997 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 34 #ifdef _KERNEL 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/bio.h> 38 #include <sys/sysctl.h> 39 #include <sys/taskqueue.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/conf.h> 43 #include <sys/devicestat.h> 44 #include <sys/eventhandler.h> 45 #include <sys/malloc.h> 46 #include <sys/cons.h> 47 #include <sys/endian.h> 48 #include <sys/proc.h> 49 #include <geom/geom.h> 50 #include <geom/geom_disk.h> 51 #endif /* _KERNEL */ 52 53 #ifndef _KERNEL 54 #include <stdio.h> 55 #include <string.h> 56 #endif /* _KERNEL */ 57 58 #include <cam/cam.h> 59 #include <cam/cam_ccb.h> 60 #include <cam/cam_periph.h> 61 #include <cam/cam_xpt_periph.h> 62 #include <cam/cam_sim.h> 63 64 #include <cam/scsi/scsi_message.h> 65 66 #ifndef _KERNEL 67 #include <cam/scsi/scsi_da.h> 68 #endif /* !_KERNEL */ 69 70 #ifdef _KERNEL 71 typedef enum { 72 DA_STATE_PROBE_RC, 73 DA_STATE_PROBE_RC16, 74 DA_STATE_PROBE_LBP, 75 DA_STATE_PROBE_BLK_LIMITS, 76 DA_STATE_PROBE_BDC, 77 DA_STATE_PROBE_ATA, 78 DA_STATE_NORMAL 79 } da_state; 80 81 typedef enum { 82 DA_FLAG_PACK_INVALID = 0x001, 83 DA_FLAG_NEW_PACK = 0x002, 84 DA_FLAG_PACK_LOCKED = 0x004, 85 DA_FLAG_PACK_REMOVABLE = 0x008, 86 DA_FLAG_NEED_OTAG = 0x020, 87 DA_FLAG_WENT_IDLE = 0x040, 88 DA_FLAG_RETRY_UA = 0x080, 89 DA_FLAG_OPEN = 0x100, 90 DA_FLAG_SCTX_INIT = 0x200, 91 DA_FLAG_CAN_RC16 = 0x400, 92 DA_FLAG_PROBED = 0x800, 93 DA_FLAG_DIRTY = 0x1000 94 } da_flags; 95 96 typedef enum { 97 DA_Q_NONE = 0x00, 98 DA_Q_NO_SYNC_CACHE = 0x01, 99 DA_Q_NO_6_BYTE = 0x02, 100 DA_Q_NO_PREVENT = 0x04, 101 DA_Q_4K = 0x08, 102 DA_Q_NO_RC16 = 0x10 103 } da_quirks; 104 105 #define DA_Q_BIT_STRING \ 106 "\020" \ 107 "\001NO_SYNC_CACHE" \ 108 "\002NO_6_BYTE" \ 109 "\003NO_PREVENT" \ 110 "\0044K" \ 111 "\005NO_RC16" 112 113 typedef enum { 114 DA_CCB_PROBE_RC = 0x01, 115 DA_CCB_PROBE_RC16 = 0x02, 116 DA_CCB_PROBE_LBP = 0x03, 117 DA_CCB_PROBE_BLK_LIMITS = 0x04, 118 DA_CCB_PROBE_BDC = 0x05, 119 DA_CCB_PROBE_ATA = 0x06, 120 DA_CCB_BUFFER_IO = 0x07, 121 DA_CCB_WAITING = 0x08, 122 DA_CCB_DUMP = 0x0A, 123 DA_CCB_DELETE = 0x0B, 124 DA_CCB_TUR = 0x0C, 125 DA_CCB_TYPE_MASK = 0x0F, 126 DA_CCB_RETRY_UA = 0x10 127 } da_ccb_state; 128 129 /* 130 * Order here is important for method choice 131 * 132 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to 133 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes 134 * using ATA_TRIM than the corresponding UNMAP results for a real world mysql 135 * import taking 5mins. 136 * 137 */ 138 typedef enum { 139 DA_DELETE_NONE, 140 DA_DELETE_DISABLE, 141 DA_DELETE_ATA_TRIM, 142 DA_DELETE_UNMAP, 143 DA_DELETE_WS16, 144 DA_DELETE_WS10, 145 DA_DELETE_ZERO, 146 DA_DELETE_MIN = DA_DELETE_ATA_TRIM, 147 DA_DELETE_MAX = DA_DELETE_ZERO 148 } da_delete_methods; 149 150 typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb, 151 struct bio *bp); 152 static da_delete_func_t da_delete_trim; 153 static da_delete_func_t da_delete_unmap; 154 static da_delete_func_t da_delete_ws; 155 156 static const void * da_delete_functions[] = { 157 NULL, 158 NULL, 159 da_delete_trim, 160 da_delete_unmap, 161 da_delete_ws, 162 da_delete_ws, 163 da_delete_ws 164 }; 165 166 static const char *da_delete_method_names[] = 167 { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" }; 168 static const char *da_delete_method_desc[] = 169 { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP", 170 "WRITE SAME(10) with UNMAP", "ZERO" }; 171 172 /* Offsets into our private area for storing information */ 173 #define ccb_state ppriv_field0 174 #define ccb_bp ppriv_ptr1 175 176 struct disk_params { 177 u_int8_t heads; 178 u_int32_t cylinders; 179 u_int8_t secs_per_track; 180 u_int32_t secsize; /* Number of bytes/sector */ 181 u_int64_t sectors; /* total number sectors */ 182 u_int stripesize; 183 u_int stripeoffset; 184 }; 185 186 #define UNMAP_RANGE_MAX 0xffffffff 187 #define UNMAP_HEAD_SIZE 8 188 #define UNMAP_RANGE_SIZE 16 189 #define UNMAP_MAX_RANGES 2048 /* Protocol Max is 4095 */ 190 #define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \ 191 UNMAP_HEAD_SIZE) 192 193 #define WS10_MAX_BLKS 0xffff 194 #define WS16_MAX_BLKS 0xffffffff 195 #define ATA_TRIM_MAX_RANGES ((UNMAP_BUF_SIZE / \ 196 (ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE) 197 198 struct da_softc { 199 struct bio_queue_head bio_queue; 200 struct bio_queue_head delete_queue; 201 struct bio_queue_head delete_run_queue; 202 SLIST_ENTRY(da_softc) links; 203 LIST_HEAD(, ccb_hdr) pending_ccbs; 204 da_state state; 205 da_flags flags; 206 da_quirks quirks; 207 int sort_io_queue; 208 int minimum_cmd_size; 209 int error_inject; 210 int ordered_tag_count; 211 int outstanding_cmds; 212 int trim_max_ranges; 213 int delete_running; 214 int tur; 215 int delete_available; /* Delete methods possibly available */ 216 uint32_t unmap_max_ranges; 217 uint32_t unmap_max_lba; 218 uint64_t ws_max_blks; 219 da_delete_methods delete_method; 220 da_delete_func_t *delete_func; 221 struct disk_params params; 222 struct disk *disk; 223 union ccb saved_ccb; 224 struct task sysctl_task; 225 struct sysctl_ctx_list sysctl_ctx; 226 struct sysctl_oid *sysctl_tree; 227 struct callout sendordered_c; 228 uint64_t wwpn; 229 uint8_t unmap_buf[UNMAP_BUF_SIZE]; 230 struct scsi_read_capacity_data_long rcaplong; 231 struct callout mediapoll_c; 232 }; 233 234 #define dadeleteflag(softc, delete_method, enable) \ 235 if (enable) { \ 236 softc->delete_available |= (1 << delete_method); \ 237 } else { \ 238 softc->delete_available &= ~(1 << delete_method); \ 239 } 240 241 struct da_quirk_entry { 242 struct scsi_inquiry_pattern inq_pat; 243 da_quirks quirks; 244 }; 245 246 static const char quantum[] = "QUANTUM"; 247 static const char microp[] = "MICROP"; 248 249 static struct da_quirk_entry da_quirk_table[] = 250 { 251 /* SPI, FC devices */ 252 { 253 /* 254 * Fujitsu M2513A MO drives. 255 * Tested devices: M2513A2 firmware versions 1200 & 1300. 256 * (dip switch selects whether T_DIRECT or T_OPTICAL device) 257 * Reported by: W.Scholten <whs@xs4all.nl> 258 */ 259 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, 260 /*quirks*/ DA_Q_NO_SYNC_CACHE 261 }, 262 { 263 /* See above. */ 264 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, 265 /*quirks*/ DA_Q_NO_SYNC_CACHE 266 }, 267 { 268 /* 269 * This particular Fujitsu drive doesn't like the 270 * synchronize cache command. 271 * Reported by: Tom Jackson <toj@gorilla.net> 272 */ 273 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, 274 /*quirks*/ DA_Q_NO_SYNC_CACHE 275 }, 276 { 277 /* 278 * This drive doesn't like the synchronize cache command 279 * either. Reported by: Matthew Jacob <mjacob@feral.com> 280 * in NetBSD PR kern/6027, August 24, 1998. 281 */ 282 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, 283 /*quirks*/ DA_Q_NO_SYNC_CACHE 284 }, 285 { 286 /* 287 * This drive doesn't like the synchronize cache command 288 * either. Reported by: Hellmuth Michaelis (hm@kts.org) 289 * (PR 8882). 290 */ 291 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, 292 /*quirks*/ DA_Q_NO_SYNC_CACHE 293 }, 294 { 295 /* 296 * Doesn't like the synchronize cache command. 297 * Reported by: Blaz Zupan <blaz@gold.amis.net> 298 */ 299 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, 300 /*quirks*/ DA_Q_NO_SYNC_CACHE 301 }, 302 { 303 /* 304 * Doesn't like the synchronize cache command. 305 * Reported by: Blaz Zupan <blaz@gold.amis.net> 306 */ 307 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, 308 /*quirks*/ DA_Q_NO_SYNC_CACHE 309 }, 310 { 311 /* 312 * Doesn't like the synchronize cache command. 313 */ 314 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, 315 /*quirks*/ DA_Q_NO_SYNC_CACHE 316 }, 317 { 318 /* 319 * Doesn't like the synchronize cache command. 320 * Reported by: walter@pelissero.de 321 */ 322 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, 323 /*quirks*/ DA_Q_NO_SYNC_CACHE 324 }, 325 { 326 /* 327 * Doesn't work correctly with 6 byte reads/writes. 328 * Returns illegal request, and points to byte 9 of the 329 * 6-byte CDB. 330 * Reported by: Adam McDougall <bsdx@spawnet.com> 331 */ 332 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, 333 /*quirks*/ DA_Q_NO_6_BYTE 334 }, 335 { 336 /* See above. */ 337 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, 338 /*quirks*/ DA_Q_NO_6_BYTE 339 }, 340 { 341 /* 342 * Doesn't like the synchronize cache command. 343 * Reported by: walter@pelissero.de 344 */ 345 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, 346 /*quirks*/ DA_Q_NO_SYNC_CACHE 347 }, 348 { 349 /* 350 * The CISS RAID controllers do not support SYNC_CACHE 351 */ 352 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, 353 /*quirks*/ DA_Q_NO_SYNC_CACHE 354 }, 355 /* USB mass storage devices supported by umass(4) */ 356 { 357 /* 358 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player 359 * PR: kern/51675 360 */ 361 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, 362 /*quirks*/ DA_Q_NO_SYNC_CACHE 363 }, 364 { 365 /* 366 * Power Quotient Int. (PQI) USB flash key 367 * PR: kern/53067 368 */ 369 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", 370 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 371 }, 372 { 373 /* 374 * Creative Nomad MUVO mp3 player (USB) 375 * PR: kern/53094 376 */ 377 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, 378 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 379 }, 380 { 381 /* 382 * Jungsoft NEXDISK USB flash key 383 * PR: kern/54737 384 */ 385 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, 386 /*quirks*/ DA_Q_NO_SYNC_CACHE 387 }, 388 { 389 /* 390 * FreeDik USB Mini Data Drive 391 * PR: kern/54786 392 */ 393 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", 394 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 395 }, 396 { 397 /* 398 * Sigmatel USB Flash MP3 Player 399 * PR: kern/57046 400 */ 401 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, 402 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 403 }, 404 { 405 /* 406 * Neuros USB Digital Audio Computer 407 * PR: kern/63645 408 */ 409 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.", 410 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 411 }, 412 { 413 /* 414 * SEAGRAND NP-900 MP3 Player 415 * PR: kern/64563 416 */ 417 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, 418 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 419 }, 420 { 421 /* 422 * iRiver iFP MP3 player (with UMS Firmware) 423 * PR: kern/54881, i386/63941, kern/66124 424 */ 425 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, 426 /*quirks*/ DA_Q_NO_SYNC_CACHE 427 }, 428 { 429 /* 430 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 431 * PR: kern/70158 432 */ 433 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, 434 /*quirks*/ DA_Q_NO_SYNC_CACHE 435 }, 436 { 437 /* 438 * ZICPlay USB MP3 Player with FM 439 * PR: kern/75057 440 */ 441 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, 442 /*quirks*/ DA_Q_NO_SYNC_CACHE 443 }, 444 { 445 /* 446 * TEAC USB floppy mechanisms 447 */ 448 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, 449 /*quirks*/ DA_Q_NO_SYNC_CACHE 450 }, 451 { 452 /* 453 * Kingston DataTraveler II+ USB Pen-Drive. 454 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org> 455 */ 456 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", 457 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 458 }, 459 { 460 /* 461 * USB DISK Pro PMAP 462 * Reported by: jhs 463 * PR: usb/96381 464 */ 465 {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, 466 /*quirks*/ DA_Q_NO_SYNC_CACHE 467 }, 468 { 469 /* 470 * Motorola E398 Mobile Phone (TransFlash memory card). 471 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl> 472 * PR: usb/89889 473 */ 474 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", 475 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 476 }, 477 { 478 /* 479 * Qware BeatZkey! Pro 480 * PR: usb/79164 481 */ 482 {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", 483 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 484 }, 485 { 486 /* 487 * Time DPA20B 1GB MP3 Player 488 * PR: usb/81846 489 */ 490 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", 491 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 492 }, 493 { 494 /* 495 * Samsung USB key 128Mb 496 * PR: usb/90081 497 */ 498 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", 499 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 500 }, 501 { 502 /* 503 * Kingston DataTraveler 2.0 USB Flash memory. 504 * PR: usb/89196 505 */ 506 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", 507 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 508 }, 509 { 510 /* 511 * Creative MUVO Slim mp3 player (USB) 512 * PR: usb/86131 513 */ 514 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", 515 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 516 }, 517 { 518 /* 519 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) 520 * PR: usb/80487 521 */ 522 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", 523 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 524 }, 525 { 526 /* 527 * SanDisk Micro Cruzer 128MB 528 * PR: usb/75970 529 */ 530 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", 531 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 532 }, 533 { 534 /* 535 * TOSHIBA TransMemory USB sticks 536 * PR: kern/94660 537 */ 538 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", 539 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 540 }, 541 { 542 /* 543 * PNY USB Flash keys 544 * PR: usb/75578, usb/72344, usb/65436 545 */ 546 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", 547 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 548 }, 549 { 550 /* 551 * Genesys 6-in-1 Card Reader 552 * PR: usb/94647 553 */ 554 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", 555 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 556 }, 557 { 558 /* 559 * Rekam Digital CAMERA 560 * PR: usb/98713 561 */ 562 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", 563 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 564 }, 565 { 566 /* 567 * iRiver H10 MP3 player 568 * PR: usb/102547 569 */ 570 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", 571 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 572 }, 573 { 574 /* 575 * iRiver U10 MP3 player 576 * PR: usb/92306 577 */ 578 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", 579 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 580 }, 581 { 582 /* 583 * X-Micro Flash Disk 584 * PR: usb/96901 585 */ 586 {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", 587 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 588 }, 589 { 590 /* 591 * EasyMP3 EM732X USB 2.0 Flash MP3 Player 592 * PR: usb/96546 593 */ 594 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", 595 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 596 }, 597 { 598 /* 599 * Denver MP3 player 600 * PR: usb/107101 601 */ 602 {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", 603 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 604 }, 605 { 606 /* 607 * Philips USB Key Audio KEY013 608 * PR: usb/68412 609 */ 610 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, 611 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 612 }, 613 { 614 /* 615 * JNC MP3 Player 616 * PR: usb/94439 617 */ 618 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", 619 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 620 }, 621 { 622 /* 623 * SAMSUNG MP0402H 624 * PR: usb/108427 625 */ 626 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, 627 /*quirks*/ DA_Q_NO_SYNC_CACHE 628 }, 629 { 630 /* 631 * I/O Magic USB flash - Giga Bank 632 * PR: usb/108810 633 */ 634 {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, 635 /*quirks*/ DA_Q_NO_SYNC_CACHE 636 }, 637 { 638 /* 639 * JoyFly 128mb USB Flash Drive 640 * PR: 96133 641 */ 642 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", 643 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 644 }, 645 { 646 /* 647 * ChipsBnk usb stick 648 * PR: 103702 649 */ 650 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", 651 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 652 }, 653 { 654 /* 655 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A 656 * PR: 129858 657 */ 658 {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", 659 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 660 }, 661 { 662 /* 663 * Samsung YP-U3 mp3-player 664 * PR: 125398 665 */ 666 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", 667 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 668 }, 669 { 670 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", 671 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 672 }, 673 { 674 /* 675 * Sony Cyber-Shot DSC cameras 676 * PR: usb/137035 677 */ 678 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, 679 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 680 }, 681 { 682 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", 683 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT 684 }, 685 { 686 /* At least several Transcent USB sticks lie on RC16. */ 687 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", 688 "*"}, /*quirks*/ DA_Q_NO_RC16 689 }, 690 /* ATA/SATA devices over SAS/USB/... */ 691 { 692 /* Hitachi Advanced Format (4k) drives */ 693 { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, 694 /*quirks*/DA_Q_4K 695 }, 696 { 697 /* Samsung Advanced Format (4k) drives */ 698 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, 699 /*quirks*/DA_Q_4K 700 }, 701 { 702 /* Samsung Advanced Format (4k) drives */ 703 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, 704 /*quirks*/DA_Q_4K 705 }, 706 { 707 /* Samsung Advanced Format (4k) drives */ 708 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, 709 /*quirks*/DA_Q_4K 710 }, 711 { 712 /* Samsung Advanced Format (4k) drives */ 713 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, 714 /*quirks*/DA_Q_4K 715 }, 716 { 717 /* Seagate Barracuda Green Advanced Format (4k) drives */ 718 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, 719 /*quirks*/DA_Q_4K 720 }, 721 { 722 /* Seagate Barracuda Green Advanced Format (4k) drives */ 723 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, 724 /*quirks*/DA_Q_4K 725 }, 726 { 727 /* Seagate Barracuda Green Advanced Format (4k) drives */ 728 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, 729 /*quirks*/DA_Q_4K 730 }, 731 { 732 /* Seagate Barracuda Green Advanced Format (4k) drives */ 733 { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, 734 /*quirks*/DA_Q_4K 735 }, 736 { 737 /* Seagate Barracuda Green Advanced Format (4k) drives */ 738 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, 739 /*quirks*/DA_Q_4K 740 }, 741 { 742 /* Seagate Barracuda Green Advanced Format (4k) drives */ 743 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, 744 /*quirks*/DA_Q_4K 745 }, 746 { 747 /* Seagate Momentus Advanced Format (4k) drives */ 748 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, 749 /*quirks*/DA_Q_4K 750 }, 751 { 752 /* Seagate Momentus Advanced Format (4k) drives */ 753 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, 754 /*quirks*/DA_Q_4K 755 }, 756 { 757 /* Seagate Momentus Advanced Format (4k) drives */ 758 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, 759 /*quirks*/DA_Q_4K 760 }, 761 { 762 /* Seagate Momentus Advanced Format (4k) drives */ 763 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, 764 /*quirks*/DA_Q_4K 765 }, 766 { 767 /* Seagate Momentus Advanced Format (4k) drives */ 768 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, 769 /*quirks*/DA_Q_4K 770 }, 771 { 772 /* Seagate Momentus Advanced Format (4k) drives */ 773 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, 774 /*quirks*/DA_Q_4K 775 }, 776 { 777 /* Seagate Momentus Advanced Format (4k) drives */ 778 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, 779 /*quirks*/DA_Q_4K 780 }, 781 { 782 /* Seagate Momentus Advanced Format (4k) drives */ 783 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, 784 /*quirks*/DA_Q_4K 785 }, 786 { 787 /* Seagate Momentus Advanced Format (4k) drives */ 788 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, 789 /*quirks*/DA_Q_4K 790 }, 791 { 792 /* Seagate Momentus Advanced Format (4k) drives */ 793 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, 794 /*quirks*/DA_Q_4K 795 }, 796 { 797 /* Seagate Momentus Advanced Format (4k) drives */ 798 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, 799 /*quirks*/DA_Q_4K 800 }, 801 { 802 /* Seagate Momentus Advanced Format (4k) drives */ 803 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, 804 /*quirks*/DA_Q_4K 805 }, 806 { 807 /* Seagate Momentus Advanced Format (4k) drives */ 808 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, 809 /*quirks*/DA_Q_4K 810 }, 811 { 812 /* Seagate Momentus Advanced Format (4k) drives */ 813 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, 814 /*quirks*/DA_Q_4K 815 }, 816 { 817 /* Seagate Momentus Thin Advanced Format (4k) drives */ 818 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, 819 /*quirks*/DA_Q_4K 820 }, 821 { 822 /* Seagate Momentus Thin Advanced Format (4k) drives */ 823 { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, 824 /*quirks*/DA_Q_4K 825 }, 826 { 827 /* WDC Caviar Green Advanced Format (4k) drives */ 828 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, 829 /*quirks*/DA_Q_4K 830 }, 831 { 832 /* WDC Caviar Green Advanced Format (4k) drives */ 833 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, 834 /*quirks*/DA_Q_4K 835 }, 836 { 837 /* WDC Caviar Green Advanced Format (4k) drives */ 838 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, 839 /*quirks*/DA_Q_4K 840 }, 841 { 842 /* WDC Caviar Green Advanced Format (4k) drives */ 843 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, 844 /*quirks*/DA_Q_4K 845 }, 846 { 847 /* WDC Caviar Green Advanced Format (4k) drives */ 848 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, 849 /*quirks*/DA_Q_4K 850 }, 851 { 852 /* WDC Caviar Green Advanced Format (4k) drives */ 853 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, 854 /*quirks*/DA_Q_4K 855 }, 856 { 857 /* WDC Caviar Green Advanced Format (4k) drives */ 858 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, 859 /*quirks*/DA_Q_4K 860 }, 861 { 862 /* WDC Caviar Green Advanced Format (4k) drives */ 863 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, 864 /*quirks*/DA_Q_4K 865 }, 866 { 867 /* WDC Scorpio Black Advanced Format (4k) drives */ 868 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, 869 /*quirks*/DA_Q_4K 870 }, 871 { 872 /* WDC Scorpio Black Advanced Format (4k) drives */ 873 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, 874 /*quirks*/DA_Q_4K 875 }, 876 { 877 /* WDC Scorpio Black Advanced Format (4k) drives */ 878 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, 879 /*quirks*/DA_Q_4K 880 }, 881 { 882 /* WDC Scorpio Black Advanced Format (4k) drives */ 883 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, 884 /*quirks*/DA_Q_4K 885 }, 886 { 887 /* WDC Scorpio Blue Advanced Format (4k) drives */ 888 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, 889 /*quirks*/DA_Q_4K 890 }, 891 { 892 /* WDC Scorpio Blue Advanced Format (4k) drives */ 893 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, 894 /*quirks*/DA_Q_4K 895 }, 896 { 897 /* WDC Scorpio Blue Advanced Format (4k) drives */ 898 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, 899 /*quirks*/DA_Q_4K 900 }, 901 { 902 /* WDC Scorpio Blue Advanced Format (4k) drives */ 903 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, 904 /*quirks*/DA_Q_4K 905 }, 906 { 907 /* 908 * Olympus FE-210 camera 909 */ 910 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", 911 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 912 }, 913 { 914 /* 915 * LG UP3S MP3 player 916 */ 917 {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", 918 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 919 }, 920 { 921 /* 922 * Laser MP3-2GA13 MP3 player 923 */ 924 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", 925 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 926 }, 927 { 928 /* 929 * LaCie external 250GB Hard drive des by Porsche 930 * Submitted by: Ben Stuyts <ben@altesco.nl> 931 * PR: 121474 932 */ 933 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, 934 /*quirks*/ DA_Q_NO_SYNC_CACHE 935 }, 936 /* SATA SSDs */ 937 { 938 /* 939 * Corsair Force 2 SSDs 940 * 4k optimised & trim only works in 4k requests + 4k aligned 941 */ 942 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, 943 /*quirks*/DA_Q_4K 944 }, 945 { 946 /* 947 * Corsair Force 3 SSDs 948 * 4k optimised & trim only works in 4k requests + 4k aligned 949 */ 950 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, 951 /*quirks*/DA_Q_4K 952 }, 953 { 954 /* 955 * Corsair Force GT SSDs 956 * 4k optimised & trim only works in 4k requests + 4k aligned 957 */ 958 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force GT*", "*" }, 959 /*quirks*/DA_Q_4K 960 }, 961 { 962 /* 963 * Crucial M4 SSDs 964 * 4k optimised & trim only works in 4k requests + 4k aligned 965 */ 966 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, 967 /*quirks*/DA_Q_4K 968 }, 969 { 970 /* 971 * Crucial RealSSD C300 SSDs 972 * 4k optimised 973 */ 974 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", 975 "*" }, /*quirks*/DA_Q_4K 976 }, 977 { 978 /* 979 * Intel 320 Series SSDs 980 * 4k optimised & trim only works in 4k requests + 4k aligned 981 */ 982 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, 983 /*quirks*/DA_Q_4K 984 }, 985 { 986 /* 987 * Intel 330 Series SSDs 988 * 4k optimised & trim only works in 4k requests + 4k aligned 989 */ 990 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, 991 /*quirks*/DA_Q_4K 992 }, 993 { 994 /* 995 * Intel 510 Series SSDs 996 * 4k optimised & trim only works in 4k requests + 4k aligned 997 */ 998 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, 999 /*quirks*/DA_Q_4K 1000 }, 1001 { 1002 /* 1003 * Intel 520 Series SSDs 1004 * 4k optimised & trim only works in 4k requests + 4k aligned 1005 */ 1006 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, 1007 /*quirks*/DA_Q_4K 1008 }, 1009 { 1010 /* 1011 * Intel X25-M Series SSDs 1012 * 4k optimised & trim only works in 4k requests + 4k aligned 1013 */ 1014 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, 1015 /*quirks*/DA_Q_4K 1016 }, 1017 { 1018 /* 1019 * Kingston E100 Series SSDs 1020 * 4k optimised & trim only works in 4k requests + 4k aligned 1021 */ 1022 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, 1023 /*quirks*/DA_Q_4K 1024 }, 1025 { 1026 /* 1027 * Kingston HyperX 3k SSDs 1028 * 4k optimised & trim only works in 4k requests + 4k aligned 1029 */ 1030 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, 1031 /*quirks*/DA_Q_4K 1032 }, 1033 { 1034 /* 1035 * Marvell SSDs (entry taken from OpenSolaris) 1036 * 4k optimised & trim only works in 4k requests + 4k aligned 1037 */ 1038 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, 1039 /*quirks*/DA_Q_4K 1040 }, 1041 { 1042 /* 1043 * OCZ Agility 2 SSDs 1044 * 4k optimised & trim only works in 4k requests + 4k aligned 1045 */ 1046 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, 1047 /*quirks*/DA_Q_4K 1048 }, 1049 { 1050 /* 1051 * OCZ Agility 3 SSDs 1052 * 4k optimised & trim only works in 4k requests + 4k aligned 1053 */ 1054 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, 1055 /*quirks*/DA_Q_4K 1056 }, 1057 { 1058 /* 1059 * OCZ Deneva R Series SSDs 1060 * 4k optimised & trim only works in 4k requests + 4k aligned 1061 */ 1062 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, 1063 /*quirks*/DA_Q_4K 1064 }, 1065 { 1066 /* 1067 * OCZ Vertex 2 SSDs (inc pro series) 1068 * 4k optimised & trim only works in 4k requests + 4k aligned 1069 */ 1070 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, 1071 /*quirks*/DA_Q_4K 1072 }, 1073 { 1074 /* 1075 * OCZ Vertex 3 SSDs 1076 * 4k optimised & trim only works in 4k requests + 4k aligned 1077 */ 1078 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, 1079 /*quirks*/DA_Q_4K 1080 }, 1081 { 1082 /* 1083 * OCZ Vertex 4 SSDs 1084 * 4k optimised & trim only works in 4k requests + 4k aligned 1085 */ 1086 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, 1087 /*quirks*/DA_Q_4K 1088 }, 1089 { 1090 /* 1091 * Samsung 830 Series SSDs 1092 * 4k optimised & trim only works in 4k requests + 4k aligned 1093 */ 1094 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" }, 1095 /*quirks*/DA_Q_4K 1096 }, 1097 { 1098 /* 1099 * SuperTalent TeraDrive CT SSDs 1100 * 4k optimised & trim only works in 4k requests + 4k aligned 1101 */ 1102 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, 1103 /*quirks*/DA_Q_4K 1104 }, 1105 { 1106 /* 1107 * XceedIOPS SATA SSDs 1108 * 4k optimised 1109 */ 1110 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, 1111 /*quirks*/DA_Q_4K 1112 }, 1113 }; 1114 1115 static disk_strategy_t dastrategy; 1116 static dumper_t dadump; 1117 static periph_init_t dainit; 1118 static void daasync(void *callback_arg, u_int32_t code, 1119 struct cam_path *path, void *arg); 1120 static void dasysctlinit(void *context, int pending); 1121 static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); 1122 static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); 1123 static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); 1124 static void dadeletemethodset(struct da_softc *softc, 1125 da_delete_methods delete_method); 1126 static off_t dadeletemaxsize(struct da_softc *softc, 1127 da_delete_methods delete_method); 1128 static void dadeletemethodchoose(struct da_softc *softc, 1129 da_delete_methods default_method); 1130 static void daprobedone(struct cam_periph *periph, union ccb *ccb); 1131 1132 static periph_ctor_t daregister; 1133 static periph_dtor_t dacleanup; 1134 static periph_start_t dastart; 1135 static periph_oninv_t daoninvalidate; 1136 static void dadone(struct cam_periph *periph, 1137 union ccb *done_ccb); 1138 static int daerror(union ccb *ccb, u_int32_t cam_flags, 1139 u_int32_t sense_flags); 1140 static void daprevent(struct cam_periph *periph, int action); 1141 static void dareprobe(struct cam_periph *periph); 1142 static void dasetgeom(struct cam_periph *periph, uint32_t block_len, 1143 uint64_t maxsector, 1144 struct scsi_read_capacity_data_long *rcaplong, 1145 size_t rcap_size); 1146 static timeout_t dasendorderedtag; 1147 static void dashutdown(void *arg, int howto); 1148 static timeout_t damediapoll; 1149 1150 #ifndef DA_DEFAULT_POLL_PERIOD 1151 #define DA_DEFAULT_POLL_PERIOD 3 1152 #endif 1153 1154 #ifndef DA_DEFAULT_TIMEOUT 1155 #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ 1156 #endif 1157 1158 #ifndef DA_DEFAULT_RETRY 1159 #define DA_DEFAULT_RETRY 4 1160 #endif 1161 1162 #ifndef DA_DEFAULT_SEND_ORDERED 1163 #define DA_DEFAULT_SEND_ORDERED 1 1164 #endif 1165 1166 #define DA_SIO (softc->sort_io_queue >= 0 ? \ 1167 softc->sort_io_queue : cam_sort_io_queues) 1168 1169 static int da_poll_period = DA_DEFAULT_POLL_PERIOD; 1170 static int da_retry_count = DA_DEFAULT_RETRY; 1171 static int da_default_timeout = DA_DEFAULT_TIMEOUT; 1172 static int da_send_ordered = DA_DEFAULT_SEND_ORDERED; 1173 1174 static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0, 1175 "CAM Direct Access Disk driver"); 1176 SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RW, 1177 &da_poll_period, 0, "Media polling period in seconds"); 1178 TUNABLE_INT("kern.cam.da.poll_period", &da_poll_period); 1179 SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW, 1180 &da_retry_count, 0, "Normal I/O retry count"); 1181 TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count); 1182 SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW, 1183 &da_default_timeout, 0, "Normal I/O timeout (in seconds)"); 1184 TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout); 1185 SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RW, 1186 &da_send_ordered, 0, "Send Ordered Tags"); 1187 TUNABLE_INT("kern.cam.da.send_ordered", &da_send_ordered); 1188 1189 /* 1190 * DA_ORDEREDTAG_INTERVAL determines how often, relative 1191 * to the default timeout, we check to see whether an ordered 1192 * tagged transaction is appropriate to prevent simple tag 1193 * starvation. Since we'd like to ensure that there is at least 1194 * 1/2 of the timeout length left for a starved transaction to 1195 * complete after we've sent an ordered tag, we must poll at least 1196 * four times in every timeout period. This takes care of the worst 1197 * case where a starved transaction starts during an interval that 1198 * meets the requirement "don't send an ordered tag" test so it takes 1199 * us two intervals to determine that a tag must be sent. 1200 */ 1201 #ifndef DA_ORDEREDTAG_INTERVAL 1202 #define DA_ORDEREDTAG_INTERVAL 4 1203 #endif 1204 1205 static struct periph_driver dadriver = 1206 { 1207 dainit, "da", 1208 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 1209 }; 1210 1211 PERIPHDRIVER_DECLARE(da, dadriver); 1212 1213 static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers"); 1214 1215 static int 1216 daopen(struct disk *dp) 1217 { 1218 struct cam_periph *periph; 1219 struct da_softc *softc; 1220 int error; 1221 1222 periph = (struct cam_periph *)dp->d_drv1; 1223 if (cam_periph_acquire(periph) != CAM_REQ_CMP) { 1224 return (ENXIO); 1225 } 1226 1227 cam_periph_lock(periph); 1228 if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { 1229 cam_periph_unlock(periph); 1230 cam_periph_release(periph); 1231 return (error); 1232 } 1233 1234 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, 1235 ("daopen\n")); 1236 1237 softc = (struct da_softc *)periph->softc; 1238 dareprobe(periph); 1239 1240 /* Wait for the disk size update. */ 1241 error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO, 1242 "dareprobe", 0); 1243 if (error != 0) 1244 xpt_print(periph->path, "unable to retrieve capacity data"); 1245 1246 if (periph->flags & CAM_PERIPH_INVALID) 1247 error = ENXIO; 1248 1249 if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && 1250 (softc->quirks & DA_Q_NO_PREVENT) == 0) 1251 daprevent(periph, PR_PREVENT); 1252 1253 if (error == 0) { 1254 softc->flags &= ~DA_FLAG_PACK_INVALID; 1255 softc->flags |= DA_FLAG_OPEN; 1256 } 1257 1258 cam_periph_unhold(periph); 1259 cam_periph_unlock(periph); 1260 1261 if (error != 0) 1262 cam_periph_release(periph); 1263 1264 return (error); 1265 } 1266 1267 static int 1268 daclose(struct disk *dp) 1269 { 1270 struct cam_periph *periph; 1271 struct da_softc *softc; 1272 int error; 1273 1274 periph = (struct cam_periph *)dp->d_drv1; 1275 cam_periph_lock(periph); 1276 if (cam_periph_hold(periph, PRIBIO) != 0) { 1277 cam_periph_unlock(periph); 1278 cam_periph_release(periph); 1279 return (0); 1280 } 1281 1282 softc = (struct da_softc *)periph->softc; 1283 1284 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, 1285 ("daclose\n")); 1286 1287 if ((softc->flags & DA_FLAG_DIRTY) != 0 && 1288 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 && 1289 (softc->flags & DA_FLAG_PACK_INVALID) == 0) { 1290 union ccb *ccb; 1291 1292 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 1293 1294 scsi_synchronize_cache(&ccb->csio, 1295 /*retries*/1, 1296 /*cbfcnp*/dadone, 1297 MSG_SIMPLE_Q_TAG, 1298 /*begin_lba*/0,/* Cover the whole disk */ 1299 /*lb_count*/0, 1300 SSD_FULL_SIZE, 1301 5 * 60 * 1000); 1302 1303 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, 1304 /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR, 1305 softc->disk->d_devstat); 1306 if (error == 0) 1307 softc->flags &= ~DA_FLAG_DIRTY; 1308 xpt_release_ccb(ccb); 1309 1310 } 1311 1312 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) { 1313 if ((softc->quirks & DA_Q_NO_PREVENT) == 0) 1314 daprevent(periph, PR_ALLOW); 1315 /* 1316 * If we've got removeable media, mark the blocksize as 1317 * unavailable, since it could change when new media is 1318 * inserted. 1319 */ 1320 softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; 1321 } 1322 1323 softc->flags &= ~DA_FLAG_OPEN; 1324 cam_periph_unhold(periph); 1325 cam_periph_unlock(periph); 1326 cam_periph_release(periph); 1327 return (0); 1328 } 1329 1330 static void 1331 daschedule(struct cam_periph *periph) 1332 { 1333 struct da_softc *softc = (struct da_softc *)periph->softc; 1334 uint32_t prio; 1335 1336 if (softc->state != DA_STATE_NORMAL) 1337 return; 1338 1339 /* Check if cam_periph_getccb() was called. */ 1340 prio = periph->immediate_priority; 1341 1342 /* Check if we have more work to do. */ 1343 if (bioq_first(&softc->bio_queue) || 1344 (!softc->delete_running && bioq_first(&softc->delete_queue)) || 1345 softc->tur) { 1346 prio = CAM_PRIORITY_NORMAL; 1347 } 1348 1349 /* Schedule CCB if any of above is true. */ 1350 if (prio != CAM_PRIORITY_NONE) 1351 xpt_schedule(periph, prio); 1352 } 1353 1354 /* 1355 * Actually translate the requested transfer into one the physical driver 1356 * can understand. The transfer is described by a buf and will include 1357 * only one physical transfer. 1358 */ 1359 static void 1360 dastrategy(struct bio *bp) 1361 { 1362 struct cam_periph *periph; 1363 struct da_softc *softc; 1364 1365 periph = (struct cam_periph *)bp->bio_disk->d_drv1; 1366 softc = (struct da_softc *)periph->softc; 1367 1368 cam_periph_lock(periph); 1369 1370 /* 1371 * If the device has been made invalid, error out 1372 */ 1373 if ((softc->flags & DA_FLAG_PACK_INVALID)) { 1374 cam_periph_unlock(periph); 1375 biofinish(bp, NULL, ENXIO); 1376 return; 1377 } 1378 1379 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp)); 1380 1381 /* 1382 * Place it in the queue of disk activities for this disk 1383 */ 1384 if (bp->bio_cmd == BIO_DELETE) { 1385 if (bp->bio_bcount == 0) 1386 biodone(bp); 1387 else if (DA_SIO) 1388 bioq_disksort(&softc->delete_queue, bp); 1389 else 1390 bioq_insert_tail(&softc->delete_queue, bp); 1391 } else if (DA_SIO) { 1392 bioq_disksort(&softc->bio_queue, bp); 1393 } else { 1394 bioq_insert_tail(&softc->bio_queue, bp); 1395 } 1396 1397 /* 1398 * Schedule ourselves for performing the work. 1399 */ 1400 daschedule(periph); 1401 cam_periph_unlock(periph); 1402 1403 return; 1404 } 1405 1406 static int 1407 dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) 1408 { 1409 struct cam_periph *periph; 1410 struct da_softc *softc; 1411 u_int secsize; 1412 struct ccb_scsiio csio; 1413 struct disk *dp; 1414 int error = 0; 1415 1416 dp = arg; 1417 periph = dp->d_drv1; 1418 softc = (struct da_softc *)periph->softc; 1419 cam_periph_lock(periph); 1420 secsize = softc->params.secsize; 1421 1422 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) { 1423 cam_periph_unlock(periph); 1424 return (ENXIO); 1425 } 1426 1427 if (length > 0) { 1428 xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 1429 csio.ccb_h.ccb_state = DA_CCB_DUMP; 1430 scsi_read_write(&csio, 1431 /*retries*/0, 1432 dadone, 1433 MSG_ORDERED_Q_TAG, 1434 /*read*/SCSI_RW_WRITE, 1435 /*byte2*/0, 1436 /*minimum_cmd_size*/ softc->minimum_cmd_size, 1437 offset / secsize, 1438 length / secsize, 1439 /*data_ptr*/(u_int8_t *) virtual, 1440 /*dxfer_len*/length, 1441 /*sense_len*/SSD_FULL_SIZE, 1442 da_default_timeout * 1000); 1443 xpt_polled_action((union ccb *)&csio); 1444 1445 error = cam_periph_error((union ccb *)&csio, 1446 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); 1447 if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0) 1448 cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0, 1449 /*reduction*/0, /*timeout*/0, /*getcount_only*/0); 1450 if (error != 0) 1451 printf("Aborting dump due to I/O error.\n"); 1452 cam_periph_unlock(periph); 1453 return (error); 1454 } 1455 1456 /* 1457 * Sync the disk cache contents to the physical media. 1458 */ 1459 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { 1460 1461 xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 1462 csio.ccb_h.ccb_state = DA_CCB_DUMP; 1463 scsi_synchronize_cache(&csio, 1464 /*retries*/0, 1465 /*cbfcnp*/dadone, 1466 MSG_SIMPLE_Q_TAG, 1467 /*begin_lba*/0,/* Cover the whole disk */ 1468 /*lb_count*/0, 1469 SSD_FULL_SIZE, 1470 5 * 60 * 1000); 1471 xpt_polled_action((union ccb *)&csio); 1472 1473 error = cam_periph_error((union ccb *)&csio, 1474 0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL); 1475 if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0) 1476 cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0, 1477 /*reduction*/0, /*timeout*/0, /*getcount_only*/0); 1478 if (error != 0) 1479 xpt_print(periph->path, "Synchronize cache failed\n"); 1480 } 1481 cam_periph_unlock(periph); 1482 return (error); 1483 } 1484 1485 static int 1486 dagetattr(struct bio *bp) 1487 { 1488 int ret; 1489 struct cam_periph *periph; 1490 1491 periph = (struct cam_periph *)bp->bio_disk->d_drv1; 1492 cam_periph_lock(periph); 1493 ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, 1494 periph->path); 1495 cam_periph_unlock(periph); 1496 if (ret == 0) 1497 bp->bio_completed = bp->bio_length; 1498 return ret; 1499 } 1500 1501 static void 1502 dainit(void) 1503 { 1504 cam_status status; 1505 1506 /* 1507 * Install a global async callback. This callback will 1508 * receive async callbacks like "new device found". 1509 */ 1510 status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL); 1511 1512 if (status != CAM_REQ_CMP) { 1513 printf("da: Failed to attach master async callback " 1514 "due to status 0x%x!\n", status); 1515 } else if (da_send_ordered) { 1516 1517 /* Register our shutdown event handler */ 1518 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown, 1519 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) 1520 printf("dainit: shutdown event registration failed!\n"); 1521 } 1522 } 1523 1524 /* 1525 * Callback from GEOM, called when it has finished cleaning up its 1526 * resources. 1527 */ 1528 static void 1529 dadiskgonecb(struct disk *dp) 1530 { 1531 struct cam_periph *periph; 1532 1533 periph = (struct cam_periph *)dp->d_drv1; 1534 cam_periph_release(periph); 1535 } 1536 1537 static void 1538 daoninvalidate(struct cam_periph *periph) 1539 { 1540 struct da_softc *softc; 1541 1542 softc = (struct da_softc *)periph->softc; 1543 1544 /* 1545 * De-register any async callbacks. 1546 */ 1547 xpt_register_async(0, daasync, periph, periph->path); 1548 1549 softc->flags |= DA_FLAG_PACK_INVALID; 1550 1551 /* 1552 * Return all queued I/O with ENXIO. 1553 * XXX Handle any transactions queued to the card 1554 * with XPT_ABORT_CCB. 1555 */ 1556 bioq_flush(&softc->bio_queue, NULL, ENXIO); 1557 bioq_flush(&softc->delete_queue, NULL, ENXIO); 1558 1559 /* 1560 * Tell GEOM that we've gone away, we'll get a callback when it is 1561 * done cleaning up its resources. 1562 */ 1563 disk_gone(softc->disk); 1564 1565 xpt_print(periph->path, "lost device - %d outstanding, %d refs\n", 1566 softc->outstanding_cmds, periph->refcount); 1567 } 1568 1569 static void 1570 dacleanup(struct cam_periph *periph) 1571 { 1572 struct da_softc *softc; 1573 1574 softc = (struct da_softc *)periph->softc; 1575 1576 xpt_print(periph->path, "removing device entry\n"); 1577 cam_periph_unlock(periph); 1578 1579 /* 1580 * If we can't free the sysctl tree, oh well... 1581 */ 1582 if ((softc->flags & DA_FLAG_SCTX_INIT) != 0 1583 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) { 1584 xpt_print(periph->path, "can't remove sysctl context\n"); 1585 } 1586 1587 callout_drain(&softc->mediapoll_c); 1588 disk_destroy(softc->disk); 1589 callout_drain(&softc->sendordered_c); 1590 free(softc, M_DEVBUF); 1591 cam_periph_lock(periph); 1592 } 1593 1594 static void 1595 daasync(void *callback_arg, u_int32_t code, 1596 struct cam_path *path, void *arg) 1597 { 1598 struct cam_periph *periph; 1599 struct da_softc *softc; 1600 1601 periph = (struct cam_periph *)callback_arg; 1602 switch (code) { 1603 case AC_FOUND_DEVICE: 1604 { 1605 struct ccb_getdev *cgd; 1606 cam_status status; 1607 1608 cgd = (struct ccb_getdev *)arg; 1609 if (cgd == NULL) 1610 break; 1611 1612 if (cgd->protocol != PROTO_SCSI) 1613 break; 1614 1615 if (SID_TYPE(&cgd->inq_data) != T_DIRECT 1616 && SID_TYPE(&cgd->inq_data) != T_RBC 1617 && SID_TYPE(&cgd->inq_data) != T_OPTICAL) 1618 break; 1619 1620 /* 1621 * Allocate a peripheral instance for 1622 * this device and start the probe 1623 * process. 1624 */ 1625 status = cam_periph_alloc(daregister, daoninvalidate, 1626 dacleanup, dastart, 1627 "da", CAM_PERIPH_BIO, 1628 cgd->ccb_h.path, daasync, 1629 AC_FOUND_DEVICE, cgd); 1630 1631 if (status != CAM_REQ_CMP 1632 && status != CAM_REQ_INPROG) 1633 printf("daasync: Unable to attach to new device " 1634 "due to status 0x%x\n", status); 1635 return; 1636 } 1637 case AC_ADVINFO_CHANGED: 1638 { 1639 uintptr_t buftype; 1640 1641 buftype = (uintptr_t)arg; 1642 if (buftype == CDAI_TYPE_PHYS_PATH) { 1643 struct da_softc *softc; 1644 1645 softc = periph->softc; 1646 disk_attr_changed(softc->disk, "GEOM::physpath", 1647 M_NOWAIT); 1648 } 1649 break; 1650 } 1651 case AC_UNIT_ATTENTION: 1652 { 1653 union ccb *ccb; 1654 int error_code, sense_key, asc, ascq; 1655 1656 softc = (struct da_softc *)periph->softc; 1657 ccb = (union ccb *)arg; 1658 1659 /* 1660 * Handle all UNIT ATTENTIONs except our own, 1661 * as they will be handled by daerror(). 1662 */ 1663 if (xpt_path_periph(ccb->ccb_h.path) != periph && 1664 scsi_extract_sense_ccb(ccb, 1665 &error_code, &sense_key, &asc, &ascq)) { 1666 if (asc == 0x2A && ascq == 0x09) { 1667 xpt_print(ccb->ccb_h.path, 1668 "capacity data has changed\n"); 1669 dareprobe(periph); 1670 } else if (asc == 0x28 && ascq == 0x00) 1671 disk_media_changed(softc->disk, M_NOWAIT); 1672 } 1673 cam_periph_async(periph, code, path, arg); 1674 break; 1675 } 1676 case AC_SCSI_AEN: 1677 softc = (struct da_softc *)periph->softc; 1678 if (!softc->tur) { 1679 if (cam_periph_acquire(periph) == CAM_REQ_CMP) { 1680 softc->tur = 1; 1681 daschedule(periph); 1682 } 1683 } 1684 /* FALLTHROUGH */ 1685 case AC_SENT_BDR: 1686 case AC_BUS_RESET: 1687 { 1688 struct ccb_hdr *ccbh; 1689 1690 softc = (struct da_softc *)periph->softc; 1691 /* 1692 * Don't fail on the expected unit attention 1693 * that will occur. 1694 */ 1695 softc->flags |= DA_FLAG_RETRY_UA; 1696 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) 1697 ccbh->ccb_state |= DA_CCB_RETRY_UA; 1698 break; 1699 } 1700 default: 1701 break; 1702 } 1703 cam_periph_async(periph, code, path, arg); 1704 } 1705 1706 static void 1707 dasysctlinit(void *context, int pending) 1708 { 1709 struct cam_periph *periph; 1710 struct da_softc *softc; 1711 char tmpstr[80], tmpstr2[80]; 1712 struct ccb_trans_settings cts; 1713 1714 periph = (struct cam_periph *)context; 1715 /* 1716 * periph was held for us when this task was enqueued 1717 */ 1718 if (periph->flags & CAM_PERIPH_INVALID) { 1719 cam_periph_release(periph); 1720 return; 1721 } 1722 1723 softc = (struct da_softc *)periph->softc; 1724 snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number); 1725 snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); 1726 1727 sysctl_ctx_init(&softc->sysctl_ctx); 1728 softc->flags |= DA_FLAG_SCTX_INIT; 1729 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1730 SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2, 1731 CTLFLAG_RD, 0, tmpstr); 1732 if (softc->sysctl_tree == NULL) { 1733 printf("dasysctlinit: unable to allocate sysctl tree\n"); 1734 cam_periph_release(periph); 1735 return; 1736 } 1737 1738 /* 1739 * Now register the sysctl handler, so the user can change the value on 1740 * the fly. 1741 */ 1742 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1743 OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RW, 1744 softc, 0, dadeletemethodsysctl, "A", 1745 "BIO_DELETE execution method"); 1746 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1747 OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW, 1748 softc, 0, dadeletemaxsysctl, "Q", 1749 "Maximum BIO_DELETE size"); 1750 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1751 OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, 1752 &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I", 1753 "Minimum CDB size"); 1754 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1755 OID_AUTO, "sort_io_queue", CTLFLAG_RW, &softc->sort_io_queue, 0, 1756 "Sort IO queue to try and optimise disk access patterns"); 1757 1758 SYSCTL_ADD_INT(&softc->sysctl_ctx, 1759 SYSCTL_CHILDREN(softc->sysctl_tree), 1760 OID_AUTO, 1761 "error_inject", 1762 CTLFLAG_RW, 1763 &softc->error_inject, 1764 0, 1765 "error_inject leaf"); 1766 1767 1768 /* 1769 * Add some addressing info. 1770 */ 1771 memset(&cts, 0, sizeof (cts)); 1772 xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); 1773 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 1774 cts.type = CTS_TYPE_CURRENT_SETTINGS; 1775 cam_periph_lock(periph); 1776 xpt_action((union ccb *)&cts); 1777 cam_periph_unlock(periph); 1778 if (cts.ccb_h.status != CAM_REQ_CMP) { 1779 cam_periph_release(periph); 1780 return; 1781 } 1782 if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) { 1783 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; 1784 if (fc->valid & CTS_FC_VALID_WWPN) { 1785 softc->wwpn = fc->wwpn; 1786 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, 1787 SYSCTL_CHILDREN(softc->sysctl_tree), 1788 OID_AUTO, "wwpn", CTLFLAG_RD, 1789 &softc->wwpn, "World Wide Port Name"); 1790 } 1791 } 1792 cam_periph_release(periph); 1793 } 1794 1795 static int 1796 dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) 1797 { 1798 int error; 1799 uint64_t value; 1800 struct da_softc *softc; 1801 1802 softc = (struct da_softc *)arg1; 1803 1804 value = softc->disk->d_delmaxsize; 1805 error = sysctl_handle_64(oidp, &value, 0, req); 1806 if ((error != 0) || (req->newptr == NULL)) 1807 return (error); 1808 1809 /* only accept values smaller than the calculated value */ 1810 if (value > dadeletemaxsize(softc, softc->delete_method)) { 1811 return (EINVAL); 1812 } 1813 softc->disk->d_delmaxsize = value; 1814 1815 return (0); 1816 } 1817 1818 static int 1819 dacmdsizesysctl(SYSCTL_HANDLER_ARGS) 1820 { 1821 int error, value; 1822 1823 value = *(int *)arg1; 1824 1825 error = sysctl_handle_int(oidp, &value, 0, req); 1826 1827 if ((error != 0) 1828 || (req->newptr == NULL)) 1829 return (error); 1830 1831 /* 1832 * Acceptable values here are 6, 10, 12 or 16. 1833 */ 1834 if (value < 6) 1835 value = 6; 1836 else if ((value > 6) 1837 && (value <= 10)) 1838 value = 10; 1839 else if ((value > 10) 1840 && (value <= 12)) 1841 value = 12; 1842 else if (value > 12) 1843 value = 16; 1844 1845 *(int *)arg1 = value; 1846 1847 return (0); 1848 } 1849 1850 static void 1851 dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) 1852 { 1853 1854 1855 softc->delete_method = delete_method; 1856 softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); 1857 softc->delete_func = da_delete_functions[delete_method]; 1858 1859 if (softc->delete_method > DA_DELETE_DISABLE) 1860 softc->disk->d_flags |= DISKFLAG_CANDELETE; 1861 else 1862 softc->disk->d_flags &= ~DISKFLAG_CANDELETE; 1863 } 1864 1865 static off_t 1866 dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method) 1867 { 1868 off_t sectors; 1869 1870 switch(delete_method) { 1871 case DA_DELETE_UNMAP: 1872 sectors = (off_t)softc->unmap_max_lba * softc->unmap_max_ranges; 1873 break; 1874 case DA_DELETE_ATA_TRIM: 1875 sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges; 1876 break; 1877 case DA_DELETE_WS16: 1878 sectors = (off_t)min(softc->ws_max_blks, WS16_MAX_BLKS); 1879 break; 1880 case DA_DELETE_ZERO: 1881 case DA_DELETE_WS10: 1882 sectors = (off_t)min(softc->ws_max_blks, WS10_MAX_BLKS); 1883 break; 1884 default: 1885 return 0; 1886 } 1887 1888 return (off_t)softc->params.secsize * 1889 min(sectors, (off_t)softc->params.sectors); 1890 } 1891 1892 static void 1893 daprobedone(struct cam_periph *periph, union ccb *ccb) 1894 { 1895 struct da_softc *softc; 1896 1897 softc = (struct da_softc *)periph->softc; 1898 1899 dadeletemethodchoose(softc, DA_DELETE_NONE); 1900 1901 if (bootverbose && (softc->flags & DA_FLAG_PROBED) == 0) { 1902 char buf[80]; 1903 int i, sep; 1904 1905 snprintf(buf, sizeof(buf), "Delete methods: <"); 1906 sep = 0; 1907 for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) { 1908 if (softc->delete_available & (1 << i)) { 1909 if (sep) { 1910 strlcat(buf, ",", sizeof(buf)); 1911 } else { 1912 sep = 1; 1913 } 1914 strlcat(buf, da_delete_method_names[i], 1915 sizeof(buf)); 1916 if (i == softc->delete_method) { 1917 strlcat(buf, "(*)", sizeof(buf)); 1918 } 1919 } 1920 } 1921 if (sep == 0) { 1922 if (softc->delete_method == DA_DELETE_NONE) 1923 strlcat(buf, "NONE(*)", sizeof(buf)); 1924 else 1925 strlcat(buf, "DISABLED(*)", sizeof(buf)); 1926 } 1927 strlcat(buf, ">", sizeof(buf)); 1928 printf("%s%d: %s\n", periph->periph_name, 1929 periph->unit_number, buf); 1930 } 1931 1932 /* 1933 * Since our peripheral may be invalidated by an error 1934 * above or an external event, we must release our CCB 1935 * before releasing the probe lock on the peripheral. 1936 * The peripheral will only go away once the last lock 1937 * is removed, and we need it around for the CCB release 1938 * operation. 1939 */ 1940 xpt_release_ccb(ccb); 1941 softc->state = DA_STATE_NORMAL; 1942 daschedule(periph); 1943 wakeup(&softc->disk->d_mediasize); 1944 if ((softc->flags & DA_FLAG_PROBED) == 0) { 1945 softc->flags |= DA_FLAG_PROBED; 1946 cam_periph_unhold(periph); 1947 } else 1948 cam_periph_release_locked(periph); 1949 } 1950 1951 static void 1952 dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method) 1953 { 1954 int i, delete_method; 1955 1956 delete_method = default_method; 1957 1958 /* 1959 * Use the pre-defined order to choose the best 1960 * performing delete. 1961 */ 1962 for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) { 1963 if (softc->delete_available & (1 << i)) { 1964 dadeletemethodset(softc, i); 1965 return; 1966 } 1967 } 1968 dadeletemethodset(softc, delete_method); 1969 } 1970 1971 static int 1972 dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) 1973 { 1974 char buf[16]; 1975 const char *p; 1976 struct da_softc *softc; 1977 int i, error, value; 1978 1979 softc = (struct da_softc *)arg1; 1980 1981 value = softc->delete_method; 1982 if (value < 0 || value > DA_DELETE_MAX) 1983 p = "UNKNOWN"; 1984 else 1985 p = da_delete_method_names[value]; 1986 strncpy(buf, p, sizeof(buf)); 1987 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 1988 if (error != 0 || req->newptr == NULL) 1989 return (error); 1990 for (i = 0; i <= DA_DELETE_MAX; i++) { 1991 if (!(softc->delete_available & (1 << i)) || 1992 strcmp(buf, da_delete_method_names[i]) != 0) 1993 continue; 1994 dadeletemethodset(softc, i); 1995 return (0); 1996 } 1997 return (EINVAL); 1998 } 1999 2000 static cam_status 2001 daregister(struct cam_periph *periph, void *arg) 2002 { 2003 struct da_softc *softc; 2004 struct ccb_pathinq cpi; 2005 struct ccb_getdev *cgd; 2006 char tmpstr[80]; 2007 caddr_t match; 2008 2009 cgd = (struct ccb_getdev *)arg; 2010 if (cgd == NULL) { 2011 printf("daregister: no getdev CCB, can't register device\n"); 2012 return(CAM_REQ_CMP_ERR); 2013 } 2014 2015 softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, 2016 M_NOWAIT|M_ZERO); 2017 2018 if (softc == NULL) { 2019 printf("daregister: Unable to probe new device. " 2020 "Unable to allocate softc\n"); 2021 return(CAM_REQ_CMP_ERR); 2022 } 2023 2024 LIST_INIT(&softc->pending_ccbs); 2025 softc->state = DA_STATE_PROBE_RC; 2026 bioq_init(&softc->bio_queue); 2027 bioq_init(&softc->delete_queue); 2028 bioq_init(&softc->delete_run_queue); 2029 if (SID_IS_REMOVABLE(&cgd->inq_data)) 2030 softc->flags |= DA_FLAG_PACK_REMOVABLE; 2031 softc->unmap_max_ranges = UNMAP_MAX_RANGES; 2032 softc->unmap_max_lba = UNMAP_RANGE_MAX; 2033 softc->ws_max_blks = WS16_MAX_BLKS; 2034 softc->trim_max_ranges = ATA_TRIM_MAX_RANGES; 2035 softc->sort_io_queue = -1; 2036 2037 periph->softc = softc; 2038 2039 /* 2040 * See if this device has any quirks. 2041 */ 2042 match = cam_quirkmatch((caddr_t)&cgd->inq_data, 2043 (caddr_t)da_quirk_table, 2044 sizeof(da_quirk_table)/sizeof(*da_quirk_table), 2045 sizeof(*da_quirk_table), scsi_inquiry_match); 2046 2047 if (match != NULL) 2048 softc->quirks = ((struct da_quirk_entry *)match)->quirks; 2049 else 2050 softc->quirks = DA_Q_NONE; 2051 2052 /* Check if the SIM does not want 6 byte commands */ 2053 bzero(&cpi, sizeof(cpi)); 2054 xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 2055 cpi.ccb_h.func_code = XPT_PATH_INQ; 2056 xpt_action((union ccb *)&cpi); 2057 if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) 2058 softc->quirks |= DA_Q_NO_6_BYTE; 2059 2060 TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph); 2061 2062 /* 2063 * Take an exclusive refcount on the periph while dastart is called 2064 * to finish the probe. The reference will be dropped in dadone at 2065 * the end of probe. 2066 */ 2067 (void)cam_periph_hold(periph, PRIBIO); 2068 2069 /* 2070 * Schedule a periodic event to occasionally send an 2071 * ordered tag to a device. 2072 */ 2073 callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0); 2074 callout_reset(&softc->sendordered_c, 2075 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, 2076 dasendorderedtag, softc); 2077 2078 cam_periph_unlock(periph); 2079 /* 2080 * RBC devices don't have to support READ(6), only READ(10). 2081 */ 2082 if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC) 2083 softc->minimum_cmd_size = 10; 2084 else 2085 softc->minimum_cmd_size = 6; 2086 2087 /* 2088 * Load the user's default, if any. 2089 */ 2090 snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size", 2091 periph->unit_number); 2092 TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size); 2093 2094 /* 2095 * 6, 10, 12 and 16 are the currently permissible values. 2096 */ 2097 if (softc->minimum_cmd_size < 6) 2098 softc->minimum_cmd_size = 6; 2099 else if ((softc->minimum_cmd_size > 6) 2100 && (softc->minimum_cmd_size <= 10)) 2101 softc->minimum_cmd_size = 10; 2102 else if ((softc->minimum_cmd_size > 10) 2103 && (softc->minimum_cmd_size <= 12)) 2104 softc->minimum_cmd_size = 12; 2105 else if (softc->minimum_cmd_size > 12) 2106 softc->minimum_cmd_size = 16; 2107 2108 /* Predict whether device may support READ CAPACITY(16). */ 2109 if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 && 2110 (softc->quirks & DA_Q_NO_RC16) == 0) { 2111 softc->flags |= DA_FLAG_CAN_RC16; 2112 softc->state = DA_STATE_PROBE_RC16; 2113 } 2114 2115 /* 2116 * Register this media as a disk. 2117 */ 2118 softc->disk = disk_alloc(); 2119 softc->disk->d_devstat = devstat_new_entry(periph->periph_name, 2120 periph->unit_number, 0, 2121 DEVSTAT_BS_UNAVAILABLE, 2122 SID_TYPE(&cgd->inq_data) | 2123 XPORT_DEVSTAT_TYPE(cpi.transport), 2124 DEVSTAT_PRIORITY_DISK); 2125 softc->disk->d_open = daopen; 2126 softc->disk->d_close = daclose; 2127 softc->disk->d_strategy = dastrategy; 2128 softc->disk->d_dump = dadump; 2129 softc->disk->d_getattr = dagetattr; 2130 softc->disk->d_gone = dadiskgonecb; 2131 softc->disk->d_name = "da"; 2132 softc->disk->d_drv1 = periph; 2133 if (cpi.maxio == 0) 2134 softc->disk->d_maxsize = DFLTPHYS; /* traditional default */ 2135 else if (cpi.maxio > MAXPHYS) 2136 softc->disk->d_maxsize = MAXPHYS; /* for safety */ 2137 else 2138 softc->disk->d_maxsize = cpi.maxio; 2139 softc->disk->d_unit = periph->unit_number; 2140 softc->disk->d_flags = 0; 2141 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) 2142 softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; 2143 if ((cpi.hba_misc & PIM_UNMAPPED) != 0) 2144 softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; 2145 cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, 2146 sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); 2147 strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); 2148 cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], 2149 cgd->inq_data.product, sizeof(cgd->inq_data.product), 2150 sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); 2151 softc->disk->d_hba_vendor = cpi.hba_vendor; 2152 softc->disk->d_hba_device = cpi.hba_device; 2153 softc->disk->d_hba_subvendor = cpi.hba_subvendor; 2154 softc->disk->d_hba_subdevice = cpi.hba_subdevice; 2155 2156 /* 2157 * Acquire a reference to the periph before we register with GEOM. 2158 * We'll release this reference once GEOM calls us back (via 2159 * dadiskgonecb()) telling us that our provider has been freed. 2160 */ 2161 if (cam_periph_acquire(periph) != CAM_REQ_CMP) { 2162 xpt_print(periph->path, "%s: lost periph during " 2163 "registration!\n", __func__); 2164 cam_periph_lock(periph); 2165 return (CAM_REQ_CMP_ERR); 2166 } 2167 2168 disk_create(softc->disk, DISK_VERSION); 2169 cam_periph_lock(periph); 2170 2171 /* 2172 * Add async callbacks for events of interest. 2173 * I don't bother checking if this fails as, 2174 * in most cases, the system will function just 2175 * fine without them and the only alternative 2176 * would be to not attach the device on failure. 2177 */ 2178 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | 2179 AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION, 2180 daasync, periph, periph->path); 2181 2182 /* 2183 * Emit an attribute changed notification just in case 2184 * physical path information arrived before our async 2185 * event handler was registered, but after anyone attaching 2186 * to our disk device polled it. 2187 */ 2188 disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); 2189 2190 /* 2191 * Schedule a periodic media polling events. 2192 */ 2193 callout_init_mtx(&softc->mediapoll_c, periph->sim->mtx, 0); 2194 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && 2195 (cgd->inq_flags & SID_AEN) == 0 && 2196 da_poll_period != 0) 2197 callout_reset(&softc->mediapoll_c, da_poll_period * hz, 2198 damediapoll, periph); 2199 2200 xpt_schedule(periph, CAM_PRIORITY_DEV); 2201 2202 return(CAM_REQ_CMP); 2203 } 2204 2205 static void 2206 dastart(struct cam_periph *periph, union ccb *start_ccb) 2207 { 2208 struct da_softc *softc; 2209 2210 softc = (struct da_softc *)periph->softc; 2211 2212 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); 2213 2214 skipstate: 2215 switch (softc->state) { 2216 case DA_STATE_NORMAL: 2217 { 2218 struct bio *bp; 2219 uint8_t tag_code; 2220 2221 /* Execute immediate CCB if waiting. */ 2222 if (periph->immediate_priority <= periph->pinfo.priority) { 2223 CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, 2224 ("queuing for immediate ccb\n")); 2225 start_ccb->ccb_h.ccb_state = DA_CCB_WAITING; 2226 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, 2227 periph_links.sle); 2228 periph->immediate_priority = CAM_PRIORITY_NONE; 2229 wakeup(&periph->ccb_list); 2230 /* May have more work to do, so ensure we stay scheduled */ 2231 daschedule(periph); 2232 break; 2233 } 2234 2235 /* Run BIO_DELETE if not running yet. */ 2236 if (!softc->delete_running && 2237 (bp = bioq_first(&softc->delete_queue)) != NULL) { 2238 if (softc->delete_func != NULL) { 2239 softc->delete_func(periph, start_ccb, bp); 2240 goto out; 2241 } else { 2242 bioq_flush(&softc->delete_queue, NULL, 0); 2243 /* FALLTHROUGH */ 2244 } 2245 } 2246 2247 /* Run regular command. */ 2248 bp = bioq_takefirst(&softc->bio_queue); 2249 if (bp == NULL) { 2250 if (softc->tur) { 2251 softc->tur = 0; 2252 scsi_test_unit_ready(&start_ccb->csio, 2253 /*retries*/ da_retry_count, 2254 dadone, 2255 MSG_SIMPLE_Q_TAG, 2256 SSD_FULL_SIZE, 2257 da_default_timeout * 1000); 2258 start_ccb->ccb_h.ccb_bp = NULL; 2259 start_ccb->ccb_h.ccb_state = DA_CCB_TUR; 2260 xpt_action(start_ccb); 2261 } else 2262 xpt_release_ccb(start_ccb); 2263 break; 2264 } 2265 if (softc->tur) { 2266 softc->tur = 0; 2267 cam_periph_release_locked(periph); 2268 } 2269 2270 if ((bp->bio_flags & BIO_ORDERED) != 0 || 2271 (softc->flags & DA_FLAG_NEED_OTAG) != 0) { 2272 softc->flags &= ~DA_FLAG_NEED_OTAG; 2273 softc->ordered_tag_count++; 2274 tag_code = MSG_ORDERED_Q_TAG; 2275 } else { 2276 tag_code = MSG_SIMPLE_Q_TAG; 2277 } 2278 2279 switch (bp->bio_cmd) { 2280 case BIO_WRITE: 2281 softc->flags |= DA_FLAG_DIRTY; 2282 /* FALLTHROUGH */ 2283 case BIO_READ: 2284 scsi_read_write(&start_ccb->csio, 2285 /*retries*/da_retry_count, 2286 /*cbfcnp*/dadone, 2287 /*tag_action*/tag_code, 2288 /*read_op*/(bp->bio_cmd == BIO_READ ? 2289 SCSI_RW_READ : SCSI_RW_WRITE) | 2290 ((bp->bio_flags & BIO_UNMAPPED) != 0 ? 2291 SCSI_RW_BIO : 0), 2292 /*byte2*/0, 2293 softc->minimum_cmd_size, 2294 /*lba*/bp->bio_pblkno, 2295 /*block_count*/bp->bio_bcount / 2296 softc->params.secsize, 2297 /*data_ptr*/ (bp->bio_flags & 2298 BIO_UNMAPPED) != 0 ? (void *)bp : 2299 bp->bio_data, 2300 /*dxfer_len*/ bp->bio_bcount, 2301 /*sense_len*/SSD_FULL_SIZE, 2302 da_default_timeout * 1000); 2303 break; 2304 case BIO_FLUSH: 2305 /* 2306 * BIO_FLUSH doesn't currently communicate 2307 * range data, so we synchronize the cache 2308 * over the whole disk. We also force 2309 * ordered tag semantics the flush applies 2310 * to all previously queued I/O. 2311 */ 2312 scsi_synchronize_cache(&start_ccb->csio, 2313 /*retries*/1, 2314 /*cbfcnp*/dadone, 2315 MSG_ORDERED_Q_TAG, 2316 /*begin_lba*/0, 2317 /*lb_count*/0, 2318 SSD_FULL_SIZE, 2319 da_default_timeout*1000); 2320 break; 2321 } 2322 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; 2323 2324 out: 2325 /* 2326 * Block out any asynchronous callbacks 2327 * while we touch the pending ccb list. 2328 */ 2329 LIST_INSERT_HEAD(&softc->pending_ccbs, 2330 &start_ccb->ccb_h, periph_links.le); 2331 softc->outstanding_cmds++; 2332 2333 /* We expect a unit attention from this device */ 2334 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { 2335 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; 2336 softc->flags &= ~DA_FLAG_RETRY_UA; 2337 } 2338 2339 start_ccb->ccb_h.ccb_bp = bp; 2340 xpt_action(start_ccb); 2341 2342 /* May have more work to do, so ensure we stay scheduled */ 2343 daschedule(periph); 2344 break; 2345 } 2346 case DA_STATE_PROBE_RC: 2347 { 2348 struct scsi_read_capacity_data *rcap; 2349 2350 rcap = (struct scsi_read_capacity_data *) 2351 malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); 2352 if (rcap == NULL) { 2353 printf("dastart: Couldn't malloc read_capacity data\n"); 2354 /* da_free_periph??? */ 2355 break; 2356 } 2357 scsi_read_capacity(&start_ccb->csio, 2358 /*retries*/da_retry_count, 2359 dadone, 2360 MSG_SIMPLE_Q_TAG, 2361 rcap, 2362 SSD_FULL_SIZE, 2363 /*timeout*/5000); 2364 start_ccb->ccb_h.ccb_bp = NULL; 2365 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; 2366 xpt_action(start_ccb); 2367 break; 2368 } 2369 case DA_STATE_PROBE_RC16: 2370 { 2371 struct scsi_read_capacity_data_long *rcaplong; 2372 2373 rcaplong = (struct scsi_read_capacity_data_long *) 2374 malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); 2375 if (rcaplong == NULL) { 2376 printf("dastart: Couldn't malloc read_capacity data\n"); 2377 /* da_free_periph??? */ 2378 break; 2379 } 2380 scsi_read_capacity_16(&start_ccb->csio, 2381 /*retries*/ da_retry_count, 2382 /*cbfcnp*/ dadone, 2383 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2384 /*lba*/ 0, 2385 /*reladr*/ 0, 2386 /*pmi*/ 0, 2387 /*rcap_buf*/ (uint8_t *)rcaplong, 2388 /*rcap_buf_len*/ sizeof(*rcaplong), 2389 /*sense_len*/ SSD_FULL_SIZE, 2390 /*timeout*/ da_default_timeout * 1000); 2391 start_ccb->ccb_h.ccb_bp = NULL; 2392 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; 2393 xpt_action(start_ccb); 2394 break; 2395 } 2396 case DA_STATE_PROBE_LBP: 2397 { 2398 struct scsi_vpd_logical_block_prov *lbp; 2399 2400 if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { 2401 /* 2402 * If we get here we don't support any SBC-3 delete 2403 * methods with UNMAP as the Logical Block Provisioning 2404 * VPD page support is required for devices which 2405 * support it according to T10/1799-D Revision 31 2406 * however older revisions of the spec don't mandate 2407 * this so we currently don't remove these methods 2408 * from the available set. 2409 */ 2410 softc->state = DA_STATE_PROBE_BLK_LIMITS; 2411 goto skipstate; 2412 } 2413 2414 lbp = (struct scsi_vpd_logical_block_prov *) 2415 malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); 2416 2417 if (lbp == NULL) { 2418 printf("dastart: Couldn't malloc lbp data\n"); 2419 /* da_free_periph??? */ 2420 break; 2421 } 2422 2423 scsi_inquiry(&start_ccb->csio, 2424 /*retries*/da_retry_count, 2425 /*cbfcnp*/dadone, 2426 /*tag_action*/MSG_SIMPLE_Q_TAG, 2427 /*inq_buf*/(u_int8_t *)lbp, 2428 /*inq_len*/sizeof(*lbp), 2429 /*evpd*/TRUE, 2430 /*page_code*/SVPD_LBP, 2431 /*sense_len*/SSD_MIN_SIZE, 2432 /*timeout*/da_default_timeout * 1000); 2433 start_ccb->ccb_h.ccb_bp = NULL; 2434 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; 2435 xpt_action(start_ccb); 2436 break; 2437 } 2438 case DA_STATE_PROBE_BLK_LIMITS: 2439 { 2440 struct scsi_vpd_block_limits *block_limits; 2441 2442 if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { 2443 /* Not supported skip to next probe */ 2444 softc->state = DA_STATE_PROBE_BDC; 2445 goto skipstate; 2446 } 2447 2448 block_limits = (struct scsi_vpd_block_limits *) 2449 malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); 2450 2451 if (block_limits == NULL) { 2452 printf("dastart: Couldn't malloc block_limits data\n"); 2453 /* da_free_periph??? */ 2454 break; 2455 } 2456 2457 scsi_inquiry(&start_ccb->csio, 2458 /*retries*/da_retry_count, 2459 /*cbfcnp*/dadone, 2460 /*tag_action*/MSG_SIMPLE_Q_TAG, 2461 /*inq_buf*/(u_int8_t *)block_limits, 2462 /*inq_len*/sizeof(*block_limits), 2463 /*evpd*/TRUE, 2464 /*page_code*/SVPD_BLOCK_LIMITS, 2465 /*sense_len*/SSD_MIN_SIZE, 2466 /*timeout*/da_default_timeout * 1000); 2467 start_ccb->ccb_h.ccb_bp = NULL; 2468 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; 2469 xpt_action(start_ccb); 2470 break; 2471 } 2472 case DA_STATE_PROBE_BDC: 2473 { 2474 struct scsi_vpd_block_characteristics *bdc; 2475 2476 if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { 2477 softc->state = DA_STATE_PROBE_ATA; 2478 goto skipstate; 2479 } 2480 2481 bdc = (struct scsi_vpd_block_characteristics *) 2482 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); 2483 2484 if (bdc == NULL) { 2485 printf("dastart: Couldn't malloc bdc data\n"); 2486 /* da_free_periph??? */ 2487 break; 2488 } 2489 2490 scsi_inquiry(&start_ccb->csio, 2491 /*retries*/da_retry_count, 2492 /*cbfcnp*/dadone, 2493 /*tag_action*/MSG_SIMPLE_Q_TAG, 2494 /*inq_buf*/(u_int8_t *)bdc, 2495 /*inq_len*/sizeof(*bdc), 2496 /*evpd*/TRUE, 2497 /*page_code*/SVPD_BDC, 2498 /*sense_len*/SSD_MIN_SIZE, 2499 /*timeout*/da_default_timeout * 1000); 2500 start_ccb->ccb_h.ccb_bp = NULL; 2501 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; 2502 xpt_action(start_ccb); 2503 break; 2504 } 2505 case DA_STATE_PROBE_ATA: 2506 { 2507 struct ata_params *ata_params; 2508 2509 if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { 2510 daprobedone(periph, start_ccb); 2511 break; 2512 } 2513 2514 ata_params = (struct ata_params*) 2515 malloc(sizeof(*ata_params), M_SCSIDA, M_NOWAIT|M_ZERO); 2516 2517 if (ata_params == NULL) { 2518 printf("dastart: Couldn't malloc ata_params data\n"); 2519 /* da_free_periph??? */ 2520 break; 2521 } 2522 2523 scsi_ata_identify(&start_ccb->csio, 2524 /*retries*/da_retry_count, 2525 /*cbfcnp*/dadone, 2526 /*tag_action*/MSG_SIMPLE_Q_TAG, 2527 /*data_ptr*/(u_int8_t *)ata_params, 2528 /*dxfer_len*/sizeof(*ata_params), 2529 /*sense_len*/SSD_FULL_SIZE, 2530 /*timeout*/da_default_timeout * 1000); 2531 start_ccb->ccb_h.ccb_bp = NULL; 2532 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; 2533 xpt_action(start_ccb); 2534 break; 2535 } 2536 } 2537 } 2538 2539 /* 2540 * In each of the methods below, while its the caller's 2541 * responsibility to ensure the request will fit into a 2542 * single device request, we might have changed the delete 2543 * method due to the device incorrectly advertising either 2544 * its supported methods or limits. 2545 * 2546 * To prevent this causing further issues we validate the 2547 * against the methods limits, and warn which would 2548 * otherwise be unnecessary. 2549 */ 2550 static void 2551 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 2552 { 2553 struct da_softc *softc = (struct da_softc *)periph->softc;; 2554 struct bio *bp1; 2555 uint8_t *buf = softc->unmap_buf; 2556 uint64_t lba, lastlba = (uint64_t)-1; 2557 uint64_t totalcount = 0; 2558 uint64_t count; 2559 uint32_t lastcount = 0, c; 2560 uint32_t off, ranges = 0; 2561 2562 /* 2563 * Currently this doesn't take the UNMAP 2564 * Granularity and Granularity Alignment 2565 * fields into account. 2566 * 2567 * This could result in both unoptimal unmap 2568 * requests as as well as UNMAP calls unmapping 2569 * fewer LBA's than requested. 2570 */ 2571 2572 softc->delete_running = 1; 2573 bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); 2574 bp1 = bp; 2575 do { 2576 bioq_remove(&softc->delete_queue, bp1); 2577 if (bp1 != bp) 2578 bioq_insert_tail(&softc->delete_run_queue, bp1); 2579 lba = bp1->bio_pblkno; 2580 count = bp1->bio_bcount / softc->params.secsize; 2581 2582 /* Try to extend the previous range. */ 2583 if (lba == lastlba) { 2584 c = min(count, softc->unmap_max_lba - lastcount); 2585 lastcount += c; 2586 off = ((ranges - 1) * UNMAP_RANGE_SIZE) + 2587 UNMAP_HEAD_SIZE; 2588 scsi_ulto4b(lastcount, &buf[off + 8]); 2589 count -= c; 2590 lba +=c; 2591 totalcount += c; 2592 } 2593 2594 while (count > 0) { 2595 c = min(count, softc->unmap_max_lba); 2596 if (totalcount + c > softc->unmap_max_lba || 2597 ranges >= softc->unmap_max_ranges) { 2598 xpt_print(periph->path, 2599 "%s issuing short delete %ld > %ld" 2600 "|| %d >= %d", 2601 da_delete_method_desc[softc->delete_method], 2602 totalcount + c, softc->unmap_max_lba, 2603 ranges, softc->unmap_max_ranges); 2604 break; 2605 } 2606 off = (ranges * UNMAP_RANGE_SIZE) + UNMAP_HEAD_SIZE; 2607 scsi_u64to8b(lba, &buf[off + 0]); 2608 scsi_ulto4b(c, &buf[off + 8]); 2609 lba += c; 2610 totalcount += c; 2611 ranges++; 2612 count -= c; 2613 lastcount = c; 2614 } 2615 lastlba = lba; 2616 bp1 = bioq_first(&softc->delete_queue); 2617 if (bp1 == NULL || ranges >= softc->unmap_max_ranges || 2618 totalcount + bp1->bio_bcount / 2619 softc->params.secsize > softc->unmap_max_lba) 2620 break; 2621 } while (1); 2622 scsi_ulto2b(ranges * 16 + 6, &buf[0]); 2623 scsi_ulto2b(ranges * 16, &buf[2]); 2624 2625 scsi_unmap(&ccb->csio, 2626 /*retries*/da_retry_count, 2627 /*cbfcnp*/dadone, 2628 /*tag_action*/MSG_SIMPLE_Q_TAG, 2629 /*byte2*/0, 2630 /*data_ptr*/ buf, 2631 /*dxfer_len*/ ranges * 16 + 8, 2632 /*sense_len*/SSD_FULL_SIZE, 2633 da_default_timeout * 1000); 2634 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 2635 } 2636 2637 static void 2638 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 2639 { 2640 struct da_softc *softc = (struct da_softc *)periph->softc; 2641 struct bio *bp1; 2642 uint8_t *buf = softc->unmap_buf; 2643 uint64_t lastlba = (uint64_t)-1; 2644 uint64_t count; 2645 uint64_t lba; 2646 uint32_t lastcount = 0, c, requestcount; 2647 int ranges = 0, off, block_count; 2648 2649 softc->delete_running = 1; 2650 bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); 2651 bp1 = bp; 2652 do { 2653 bioq_remove(&softc->delete_queue, bp1); 2654 if (bp1 != bp) 2655 bioq_insert_tail(&softc->delete_run_queue, bp1); 2656 lba = bp1->bio_pblkno; 2657 count = bp1->bio_bcount / softc->params.secsize; 2658 requestcount = count; 2659 2660 /* Try to extend the previous range. */ 2661 if (lba == lastlba) { 2662 c = min(count, ATA_DSM_RANGE_MAX - lastcount); 2663 lastcount += c; 2664 off = (ranges - 1) * 8; 2665 buf[off + 6] = lastcount & 0xff; 2666 buf[off + 7] = (lastcount >> 8) & 0xff; 2667 count -= c; 2668 lba += c; 2669 } 2670 2671 while (count > 0) { 2672 c = min(count, ATA_DSM_RANGE_MAX); 2673 off = ranges * 8; 2674 2675 buf[off + 0] = lba & 0xff; 2676 buf[off + 1] = (lba >> 8) & 0xff; 2677 buf[off + 2] = (lba >> 16) & 0xff; 2678 buf[off + 3] = (lba >> 24) & 0xff; 2679 buf[off + 4] = (lba >> 32) & 0xff; 2680 buf[off + 5] = (lba >> 40) & 0xff; 2681 buf[off + 6] = c & 0xff; 2682 buf[off + 7] = (c >> 8) & 0xff; 2683 lba += c; 2684 ranges++; 2685 count -= c; 2686 lastcount = c; 2687 if (count != 0 && ranges == softc->trim_max_ranges) { 2688 xpt_print(periph->path, 2689 "%s issuing short delete %ld > %ld", 2690 da_delete_method_desc[softc->delete_method], 2691 requestcount, 2692 (softc->trim_max_ranges - ranges) * 2693 ATA_DSM_RANGE_MAX); 2694 break; 2695 } 2696 } 2697 lastlba = lba; 2698 bp1 = bioq_first(&softc->delete_queue); 2699 if (bp1 == NULL || bp1->bio_bcount / softc->params.secsize > 2700 (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) 2701 break; 2702 } while (1); 2703 2704 block_count = (ranges + ATA_DSM_BLK_RANGES - 1) / ATA_DSM_BLK_RANGES; 2705 scsi_ata_trim(&ccb->csio, 2706 /*retries*/da_retry_count, 2707 /*cbfcnp*/dadone, 2708 /*tag_action*/MSG_SIMPLE_Q_TAG, 2709 block_count, 2710 /*data_ptr*/buf, 2711 /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, 2712 /*sense_len*/SSD_FULL_SIZE, 2713 da_default_timeout * 1000); 2714 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 2715 } 2716 2717 /* 2718 * We calculate ws_max_blks here based off d_delmaxsize instead 2719 * of using softc->ws_max_blks as it is absolute max for the 2720 * device not the protocol max which may well be lower 2721 */ 2722 static void 2723 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 2724 { 2725 struct da_softc *softc; 2726 struct bio *bp1; 2727 uint64_t ws_max_blks; 2728 uint64_t lba; 2729 uint64_t count; /* forward compat with WS32 */ 2730 2731 softc = (struct da_softc *)periph->softc; 2732 ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; 2733 softc->delete_running = 1; 2734 lba = bp->bio_pblkno; 2735 count = 0; 2736 bp1 = bp; 2737 do { 2738 bioq_remove(&softc->delete_queue, bp1); 2739 if (bp1 != bp) 2740 bioq_insert_tail(&softc->delete_run_queue, bp1); 2741 count += bp1->bio_bcount / softc->params.secsize; 2742 if (count > ws_max_blks) { 2743 count = min(count, ws_max_blks); 2744 xpt_print(periph->path, 2745 "%s issuing short delete %ld > %ld", 2746 da_delete_method_desc[softc->delete_method], 2747 count, ws_max_blks); 2748 break; 2749 } 2750 bp1 = bioq_first(&softc->delete_queue); 2751 if (bp1 == NULL || lba + count != bp1->bio_pblkno || 2752 count + bp1->bio_bcount / 2753 softc->params.secsize > ws_max_blks) 2754 break; 2755 } while (1); 2756 2757 scsi_write_same(&ccb->csio, 2758 /*retries*/da_retry_count, 2759 /*cbfcnp*/dadone, 2760 /*tag_action*/MSG_SIMPLE_Q_TAG, 2761 /*byte2*/softc->delete_method == 2762 DA_DELETE_ZERO ? 0 : SWS_UNMAP, 2763 softc->delete_method == DA_DELETE_WS16 ? 16 : 10, 2764 /*lba*/lba, 2765 /*block_count*/count, 2766 /*data_ptr*/ __DECONST(void *, zero_region), 2767 /*dxfer_len*/ softc->params.secsize, 2768 /*sense_len*/SSD_FULL_SIZE, 2769 da_default_timeout * 1000); 2770 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 2771 } 2772 2773 static int 2774 cmd6workaround(union ccb *ccb) 2775 { 2776 struct scsi_rw_6 cmd6; 2777 struct scsi_rw_10 *cmd10; 2778 struct da_softc *softc; 2779 u_int8_t *cdb; 2780 struct bio *bp; 2781 int frozen; 2782 2783 cdb = ccb->csio.cdb_io.cdb_bytes; 2784 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; 2785 2786 if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { 2787 da_delete_methods old_method = softc->delete_method; 2788 2789 /* 2790 * Typically there are two reasons for failure here 2791 * 1. Delete method was detected as supported but isn't 2792 * 2. Delete failed due to invalid params e.g. too big 2793 * 2794 * While we will attempt to choose an alternative delete method 2795 * this may result in short deletes if the existing delete 2796 * requests from geom are big for the new method choosen. 2797 * 2798 * This method assumes that the error which triggered this 2799 * will not retry the io otherwise a panic will occur 2800 */ 2801 dadeleteflag(softc, old_method, 0); 2802 dadeletemethodchoose(softc, DA_DELETE_DISABLE); 2803 if (softc->delete_method == DA_DELETE_DISABLE) 2804 xpt_print(ccb->ccb_h.path, 2805 "%s failed, disabling BIO_DELETE\n", 2806 da_delete_method_desc[old_method]); 2807 else 2808 xpt_print(ccb->ccb_h.path, 2809 "%s failed, switching to %s BIO_DELETE\n", 2810 da_delete_method_desc[old_method], 2811 da_delete_method_desc[softc->delete_method]); 2812 2813 if (DA_SIO) { 2814 while ((bp = bioq_takefirst(&softc->delete_run_queue)) 2815 != NULL) 2816 bioq_disksort(&softc->delete_queue, bp); 2817 } else { 2818 while ((bp = bioq_takefirst(&softc->delete_run_queue)) 2819 != NULL) 2820 bioq_insert_tail(&softc->delete_queue, bp); 2821 } 2822 bioq_insert_tail(&softc->delete_queue, 2823 (struct bio *)ccb->ccb_h.ccb_bp); 2824 ccb->ccb_h.ccb_bp = NULL; 2825 return (0); 2826 } 2827 2828 /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ 2829 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && 2830 (*cdb == PREVENT_ALLOW) && 2831 (softc->quirks & DA_Q_NO_PREVENT) == 0) { 2832 if (bootverbose) 2833 xpt_print(ccb->ccb_h.path, 2834 "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); 2835 softc->quirks |= DA_Q_NO_PREVENT; 2836 return (0); 2837 } 2838 2839 /* Detect unsupported SYNCHRONIZE CACHE(10). */ 2840 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && 2841 (*cdb == SYNCHRONIZE_CACHE) && 2842 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { 2843 if (bootverbose) 2844 xpt_print(ccb->ccb_h.path, 2845 "SYNCHRONIZE CACHE(10) not supported.\n"); 2846 softc->quirks |= DA_Q_NO_SYNC_CACHE; 2847 softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; 2848 return (0); 2849 } 2850 2851 /* Translation only possible if CDB is an array and cmd is R/W6 */ 2852 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || 2853 (*cdb != READ_6 && *cdb != WRITE_6)) 2854 return 0; 2855 2856 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " 2857 "increasing minimum_cmd_size to 10.\n"); 2858 softc->minimum_cmd_size = 10; 2859 2860 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); 2861 cmd10 = (struct scsi_rw_10 *)cdb; 2862 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10; 2863 cmd10->byte2 = 0; 2864 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); 2865 cmd10->reserved = 0; 2866 scsi_ulto2b(cmd6.length, cmd10->length); 2867 cmd10->control = cmd6.control; 2868 ccb->csio.cdb_len = sizeof(*cmd10); 2869 2870 /* Requeue request, unfreezing queue if necessary */ 2871 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 2872 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2873 xpt_action(ccb); 2874 if (frozen) { 2875 cam_release_devq(ccb->ccb_h.path, 2876 /*relsim_flags*/0, 2877 /*reduction*/0, 2878 /*timeout*/0, 2879 /*getcount_only*/0); 2880 } 2881 return (ERESTART); 2882 } 2883 2884 static void 2885 dadone(struct cam_periph *periph, union ccb *done_ccb) 2886 { 2887 struct da_softc *softc; 2888 struct ccb_scsiio *csio; 2889 u_int32_t priority; 2890 da_ccb_state state; 2891 2892 softc = (struct da_softc *)periph->softc; 2893 priority = done_ccb->ccb_h.pinfo.priority; 2894 2895 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n")); 2896 2897 csio = &done_ccb->csio; 2898 state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; 2899 switch (state) { 2900 case DA_CCB_BUFFER_IO: 2901 case DA_CCB_DELETE: 2902 { 2903 struct bio *bp, *bp1; 2904 2905 bp = (struct bio *)done_ccb->ccb_h.ccb_bp; 2906 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2907 int error; 2908 int sf; 2909 2910 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0) 2911 sf = SF_RETRY_UA; 2912 else 2913 sf = 0; 2914 2915 error = daerror(done_ccb, CAM_RETRY_SELTO, sf); 2916 if (error == ERESTART) { 2917 /* 2918 * A retry was scheduled, so 2919 * just return. 2920 */ 2921 return; 2922 } 2923 bp = (struct bio *)done_ccb->ccb_h.ccb_bp; 2924 if (error != 0) { 2925 int queued_error; 2926 2927 /* 2928 * return all queued I/O with EIO, so that 2929 * the client can retry these I/Os in the 2930 * proper order should it attempt to recover. 2931 */ 2932 queued_error = EIO; 2933 2934 if (error == ENXIO 2935 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) { 2936 /* 2937 * Catastrophic error. Mark our pack as 2938 * invalid. 2939 */ 2940 /* 2941 * XXX See if this is really a media 2942 * XXX change first? 2943 */ 2944 xpt_print(periph->path, 2945 "Invalidating pack\n"); 2946 softc->flags |= DA_FLAG_PACK_INVALID; 2947 queued_error = ENXIO; 2948 } 2949 bioq_flush(&softc->bio_queue, NULL, 2950 queued_error); 2951 if (bp != NULL) { 2952 bp->bio_error = error; 2953 bp->bio_resid = bp->bio_bcount; 2954 bp->bio_flags |= BIO_ERROR; 2955 } 2956 } else if (bp != NULL) { 2957 if (state == DA_CCB_DELETE) 2958 bp->bio_resid = 0; 2959 else 2960 bp->bio_resid = csio->resid; 2961 bp->bio_error = 0; 2962 if (bp->bio_resid != 0) 2963 bp->bio_flags |= BIO_ERROR; 2964 } 2965 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 2966 cam_release_devq(done_ccb->ccb_h.path, 2967 /*relsim_flags*/0, 2968 /*reduction*/0, 2969 /*timeout*/0, 2970 /*getcount_only*/0); 2971 } else if (bp != NULL) { 2972 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 2973 panic("REQ_CMP with QFRZN"); 2974 if (state == DA_CCB_DELETE) 2975 bp->bio_resid = 0; 2976 else 2977 bp->bio_resid = csio->resid; 2978 if (csio->resid > 0) 2979 bp->bio_flags |= BIO_ERROR; 2980 if (softc->error_inject != 0) { 2981 bp->bio_error = softc->error_inject; 2982 bp->bio_resid = bp->bio_bcount; 2983 bp->bio_flags |= BIO_ERROR; 2984 softc->error_inject = 0; 2985 } 2986 } 2987 2988 /* 2989 * Block out any asynchronous callbacks 2990 * while we touch the pending ccb list. 2991 */ 2992 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); 2993 softc->outstanding_cmds--; 2994 if (softc->outstanding_cmds == 0) 2995 softc->flags |= DA_FLAG_WENT_IDLE; 2996 2997 if (state == DA_CCB_DELETE) { 2998 while ((bp1 = bioq_takefirst(&softc->delete_run_queue)) 2999 != NULL) { 3000 bp1->bio_error = bp->bio_error; 3001 if (bp->bio_flags & BIO_ERROR) { 3002 bp1->bio_flags |= BIO_ERROR; 3003 bp1->bio_resid = bp1->bio_bcount; 3004 } else 3005 bp1->bio_resid = 0; 3006 biodone(bp1); 3007 } 3008 softc->delete_running = 0; 3009 if (bp != NULL) 3010 biodone(bp); 3011 daschedule(periph); 3012 } else if (bp != NULL) 3013 biodone(bp); 3014 break; 3015 } 3016 case DA_CCB_PROBE_RC: 3017 case DA_CCB_PROBE_RC16: 3018 { 3019 struct scsi_read_capacity_data *rdcap; 3020 struct scsi_read_capacity_data_long *rcaplong; 3021 char announce_buf[80]; 3022 int lbp; 3023 3024 lbp = 0; 3025 rdcap = NULL; 3026 rcaplong = NULL; 3027 if (state == DA_CCB_PROBE_RC) 3028 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr; 3029 else 3030 rcaplong = (struct scsi_read_capacity_data_long *) 3031 csio->data_ptr; 3032 3033 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 3034 struct disk_params *dp; 3035 uint32_t block_size; 3036 uint64_t maxsector; 3037 u_int lbppbe; /* LB per physical block exponent. */ 3038 u_int lalba; /* Lowest aligned LBA. */ 3039 3040 if (state == DA_CCB_PROBE_RC) { 3041 block_size = scsi_4btoul(rdcap->length); 3042 maxsector = scsi_4btoul(rdcap->addr); 3043 lbppbe = 0; 3044 lalba = 0; 3045 3046 /* 3047 * According to SBC-2, if the standard 10 3048 * byte READ CAPACITY command returns 2^32, 3049 * we should issue the 16 byte version of 3050 * the command, since the device in question 3051 * has more sectors than can be represented 3052 * with the short version of the command. 3053 */ 3054 if (maxsector == 0xffffffff) { 3055 free(rdcap, M_SCSIDA); 3056 xpt_release_ccb(done_ccb); 3057 softc->state = DA_STATE_PROBE_RC16; 3058 xpt_schedule(periph, priority); 3059 return; 3060 } 3061 } else { 3062 block_size = scsi_4btoul(rcaplong->length); 3063 maxsector = scsi_8btou64(rcaplong->addr); 3064 lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; 3065 lalba = scsi_2btoul(rcaplong->lalba_lbp); 3066 } 3067 3068 /* 3069 * Because GEOM code just will panic us if we 3070 * give them an 'illegal' value we'll avoid that 3071 * here. 3072 */ 3073 if (block_size == 0 && maxsector == 0) { 3074 block_size = 512; 3075 maxsector = -1; 3076 } 3077 if (block_size >= MAXPHYS || block_size == 0) { 3078 xpt_print(periph->path, 3079 "unsupportable block size %ju\n", 3080 (uintmax_t) block_size); 3081 announce_buf[0] = '\0'; 3082 cam_periph_invalidate(periph); 3083 } else { 3084 /* 3085 * We pass rcaplong into dasetgeom(), 3086 * because it will only use it if it is 3087 * non-NULL. 3088 */ 3089 dasetgeom(periph, block_size, maxsector, 3090 rcaplong, sizeof(*rcaplong)); 3091 lbp = (lalba & SRC16_LBPME_A); 3092 dp = &softc->params; 3093 snprintf(announce_buf, sizeof(announce_buf), 3094 "%juMB (%ju %u byte sectors: %dH %dS/T " 3095 "%dC)", (uintmax_t) 3096 (((uintmax_t)dp->secsize * 3097 dp->sectors) / (1024*1024)), 3098 (uintmax_t)dp->sectors, 3099 dp->secsize, dp->heads, 3100 dp->secs_per_track, dp->cylinders); 3101 } 3102 } else { 3103 int error; 3104 3105 announce_buf[0] = '\0'; 3106 3107 /* 3108 * Retry any UNIT ATTENTION type errors. They 3109 * are expected at boot. 3110 */ 3111 error = daerror(done_ccb, CAM_RETRY_SELTO, 3112 SF_RETRY_UA|SF_NO_PRINT); 3113 if (error == ERESTART) { 3114 /* 3115 * A retry was scheuled, so 3116 * just return. 3117 */ 3118 return; 3119 } else if (error != 0) { 3120 int asc, ascq; 3121 int sense_key, error_code; 3122 int have_sense; 3123 cam_status status; 3124 struct ccb_getdev cgd; 3125 3126 /* Don't wedge this device's queue */ 3127 status = done_ccb->ccb_h.status; 3128 if ((status & CAM_DEV_QFRZN) != 0) 3129 cam_release_devq(done_ccb->ccb_h.path, 3130 /*relsim_flags*/0, 3131 /*reduction*/0, 3132 /*timeout*/0, 3133 /*getcount_only*/0); 3134 3135 3136 xpt_setup_ccb(&cgd.ccb_h, 3137 done_ccb->ccb_h.path, 3138 CAM_PRIORITY_NORMAL); 3139 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 3140 xpt_action((union ccb *)&cgd); 3141 3142 if (scsi_extract_sense_ccb(done_ccb, 3143 &error_code, &sense_key, &asc, &ascq)) 3144 have_sense = TRUE; 3145 else 3146 have_sense = FALSE; 3147 3148 /* 3149 * If we tried READ CAPACITY(16) and failed, 3150 * fallback to READ CAPACITY(10). 3151 */ 3152 if ((state == DA_CCB_PROBE_RC16) && 3153 (softc->flags & DA_FLAG_CAN_RC16) && 3154 (((csio->ccb_h.status & CAM_STATUS_MASK) == 3155 CAM_REQ_INVALID) || 3156 ((have_sense) && 3157 (error_code == SSD_CURRENT_ERROR) && 3158 (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) { 3159 softc->flags &= ~DA_FLAG_CAN_RC16; 3160 free(rdcap, M_SCSIDA); 3161 xpt_release_ccb(done_ccb); 3162 softc->state = DA_STATE_PROBE_RC; 3163 xpt_schedule(periph, priority); 3164 return; 3165 } else 3166 /* 3167 * Attach to anything that claims to be a 3168 * direct access or optical disk device, 3169 * as long as it doesn't return a "Logical 3170 * unit not supported" (0x25) error. 3171 */ 3172 if ((have_sense) && (asc != 0x25) 3173 && (error_code == SSD_CURRENT_ERROR)) { 3174 const char *sense_key_desc; 3175 const char *asc_desc; 3176 3177 dasetgeom(periph, 512, -1, NULL, 0); 3178 scsi_sense_desc(sense_key, asc, ascq, 3179 &cgd.inq_data, 3180 &sense_key_desc, 3181 &asc_desc); 3182 snprintf(announce_buf, 3183 sizeof(announce_buf), 3184 "Attempt to query device " 3185 "size failed: %s, %s", 3186 sense_key_desc, 3187 asc_desc); 3188 } else { 3189 if (have_sense) 3190 scsi_sense_print( 3191 &done_ccb->csio); 3192 else { 3193 xpt_print(periph->path, 3194 "got CAM status %#x\n", 3195 done_ccb->ccb_h.status); 3196 } 3197 3198 xpt_print(periph->path, "fatal error, " 3199 "failed to attach to device\n"); 3200 3201 /* 3202 * Free up resources. 3203 */ 3204 cam_periph_invalidate(periph); 3205 } 3206 } 3207 } 3208 free(csio->data_ptr, M_SCSIDA); 3209 if (announce_buf[0] != '\0' && ((softc->flags & DA_FLAG_PROBED) == 0)) { 3210 /* 3211 * Create our sysctl variables, now that we know 3212 * we have successfully attached. 3213 */ 3214 /* increase the refcount */ 3215 if (cam_periph_acquire(periph) == CAM_REQ_CMP) { 3216 taskqueue_enqueue(taskqueue_thread, 3217 &softc->sysctl_task); 3218 xpt_announce_periph(periph, announce_buf); 3219 xpt_announce_quirks(periph, softc->quirks, 3220 DA_Q_BIT_STRING); 3221 } else { 3222 xpt_print(periph->path, "fatal error, " 3223 "could not acquire reference count\n"); 3224 } 3225 } 3226 3227 /* Ensure re-probe doesn't see old delete. */ 3228 softc->delete_available = 0; 3229 if (lbp) { 3230 /* 3231 * Based on older SBC-3 spec revisions 3232 * any of the UNMAP methods "may" be 3233 * available via LBP given this flag so 3234 * we flag all of them as availble and 3235 * then remove those which further 3236 * probes confirm aren't available 3237 * later. 3238 * 3239 * We could also check readcap(16) p_type 3240 * flag to exclude one or more invalid 3241 * write same (X) types here 3242 */ 3243 dadeleteflag(softc, DA_DELETE_WS16, 1); 3244 dadeleteflag(softc, DA_DELETE_WS10, 1); 3245 dadeleteflag(softc, DA_DELETE_ZERO, 1); 3246 dadeleteflag(softc, DA_DELETE_UNMAP, 1); 3247 3248 xpt_release_ccb(done_ccb); 3249 softc->state = DA_STATE_PROBE_LBP; 3250 xpt_schedule(periph, priority); 3251 return; 3252 } 3253 3254 xpt_release_ccb(done_ccb); 3255 softc->state = DA_STATE_PROBE_BDC; 3256 xpt_schedule(periph, priority); 3257 return; 3258 } 3259 case DA_CCB_PROBE_LBP: 3260 { 3261 struct scsi_vpd_logical_block_prov *lbp; 3262 3263 lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; 3264 3265 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 3266 /* 3267 * T10/1799-D Revision 31 states at least one of these 3268 * must be supported but we don't currently enforce this. 3269 */ 3270 dadeleteflag(softc, DA_DELETE_WS16, 3271 (lbp->flags & SVPD_LBP_WS16)); 3272 dadeleteflag(softc, DA_DELETE_WS10, 3273 (lbp->flags & SVPD_LBP_WS10)); 3274 dadeleteflag(softc, DA_DELETE_ZERO, 3275 (lbp->flags & SVPD_LBP_WS10)); 3276 dadeleteflag(softc, DA_DELETE_UNMAP, 3277 (lbp->flags & SVPD_LBP_UNMAP)); 3278 3279 if (lbp->flags & SVPD_LBP_UNMAP) { 3280 free(lbp, M_SCSIDA); 3281 xpt_release_ccb(done_ccb); 3282 softc->state = DA_STATE_PROBE_BLK_LIMITS; 3283 xpt_schedule(periph, priority); 3284 return; 3285 } 3286 } else { 3287 int error; 3288 error = daerror(done_ccb, CAM_RETRY_SELTO, 3289 SF_RETRY_UA|SF_NO_PRINT); 3290 if (error == ERESTART) 3291 return; 3292 else if (error != 0) { 3293 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 3294 /* Don't wedge this device's queue */ 3295 cam_release_devq(done_ccb->ccb_h.path, 3296 /*relsim_flags*/0, 3297 /*reduction*/0, 3298 /*timeout*/0, 3299 /*getcount_only*/0); 3300 } 3301 3302 /* 3303 * Failure indicates we don't support any SBC-3 3304 * delete methods with UNMAP 3305 */ 3306 } 3307 } 3308 3309 free(lbp, M_SCSIDA); 3310 xpt_release_ccb(done_ccb); 3311 softc->state = DA_STATE_PROBE_BDC; 3312 xpt_schedule(periph, priority); 3313 return; 3314 } 3315 case DA_CCB_PROBE_BLK_LIMITS: 3316 { 3317 struct scsi_vpd_block_limits *block_limits; 3318 3319 block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; 3320 3321 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 3322 uint32_t max_unmap_lba_cnt = scsi_4btoul( 3323 block_limits->max_unmap_lba_cnt); 3324 uint32_t max_unmap_blk_cnt = scsi_4btoul( 3325 block_limits->max_unmap_blk_cnt); 3326 uint64_t ws_max_blks = scsi_8btou64( 3327 block_limits->max_write_same_length); 3328 /* 3329 * We should already support UNMAP but we check lba 3330 * and block count to be sure 3331 */ 3332 if (max_unmap_lba_cnt != 0x00L && 3333 max_unmap_blk_cnt != 0x00L) { 3334 softc->unmap_max_lba = max_unmap_lba_cnt; 3335 softc->unmap_max_ranges = min(max_unmap_blk_cnt, 3336 UNMAP_MAX_RANGES); 3337 } else { 3338 /* 3339 * Unexpected UNMAP limits which means the 3340 * device doesn't actually support UNMAP 3341 */ 3342 dadeleteflag(softc, DA_DELETE_UNMAP, 0); 3343 } 3344 3345 if (ws_max_blks != 0x00L) 3346 softc->ws_max_blks = ws_max_blks; 3347 } else { 3348 int error; 3349 error = daerror(done_ccb, CAM_RETRY_SELTO, 3350 SF_RETRY_UA|SF_NO_PRINT); 3351 if (error == ERESTART) 3352 return; 3353 else if (error != 0) { 3354 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 3355 /* Don't wedge this device's queue */ 3356 cam_release_devq(done_ccb->ccb_h.path, 3357 /*relsim_flags*/0, 3358 /*reduction*/0, 3359 /*timeout*/0, 3360 /*getcount_only*/0); 3361 } 3362 3363 /* 3364 * Failure here doesn't mean UNMAP is not 3365 * supported as this is an optional page. 3366 */ 3367 softc->unmap_max_lba = 1; 3368 softc->unmap_max_ranges = 1; 3369 } 3370 } 3371 3372 free(block_limits, M_SCSIDA); 3373 xpt_release_ccb(done_ccb); 3374 softc->state = DA_STATE_PROBE_BDC; 3375 xpt_schedule(periph, priority); 3376 return; 3377 } 3378 case DA_CCB_PROBE_BDC: 3379 { 3380 struct scsi_vpd_block_characteristics *bdc; 3381 3382 bdc = (struct scsi_vpd_block_characteristics *)csio->data_ptr; 3383 3384 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 3385 /* 3386 * Disable queue sorting for non-rotational media 3387 * by default. 3388 */ 3389 if (scsi_2btoul(bdc->medium_rotation_rate) == 3390 SVPD_BDC_RATE_NONE_ROTATING) 3391 softc->sort_io_queue = 0; 3392 } else { 3393 int error; 3394 error = daerror(done_ccb, CAM_RETRY_SELTO, 3395 SF_RETRY_UA|SF_NO_PRINT); 3396 if (error == ERESTART) 3397 return; 3398 else if (error != 0) { 3399 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 3400 /* Don't wedge this device's queue */ 3401 cam_release_devq(done_ccb->ccb_h.path, 3402 /*relsim_flags*/0, 3403 /*reduction*/0, 3404 /*timeout*/0, 3405 /*getcount_only*/0); 3406 } 3407 } 3408 } 3409 3410 free(bdc, M_SCSIDA); 3411 xpt_release_ccb(done_ccb); 3412 softc->state = DA_STATE_PROBE_ATA; 3413 xpt_schedule(periph, priority); 3414 return; 3415 } 3416 case DA_CCB_PROBE_ATA: 3417 { 3418 int i; 3419 struct ata_params *ata_params; 3420 int16_t *ptr; 3421 3422 ata_params = (struct ata_params *)csio->data_ptr; 3423 ptr = (uint16_t *)ata_params; 3424 3425 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 3426 for (i = 0; i < sizeof(*ata_params) / 2; i++) 3427 ptr[i] = le16toh(ptr[i]); 3428 if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM) { 3429 dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1); 3430 if (ata_params->max_dsm_blocks != 0) 3431 softc->trim_max_ranges = min( 3432 softc->trim_max_ranges, 3433 ata_params->max_dsm_blocks * 3434 ATA_DSM_BLK_RANGES); 3435 } 3436 /* 3437 * Disable queue sorting for non-rotational media 3438 * by default. 3439 */ 3440 if (ata_params->media_rotation_rate == 1) 3441 softc->sort_io_queue = 0; 3442 } else { 3443 int error; 3444 error = daerror(done_ccb, CAM_RETRY_SELTO, 3445 SF_RETRY_UA|SF_NO_PRINT); 3446 if (error == ERESTART) 3447 return; 3448 else if (error != 0) { 3449 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 3450 /* Don't wedge this device's queue */ 3451 cam_release_devq(done_ccb->ccb_h.path, 3452 /*relsim_flags*/0, 3453 /*reduction*/0, 3454 /*timeout*/0, 3455 /*getcount_only*/0); 3456 } 3457 } 3458 } 3459 3460 free(ata_params, M_SCSIDA); 3461 daprobedone(periph, done_ccb); 3462 return; 3463 } 3464 case DA_CCB_WAITING: 3465 { 3466 /* Caller will release the CCB */ 3467 wakeup(&done_ccb->ccb_h.cbfcnp); 3468 return; 3469 } 3470 case DA_CCB_DUMP: 3471 /* No-op. We're polling */ 3472 return; 3473 case DA_CCB_TUR: 3474 { 3475 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3476 3477 if (daerror(done_ccb, CAM_RETRY_SELTO, 3478 SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == 3479 ERESTART) 3480 return; 3481 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 3482 cam_release_devq(done_ccb->ccb_h.path, 3483 /*relsim_flags*/0, 3484 /*reduction*/0, 3485 /*timeout*/0, 3486 /*getcount_only*/0); 3487 } 3488 xpt_release_ccb(done_ccb); 3489 cam_periph_release_locked(periph); 3490 return; 3491 } 3492 default: 3493 break; 3494 } 3495 xpt_release_ccb(done_ccb); 3496 } 3497 3498 static void 3499 dareprobe(struct cam_periph *periph) 3500 { 3501 struct da_softc *softc; 3502 cam_status status; 3503 3504 softc = (struct da_softc *)periph->softc; 3505 3506 /* Probe in progress; don't interfere. */ 3507 if (softc->state != DA_STATE_NORMAL) 3508 return; 3509 3510 status = cam_periph_acquire(periph); 3511 KASSERT(status == CAM_REQ_CMP, 3512 ("dareprobe: cam_periph_acquire failed")); 3513 3514 if (softc->flags & DA_FLAG_CAN_RC16) 3515 softc->state = DA_STATE_PROBE_RC16; 3516 else 3517 softc->state = DA_STATE_PROBE_RC; 3518 3519 xpt_schedule(periph, CAM_PRIORITY_DEV); 3520 } 3521 3522 static int 3523 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 3524 { 3525 struct da_softc *softc; 3526 struct cam_periph *periph; 3527 int error, error_code, sense_key, asc, ascq; 3528 3529 periph = xpt_path_periph(ccb->ccb_h.path); 3530 softc = (struct da_softc *)periph->softc; 3531 3532 /* 3533 * Automatically detect devices that do not support 3534 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs. 3535 */ 3536 error = 0; 3537 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { 3538 error = cmd6workaround(ccb); 3539 } else if (scsi_extract_sense_ccb(ccb, 3540 &error_code, &sense_key, &asc, &ascq)) { 3541 if (sense_key == SSD_KEY_ILLEGAL_REQUEST) 3542 error = cmd6workaround(ccb); 3543 /* 3544 * If the target replied with CAPACITY DATA HAS CHANGED UA, 3545 * query the capacity and notify upper layers. 3546 */ 3547 else if (sense_key == SSD_KEY_UNIT_ATTENTION && 3548 asc == 0x2A && ascq == 0x09) { 3549 xpt_print(periph->path, "capacity data has changed\n"); 3550 dareprobe(periph); 3551 sense_flags |= SF_NO_PRINT; 3552 } else if (sense_key == SSD_KEY_UNIT_ATTENTION && 3553 asc == 0x28 && ascq == 0x00) 3554 disk_media_changed(softc->disk, M_NOWAIT); 3555 else if (sense_key == SSD_KEY_NOT_READY && 3556 asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { 3557 softc->flags |= DA_FLAG_PACK_INVALID; 3558 disk_media_gone(softc->disk, M_NOWAIT); 3559 } 3560 } 3561 if (error == ERESTART) 3562 return (ERESTART); 3563 3564 /* 3565 * XXX 3566 * Until we have a better way of doing pack validation, 3567 * don't treat UAs as errors. 3568 */ 3569 sense_flags |= SF_RETRY_UA; 3570 return(cam_periph_error(ccb, cam_flags, sense_flags, 3571 &softc->saved_ccb)); 3572 } 3573 3574 static void 3575 damediapoll(void *arg) 3576 { 3577 struct cam_periph *periph = arg; 3578 struct da_softc *softc = periph->softc; 3579 3580 if (!softc->tur && softc->outstanding_cmds == 0) { 3581 if (cam_periph_acquire(periph) == CAM_REQ_CMP) { 3582 softc->tur = 1; 3583 daschedule(periph); 3584 } 3585 } 3586 /* Queue us up again */ 3587 if (da_poll_period != 0) 3588 callout_schedule(&softc->mediapoll_c, da_poll_period * hz); 3589 } 3590 3591 static void 3592 daprevent(struct cam_periph *periph, int action) 3593 { 3594 struct da_softc *softc; 3595 union ccb *ccb; 3596 int error; 3597 3598 softc = (struct da_softc *)periph->softc; 3599 3600 if (((action == PR_ALLOW) 3601 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) 3602 || ((action == PR_PREVENT) 3603 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { 3604 return; 3605 } 3606 3607 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 3608 3609 scsi_prevent(&ccb->csio, 3610 /*retries*/1, 3611 /*cbcfp*/dadone, 3612 MSG_SIMPLE_Q_TAG, 3613 action, 3614 SSD_FULL_SIZE, 3615 5000); 3616 3617 error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO, 3618 SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat); 3619 3620 if (error == 0) { 3621 if (action == PR_ALLOW) 3622 softc->flags &= ~DA_FLAG_PACK_LOCKED; 3623 else 3624 softc->flags |= DA_FLAG_PACK_LOCKED; 3625 } 3626 3627 xpt_release_ccb(ccb); 3628 } 3629 3630 static void 3631 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, 3632 struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len) 3633 { 3634 struct ccb_calc_geometry ccg; 3635 struct da_softc *softc; 3636 struct disk_params *dp; 3637 u_int lbppbe, lalba; 3638 int error; 3639 3640 softc = (struct da_softc *)periph->softc; 3641 3642 dp = &softc->params; 3643 dp->secsize = block_len; 3644 dp->sectors = maxsector + 1; 3645 if (rcaplong != NULL) { 3646 lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; 3647 lalba = scsi_2btoul(rcaplong->lalba_lbp); 3648 lalba &= SRC16_LALBA_A; 3649 } else { 3650 lbppbe = 0; 3651 lalba = 0; 3652 } 3653 3654 if (lbppbe > 0) { 3655 dp->stripesize = block_len << lbppbe; 3656 dp->stripeoffset = (dp->stripesize - block_len * lalba) % 3657 dp->stripesize; 3658 } else if (softc->quirks & DA_Q_4K) { 3659 dp->stripesize = 4096; 3660 dp->stripeoffset = 0; 3661 } else { 3662 dp->stripesize = 0; 3663 dp->stripeoffset = 0; 3664 } 3665 /* 3666 * Have the controller provide us with a geometry 3667 * for this disk. The only time the geometry 3668 * matters is when we boot and the controller 3669 * is the only one knowledgeable enough to come 3670 * up with something that will make this a bootable 3671 * device. 3672 */ 3673 xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 3674 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; 3675 ccg.block_size = dp->secsize; 3676 ccg.volume_size = dp->sectors; 3677 ccg.heads = 0; 3678 ccg.secs_per_track = 0; 3679 ccg.cylinders = 0; 3680 xpt_action((union ccb*)&ccg); 3681 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3682 /* 3683 * We don't know what went wrong here- but just pick 3684 * a geometry so we don't have nasty things like divide 3685 * by zero. 3686 */ 3687 dp->heads = 255; 3688 dp->secs_per_track = 255; 3689 dp->cylinders = dp->sectors / (255 * 255); 3690 if (dp->cylinders == 0) { 3691 dp->cylinders = 1; 3692 } 3693 } else { 3694 dp->heads = ccg.heads; 3695 dp->secs_per_track = ccg.secs_per_track; 3696 dp->cylinders = ccg.cylinders; 3697 } 3698 3699 /* 3700 * If the user supplied a read capacity buffer, and if it is 3701 * different than the previous buffer, update the data in the EDT. 3702 * If it's the same, we don't bother. This avoids sending an 3703 * update every time someone opens this device. 3704 */ 3705 if ((rcaplong != NULL) 3706 && (bcmp(rcaplong, &softc->rcaplong, 3707 min(sizeof(softc->rcaplong), rcap_len)) != 0)) { 3708 struct ccb_dev_advinfo cdai; 3709 3710 xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 3711 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 3712 cdai.buftype = CDAI_TYPE_RCAPLONG; 3713 cdai.flags |= CDAI_FLAG_STORE; 3714 cdai.bufsiz = rcap_len; 3715 cdai.buf = (uint8_t *)rcaplong; 3716 xpt_action((union ccb *)&cdai); 3717 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 3718 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 3719 if (cdai.ccb_h.status != CAM_REQ_CMP) { 3720 xpt_print(periph->path, "%s: failed to set read " 3721 "capacity advinfo\n", __func__); 3722 /* Use cam_error_print() to decode the status */ 3723 cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS, 3724 CAM_EPF_ALL); 3725 } else { 3726 bcopy(rcaplong, &softc->rcaplong, 3727 min(sizeof(softc->rcaplong), rcap_len)); 3728 } 3729 } 3730 3731 softc->disk->d_sectorsize = softc->params.secsize; 3732 softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors; 3733 softc->disk->d_stripesize = softc->params.stripesize; 3734 softc->disk->d_stripeoffset = softc->params.stripeoffset; 3735 /* XXX: these are not actually "firmware" values, so they may be wrong */ 3736 softc->disk->d_fwsectors = softc->params.secs_per_track; 3737 softc->disk->d_fwheads = softc->params.heads; 3738 softc->disk->d_devstat->block_size = softc->params.secsize; 3739 softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; 3740 3741 error = disk_resize(softc->disk, M_NOWAIT); 3742 if (error != 0) 3743 xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error); 3744 } 3745 3746 static void 3747 dasendorderedtag(void *arg) 3748 { 3749 struct da_softc *softc = arg; 3750 3751 if (da_send_ordered) { 3752 if ((softc->ordered_tag_count == 0) 3753 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) { 3754 softc->flags |= DA_FLAG_NEED_OTAG; 3755 } 3756 if (softc->outstanding_cmds > 0) 3757 softc->flags &= ~DA_FLAG_WENT_IDLE; 3758 3759 softc->ordered_tag_count = 0; 3760 } 3761 /* Queue us up again */ 3762 callout_reset(&softc->sendordered_c, 3763 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, 3764 dasendorderedtag, softc); 3765 } 3766 3767 /* 3768 * Step through all DA peripheral drivers, and if the device is still open, 3769 * sync the disk cache to physical media. 3770 */ 3771 static void 3772 dashutdown(void * arg, int howto) 3773 { 3774 struct cam_periph *periph; 3775 struct da_softc *softc; 3776 union ccb *ccb; 3777 int error; 3778 3779 CAM_PERIPH_FOREACH(periph, &dadriver) { 3780 softc = (struct da_softc *)periph->softc; 3781 if (SCHEDULER_STOPPED()) { 3782 /* If we paniced with the lock held, do not recurse. */ 3783 if (!cam_periph_owned(periph) && 3784 (softc->flags & DA_FLAG_OPEN)) { 3785 dadump(softc->disk, NULL, 0, 0, 0); 3786 } 3787 continue; 3788 } 3789 cam_periph_lock(periph); 3790 3791 /* 3792 * We only sync the cache if the drive is still open, and 3793 * if the drive is capable of it.. 3794 */ 3795 if (((softc->flags & DA_FLAG_OPEN) == 0) 3796 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) { 3797 cam_periph_unlock(periph); 3798 continue; 3799 } 3800 3801 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 3802 scsi_synchronize_cache(&ccb->csio, 3803 /*retries*/0, 3804 /*cbfcnp*/dadone, 3805 MSG_SIMPLE_Q_TAG, 3806 /*begin_lba*/0, /* whole disk */ 3807 /*lb_count*/0, 3808 SSD_FULL_SIZE, 3809 60 * 60 * 1000); 3810 3811 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, 3812 /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, 3813 softc->disk->d_devstat); 3814 if (error != 0) 3815 xpt_print(periph->path, "Synchronize cache failed\n"); 3816 xpt_release_ccb(ccb); 3817 cam_periph_unlock(periph); 3818 } 3819 } 3820 3821 #else /* !_KERNEL */ 3822 3823 /* 3824 * XXX This is only left out of the kernel build to silence warnings. If, 3825 * for some reason this function is used in the kernel, the ifdefs should 3826 * be moved so it is included both in the kernel and userland. 3827 */ 3828 void 3829 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries, 3830 void (*cbfcnp)(struct cam_periph *, union ccb *), 3831 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave, 3832 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, 3833 u_int32_t timeout) 3834 { 3835 struct scsi_format_unit *scsi_cmd; 3836 3837 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes; 3838 scsi_cmd->opcode = FORMAT_UNIT; 3839 scsi_cmd->byte2 = byte2; 3840 scsi_ulto2b(ileave, scsi_cmd->interleave); 3841 3842 cam_fill_csio(csio, 3843 retries, 3844 cbfcnp, 3845 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, 3846 tag_action, 3847 data_ptr, 3848 dxfer_len, 3849 sense_len, 3850 sizeof(*scsi_cmd), 3851 timeout); 3852 } 3853 3854 #endif /* _KERNEL */ 3855