1 /*- 2 * Implementation of SCSI Direct Access Peripheral driver for CAM. 3 * 4 * Copyright (c) 1997 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 34 #ifdef _KERNEL 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/bio.h> 38 #include <sys/sysctl.h> 39 #include <sys/taskqueue.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/conf.h> 43 #include <sys/devicestat.h> 44 #include <sys/eventhandler.h> 45 #include <sys/malloc.h> 46 #include <sys/cons.h> 47 #include <sys/endian.h> 48 #include <sys/proc.h> 49 #include <sys/sbuf.h> 50 #include <geom/geom.h> 51 #include <geom/geom_disk.h> 52 #endif /* _KERNEL */ 53 54 #ifndef _KERNEL 55 #include <stdio.h> 56 #include <string.h> 57 #endif /* _KERNEL */ 58 59 #include <cam/cam.h> 60 #include <cam/cam_ccb.h> 61 #include <cam/cam_periph.h> 62 #include <cam/cam_xpt_periph.h> 63 #include <cam/cam_sim.h> 64 #include <cam/cam_iosched.h> 65 66 #include <cam/scsi/scsi_message.h> 67 #include <cam/scsi/scsi_da.h> 68 69 #ifdef _KERNEL 70 /* 71 * Note that there are probe ordering dependencies here. The order isn't 72 * controlled by this enumeration, but by explicit state transitions in 73 * dastart() and dadone(). Here are some of the dependencies: 74 * 75 * 1. RC should come first, before RC16, unless there is evidence that RC16 76 * is supported. 77 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe. 78 * 3. 
The ATA probes should go in this order:
 *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
 */
/*
 * Softc state machine.  Each DA_STATE_PROBE_* value names the probe step
 * currently outstanding; DA_STATE_NORMAL is entered once probing finishes.
 * Per the note above, transitions are made explicitly in dastart()/dadone(),
 * not by the declaration order of this enumeration.
 */
typedef enum {
	DA_STATE_PROBE_RC,
	DA_STATE_PROBE_RC16,
	DA_STATE_PROBE_LBP,
	DA_STATE_PROBE_BLK_LIMITS,
	DA_STATE_PROBE_BDC,
	DA_STATE_PROBE_ATA,
	DA_STATE_PROBE_ATA_LOGDIR,
	DA_STATE_PROBE_ATA_IDDIR,
	DA_STATE_PROBE_ATA_SUP,
	DA_STATE_PROBE_ATA_ZONE,
	DA_STATE_PROBE_ZONE,
	DA_STATE_NORMAL
} da_state;

/*
 * Per-device flag bits kept in the softc.
 * NOTE: bit 0x000010 is currently unassigned (gap between PACK_REMOVABLE
 * and NEED_OTAG).
 */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x000001,
	DA_FLAG_NEW_PACK	= 0x000002,
	DA_FLAG_PACK_LOCKED	= 0x000004,
	DA_FLAG_PACK_REMOVABLE	= 0x000008,
	DA_FLAG_NEED_OTAG	= 0x000020,
	DA_FLAG_WAS_OTAG	= 0x000040,
	DA_FLAG_RETRY_UA	= 0x000080,
	DA_FLAG_OPEN		= 0x000100,
	DA_FLAG_SCTX_INIT	= 0x000200,
	DA_FLAG_CAN_RC16	= 0x000400,
	DA_FLAG_PROBED		= 0x000800,
	DA_FLAG_DIRTY		= 0x001000,
	DA_FLAG_ANNOUNCED	= 0x002000,
	DA_FLAG_CAN_ATA_DMA	= 0x004000,
	DA_FLAG_CAN_ATA_LOG	= 0x008000,
	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,
	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,
	DA_FLAG_CAN_ATA_ZONE	= 0x040000
} da_flags;

/*
 * Per-device quirk bits, matched against the inquiry data via
 * da_quirk_table below.
 */
typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02,
	DA_Q_NO_PREVENT		= 0x04,
	DA_Q_4K			= 0x08,
	DA_Q_NO_RC16		= 0x10,
	DA_Q_NO_UNMAP		= 0x20,
	DA_Q_RETRY_BUSY		= 0x40,
	DA_Q_SMR_DM		= 0x80,
	DA_Q_STRICT_UNMAP	= 0x100
} da_quirks;

/* %b-style bit description string for printing da_quirks values. */
#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"

/*
 * CCB state stored in the per-CCB private area (see the ccb_state define
 * below).  The low bits (DA_CCB_TYPE_MASK) identify what kind of command
 * the CCB carries; DA_CCB_RETRY_UA is OR'd in as a modifier flag.
 * NOTE: 0x08 and 0x09 are currently unassigned.
 */
typedef enum {
	DA_CCB_PROBE_RC		= 0x01,
	DA_CCB_PROBE_RC16	= 0x02,
	DA_CCB_PROBE_LBP	= 0x03,
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
	DA_CCB_PROBE_BDC	= 0x05,
	DA_CCB_PROBE_ATA	= 0x06,
	DA_CCB_BUFFER_IO	= 0x07,
	DA_CCB_DUMP		= 0x0A,
	DA_CCB_DELETE		= 0x0B,
	DA_CCB_TUR		= 0x0C,
	DA_CCB_PROBE_ZONE	= 0x0D,
	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
	DA_CCB_PROBE_ATA_SUP	= 0x10,
	DA_CCB_PROBE_ATA_ZONE	= 0x11,
	DA_CCB_TYPE_MASK	= 0x1F,
	DA_CCB_RETRY_UA		= 0x20
} da_ccb_state;

/*
 * Order here is important for method choice
 *
 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes
 * using ATA_TRIM than the corresponding UNMAP results for a real world mysql
 * import taking 5mins.
 *
 */
typedef enum {
	DA_DELETE_NONE,
	DA_DELETE_DISABLE,
	DA_DELETE_ATA_TRIM,
	DA_DELETE_UNMAP,
	DA_DELETE_WS16,
	DA_DELETE_WS10,
	DA_DELETE_ZERO,
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,	/* first real delete method */
	DA_DELETE_MAX = DA_DELETE_ZERO		/* last real delete method */
} da_delete_methods;

/*
 * For SCSI, host managed drives show up as a separate device type.  For
 * ATA, host managed drives also have a different device signature.
 * XXX KDM figure out the ATA host managed signature.
 */
typedef enum {
	DA_ZONE_NONE		= 0x00,
	DA_ZONE_DRIVE_MANAGED	= 0x01,
	DA_ZONE_HOST_AWARE	= 0x02,
	DA_ZONE_HOST_MANAGED	= 0x03
} da_zone_mode;

/*
 * We distinguish between these interface cases in addition to the drive type:
 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
 * o ATA drive behind a SCSI translation layer that does not know about
 *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
 *   case, we would need to share the ATA code with the ada(4) driver.
 * o SCSI drive.
202 */ 203 typedef enum { 204 DA_ZONE_IF_SCSI, 205 DA_ZONE_IF_ATA_PASS, 206 DA_ZONE_IF_ATA_SAT, 207 } da_zone_interface; 208 209 typedef enum { 210 DA_ZONE_FLAG_RZ_SUP = 0x0001, 211 DA_ZONE_FLAG_OPEN_SUP = 0x0002, 212 DA_ZONE_FLAG_CLOSE_SUP = 0x0004, 213 DA_ZONE_FLAG_FINISH_SUP = 0x0008, 214 DA_ZONE_FLAG_RWP_SUP = 0x0010, 215 DA_ZONE_FLAG_SUP_MASK = (DA_ZONE_FLAG_RZ_SUP | 216 DA_ZONE_FLAG_OPEN_SUP | 217 DA_ZONE_FLAG_CLOSE_SUP | 218 DA_ZONE_FLAG_FINISH_SUP | 219 DA_ZONE_FLAG_RWP_SUP), 220 DA_ZONE_FLAG_URSWRZ = 0x0020, 221 DA_ZONE_FLAG_OPT_SEQ_SET = 0x0040, 222 DA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080, 223 DA_ZONE_FLAG_MAX_SEQ_SET = 0x0100, 224 DA_ZONE_FLAG_SET_MASK = (DA_ZONE_FLAG_OPT_SEQ_SET | 225 DA_ZONE_FLAG_OPT_NONSEQ_SET | 226 DA_ZONE_FLAG_MAX_SEQ_SET) 227 } da_zone_flags; 228 229 static struct da_zone_desc { 230 da_zone_flags value; 231 const char *desc; 232 } da_zone_desc_table[] = { 233 {DA_ZONE_FLAG_RZ_SUP, "Report Zones" }, 234 {DA_ZONE_FLAG_OPEN_SUP, "Open" }, 235 {DA_ZONE_FLAG_CLOSE_SUP, "Close" }, 236 {DA_ZONE_FLAG_FINISH_SUP, "Finish" }, 237 {DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" }, 238 }; 239 240 typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb, 241 struct bio *bp); 242 static da_delete_func_t da_delete_trim; 243 static da_delete_func_t da_delete_unmap; 244 static da_delete_func_t da_delete_ws; 245 246 static const void * da_delete_functions[] = { 247 NULL, 248 NULL, 249 da_delete_trim, 250 da_delete_unmap, 251 da_delete_ws, 252 da_delete_ws, 253 da_delete_ws 254 }; 255 256 static const char *da_delete_method_names[] = 257 { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" }; 258 static const char *da_delete_method_desc[] = 259 { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP", 260 "WRITE SAME(10) with UNMAP", "ZERO" }; 261 262 /* Offsets into our private area for storing information */ 263 #define ccb_state ppriv_field0 264 #define ccb_bp ppriv_ptr1 265 266 struct disk_params { 267 u_int8_t 
heads; 268 u_int32_t cylinders; 269 u_int8_t secs_per_track; 270 u_int32_t secsize; /* Number of bytes/sector */ 271 u_int64_t sectors; /* total number sectors */ 272 u_int stripesize; 273 u_int stripeoffset; 274 }; 275 276 #define UNMAP_RANGE_MAX 0xffffffff 277 #define UNMAP_HEAD_SIZE 8 278 #define UNMAP_RANGE_SIZE 16 279 #define UNMAP_MAX_RANGES 2048 /* Protocol Max is 4095 */ 280 #define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \ 281 UNMAP_HEAD_SIZE) 282 283 #define WS10_MAX_BLKS 0xffff 284 #define WS16_MAX_BLKS 0xffffffff 285 #define ATA_TRIM_MAX_RANGES ((UNMAP_BUF_SIZE / \ 286 (ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE) 287 288 #define DA_WORK_TUR (1 << 16) 289 290 struct da_softc { 291 struct cam_iosched_softc *cam_iosched; 292 struct bio_queue_head delete_run_queue; 293 LIST_HEAD(, ccb_hdr) pending_ccbs; 294 int refcount; /* Active xpt_action() calls */ 295 da_state state; 296 da_flags flags; 297 da_quirks quirks; 298 int minimum_cmd_size; 299 int error_inject; 300 int trim_max_ranges; 301 int delete_available; /* Delete methods possibly available */ 302 da_zone_mode zone_mode; 303 da_zone_interface zone_interface; 304 da_zone_flags zone_flags; 305 struct ata_gp_log_dir ata_logdir; 306 int valid_logdir_len; 307 struct ata_identify_log_pages ata_iddir; 308 int valid_iddir_len; 309 uint64_t optimal_seq_zones; 310 uint64_t optimal_nonseq_zones; 311 uint64_t max_seq_zones; 312 u_int maxio; 313 uint32_t unmap_max_ranges; 314 uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */ 315 uint32_t unmap_gran; 316 uint32_t unmap_gran_align; 317 uint64_t ws_max_blks; 318 da_delete_methods delete_method_pref; 319 da_delete_methods delete_method; 320 da_delete_func_t *delete_func; 321 int unmappedio; 322 int rotating; 323 struct disk_params params; 324 struct disk *disk; 325 union ccb saved_ccb; 326 struct task sysctl_task; 327 struct sysctl_ctx_list sysctl_ctx; 328 struct sysctl_oid *sysctl_tree; 329 struct callout sendordered_c; 330 uint64_t 
wwpn; 331 uint8_t unmap_buf[UNMAP_BUF_SIZE]; 332 struct scsi_read_capacity_data_long rcaplong; 333 struct callout mediapoll_c; 334 #ifdef CAM_IO_STATS 335 struct sysctl_ctx_list sysctl_stats_ctx; 336 struct sysctl_oid *sysctl_stats_tree; 337 u_int errors; 338 u_int timeouts; 339 u_int invalidations; 340 #endif 341 }; 342 343 #define dadeleteflag(softc, delete_method, enable) \ 344 if (enable) { \ 345 softc->delete_available |= (1 << delete_method); \ 346 } else { \ 347 softc->delete_available &= ~(1 << delete_method); \ 348 } 349 350 struct da_quirk_entry { 351 struct scsi_inquiry_pattern inq_pat; 352 da_quirks quirks; 353 }; 354 355 static const char quantum[] = "QUANTUM"; 356 static const char microp[] = "MICROP"; 357 358 static struct da_quirk_entry da_quirk_table[] = 359 { 360 /* SPI, FC devices */ 361 { 362 /* 363 * Fujitsu M2513A MO drives. 364 * Tested devices: M2513A2 firmware versions 1200 & 1300. 365 * (dip switch selects whether T_DIRECT or T_OPTICAL device) 366 * Reported by: W.Scholten <whs@xs4all.nl> 367 */ 368 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, 369 /*quirks*/ DA_Q_NO_SYNC_CACHE 370 }, 371 { 372 /* See above. */ 373 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, 374 /*quirks*/ DA_Q_NO_SYNC_CACHE 375 }, 376 { 377 /* 378 * This particular Fujitsu drive doesn't like the 379 * synchronize cache command. 380 * Reported by: Tom Jackson <toj@gorilla.net> 381 */ 382 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, 383 /*quirks*/ DA_Q_NO_SYNC_CACHE 384 }, 385 { 386 /* 387 * This drive doesn't like the synchronize cache command 388 * either. Reported by: Matthew Jacob <mjacob@feral.com> 389 * in NetBSD PR kern/6027, August 24, 1998. 390 */ 391 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, 392 /*quirks*/ DA_Q_NO_SYNC_CACHE 393 }, 394 { 395 /* 396 * This drive doesn't like the synchronize cache command 397 * either. Reported by: Hellmuth Michaelis (hm@kts.org) 398 * (PR 8882). 
399 */ 400 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, 401 /*quirks*/ DA_Q_NO_SYNC_CACHE 402 }, 403 { 404 /* 405 * Doesn't like the synchronize cache command. 406 * Reported by: Blaz Zupan <blaz@gold.amis.net> 407 */ 408 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, 409 /*quirks*/ DA_Q_NO_SYNC_CACHE 410 }, 411 { 412 /* 413 * Doesn't like the synchronize cache command. 414 * Reported by: Blaz Zupan <blaz@gold.amis.net> 415 */ 416 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, 417 /*quirks*/ DA_Q_NO_SYNC_CACHE 418 }, 419 { 420 /* 421 * Doesn't like the synchronize cache command. 422 */ 423 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, 424 /*quirks*/ DA_Q_NO_SYNC_CACHE 425 }, 426 { 427 /* 428 * Doesn't like the synchronize cache command. 429 * Reported by: walter@pelissero.de 430 */ 431 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, 432 /*quirks*/ DA_Q_NO_SYNC_CACHE 433 }, 434 { 435 /* 436 * Doesn't work correctly with 6 byte reads/writes. 437 * Returns illegal request, and points to byte 9 of the 438 * 6-byte CDB. 439 * Reported by: Adam McDougall <bsdx@spawnet.com> 440 */ 441 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, 442 /*quirks*/ DA_Q_NO_6_BYTE 443 }, 444 { 445 /* See above. */ 446 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, 447 /*quirks*/ DA_Q_NO_6_BYTE 448 }, 449 { 450 /* 451 * Doesn't like the synchronize cache command. 452 * Reported by: walter@pelissero.de 453 */ 454 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, 455 /*quirks*/ DA_Q_NO_SYNC_CACHE 456 }, 457 { 458 /* 459 * The CISS RAID controllers do not support SYNC_CACHE 460 */ 461 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, 462 /*quirks*/ DA_Q_NO_SYNC_CACHE 463 }, 464 { 465 /* 466 * The STEC SSDs sometimes hang on UNMAP. 
467 */ 468 {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, 469 /*quirks*/ DA_Q_NO_UNMAP 470 }, 471 { 472 /* 473 * VMware returns BUSY status when storage has transient 474 * connectivity problems, so better wait. 475 * Also VMware returns odd errors on misaligned UNMAPs. 476 */ 477 {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, 478 /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP 479 }, 480 /* USB mass storage devices supported by umass(4) */ 481 { 482 /* 483 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player 484 * PR: kern/51675 485 */ 486 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, 487 /*quirks*/ DA_Q_NO_SYNC_CACHE 488 }, 489 { 490 /* 491 * Power Quotient Int. (PQI) USB flash key 492 * PR: kern/53067 493 */ 494 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", 495 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 496 }, 497 { 498 /* 499 * Creative Nomad MUVO mp3 player (USB) 500 * PR: kern/53094 501 */ 502 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, 503 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 504 }, 505 { 506 /* 507 * Jungsoft NEXDISK USB flash key 508 * PR: kern/54737 509 */ 510 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, 511 /*quirks*/ DA_Q_NO_SYNC_CACHE 512 }, 513 { 514 /* 515 * FreeDik USB Mini Data Drive 516 * PR: kern/54786 517 */ 518 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", 519 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 520 }, 521 { 522 /* 523 * Sigmatel USB Flash MP3 Player 524 * PR: kern/57046 525 */ 526 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, 527 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 528 }, 529 { 530 /* 531 * Neuros USB Digital Audio Computer 532 * PR: kern/63645 533 */ 534 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. 
audio comp.", 535 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 536 }, 537 { 538 /* 539 * SEAGRAND NP-900 MP3 Player 540 * PR: kern/64563 541 */ 542 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, 543 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 544 }, 545 { 546 /* 547 * iRiver iFP MP3 player (with UMS Firmware) 548 * PR: kern/54881, i386/63941, kern/66124 549 */ 550 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, 551 /*quirks*/ DA_Q_NO_SYNC_CACHE 552 }, 553 { 554 /* 555 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 556 * PR: kern/70158 557 */ 558 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, 559 /*quirks*/ DA_Q_NO_SYNC_CACHE 560 }, 561 { 562 /* 563 * ZICPlay USB MP3 Player with FM 564 * PR: kern/75057 565 */ 566 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, 567 /*quirks*/ DA_Q_NO_SYNC_CACHE 568 }, 569 { 570 /* 571 * TEAC USB floppy mechanisms 572 */ 573 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, 574 /*quirks*/ DA_Q_NO_SYNC_CACHE 575 }, 576 { 577 /* 578 * Kingston DataTraveler II+ USB Pen-Drive. 579 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org> 580 */ 581 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", 582 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 583 }, 584 { 585 /* 586 * USB DISK Pro PMAP 587 * Reported by: jhs 588 * PR: usb/96381 589 */ 590 {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, 591 /*quirks*/ DA_Q_NO_SYNC_CACHE 592 }, 593 { 594 /* 595 * Motorola E398 Mobile Phone (TransFlash memory card). 596 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl> 597 * PR: usb/89889 598 */ 599 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", 600 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 601 }, 602 { 603 /* 604 * Qware BeatZkey! 
Pro 605 * PR: usb/79164 606 */ 607 {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", 608 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 609 }, 610 { 611 /* 612 * Time DPA20B 1GB MP3 Player 613 * PR: usb/81846 614 */ 615 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", 616 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 617 }, 618 { 619 /* 620 * Samsung USB key 128Mb 621 * PR: usb/90081 622 */ 623 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", 624 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 625 }, 626 { 627 /* 628 * Kingston DataTraveler 2.0 USB Flash memory. 629 * PR: usb/89196 630 */ 631 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", 632 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 633 }, 634 { 635 /* 636 * Creative MUVO Slim mp3 player (USB) 637 * PR: usb/86131 638 */ 639 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", 640 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 641 }, 642 { 643 /* 644 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) 645 * PR: usb/80487 646 */ 647 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", 648 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 649 }, 650 { 651 /* 652 * SanDisk Micro Cruzer 128MB 653 * PR: usb/75970 654 */ 655 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", 656 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 657 }, 658 { 659 /* 660 * TOSHIBA TransMemory USB sticks 661 * PR: kern/94660 662 */ 663 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", 664 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 665 }, 666 { 667 /* 668 * PNY USB 3.0 Flash Drives 669 */ 670 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*", 671 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16 672 }, 673 { 674 /* 675 * PNY USB Flash keys 676 * PR: usb/75578, usb/72344, usb/65436 677 */ 678 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", 679 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 680 }, 681 { 682 /* 683 * Genesys 6-in-1 Card Reader 684 * PR: usb/94647 685 */ 686 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE 
DEVICE*", 687 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 688 }, 689 { 690 /* 691 * Rekam Digital CAMERA 692 * PR: usb/98713 693 */ 694 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", 695 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 696 }, 697 { 698 /* 699 * iRiver H10 MP3 player 700 * PR: usb/102547 701 */ 702 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", 703 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 704 }, 705 { 706 /* 707 * iRiver U10 MP3 player 708 * PR: usb/92306 709 */ 710 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", 711 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 712 }, 713 { 714 /* 715 * X-Micro Flash Disk 716 * PR: usb/96901 717 */ 718 {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", 719 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 720 }, 721 { 722 /* 723 * EasyMP3 EM732X USB 2.0 Flash MP3 Player 724 * PR: usb/96546 725 */ 726 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", 727 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 728 }, 729 { 730 /* 731 * Denver MP3 player 732 * PR: usb/107101 733 */ 734 {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", 735 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 736 }, 737 { 738 /* 739 * Philips USB Key Audio KEY013 740 * PR: usb/68412 741 */ 742 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, 743 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 744 }, 745 { 746 /* 747 * JNC MP3 Player 748 * PR: usb/94439 749 */ 750 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", 751 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 752 }, 753 { 754 /* 755 * SAMSUNG MP0402H 756 * PR: usb/108427 757 */ 758 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, 759 /*quirks*/ DA_Q_NO_SYNC_CACHE 760 }, 761 { 762 /* 763 * I/O Magic USB flash - Giga Bank 764 * PR: usb/108810 765 */ 766 {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, 767 /*quirks*/ DA_Q_NO_SYNC_CACHE 768 }, 769 { 770 /* 771 * JoyFly 128mb USB Flash Drive 772 * PR: 96133 773 */ 774 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", 775 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 776 }, 
777 { 778 /* 779 * ChipsBnk usb stick 780 * PR: 103702 781 */ 782 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", 783 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 784 }, 785 { 786 /* 787 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A 788 * PR: 129858 789 */ 790 {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", 791 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 792 }, 793 { 794 /* 795 * Samsung YP-U3 mp3-player 796 * PR: 125398 797 */ 798 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", 799 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 800 }, 801 { 802 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", 803 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 804 }, 805 { 806 /* 807 * Sony Cyber-Shot DSC cameras 808 * PR: usb/137035 809 */ 810 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, 811 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 812 }, 813 { 814 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", 815 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT 816 }, 817 { 818 /* At least several Transcent USB sticks lie on RC16. */ 819 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", 820 "*"}, /*quirks*/ DA_Q_NO_RC16 821 }, 822 { 823 /* 824 * I-O Data USB Flash Disk 825 * PR: usb/211716 826 */ 827 {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*", 828 "*"}, /*quirks*/ DA_Q_NO_RC16 829 }, 830 /* ATA/SATA devices over SAS/USB/... 
*/ 831 { 832 /* Hitachi Advanced Format (4k) drives */ 833 { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, 834 /*quirks*/DA_Q_4K 835 }, 836 { 837 /* Micron Advanced Format (4k) drives */ 838 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" }, 839 /*quirks*/DA_Q_4K 840 }, 841 { 842 /* Samsung Advanced Format (4k) drives */ 843 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, 844 /*quirks*/DA_Q_4K 845 }, 846 { 847 /* Samsung Advanced Format (4k) drives */ 848 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, 849 /*quirks*/DA_Q_4K 850 }, 851 { 852 /* Samsung Advanced Format (4k) drives */ 853 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, 854 /*quirks*/DA_Q_4K 855 }, 856 { 857 /* Samsung Advanced Format (4k) drives */ 858 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, 859 /*quirks*/DA_Q_4K 860 }, 861 { 862 /* Seagate Barracuda Green Advanced Format (4k) drives */ 863 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, 864 /*quirks*/DA_Q_4K 865 }, 866 { 867 /* Seagate Barracuda Green Advanced Format (4k) drives */ 868 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, 869 /*quirks*/DA_Q_4K 870 }, 871 { 872 /* Seagate Barracuda Green Advanced Format (4k) drives */ 873 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, 874 /*quirks*/DA_Q_4K 875 }, 876 { 877 /* Seagate Barracuda Green Advanced Format (4k) drives */ 878 { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, 879 /*quirks*/DA_Q_4K 880 }, 881 { 882 /* Seagate Barracuda Green Advanced Format (4k) drives */ 883 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, 884 /*quirks*/DA_Q_4K 885 }, 886 { 887 /* Seagate Barracuda Green Advanced Format (4k) drives */ 888 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, 889 /*quirks*/DA_Q_4K 890 }, 891 { 892 /* Seagate Momentus Advanced Format (4k) drives */ 893 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, 894 /*quirks*/DA_Q_4K 895 }, 896 { 897 /* Seagate 
Momentus Advanced Format (4k) drives */ 898 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, 899 /*quirks*/DA_Q_4K 900 }, 901 { 902 /* Seagate Momentus Advanced Format (4k) drives */ 903 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, 904 /*quirks*/DA_Q_4K 905 }, 906 { 907 /* Seagate Momentus Advanced Format (4k) drives */ 908 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, 909 /*quirks*/DA_Q_4K 910 }, 911 { 912 /* Seagate Momentus Advanced Format (4k) drives */ 913 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, 914 /*quirks*/DA_Q_4K 915 }, 916 { 917 /* Seagate Momentus Advanced Format (4k) drives */ 918 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, 919 /*quirks*/DA_Q_4K 920 }, 921 { 922 /* Seagate Momentus Advanced Format (4k) drives */ 923 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, 924 /*quirks*/DA_Q_4K 925 }, 926 { 927 /* Seagate Momentus Advanced Format (4k) drives */ 928 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, 929 /*quirks*/DA_Q_4K 930 }, 931 { 932 /* Seagate Momentus Advanced Format (4k) drives */ 933 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, 934 /*quirks*/DA_Q_4K 935 }, 936 { 937 /* Seagate Momentus Advanced Format (4k) drives */ 938 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, 939 /*quirks*/DA_Q_4K 940 }, 941 { 942 /* Seagate Momentus Advanced Format (4k) drives */ 943 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, 944 /*quirks*/DA_Q_4K 945 }, 946 { 947 /* Seagate Momentus Advanced Format (4k) drives */ 948 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, 949 /*quirks*/DA_Q_4K 950 }, 951 { 952 /* Seagate Momentus Advanced Format (4k) drives */ 953 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, 954 /*quirks*/DA_Q_4K 955 }, 956 { 957 /* Seagate Momentus Advanced Format (4k) drives */ 958 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, 959 /*quirks*/DA_Q_4K 960 }, 961 { 962 /* Seagate Momentus Thin Advanced Format 
(4k) drives */ 963 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, 964 /*quirks*/DA_Q_4K 965 }, 966 { 967 /* Seagate Momentus Thin Advanced Format (4k) drives */ 968 { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, 969 /*quirks*/DA_Q_4K 970 }, 971 { 972 /* WDC Caviar Green Advanced Format (4k) drives */ 973 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, 974 /*quirks*/DA_Q_4K 975 }, 976 { 977 /* WDC Caviar Green Advanced Format (4k) drives */ 978 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, 979 /*quirks*/DA_Q_4K 980 }, 981 { 982 /* WDC Caviar Green Advanced Format (4k) drives */ 983 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, 984 /*quirks*/DA_Q_4K 985 }, 986 { 987 /* WDC Caviar Green Advanced Format (4k) drives */ 988 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, 989 /*quirks*/DA_Q_4K 990 }, 991 { 992 /* WDC Caviar Green Advanced Format (4k) drives */ 993 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, 994 /*quirks*/DA_Q_4K 995 }, 996 { 997 /* WDC Caviar Green Advanced Format (4k) drives */ 998 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, 999 /*quirks*/DA_Q_4K 1000 }, 1001 { 1002 /* WDC Caviar Green Advanced Format (4k) drives */ 1003 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, 1004 /*quirks*/DA_Q_4K 1005 }, 1006 { 1007 /* WDC Caviar Green Advanced Format (4k) drives */ 1008 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, 1009 /*quirks*/DA_Q_4K 1010 }, 1011 { 1012 /* WDC Scorpio Black Advanced Format (4k) drives */ 1013 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, 1014 /*quirks*/DA_Q_4K 1015 }, 1016 { 1017 /* WDC Scorpio Black Advanced Format (4k) drives */ 1018 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, 1019 /*quirks*/DA_Q_4K 1020 }, 1021 { 1022 /* WDC Scorpio Black Advanced Format (4k) drives */ 1023 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, 1024 /*quirks*/DA_Q_4K 1025 }, 1026 { 1027 /* WDC Scorpio 
Black Advanced Format (4k) drives */ 1028 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, 1029 /*quirks*/DA_Q_4K 1030 }, 1031 { 1032 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1033 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, 1034 /*quirks*/DA_Q_4K 1035 }, 1036 { 1037 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1038 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, 1039 /*quirks*/DA_Q_4K 1040 }, 1041 { 1042 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1043 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, 1044 /*quirks*/DA_Q_4K 1045 }, 1046 { 1047 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1048 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, 1049 /*quirks*/DA_Q_4K 1050 }, 1051 { 1052 /* 1053 * Olympus FE-210 camera 1054 */ 1055 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", 1056 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1057 }, 1058 { 1059 /* 1060 * LG UP3S MP3 player 1061 */ 1062 {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", 1063 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1064 }, 1065 { 1066 /* 1067 * Laser MP3-2GA13 MP3 player 1068 */ 1069 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", 1070 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1071 }, 1072 { 1073 /* 1074 * LaCie external 250GB Hard drive des by Porsche 1075 * Submitted by: Ben Stuyts <ben@altesco.nl> 1076 * PR: 121474 1077 */ 1078 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, 1079 /*quirks*/ DA_Q_NO_SYNC_CACHE 1080 }, 1081 /* SATA SSDs */ 1082 { 1083 /* 1084 * Corsair Force 2 SSDs 1085 * 4k optimised & trim only works in 4k requests + 4k aligned 1086 */ 1087 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, 1088 /*quirks*/DA_Q_4K 1089 }, 1090 { 1091 /* 1092 * Corsair Force 3 SSDs 1093 * 4k optimised & trim only works in 4k requests + 4k aligned 1094 */ 1095 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, 1096 /*quirks*/DA_Q_4K 1097 }, 1098 { 1099 /* 1100 * Corsair Neutron GTX SSDs 1101 
* 4k optimised & trim only works in 4k requests + 4k aligned 1102 */ 1103 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, 1104 /*quirks*/DA_Q_4K 1105 }, 1106 { 1107 /* 1108 * Corsair Force GT & GS SSDs 1109 * 4k optimised & trim only works in 4k requests + 4k aligned 1110 */ 1111 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, 1112 /*quirks*/DA_Q_4K 1113 }, 1114 { 1115 /* 1116 * Crucial M4 SSDs 1117 * 4k optimised & trim only works in 4k requests + 4k aligned 1118 */ 1119 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, 1120 /*quirks*/DA_Q_4K 1121 }, 1122 { 1123 /* 1124 * Crucial RealSSD C300 SSDs 1125 * 4k optimised 1126 */ 1127 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", 1128 "*" }, /*quirks*/DA_Q_4K 1129 }, 1130 { 1131 /* 1132 * Intel 320 Series SSDs 1133 * 4k optimised & trim only works in 4k requests + 4k aligned 1134 */ 1135 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, 1136 /*quirks*/DA_Q_4K 1137 }, 1138 { 1139 /* 1140 * Intel 330 Series SSDs 1141 * 4k optimised & trim only works in 4k requests + 4k aligned 1142 */ 1143 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, 1144 /*quirks*/DA_Q_4K 1145 }, 1146 { 1147 /* 1148 * Intel 510 Series SSDs 1149 * 4k optimised & trim only works in 4k requests + 4k aligned 1150 */ 1151 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, 1152 /*quirks*/DA_Q_4K 1153 }, 1154 { 1155 /* 1156 * Intel 520 Series SSDs 1157 * 4k optimised & trim only works in 4k requests + 4k aligned 1158 */ 1159 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, 1160 /*quirks*/DA_Q_4K 1161 }, 1162 { 1163 /* 1164 * Intel S3610 Series SSDs 1165 * 4k optimised & trim only works in 4k requests + 4k aligned 1166 */ 1167 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" }, 1168 /*quirks*/DA_Q_4K 1169 }, 1170 { 1171 /* 1172 * Intel X25-M Series SSDs 1173 * 4k optimised & trim only works in 4k requests + 4k aligned 1174 */ 1175 { T_DIRECT, 
SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, 1176 /*quirks*/DA_Q_4K 1177 }, 1178 { 1179 /* 1180 * Kingston E100 Series SSDs 1181 * 4k optimised & trim only works in 4k requests + 4k aligned 1182 */ 1183 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, 1184 /*quirks*/DA_Q_4K 1185 }, 1186 { 1187 /* 1188 * Kingston HyperX 3k SSDs 1189 * 4k optimised & trim only works in 4k requests + 4k aligned 1190 */ 1191 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, 1192 /*quirks*/DA_Q_4K 1193 }, 1194 { 1195 /* 1196 * Marvell SSDs (entry taken from OpenSolaris) 1197 * 4k optimised & trim only works in 4k requests + 4k aligned 1198 */ 1199 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, 1200 /*quirks*/DA_Q_4K 1201 }, 1202 { 1203 /* 1204 * OCZ Agility 2 SSDs 1205 * 4k optimised & trim only works in 4k requests + 4k aligned 1206 */ 1207 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, 1208 /*quirks*/DA_Q_4K 1209 }, 1210 { 1211 /* 1212 * OCZ Agility 3 SSDs 1213 * 4k optimised & trim only works in 4k requests + 4k aligned 1214 */ 1215 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, 1216 /*quirks*/DA_Q_4K 1217 }, 1218 { 1219 /* 1220 * OCZ Deneva R Series SSDs 1221 * 4k optimised & trim only works in 4k requests + 4k aligned 1222 */ 1223 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, 1224 /*quirks*/DA_Q_4K 1225 }, 1226 { 1227 /* 1228 * OCZ Vertex 2 SSDs (inc pro series) 1229 * 4k optimised & trim only works in 4k requests + 4k aligned 1230 */ 1231 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, 1232 /*quirks*/DA_Q_4K 1233 }, 1234 { 1235 /* 1236 * OCZ Vertex 3 SSDs 1237 * 4k optimised & trim only works in 4k requests + 4k aligned 1238 */ 1239 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, 1240 /*quirks*/DA_Q_4K 1241 }, 1242 { 1243 /* 1244 * OCZ Vertex 4 SSDs 1245 * 4k optimised & trim only works in 4k requests + 4k aligned 1246 */ 1247 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", 
"OCZ-VERTEX4*", "*" }, 1248 /*quirks*/DA_Q_4K 1249 }, 1250 { 1251 /* 1252 * Samsung 830 Series SSDs 1253 * 4k optimised & trim only works in 4k requests + 4k aligned 1254 */ 1255 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" }, 1256 /*quirks*/DA_Q_4K 1257 }, 1258 { 1259 /* 1260 * Samsung 840 SSDs 1261 * 4k optimised & trim only works in 4k requests + 4k aligned 1262 */ 1263 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, 1264 /*quirks*/DA_Q_4K 1265 }, 1266 { 1267 /* 1268 * Samsung 850 SSDs 1269 * 4k optimised & trim only works in 4k requests + 4k aligned 1270 */ 1271 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, 1272 /*quirks*/DA_Q_4K 1273 }, 1274 { 1275 /* 1276 * Samsung 843T Series SSDs (MZ7WD*) 1277 * Samsung PM851 Series SSDs (MZ7TE*) 1278 * Samsung PM853T Series SSDs (MZ7GE*) 1279 * Samsung SM863 Series SSDs (MZ7KM*) 1280 * 4k optimised 1281 */ 1282 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, 1283 /*quirks*/DA_Q_4K 1284 }, 1285 { 1286 /* 1287 * SuperTalent TeraDrive CT SSDs 1288 * 4k optimised & trim only works in 4k requests + 4k aligned 1289 */ 1290 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, 1291 /*quirks*/DA_Q_4K 1292 }, 1293 { 1294 /* 1295 * XceedIOPS SATA SSDs 1296 * 4k optimised 1297 */ 1298 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, 1299 /*quirks*/DA_Q_4K 1300 }, 1301 { 1302 /* 1303 * Hama Innostor USB-Stick 1304 */ 1305 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, 1306 /*quirks*/DA_Q_NO_RC16 1307 }, 1308 { 1309 /* 1310 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) 1311 * Drive Managed SATA hard drive. This drive doesn't report 1312 * in firmware that it is a drive managed SMR drive. 
1313 */ 1314 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS0002*", "*" }, 1315 /*quirks*/DA_Q_SMR_DM 1316 }, 1317 { 1318 /* 1319 * MX-ES USB Drive by Mach Xtreme 1320 */ 1321 { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"}, 1322 /*quirks*/DA_Q_NO_RC16 1323 }, 1324 }; 1325 1326 static disk_strategy_t dastrategy; 1327 static dumper_t dadump; 1328 static periph_init_t dainit; 1329 static void daasync(void *callback_arg, u_int32_t code, 1330 struct cam_path *path, void *arg); 1331 static void dasysctlinit(void *context, int pending); 1332 static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS); 1333 static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); 1334 static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); 1335 static int dazonemodesysctl(SYSCTL_HANDLER_ARGS); 1336 static int dazonesupsysctl(SYSCTL_HANDLER_ARGS); 1337 static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); 1338 static void dadeletemethodset(struct da_softc *softc, 1339 da_delete_methods delete_method); 1340 static off_t dadeletemaxsize(struct da_softc *softc, 1341 da_delete_methods delete_method); 1342 static void dadeletemethodchoose(struct da_softc *softc, 1343 da_delete_methods default_method); 1344 static void daprobedone(struct cam_periph *periph, union ccb *ccb); 1345 1346 static periph_ctor_t daregister; 1347 static periph_dtor_t dacleanup; 1348 static periph_start_t dastart; 1349 static periph_oninv_t daoninvalidate; 1350 static void dazonedone(struct cam_periph *periph, union ccb *ccb); 1351 static void dadone(struct cam_periph *periph, 1352 union ccb *done_ccb); 1353 static int daerror(union ccb *ccb, u_int32_t cam_flags, 1354 u_int32_t sense_flags); 1355 static void daprevent(struct cam_periph *periph, int action); 1356 static void dareprobe(struct cam_periph *periph); 1357 static void dasetgeom(struct cam_periph *periph, uint32_t block_len, 1358 uint64_t maxsector, 1359 struct scsi_read_capacity_data_long *rcaplong, 1360 size_t rcap_size); 1361 static timeout_t dasendorderedtag; 1362 static void 
dashutdown(void *arg, int howto); 1363 static timeout_t damediapoll; 1364 1365 #ifndef DA_DEFAULT_POLL_PERIOD 1366 #define DA_DEFAULT_POLL_PERIOD 3 1367 #endif 1368 1369 #ifndef DA_DEFAULT_TIMEOUT 1370 #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ 1371 #endif 1372 1373 #ifndef DA_DEFAULT_SOFTTIMEOUT 1374 #define DA_DEFAULT_SOFTTIMEOUT 0 1375 #endif 1376 1377 #ifndef DA_DEFAULT_RETRY 1378 #define DA_DEFAULT_RETRY 4 1379 #endif 1380 1381 #ifndef DA_DEFAULT_SEND_ORDERED 1382 #define DA_DEFAULT_SEND_ORDERED 1 1383 #endif 1384 1385 static int da_poll_period = DA_DEFAULT_POLL_PERIOD; 1386 static int da_retry_count = DA_DEFAULT_RETRY; 1387 static int da_default_timeout = DA_DEFAULT_TIMEOUT; 1388 static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT; 1389 static int da_send_ordered = DA_DEFAULT_SEND_ORDERED; 1390 1391 static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0, 1392 "CAM Direct Access Disk driver"); 1393 SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, 1394 &da_poll_period, 0, "Media polling period in seconds"); 1395 SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN, 1396 &da_retry_count, 0, "Normal I/O retry count"); 1397 SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN, 1398 &da_default_timeout, 0, "Normal I/O timeout (in seconds)"); 1399 SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN, 1400 &da_send_ordered, 0, "Send Ordered Tags"); 1401 1402 SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout, 1403 CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I", 1404 "Soft I/O timeout (ms)"); 1405 TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout); 1406 1407 /* 1408 * DA_ORDEREDTAG_INTERVAL determines how often, relative 1409 * to the default timeout, we check to see whether an ordered 1410 * tagged transaction is appropriate to prevent simple tag 1411 * starvation. 
 Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

/*
 * Peripheral driver registration record; dainit() is invoked when the
 * CAM layer brings the "da" driver up.
 */
static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");

/*
 * GEOM disk open handler (presumably wired up as d_open in daregister();
 * that code is outside this chunk -- verify).  Takes a periph reference
 * and hold, triggers a capacity re-probe, and waits until the probe path
 * has refreshed d_mediasize (daprobedone() wakes that channel) before
 * declaring the device open.
 */
static int
daopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return (ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		/* Could not hold (e.g. interrupted); undo the acquire. */
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daopen\n"));

	softc = (struct da_softc *)periph->softc;
	dareprobe(periph);

	/*
	 * Wait for the disk size update.  A sleep failure is only warned
	 * about; the open still proceeds unless the periph was invalidated.
	 */
	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
	    "dareprobe", 0);
	if (error != 0)
		xpt_print(periph->path, "unable to retrieve capacity data\n");

	if (periph->flags & CAM_PERIPH_INVALID)
		error = ENXIO;

	/* Lock the medium in place for removable devices that allow it. */
	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
		daprevent(periph, PR_PREVENT);

	if (error == 0) {
		softc->flags &= ~DA_FLAG_PACK_INVALID;
		softc->flags |= DA_FLAG_OPEN;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);

	/* On failure, drop the reference taken by cam_periph_acquire(). */
	if (error != 0)
		cam_periph_release(periph);

	return (error);
}

/*
 * GEOM disk close handler.  Best effort: flush the write cache if the
 * device was written to, re-allow medium removal, then clear the open
 * state and drop the reference taken in daopen().  Always returns 0.
 */
static int
daclose(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daclose\n"));

	if (cam_periph_hold(periph, PRIBIO) == 0) {

		/* Flush disk cache. */
		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
			    /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG,
			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
			    5 * 60 * 1000);
			/* Result is intentionally ignored beyond daerror(). */
			error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
			    softc->disk->d_devstat);
			softc->flags &= ~DA_FLAG_DIRTY;
			xpt_release_ccb(ccb);
		}

		/* Allow medium removal. */
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);

		cam_periph_unhold(periph);
	}

	/*
	 * If we've got removeable media, mark the blocksize as
	 * unavailable, since it could change when new media is
	 * inserted.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;

	softc->flags &= ~DA_FLAG_OPEN;
	/* Wait out any in-flight references before the final release. */
	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

/*
 * Ask the I/O scheduler to run us.  No-op unless the periph has
 * finished probing (DA_STATE_NORMAL).
 */
static void
daschedule(struct cam_periph *periph)
{
	struct da_softc *softc = (struct da_softc *)periph->softc;

	if (softc->state != DA_STATE_NORMAL)
		return;

	cam_iosched_schedule(softc->cam_iosched, periph);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
dastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	softc = (struct da_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));

	/*
	 * Zone commands must be ordered, because they can depend on the
	 * effects of previously issued commands, and they may affect
	 * commands after them.
	 */
	if (bp->bio_cmd == BIO_ZONE)
		bp->bio_flags |= BIO_ORDERED;

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	cam_iosched_queue_work(softc->cam_iosched, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	daschedule(periph);
	cam_periph_unlock(periph);

	return;
}

/*
 * Kernel crash-dump entry point (disk d_dump).  Runs polled via
 * xpt_polled_action(), so it must not rely on interrupts.  A non-zero
 * `length' writes that many bytes at `offset'; a zero `length' is the
 * final call and synchronizes the drive's write cache instead.
 * `physical' is unused here; data is addressed through `virtual'.
 */
static int
dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int secsize;
	struct ccb_scsiio csio;
	struct disk *dp;
	int error = 0;

	dp = arg;
	periph = dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	if (length > 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		/* Ordered tag and zero retries: dumps must not reorder. */
		scsi_read_write(&csio,
				/*retries*/0,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/SCSI_RW_WRITE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				offset / secsize,
				length / secsize,
				/*data_ptr*/(u_int8_t *) virtual,
				/*dxfer_len*/length,
				/*sense_len*/SSD_FULL_SIZE,
				da_default_timeout * 1000);
		xpt_polled_action((union ccb *)&csio);

		error = cam_periph_error((union ccb *)&csio,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		if (error != 0)
			printf("Aborting dump due to I/O error.\n");
		cam_periph_unlock(periph);
		return (error);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/0,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 1000);
		xpt_polled_action((union ccb *)&csio);

		error = cam_periph_error((union ccb *)&csio,
		    0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL);
		if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
	}
	cam_periph_unlock(periph);
	return (error);
}

/*
 * BIO_GETATTR handler: forward the attribute request to the transport
 * layer and mark the bio complete on success.
 */
static int
dagetattr(struct bio *bp)
{
	int ret;
	struct cam_periph *periph;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	cam_periph_lock(periph);
	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
	    periph->path);
	cam_periph_unlock(periph);
	if (ret == 0)
		bp->bio_completed = bp->bio_length;
	return ret;
}

/*
 * Driver-global initialization, run once via the dadriver registration
 * record above.
 */
static void
dainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
 */
	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (da_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("dainit: shutdown event registration failed!\n");
	}
}

/*
 * Callback from GEOM, called when it has finished cleaning up its
 * resources.  Drops the disk's reference on the periph.
 */
static void
dadiskgonecb(struct disk *dp)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)dp->d_drv1;
	cam_periph_release(periph);
}

/*
 * Periph invalidation callback: stop accepting new work, fail everything
 * already queued, and tell GEOM we are going away.  Final teardown is
 * deferred to dacleanup().
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, daasync, periph, periph->path);

	softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
	softc->invalidations++;
#endif

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);

	/*
	 * Tell GEOM that we've gone away, we'll get a callback when it is
	 * done cleaning up its resources.
	 */
	disk_gone(softc->disk);
}

/*
 * Final periph destructor: tear down the I/O scheduler and sysctl tree,
 * drain the callouts, destroy the GEOM disk, and free the softc.  Note
 * the periph lock is dropped around the teardown and re-taken at the end.
 */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_unlock(periph);

	cam_iosched_fini(softc->cam_iosched);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
#ifdef CAM_IO_STATS
		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl stats context\n");
#endif
		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl context\n");
	}

	callout_drain(&softc->mediapoll_c);
	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

/*
 * Async event handler.  Registered globally for AC_FOUND_DEVICE (see
 * dainit()) and per-periph for the other codes.
 */
static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/* Only claim connected SCSI direct-access-class devices. */
		if (cgd->protocol != PROTO_SCSI)
			break;
		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
			break;
		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		 && SID_TYPE(&cgd->inq_data) != T_RBC
		 && SID_TYPE(&cgd->inq_data) != T_OPTICAL
		 && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
1827 */ 1828 status = cam_periph_alloc(daregister, daoninvalidate, 1829 dacleanup, dastart, 1830 "da", CAM_PERIPH_BIO, 1831 path, daasync, 1832 AC_FOUND_DEVICE, cgd); 1833 1834 if (status != CAM_REQ_CMP 1835 && status != CAM_REQ_INPROG) 1836 printf("daasync: Unable to attach to new device " 1837 "due to status 0x%x\n", status); 1838 return; 1839 } 1840 case AC_ADVINFO_CHANGED: 1841 { 1842 uintptr_t buftype; 1843 1844 buftype = (uintptr_t)arg; 1845 if (buftype == CDAI_TYPE_PHYS_PATH) { 1846 struct da_softc *softc; 1847 1848 softc = periph->softc; 1849 disk_attr_changed(softc->disk, "GEOM::physpath", 1850 M_NOWAIT); 1851 } 1852 break; 1853 } 1854 case AC_UNIT_ATTENTION: 1855 { 1856 union ccb *ccb; 1857 int error_code, sense_key, asc, ascq; 1858 1859 softc = (struct da_softc *)periph->softc; 1860 ccb = (union ccb *)arg; 1861 1862 /* 1863 * Handle all UNIT ATTENTIONs except our own, 1864 * as they will be handled by daerror(). 1865 */ 1866 if (xpt_path_periph(ccb->ccb_h.path) != periph && 1867 scsi_extract_sense_ccb(ccb, 1868 &error_code, &sense_key, &asc, &ascq)) { 1869 if (asc == 0x2A && ascq == 0x09) { 1870 xpt_print(ccb->ccb_h.path, 1871 "Capacity data has changed\n"); 1872 softc->flags &= ~DA_FLAG_PROBED; 1873 dareprobe(periph); 1874 } else if (asc == 0x28 && ascq == 0x00) { 1875 softc->flags &= ~DA_FLAG_PROBED; 1876 disk_media_changed(softc->disk, M_NOWAIT); 1877 } else if (asc == 0x3F && ascq == 0x03) { 1878 xpt_print(ccb->ccb_h.path, 1879 "INQUIRY data has changed\n"); 1880 softc->flags &= ~DA_FLAG_PROBED; 1881 dareprobe(periph); 1882 } 1883 } 1884 cam_periph_async(periph, code, path, arg); 1885 break; 1886 } 1887 case AC_SCSI_AEN: 1888 softc = (struct da_softc *)periph->softc; 1889 if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { 1890 if (cam_periph_acquire(periph) == CAM_REQ_CMP) { 1891 cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); 1892 daschedule(periph); 1893 } 1894 } 1895 /* FALLTHROUGH */ 1896 case AC_SENT_BDR: 1897 case 
AC_BUS_RESET: 1898 { 1899 struct ccb_hdr *ccbh; 1900 1901 softc = (struct da_softc *)periph->softc; 1902 /* 1903 * Don't fail on the expected unit attention 1904 * that will occur. 1905 */ 1906 softc->flags |= DA_FLAG_RETRY_UA; 1907 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) 1908 ccbh->ccb_state |= DA_CCB_RETRY_UA; 1909 break; 1910 } 1911 case AC_INQ_CHANGED: 1912 softc = (struct da_softc *)periph->softc; 1913 softc->flags &= ~DA_FLAG_PROBED; 1914 dareprobe(periph); 1915 break; 1916 default: 1917 break; 1918 } 1919 cam_periph_async(periph, code, path, arg); 1920 } 1921 1922 static void 1923 dasysctlinit(void *context, int pending) 1924 { 1925 struct cam_periph *periph; 1926 struct da_softc *softc; 1927 char tmpstr[80], tmpstr2[80]; 1928 struct ccb_trans_settings cts; 1929 1930 periph = (struct cam_periph *)context; 1931 /* 1932 * periph was held for us when this task was enqueued 1933 */ 1934 if (periph->flags & CAM_PERIPH_INVALID) { 1935 cam_periph_release(periph); 1936 return; 1937 } 1938 1939 softc = (struct da_softc *)periph->softc; 1940 snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number); 1941 snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); 1942 1943 sysctl_ctx_init(&softc->sysctl_ctx); 1944 softc->flags |= DA_FLAG_SCTX_INIT; 1945 softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx, 1946 SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2, 1947 CTLFLAG_RD, 0, tmpstr, "device_index"); 1948 if (softc->sysctl_tree == NULL) { 1949 printf("dasysctlinit: unable to allocate sysctl tree\n"); 1950 cam_periph_release(periph); 1951 return; 1952 } 1953 1954 /* 1955 * Now register the sysctl handler, so the user can change the value on 1956 * the fly. 
1957 */ 1958 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1959 OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN, 1960 softc, 0, dadeletemethodsysctl, "A", 1961 "BIO_DELETE execution method"); 1962 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1963 OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW, 1964 softc, 0, dadeletemaxsysctl, "Q", 1965 "Maximum BIO_DELETE size"); 1966 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1967 OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, 1968 &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I", 1969 "Minimum CDB size"); 1970 1971 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1972 OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD, 1973 softc, 0, dazonemodesysctl, "A", 1974 "Zone Mode"); 1975 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1976 OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD, 1977 softc, 0, dazonesupsysctl, "A", 1978 "Zone Support"); 1979 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, 1980 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 1981 "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, 1982 "Optimal Number of Open Sequential Write Preferred Zones"); 1983 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, 1984 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 1985 "optimal_nonseq_zones", CTLFLAG_RD, 1986 &softc->optimal_nonseq_zones, 1987 "Optimal Number of Non-Sequentially Written Sequential Write " 1988 "Preferred Zones"); 1989 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, 1990 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 1991 "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, 1992 "Maximum Number of Open Sequential Write Required Zones"); 1993 1994 SYSCTL_ADD_INT(&softc->sysctl_ctx, 1995 SYSCTL_CHILDREN(softc->sysctl_tree), 1996 OID_AUTO, 1997 "error_inject", 1998 CTLFLAG_RW, 1999 &softc->error_inject, 2000 0, 2001 "error_inject leaf"); 2002 2003 SYSCTL_ADD_INT(&softc->sysctl_ctx, 
2004 SYSCTL_CHILDREN(softc->sysctl_tree), 2005 OID_AUTO, 2006 "unmapped_io", 2007 CTLFLAG_RD, 2008 &softc->unmappedio, 2009 0, 2010 "Unmapped I/O leaf"); 2011 2012 SYSCTL_ADD_INT(&softc->sysctl_ctx, 2013 SYSCTL_CHILDREN(softc->sysctl_tree), 2014 OID_AUTO, 2015 "rotating", 2016 CTLFLAG_RD, 2017 &softc->rotating, 2018 0, 2019 "Rotating media"); 2020 2021 /* 2022 * Add some addressing info. 2023 */ 2024 memset(&cts, 0, sizeof (cts)); 2025 xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); 2026 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 2027 cts.type = CTS_TYPE_CURRENT_SETTINGS; 2028 cam_periph_lock(periph); 2029 xpt_action((union ccb *)&cts); 2030 cam_periph_unlock(periph); 2031 if (cts.ccb_h.status != CAM_REQ_CMP) { 2032 cam_periph_release(periph); 2033 return; 2034 } 2035 if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) { 2036 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; 2037 if (fc->valid & CTS_FC_VALID_WWPN) { 2038 softc->wwpn = fc->wwpn; 2039 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, 2040 SYSCTL_CHILDREN(softc->sysctl_tree), 2041 OID_AUTO, "wwpn", CTLFLAG_RD, 2042 &softc->wwpn, "World Wide Port Name"); 2043 } 2044 } 2045 2046 #ifdef CAM_IO_STATS 2047 /* 2048 * Now add some useful stats. 
2049 * XXX These should live in cam_periph and be common to all periphs 2050 */ 2051 softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, 2052 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", 2053 CTLFLAG_RD, 0, "Statistics"); 2054 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, 2055 SYSCTL_CHILDREN(softc->sysctl_stats_tree), 2056 OID_AUTO, 2057 "errors", 2058 CTLFLAG_RD, 2059 &softc->errors, 2060 0, 2061 "Transport errors reported by the SIM"); 2062 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, 2063 SYSCTL_CHILDREN(softc->sysctl_stats_tree), 2064 OID_AUTO, 2065 "timeouts", 2066 CTLFLAG_RD, 2067 &softc->timeouts, 2068 0, 2069 "Device timeouts reported by the SIM"); 2070 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, 2071 SYSCTL_CHILDREN(softc->sysctl_stats_tree), 2072 OID_AUTO, 2073 "pack_invalidations", 2074 CTLFLAG_RD, 2075 &softc->invalidations, 2076 0, 2077 "Device pack invalidations"); 2078 #endif 2079 2080 cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, 2081 softc->sysctl_tree); 2082 2083 cam_periph_release(periph); 2084 } 2085 2086 static int 2087 dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) 2088 { 2089 int error; 2090 uint64_t value; 2091 struct da_softc *softc; 2092 2093 softc = (struct da_softc *)arg1; 2094 2095 value = softc->disk->d_delmaxsize; 2096 error = sysctl_handle_64(oidp, &value, 0, req); 2097 if ((error != 0) || (req->newptr == NULL)) 2098 return (error); 2099 2100 /* only accept values smaller than the calculated value */ 2101 if (value > dadeletemaxsize(softc, softc->delete_method)) { 2102 return (EINVAL); 2103 } 2104 softc->disk->d_delmaxsize = value; 2105 2106 return (0); 2107 } 2108 2109 static int 2110 dacmdsizesysctl(SYSCTL_HANDLER_ARGS) 2111 { 2112 int error, value; 2113 2114 value = *(int *)arg1; 2115 2116 error = sysctl_handle_int(oidp, &value, 0, req); 2117 2118 if ((error != 0) 2119 || (req->newptr == NULL)) 2120 return (error); 2121 2122 /* 2123 * Acceptable values here are 6, 10, 12 or 16. 
2124 */ 2125 if (value < 6) 2126 value = 6; 2127 else if ((value > 6) 2128 && (value <= 10)) 2129 value = 10; 2130 else if ((value > 10) 2131 && (value <= 12)) 2132 value = 12; 2133 else if (value > 12) 2134 value = 16; 2135 2136 *(int *)arg1 = value; 2137 2138 return (0); 2139 } 2140 2141 static int 2142 dasysctlsofttimeout(SYSCTL_HANDLER_ARGS) 2143 { 2144 sbintime_t value; 2145 int error; 2146 2147 value = da_default_softtimeout / SBT_1MS; 2148 2149 error = sysctl_handle_int(oidp, (int *)&value, 0, req); 2150 if ((error != 0) || (req->newptr == NULL)) 2151 return (error); 2152 2153 /* XXX Should clip this to a reasonable level */ 2154 if (value > da_default_timeout * 1000) 2155 return (EINVAL); 2156 2157 da_default_softtimeout = value * SBT_1MS; 2158 return (0); 2159 } 2160 2161 static void 2162 dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) 2163 { 2164 2165 softc->delete_method = delete_method; 2166 softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); 2167 softc->delete_func = da_delete_functions[delete_method]; 2168 2169 if (softc->delete_method > DA_DELETE_DISABLE) 2170 softc->disk->d_flags |= DISKFLAG_CANDELETE; 2171 else 2172 softc->disk->d_flags &= ~DISKFLAG_CANDELETE; 2173 } 2174 2175 static off_t 2176 dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method) 2177 { 2178 off_t sectors; 2179 2180 switch(delete_method) { 2181 case DA_DELETE_UNMAP: 2182 sectors = (off_t)softc->unmap_max_lba; 2183 break; 2184 case DA_DELETE_ATA_TRIM: 2185 sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges; 2186 break; 2187 case DA_DELETE_WS16: 2188 sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS); 2189 break; 2190 case DA_DELETE_ZERO: 2191 case DA_DELETE_WS10: 2192 sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS); 2193 break; 2194 default: 2195 return 0; 2196 } 2197 2198 return (off_t)softc->params.secsize * 2199 omin(sectors, softc->params.sectors); 2200 } 2201 2202 static void 2203 daprobedone(struct 
cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	dadeletemethodchoose(softc, DA_DELETE_NONE);

	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		char buf[80];
		int i, sep;

		/* List every available delete method, starring the active one. */
		snprintf(buf, sizeof(buf), "Delete methods: <");
		sep = 0;
		for (i = 0; i <= DA_DELETE_MAX; i++) {
			if ((softc->delete_available & (1 << i)) == 0 &&
			    i != softc->delete_method)
				continue;
			if (sep)
				strlcat(buf, ",", sizeof(buf));
			strlcat(buf, da_delete_method_names[i],
			    sizeof(buf));
			if (i == softc->delete_method)
				strlcat(buf, "(*)", sizeof(buf));
			sep = 1;
		}
		strlcat(buf, ">", sizeof(buf));
		printf("%s%d: %s\n", periph->periph_name,
		    periph->unit_number, buf);
	}

	/*
	 * Since our peripheral may be invalidated by an error
	 * above or an external event, we must release our CCB
	 * before releasing the probe lock on the peripheral.
	 * The peripheral will only go away once the last lock
	 * is removed, and we need it around for the CCB release
	 * operation.
	 */
	xpt_release_ccb(ccb);
	softc->state = DA_STATE_NORMAL;
	softc->flags |= DA_FLAG_PROBED;
	daschedule(periph);
	/* Unblock daopen() waiters sleeping on the media size. */
	wakeup(&softc->disk->d_mediasize);
	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		softc->flags |= DA_FLAG_ANNOUNCED;
		cam_periph_unhold(periph);
	} else
		cam_periph_release_locked(periph);
}

/*
 * Select the BIO_DELETE method: the user's preference if the device
 * supports it, otherwise the best supported method in enum order
 * (skipping ZERO), otherwise `default_method'.
 */
static void
dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
{
	int i, methods;

	/* If available, prefer the method requested by user. */
	i = softc->delete_method_pref;
	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
	if (methods & (1 << i)) {
		dadeletemethodset(softc, i);
		return;
	}

	/* Use the pre-defined order to choose the best performing delete. */
	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
		if (i == DA_DELETE_ZERO)
			continue;
		if (softc->delete_available & (1 << i)) {
			dadeletemethodset(softc, i);
			return;
		}
	}

	/* Fallback to default. */
	dadeletemethodset(softc, default_method);
}

/*
 * Sysctl handler for delete_method: reports the current method by name
 * and accepts a new preferred method by name.
 */
static int
dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	const char *p;
	struct da_softc *softc;
	int i, error, methods, value;

	softc = (struct da_softc *)arg1;

	value = softc->delete_method;
	if (value < 0 || value > DA_DELETE_MAX)
		p = "UNKNOWN";
	else
		p = da_delete_method_names[value];
	/*
	 * NOTE(review): strncpy() does not guarantee NUL termination;
	 * safe today only because every method name is shorter than
	 * sizeof(buf) -- confirm if names grow.
	 */
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/*
	 * NOTE(review): `methods' is computed but not consulted by the
	 * name match below -- appears vestigial; confirm before removing.
	 */
	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
	for (i = 0; i <= DA_DELETE_MAX; i++) {
		if (strcmp(buf, da_delete_method_names[i]) == 0)
			break;
	}
	if (i > DA_DELETE_MAX)
		return (EINVAL);
	softc->delete_method_pref = i;
	dadeletemethodchoose(softc, DA_DELETE_NONE);
	return (0);
}

/*
 * Read-only sysctl reporting the drive's SMR zone mode as text.
 */
static int
dazonemodesysctl(SYSCTL_HANDLER_ARGS)
{
	char tmpbuf[40];
	struct da_softc *softc;
	int error;

	softc = (struct da_softc *)arg1;

	switch (softc->zone_mode) {
	case DA_ZONE_DRIVE_MANAGED:
		snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
		break;
	case DA_ZONE_HOST_AWARE:
		snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
		break;
	case DA_ZONE_HOST_MANAGED:
		snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
		break;
	case DA_ZONE_NONE:
	default:
		snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
		break;
	}

	error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);

	return (error);
}

/*
 * Read-only sysctl listing the zone features this device supports.
 */
static int
dazonesupsysctl(SYSCTL_HANDLER_ARGS)
{
	char
tmpbuf[180];
	struct da_softc *softc;
	struct sbuf sb;
	int error, first;
	unsigned int i;

	softc = (struct da_softc *)arg1;

	error = 0;
	first = 1;
	/* Fixed backing store; sbuf will not allocate. */
	sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);

	for (i = 0; i < sizeof(da_zone_desc_table) /
	     sizeof(da_zone_desc_table[0]); i++) {
		if (softc->zone_flags & da_zone_desc_table[i].value) {
			if (first == 0)
				sbuf_printf(&sb, ", ");
			else
				first = 0;
			sbuf_cat(&sb, da_zone_desc_table[i].desc);
		}
	}

	if (first == 1)
		sbuf_printf(&sb, "None");

	sbuf_finish(&sb);

	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

	return (error);
}

/*
 * CAM periph registration entry point for the da driver.  Allocates and
 * initializes the softc, applies quirks, detects zoned-device mode, and
 * (further below) creates the GEOM disk and schedules the probe state
 * machine.  Returns CAM_REQ_CMP on success, CAM_REQ_CMP_ERR on failure.
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	struct da_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}

	LIST_INIT(&softc->pending_ccbs);
	/* Probing starts with READ CAPACITY(10); may be upgraded below. */
	softc->state = DA_STATE_PROBE_RC;
	bioq_init(&softc->delete_run_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	/* Conservative delete/trim defaults until the probes refine them. */
	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
	softc->unmap_max_lba = UNMAP_RANGE_MAX;
	softc->unmap_gran = 0;
	softc->unmap_gran_align = 0;
	softc->ws_max_blks = WS16_MAX_BLKS;
	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
	softc->rotating = 1;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       nitems(da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	/* Host-managed ZBC devices report a distinct device type. */
	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
		softc->zone_mode = DA_ZONE_HOST_MANAGED;
	else if (softc->quirks & DA_Q_SMR_DM)
		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
	else
		softc->zone_mode = DA_ZONE_NONE;

	if (softc->zone_mode != DA_ZONE_NONE) {
		/*
		 * The ATA Information VPD page implies a SAT layer; the
		 * Zoned BDC page on top of it implies ZBC translation.
		 */
		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
			else
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
		} else
			softc->zone_interface = DA_ZONE_IF_SCSI;
	}

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
	/*
	 * Take an exclusive refcount on the periph while dastart is called
	 * to finish the probe.  The reference will be dropped in dadone at
	 * the end of probe.
	 */
	(void)cam_periph_hold(periph, PRIBIO);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, softc);

	/* Drop the periph lock around disk registration below. */
	cam_periph_unlock(periph);
	/*
	 * RBC devices don't have to support READ(6), only READ(10).
	 */
	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Load the user's default, if any.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
		 periph->unit_number);
	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);

	/*
	 * 6, 10, 12 and 16 are the currently permissible values.
	 */
	if (softc->minimum_cmd_size < 6)
		softc->minimum_cmd_size = 6;
	else if ((softc->minimum_cmd_size > 6)
	      && (softc->minimum_cmd_size <= 10))
		softc->minimum_cmd_size = 10;
	else if ((softc->minimum_cmd_size > 10)
	      && (softc->minimum_cmd_size <= 12))
		softc->minimum_cmd_size = 12;
	else if (softc->minimum_cmd_size > 12)
		softc->minimum_cmd_size = 16;

	/* Predict whether device may support READ CAPACITY(16). */
	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
	    (softc->quirks & DA_Q_NO_RC16) == 0) {
		softc->flags |= DA_FLAG_CAN_RC16;
		softc->state = DA_STATE_PROBE_RC16;
	}

	/*
	 * Register this media as a disk.
	 */
	softc->disk = disk_alloc();
	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) |
			  XPORT_DEVSTAT_TYPE(cpi.transport),
			  DEVSTAT_PRIORITY_DISK);
	softc->disk->d_open = daopen;
	softc->disk->d_close = daclose;
	softc->disk->d_strategy = dastrategy;
	softc->disk->d_dump = dadump;
	softc->disk->d_getattr = dagetattr;
	softc->disk->d_gone = dadiskgonecb;
	softc->disk->d_name = "da";
	softc->disk->d_drv1 = periph;
	/* Clamp the maximum I/O size to what the SIM reports. */
	if (cpi.maxio == 0)
		softc->maxio = DFLTPHYS;	/* traditional default */
	else if (cpi.maxio > MAXPHYS)
		softc->maxio = MAXPHYS;		/* for safety */
	else
		softc->maxio = cpi.maxio;
	softc->disk->d_maxsize = softc->maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
		softc->unmappedio = 1;
		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
		xpt_print(periph->path, "UNMAPPED\n");
	}
	/* Build "<vendor> <product>" description from the inquiry data. */
	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
	softc->disk->d_hba_vendor = cpi.hba_vendor;
	softc->disk->d_hba_device = cpi.hba_device;
	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
	softc->disk->d_hba_subdevice = cpi.hba_subdevice;

	/*
	 * Acquire a reference to the periph before we register with GEOM.
	 * We'll release this reference once GEOM calls us back (via
	 * dadiskgonecb()) telling us that our provider has been freed.
	 */
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: lost periph during "
			  "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}

	disk_create(softc->disk, DISK_VERSION);
	cam_periph_lock(periph);

	/*
	 * Add async callbacks for events of interest.
	 * I don't bother checking if this fails as,
	 * in most cases, the system will function just
	 * fine without them and the only alternative
	 * would be to not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
	    AC_INQ_CHANGED, daasync, periph, periph->path);

	/*
	 * Emit an attribute changed notification just in case
	 * physical path information arrived before our async
	 * event handler was registered, but after anyone attaching
	 * to our disk device polled it.
	 */
	disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);

	/*
	 * Schedule a periodic media polling events.
2598 */ 2599 callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); 2600 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && 2601 (cgd->inq_flags & SID_AEN) == 0 && 2602 da_poll_period != 0) 2603 callout_reset(&softc->mediapoll_c, da_poll_period * hz, 2604 damediapoll, periph); 2605 2606 xpt_schedule(periph, CAM_PRIORITY_DEV); 2607 2608 return(CAM_REQ_CMP); 2609 } 2610 2611 static int 2612 da_zone_bio_to_scsi(int disk_zone_cmd) 2613 { 2614 switch (disk_zone_cmd) { 2615 case DISK_ZONE_OPEN: 2616 return ZBC_OUT_SA_OPEN; 2617 case DISK_ZONE_CLOSE: 2618 return ZBC_OUT_SA_CLOSE; 2619 case DISK_ZONE_FINISH: 2620 return ZBC_OUT_SA_FINISH; 2621 case DISK_ZONE_RWP: 2622 return ZBC_OUT_SA_RWP; 2623 } 2624 2625 return -1; 2626 } 2627 2628 static int 2629 da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, 2630 int *queue_ccb) 2631 { 2632 struct da_softc *softc; 2633 int error; 2634 2635 error = 0; 2636 2637 if (bp->bio_cmd != BIO_ZONE) { 2638 error = EINVAL; 2639 goto bailout; 2640 } 2641 2642 softc = periph->softc; 2643 2644 switch (bp->bio_zone.zone_cmd) { 2645 case DISK_ZONE_OPEN: 2646 case DISK_ZONE_CLOSE: 2647 case DISK_ZONE_FINISH: 2648 case DISK_ZONE_RWP: { 2649 int zone_flags; 2650 int zone_sa; 2651 uint64_t lba; 2652 2653 zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd); 2654 if (zone_sa == -1) { 2655 xpt_print(periph->path, "Cannot translate zone " 2656 "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd); 2657 error = EINVAL; 2658 goto bailout; 2659 } 2660 2661 zone_flags = 0; 2662 lba = bp->bio_zone.zone_params.rwp.id; 2663 2664 if (bp->bio_zone.zone_params.rwp.flags & 2665 DISK_ZONE_RWP_FLAG_ALL) 2666 zone_flags |= ZBC_OUT_ALL; 2667 2668 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { 2669 scsi_zbc_out(&ccb->csio, 2670 /*retries*/ da_retry_count, 2671 /*cbfcnp*/ dadone, 2672 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2673 /*service_action*/ zone_sa, 2674 /*zone_id*/ lba, 2675 /*zone_flags*/ zone_flags, 2676 /*data_ptr*/ NULL, 2677 /*dxfer_len*/ 0, 
2678 /*sense_len*/ SSD_FULL_SIZE, 2679 /*timeout*/ da_default_timeout * 1000); 2680 } else { 2681 /* 2682 * Note that in this case, even though we can 2683 * technically use NCQ, we don't bother for several 2684 * reasons: 2685 * 1. It hasn't been tested on a SAT layer that 2686 * supports it. This is new as of SAT-4. 2687 * 2. Even when there is a SAT layer that supports 2688 * it, that SAT layer will also probably support 2689 * ZBC -> ZAC translation, since they are both 2690 * in the SAT-4 spec. 2691 * 3. Translation will likely be preferable to ATA 2692 * passthrough. LSI / Avago at least single 2693 * steps ATA passthrough commands in the HBA, 2694 * regardless of protocol, so unless that 2695 * changes, there is a performance penalty for 2696 * doing ATA passthrough no matter whether 2697 * you're using NCQ/FPDMA, DMA or PIO. 2698 * 4. It requires a 32-byte CDB, which at least at 2699 * this point in CAM requires a CDB pointer, which 2700 * would require us to allocate an additional bit 2701 * of storage separate from the CCB. 
2702 */ 2703 error = scsi_ata_zac_mgmt_out(&ccb->csio, 2704 /*retries*/ da_retry_count, 2705 /*cbfcnp*/ dadone, 2706 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2707 /*use_ncq*/ 0, 2708 /*zm_action*/ zone_sa, 2709 /*zone_id*/ lba, 2710 /*zone_flags*/ zone_flags, 2711 /*data_ptr*/ NULL, 2712 /*dxfer_len*/ 0, 2713 /*cdb_storage*/ NULL, 2714 /*cdb_storage_len*/ 0, 2715 /*sense_len*/ SSD_FULL_SIZE, 2716 /*timeout*/ da_default_timeout * 1000); 2717 if (error != 0) { 2718 error = EINVAL; 2719 xpt_print(periph->path, 2720 "scsi_ata_zac_mgmt_out() returned an " 2721 "error!"); 2722 goto bailout; 2723 } 2724 } 2725 *queue_ccb = 1; 2726 2727 break; 2728 } 2729 case DISK_ZONE_REPORT_ZONES: { 2730 uint8_t *rz_ptr; 2731 uint32_t num_entries, alloc_size; 2732 struct disk_zone_report *rep; 2733 2734 rep = &bp->bio_zone.zone_params.report; 2735 2736 num_entries = rep->entries_allocated; 2737 if (num_entries == 0) { 2738 xpt_print(periph->path, "No entries allocated for " 2739 "Report Zones request\n"); 2740 error = EINVAL; 2741 goto bailout; 2742 } 2743 alloc_size = sizeof(struct scsi_report_zones_hdr) + 2744 (sizeof(struct scsi_report_zones_desc) * num_entries); 2745 alloc_size = min(alloc_size, softc->disk->d_maxsize); 2746 rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO); 2747 if (rz_ptr == NULL) { 2748 xpt_print(periph->path, "Unable to allocate memory " 2749 "for Report Zones request\n"); 2750 error = ENOMEM; 2751 goto bailout; 2752 } 2753 2754 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { 2755 scsi_zbc_in(&ccb->csio, 2756 /*retries*/ da_retry_count, 2757 /*cbcfnp*/ dadone, 2758 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2759 /*service_action*/ ZBC_IN_SA_REPORT_ZONES, 2760 /*zone_start_lba*/ rep->starting_id, 2761 /*zone_options*/ rep->rep_options, 2762 /*data_ptr*/ rz_ptr, 2763 /*dxfer_len*/ alloc_size, 2764 /*sense_len*/ SSD_FULL_SIZE, 2765 /*timeout*/ da_default_timeout * 1000); 2766 } else { 2767 /* 2768 * Note that in this case, even though we can 2769 * technically use NCQ, we 
don't bother for several 2770 * reasons: 2771 * 1. It hasn't been tested on a SAT layer that 2772 * supports it. This is new as of SAT-4. 2773 * 2. Even when there is a SAT layer that supports 2774 * it, that SAT layer will also probably support 2775 * ZBC -> ZAC translation, since they are both 2776 * in the SAT-4 spec. 2777 * 3. Translation will likely be preferable to ATA 2778 * passthrough. LSI / Avago at least single 2779 * steps ATA passthrough commands in the HBA, 2780 * regardless of protocol, so unless that 2781 * changes, there is a performance penalty for 2782 * doing ATA passthrough no matter whether 2783 * you're using NCQ/FPDMA, DMA or PIO. 2784 * 4. It requires a 32-byte CDB, which at least at 2785 * this point in CAM requires a CDB pointer, which 2786 * would require us to allocate an additional bit 2787 * of storage separate from the CCB. 2788 */ 2789 error = scsi_ata_zac_mgmt_in(&ccb->csio, 2790 /*retries*/ da_retry_count, 2791 /*cbcfnp*/ dadone, 2792 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2793 /*use_ncq*/ 0, 2794 /*zm_action*/ ATA_ZM_REPORT_ZONES, 2795 /*zone_id*/ rep->starting_id, 2796 /*zone_flags*/ rep->rep_options, 2797 /*data_ptr*/ rz_ptr, 2798 /*dxfer_len*/ alloc_size, 2799 /*cdb_storage*/ NULL, 2800 /*cdb_storage_len*/ 0, 2801 /*sense_len*/ SSD_FULL_SIZE, 2802 /*timeout*/ da_default_timeout * 1000); 2803 if (error != 0) { 2804 error = EINVAL; 2805 xpt_print(periph->path, 2806 "scsi_ata_zac_mgmt_in() returned an " 2807 "error!"); 2808 goto bailout; 2809 } 2810 } 2811 2812 /* 2813 * For BIO_ZONE, this isn't normally needed. However, it 2814 * is used by devstat_end_transaction_bio() to determine 2815 * how much data was transferred. 2816 */ 2817 /* 2818 * XXX KDM we have a problem. But I'm not sure how to fix 2819 * it. devstat uses bio_bcount - bio_resid to calculate 2820 * the amount of data transferred. The GEOM disk code 2821 * uses bio_length - bio_resid to calculate the amount of 2822 * data in bio_completed. 
We have different structure 2823 * sizes above and below the ada(4) driver. So, if we 2824 * use the sizes above, the amount transferred won't be 2825 * quite accurate for devstat. If we use different sizes 2826 * for bio_bcount and bio_length (above and below 2827 * respectively), then the residual needs to match one or 2828 * the other. Everything is calculated after the bio 2829 * leaves the driver, so changing the values around isn't 2830 * really an option. For now, just set the count to the 2831 * passed in length. This means that the calculations 2832 * above (e.g. bio_completed) will be correct, but the 2833 * amount of data reported to devstat will be slightly 2834 * under or overstated. 2835 */ 2836 bp->bio_bcount = bp->bio_length; 2837 2838 *queue_ccb = 1; 2839 2840 break; 2841 } 2842 case DISK_ZONE_GET_PARAMS: { 2843 struct disk_zone_disk_params *params; 2844 2845 params = &bp->bio_zone.zone_params.disk_params; 2846 bzero(params, sizeof(*params)); 2847 2848 switch (softc->zone_mode) { 2849 case DA_ZONE_DRIVE_MANAGED: 2850 params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; 2851 break; 2852 case DA_ZONE_HOST_AWARE: 2853 params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; 2854 break; 2855 case DA_ZONE_HOST_MANAGED: 2856 params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; 2857 break; 2858 default: 2859 case DA_ZONE_NONE: 2860 params->zone_mode = DISK_ZONE_MODE_NONE; 2861 break; 2862 } 2863 2864 if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ) 2865 params->flags |= DISK_ZONE_DISK_URSWRZ; 2866 2867 if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) { 2868 params->optimal_seq_zones = softc->optimal_seq_zones; 2869 params->flags |= DISK_ZONE_OPT_SEQ_SET; 2870 } 2871 2872 if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) { 2873 params->optimal_nonseq_zones = 2874 softc->optimal_nonseq_zones; 2875 params->flags |= DISK_ZONE_OPT_NONSEQ_SET; 2876 } 2877 2878 if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) { 2879 params->max_seq_zones = softc->max_seq_zones; 2880 
params->flags |= DISK_ZONE_MAX_SEQ_SET; 2881 } 2882 if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP) 2883 params->flags |= DISK_ZONE_RZ_SUP; 2884 2885 if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP) 2886 params->flags |= DISK_ZONE_OPEN_SUP; 2887 2888 if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP) 2889 params->flags |= DISK_ZONE_CLOSE_SUP; 2890 2891 if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP) 2892 params->flags |= DISK_ZONE_FINISH_SUP; 2893 2894 if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP) 2895 params->flags |= DISK_ZONE_RWP_SUP; 2896 break; 2897 } 2898 default: 2899 break; 2900 } 2901 bailout: 2902 return (error); 2903 } 2904 2905 static void 2906 dastart(struct cam_periph *periph, union ccb *start_ccb) 2907 { 2908 struct da_softc *softc; 2909 2910 softc = (struct da_softc *)periph->softc; 2911 2912 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); 2913 2914 skipstate: 2915 switch (softc->state) { 2916 case DA_STATE_NORMAL: 2917 { 2918 struct bio *bp; 2919 uint8_t tag_code; 2920 2921 more: 2922 bp = cam_iosched_next_bio(softc->cam_iosched); 2923 if (bp == NULL) { 2924 if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { 2925 cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); 2926 scsi_test_unit_ready(&start_ccb->csio, 2927 /*retries*/ da_retry_count, 2928 dadone, 2929 MSG_SIMPLE_Q_TAG, 2930 SSD_FULL_SIZE, 2931 da_default_timeout * 1000); 2932 start_ccb->ccb_h.ccb_bp = NULL; 2933 start_ccb->ccb_h.ccb_state = DA_CCB_TUR; 2934 xpt_action(start_ccb); 2935 } else 2936 xpt_release_ccb(start_ccb); 2937 break; 2938 } 2939 2940 if (bp->bio_cmd == BIO_DELETE) { 2941 if (softc->delete_func != NULL) { 2942 softc->delete_func(periph, start_ccb, bp); 2943 goto out; 2944 } else { 2945 /* Not sure this is possible, but failsafe by lying and saying "sure, done." 
*/ 2946 biofinish(bp, NULL, 0); 2947 goto more; 2948 } 2949 } 2950 2951 if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { 2952 cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); 2953 cam_periph_release_locked(periph); /* XXX is this still valid? I think so but unverified */ 2954 } 2955 2956 if ((bp->bio_flags & BIO_ORDERED) != 0 || 2957 (softc->flags & DA_FLAG_NEED_OTAG) != 0) { 2958 softc->flags &= ~DA_FLAG_NEED_OTAG; 2959 softc->flags |= DA_FLAG_WAS_OTAG; 2960 tag_code = MSG_ORDERED_Q_TAG; 2961 } else { 2962 tag_code = MSG_SIMPLE_Q_TAG; 2963 } 2964 2965 switch (bp->bio_cmd) { 2966 case BIO_WRITE: 2967 case BIO_READ: 2968 { 2969 void *data_ptr; 2970 int rw_op; 2971 2972 biotrack(bp, __func__); 2973 2974 if (bp->bio_cmd == BIO_WRITE) { 2975 softc->flags |= DA_FLAG_DIRTY; 2976 rw_op = SCSI_RW_WRITE; 2977 } else { 2978 rw_op = SCSI_RW_READ; 2979 } 2980 2981 data_ptr = bp->bio_data; 2982 if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { 2983 rw_op |= SCSI_RW_BIO; 2984 data_ptr = bp; 2985 } 2986 2987 scsi_read_write(&start_ccb->csio, 2988 /*retries*/da_retry_count, 2989 /*cbfcnp*/dadone, 2990 /*tag_action*/tag_code, 2991 rw_op, 2992 /*byte2*/0, 2993 softc->minimum_cmd_size, 2994 /*lba*/bp->bio_pblkno, 2995 /*block_count*/bp->bio_bcount / 2996 softc->params.secsize, 2997 data_ptr, 2998 /*dxfer_len*/ bp->bio_bcount, 2999 /*sense_len*/SSD_FULL_SIZE, 3000 da_default_timeout * 1000); 3001 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 3002 start_ccb->csio.bio = bp; 3003 #endif 3004 break; 3005 } 3006 case BIO_FLUSH: 3007 /* 3008 * BIO_FLUSH doesn't currently communicate 3009 * range data, so we synchronize the cache 3010 * over the whole disk. We also force 3011 * ordered tag semantics the flush applies 3012 * to all previously queued I/O. 
3013 */ 3014 scsi_synchronize_cache(&start_ccb->csio, 3015 /*retries*/1, 3016 /*cbfcnp*/dadone, 3017 MSG_ORDERED_Q_TAG, 3018 /*begin_lba*/0, 3019 /*lb_count*/0, 3020 SSD_FULL_SIZE, 3021 da_default_timeout*1000); 3022 break; 3023 case BIO_ZONE: { 3024 int error, queue_ccb; 3025 3026 queue_ccb = 0; 3027 3028 error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb); 3029 if ((error != 0) 3030 || (queue_ccb == 0)) { 3031 biofinish(bp, NULL, error); 3032 xpt_release_ccb(start_ccb); 3033 return; 3034 } 3035 break; 3036 } 3037 } 3038 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; 3039 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 3040 start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); 3041 3042 out: 3043 LIST_INSERT_HEAD(&softc->pending_ccbs, 3044 &start_ccb->ccb_h, periph_links.le); 3045 3046 /* We expect a unit attention from this device */ 3047 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { 3048 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; 3049 softc->flags &= ~DA_FLAG_RETRY_UA; 3050 } 3051 3052 start_ccb->ccb_h.ccb_bp = bp; 3053 softc->refcount++; 3054 cam_periph_unlock(periph); 3055 xpt_action(start_ccb); 3056 cam_periph_lock(periph); 3057 softc->refcount--; 3058 3059 /* May have more work to do, so ensure we stay scheduled */ 3060 daschedule(periph); 3061 break; 3062 } 3063 case DA_STATE_PROBE_RC: 3064 { 3065 struct scsi_read_capacity_data *rcap; 3066 3067 rcap = (struct scsi_read_capacity_data *) 3068 malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); 3069 if (rcap == NULL) { 3070 printf("dastart: Couldn't malloc read_capacity data\n"); 3071 /* da_free_periph??? 
*/ 3072 break; 3073 } 3074 scsi_read_capacity(&start_ccb->csio, 3075 /*retries*/da_retry_count, 3076 dadone, 3077 MSG_SIMPLE_Q_TAG, 3078 rcap, 3079 SSD_FULL_SIZE, 3080 /*timeout*/5000); 3081 start_ccb->ccb_h.ccb_bp = NULL; 3082 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; 3083 xpt_action(start_ccb); 3084 break; 3085 } 3086 case DA_STATE_PROBE_RC16: 3087 { 3088 struct scsi_read_capacity_data_long *rcaplong; 3089 3090 rcaplong = (struct scsi_read_capacity_data_long *) 3091 malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); 3092 if (rcaplong == NULL) { 3093 printf("dastart: Couldn't malloc read_capacity data\n"); 3094 /* da_free_periph??? */ 3095 break; 3096 } 3097 scsi_read_capacity_16(&start_ccb->csio, 3098 /*retries*/ da_retry_count, 3099 /*cbfcnp*/ dadone, 3100 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3101 /*lba*/ 0, 3102 /*reladr*/ 0, 3103 /*pmi*/ 0, 3104 /*rcap_buf*/ (uint8_t *)rcaplong, 3105 /*rcap_buf_len*/ sizeof(*rcaplong), 3106 /*sense_len*/ SSD_FULL_SIZE, 3107 /*timeout*/ da_default_timeout * 1000); 3108 start_ccb->ccb_h.ccb_bp = NULL; 3109 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; 3110 xpt_action(start_ccb); 3111 break; 3112 } 3113 case DA_STATE_PROBE_LBP: 3114 { 3115 struct scsi_vpd_logical_block_prov *lbp; 3116 3117 if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { 3118 /* 3119 * If we get here we don't support any SBC-3 delete 3120 * methods with UNMAP as the Logical Block Provisioning 3121 * VPD page support is required for devices which 3122 * support it according to T10/1799-D Revision 31 3123 * however older revisions of the spec don't mandate 3124 * this so we currently don't remove these methods 3125 * from the available set. 3126 */ 3127 softc->state = DA_STATE_PROBE_BLK_LIMITS; 3128 goto skipstate; 3129 } 3130 3131 lbp = (struct scsi_vpd_logical_block_prov *) 3132 malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); 3133 3134 if (lbp == NULL) { 3135 printf("dastart: Couldn't malloc lbp data\n"); 3136 /* da_free_periph??? 
*/ 3137 break; 3138 } 3139 3140 scsi_inquiry(&start_ccb->csio, 3141 /*retries*/da_retry_count, 3142 /*cbfcnp*/dadone, 3143 /*tag_action*/MSG_SIMPLE_Q_TAG, 3144 /*inq_buf*/(u_int8_t *)lbp, 3145 /*inq_len*/sizeof(*lbp), 3146 /*evpd*/TRUE, 3147 /*page_code*/SVPD_LBP, 3148 /*sense_len*/SSD_MIN_SIZE, 3149 /*timeout*/da_default_timeout * 1000); 3150 start_ccb->ccb_h.ccb_bp = NULL; 3151 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; 3152 xpt_action(start_ccb); 3153 break; 3154 } 3155 case DA_STATE_PROBE_BLK_LIMITS: 3156 { 3157 struct scsi_vpd_block_limits *block_limits; 3158 3159 if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { 3160 /* Not supported skip to next probe */ 3161 softc->state = DA_STATE_PROBE_BDC; 3162 goto skipstate; 3163 } 3164 3165 block_limits = (struct scsi_vpd_block_limits *) 3166 malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); 3167 3168 if (block_limits == NULL) { 3169 printf("dastart: Couldn't malloc block_limits data\n"); 3170 /* da_free_periph??? */ 3171 break; 3172 } 3173 3174 scsi_inquiry(&start_ccb->csio, 3175 /*retries*/da_retry_count, 3176 /*cbfcnp*/dadone, 3177 /*tag_action*/MSG_SIMPLE_Q_TAG, 3178 /*inq_buf*/(u_int8_t *)block_limits, 3179 /*inq_len*/sizeof(*block_limits), 3180 /*evpd*/TRUE, 3181 /*page_code*/SVPD_BLOCK_LIMITS, 3182 /*sense_len*/SSD_MIN_SIZE, 3183 /*timeout*/da_default_timeout * 1000); 3184 start_ccb->ccb_h.ccb_bp = NULL; 3185 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; 3186 xpt_action(start_ccb); 3187 break; 3188 } 3189 case DA_STATE_PROBE_BDC: 3190 { 3191 struct scsi_vpd_block_characteristics *bdc; 3192 3193 if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { 3194 softc->state = DA_STATE_PROBE_ATA; 3195 goto skipstate; 3196 } 3197 3198 bdc = (struct scsi_vpd_block_characteristics *) 3199 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); 3200 3201 if (bdc == NULL) { 3202 printf("dastart: Couldn't malloc bdc data\n"); 3203 /* da_free_periph??? 
*/ 3204 break; 3205 } 3206 3207 scsi_inquiry(&start_ccb->csio, 3208 /*retries*/da_retry_count, 3209 /*cbfcnp*/dadone, 3210 /*tag_action*/MSG_SIMPLE_Q_TAG, 3211 /*inq_buf*/(u_int8_t *)bdc, 3212 /*inq_len*/sizeof(*bdc), 3213 /*evpd*/TRUE, 3214 /*page_code*/SVPD_BDC, 3215 /*sense_len*/SSD_MIN_SIZE, 3216 /*timeout*/da_default_timeout * 1000); 3217 start_ccb->ccb_h.ccb_bp = NULL; 3218 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; 3219 xpt_action(start_ccb); 3220 break; 3221 } 3222 case DA_STATE_PROBE_ATA: 3223 { 3224 struct ata_params *ata_params; 3225 3226 if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { 3227 if ((softc->zone_mode == DA_ZONE_HOST_AWARE) 3228 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { 3229 /* 3230 * Note that if the ATA VPD page isn't 3231 * supported, we aren't talking to an ATA 3232 * device anyway. Support for that VPD 3233 * page is mandatory for SCSI to ATA (SAT) 3234 * translation layers. 3235 */ 3236 softc->state = DA_STATE_PROBE_ZONE; 3237 goto skipstate; 3238 } 3239 daprobedone(periph, start_ccb); 3240 break; 3241 } 3242 3243 ata_params = (struct ata_params*) 3244 malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO); 3245 3246 if (ata_params == NULL) { 3247 xpt_print(periph->path, "Couldn't malloc ata_params " 3248 "data\n"); 3249 /* da_free_periph??? 
*/ 3250 break; 3251 } 3252 3253 scsi_ata_identify(&start_ccb->csio, 3254 /*retries*/da_retry_count, 3255 /*cbfcnp*/dadone, 3256 /*tag_action*/MSG_SIMPLE_Q_TAG, 3257 /*data_ptr*/(u_int8_t *)ata_params, 3258 /*dxfer_len*/sizeof(*ata_params), 3259 /*sense_len*/SSD_FULL_SIZE, 3260 /*timeout*/da_default_timeout * 1000); 3261 start_ccb->ccb_h.ccb_bp = NULL; 3262 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; 3263 xpt_action(start_ccb); 3264 break; 3265 } 3266 case DA_STATE_PROBE_ATA_LOGDIR: 3267 { 3268 struct ata_gp_log_dir *log_dir; 3269 int retval; 3270 3271 retval = 0; 3272 3273 if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) { 3274 /* 3275 * If we don't have log support, not much point in 3276 * trying to probe zone support. 3277 */ 3278 daprobedone(periph, start_ccb); 3279 break; 3280 } 3281 3282 /* 3283 * If we have an ATA device (the SCSI ATA Information VPD 3284 * page should be present and the ATA identify should have 3285 * succeeded) and it supports logs, ask for the log directory. 3286 */ 3287 3288 log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO); 3289 if (log_dir == NULL) { 3290 xpt_print(periph->path, "Couldn't malloc log_dir " 3291 "data\n"); 3292 daprobedone(periph, start_ccb); 3293 break; 3294 } 3295 3296 retval = scsi_ata_read_log(&start_ccb->csio, 3297 /*retries*/ da_retry_count, 3298 /*cbfcnp*/ dadone, 3299 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3300 /*log_address*/ ATA_LOG_DIRECTORY, 3301 /*page_number*/ 0, 3302 /*block_count*/ 1, 3303 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 
3304 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3305 /*data_ptr*/ (uint8_t *)log_dir, 3306 /*dxfer_len*/ sizeof(*log_dir), 3307 /*sense_len*/ SSD_FULL_SIZE, 3308 /*timeout*/ da_default_timeout * 1000); 3309 3310 if (retval != 0) { 3311 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3312 free(log_dir, M_SCSIDA); 3313 daprobedone(periph, start_ccb); 3314 break; 3315 } 3316 start_ccb->ccb_h.ccb_bp = NULL; 3317 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR; 3318 xpt_action(start_ccb); 3319 break; 3320 } 3321 case DA_STATE_PROBE_ATA_IDDIR: 3322 { 3323 struct ata_identify_log_pages *id_dir; 3324 int retval; 3325 3326 retval = 0; 3327 3328 /* 3329 * Check here to see whether the Identify Device log is 3330 * supported in the directory of logs. If so, continue 3331 * with requesting the log of identify device pages. 3332 */ 3333 if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) { 3334 daprobedone(periph, start_ccb); 3335 break; 3336 } 3337 3338 id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO); 3339 if (id_dir == NULL) { 3340 xpt_print(periph->path, "Couldn't malloc id_dir " 3341 "data\n"); 3342 daprobedone(periph, start_ccb); 3343 break; 3344 } 3345 3346 retval = scsi_ata_read_log(&start_ccb->csio, 3347 /*retries*/ da_retry_count, 3348 /*cbfcnp*/ dadone, 3349 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3350 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3351 /*page_number*/ ATA_IDL_PAGE_LIST, 3352 /*block_count*/ 1, 3353 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 
3354 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3355 /*data_ptr*/ (uint8_t *)id_dir, 3356 /*dxfer_len*/ sizeof(*id_dir), 3357 /*sense_len*/ SSD_FULL_SIZE, 3358 /*timeout*/ da_default_timeout * 1000); 3359 3360 if (retval != 0) { 3361 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3362 free(id_dir, M_SCSIDA); 3363 daprobedone(periph, start_ccb); 3364 break; 3365 } 3366 start_ccb->ccb_h.ccb_bp = NULL; 3367 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR; 3368 xpt_action(start_ccb); 3369 break; 3370 } 3371 case DA_STATE_PROBE_ATA_SUP: 3372 { 3373 struct ata_identify_log_sup_cap *sup_cap; 3374 int retval; 3375 3376 retval = 0; 3377 3378 /* 3379 * Check here to see whether the Supported Capabilities log 3380 * is in the list of Identify Device logs. 3381 */ 3382 if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) { 3383 daprobedone(periph, start_ccb); 3384 break; 3385 } 3386 3387 sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO); 3388 if (sup_cap == NULL) { 3389 xpt_print(periph->path, "Couldn't malloc sup_cap " 3390 "data\n"); 3391 daprobedone(periph, start_ccb); 3392 break; 3393 } 3394 3395 retval = scsi_ata_read_log(&start_ccb->csio, 3396 /*retries*/ da_retry_count, 3397 /*cbfcnp*/ dadone, 3398 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3399 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3400 /*page_number*/ ATA_IDL_SUP_CAP, 3401 /*block_count*/ 1, 3402 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 
3403 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3404 /*data_ptr*/ (uint8_t *)sup_cap, 3405 /*dxfer_len*/ sizeof(*sup_cap), 3406 /*sense_len*/ SSD_FULL_SIZE, 3407 /*timeout*/ da_default_timeout * 1000); 3408 3409 if (retval != 0) { 3410 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3411 free(sup_cap, M_SCSIDA); 3412 daprobedone(periph, start_ccb); 3413 break; 3414 3415 } 3416 3417 start_ccb->ccb_h.ccb_bp = NULL; 3418 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP; 3419 xpt_action(start_ccb); 3420 break; 3421 } 3422 case DA_STATE_PROBE_ATA_ZONE: 3423 { 3424 struct ata_zoned_info_log *ata_zone; 3425 int retval; 3426 3427 retval = 0; 3428 3429 /* 3430 * Check here to see whether the zoned device information 3431 * page is supported. If so, continue on to request it. 3432 * If not, skip to DA_STATE_PROBE_LOG or done. 3433 */ 3434 if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) { 3435 daprobedone(periph, start_ccb); 3436 break; 3437 } 3438 ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA, 3439 M_NOWAIT|M_ZERO); 3440 if (ata_zone == NULL) { 3441 xpt_print(periph->path, "Couldn't malloc ata_zone " 3442 "data\n"); 3443 daprobedone(periph, start_ccb); 3444 break; 3445 } 3446 3447 retval = scsi_ata_read_log(&start_ccb->csio, 3448 /*retries*/ da_retry_count, 3449 /*cbfcnp*/ dadone, 3450 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3451 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3452 /*page_number*/ ATA_IDL_ZDI, 3453 /*block_count*/ 1, 3454 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 
3455 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3456 /*data_ptr*/ (uint8_t *)ata_zone, 3457 /*dxfer_len*/ sizeof(*ata_zone), 3458 /*sense_len*/ SSD_FULL_SIZE, 3459 /*timeout*/ da_default_timeout * 1000); 3460 3461 if (retval != 0) { 3462 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3463 free(ata_zone, M_SCSIDA); 3464 daprobedone(periph, start_ccb); 3465 break; 3466 } 3467 start_ccb->ccb_h.ccb_bp = NULL; 3468 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE; 3469 xpt_action(start_ccb); 3470 3471 break; 3472 } 3473 case DA_STATE_PROBE_ZONE: 3474 { 3475 struct scsi_vpd_zoned_bdc *bdc; 3476 3477 /* 3478 * Note that this page will be supported for SCSI protocol 3479 * devices that support ZBC (SMR devices), as well as ATA 3480 * protocol devices that are behind a SAT (SCSI to ATA 3481 * Translation) layer that supports converting ZBC commands 3482 * to their ZAC equivalents. 3483 */ 3484 if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) { 3485 daprobedone(periph, start_ccb); 3486 break; 3487 } 3488 bdc = (struct scsi_vpd_zoned_bdc *) 3489 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); 3490 3491 if (bdc == NULL) { 3492 xpt_release_ccb(start_ccb); 3493 xpt_print(periph->path, "Couldn't malloc zone VPD " 3494 "data\n"); 3495 break; 3496 } 3497 scsi_inquiry(&start_ccb->csio, 3498 /*retries*/da_retry_count, 3499 /*cbfcnp*/dadone, 3500 /*tag_action*/MSG_SIMPLE_Q_TAG, 3501 /*inq_buf*/(u_int8_t *)bdc, 3502 /*inq_len*/sizeof(*bdc), 3503 /*evpd*/TRUE, 3504 /*page_code*/SVPD_ZONED_BDC, 3505 /*sense_len*/SSD_FULL_SIZE, 3506 /*timeout*/da_default_timeout * 1000); 3507 start_ccb->ccb_h.ccb_bp = NULL; 3508 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE; 3509 xpt_action(start_ccb); 3510 break; 3511 } 3512 } 3513 } 3514 3515 /* 3516 * In each of the methods below, while its the caller's 3517 * responsibility to ensure the request will fit into a 3518 * single device request, we might have changed the delete 3519 * method due to the device incorrectly advertising either 3520 * 
its supported methods or limits. 3521 * 3522 * To prevent this causing further issues we validate the 3523 * against the methods limits, and warn which would 3524 * otherwise be unnecessary. 3525 */ 3526 static void 3527 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 3528 { 3529 struct da_softc *softc = (struct da_softc *)periph->softc;; 3530 struct bio *bp1; 3531 uint8_t *buf = softc->unmap_buf; 3532 struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE]; 3533 uint64_t lba, lastlba = (uint64_t)-1; 3534 uint64_t totalcount = 0; 3535 uint64_t count; 3536 uint32_t c, lastcount = 0, ranges = 0; 3537 3538 /* 3539 * Currently this doesn't take the UNMAP 3540 * Granularity and Granularity Alignment 3541 * fields into account. 3542 * 3543 * This could result in both unoptimal unmap 3544 * requests as as well as UNMAP calls unmapping 3545 * fewer LBA's than requested. 3546 */ 3547 3548 bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); 3549 bp1 = bp; 3550 do { 3551 /* 3552 * Note: ada and da are different in how they store the 3553 * pending bp's in a trim. ada stores all of them in the 3554 * trim_req.bps. da stores all but the first one in the 3555 * delete_run_queue. ada then completes all the bps in 3556 * its adadone() loop. da completes all the bps in the 3557 * delete_run_queue in dadone, and relies on the biodone 3558 * after to complete. This should be reconciled since there's 3559 * no real reason to do it differently. XXX 3560 */ 3561 if (bp1 != bp) 3562 bioq_insert_tail(&softc->delete_run_queue, bp1); 3563 lba = bp1->bio_pblkno; 3564 count = bp1->bio_bcount / softc->params.secsize; 3565 3566 /* Try to extend the previous range. 
*/ 3567 if (lba == lastlba) { 3568 c = omin(count, UNMAP_RANGE_MAX - lastcount); 3569 lastlba += c; 3570 lastcount += c; 3571 scsi_ulto4b(lastcount, d[ranges - 1].length); 3572 count -= c; 3573 lba += c; 3574 totalcount += c; 3575 } else if ((softc->quirks & DA_Q_STRICT_UNMAP) && 3576 softc->unmap_gran != 0) { 3577 /* Align length of the previous range. */ 3578 if ((c = lastcount % softc->unmap_gran) != 0) { 3579 if (lastcount <= c) { 3580 totalcount -= lastcount; 3581 lastlba = (uint64_t)-1; 3582 lastcount = 0; 3583 ranges--; 3584 } else { 3585 totalcount -= c; 3586 lastlba -= c; 3587 lastcount -= c; 3588 scsi_ulto4b(lastcount, d[ranges - 1].length); 3589 } 3590 } 3591 /* Align beginning of the new range. */ 3592 c = (lba - softc->unmap_gran_align) % softc->unmap_gran; 3593 if (c != 0) { 3594 c = softc->unmap_gran - c; 3595 if (count <= c) { 3596 count = 0; 3597 } else { 3598 lba += c; 3599 count -= c; 3600 } 3601 } 3602 } 3603 3604 while (count > 0) { 3605 c = omin(count, UNMAP_RANGE_MAX); 3606 if (totalcount + c > softc->unmap_max_lba || 3607 ranges >= softc->unmap_max_ranges) { 3608 xpt_print(periph->path, 3609 "%s issuing short delete %ld > %ld" 3610 "|| %d >= %d", 3611 da_delete_method_desc[softc->delete_method], 3612 totalcount + c, softc->unmap_max_lba, 3613 ranges, softc->unmap_max_ranges); 3614 break; 3615 } 3616 scsi_u64to8b(lba, d[ranges].lba); 3617 scsi_ulto4b(c, d[ranges].length); 3618 lba += c; 3619 totalcount += c; 3620 ranges++; 3621 count -= c; 3622 lastlba = lba; 3623 lastcount = c; 3624 } 3625 bp1 = cam_iosched_next_trim(softc->cam_iosched); 3626 if (bp1 == NULL) 3627 break; 3628 if (ranges >= softc->unmap_max_ranges || 3629 totalcount + bp1->bio_bcount / 3630 softc->params.secsize > softc->unmap_max_lba) { 3631 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 3632 break; 3633 } 3634 } while (1); 3635 3636 /* Align length of the last range. 
*/ 3637 if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 && 3638 (c = lastcount % softc->unmap_gran) != 0) { 3639 if (lastcount <= c) 3640 ranges--; 3641 else 3642 scsi_ulto4b(lastcount - c, d[ranges - 1].length); 3643 } 3644 3645 scsi_ulto2b(ranges * 16 + 6, &buf[0]); 3646 scsi_ulto2b(ranges * 16, &buf[2]); 3647 3648 scsi_unmap(&ccb->csio, 3649 /*retries*/da_retry_count, 3650 /*cbfcnp*/dadone, 3651 /*tag_action*/MSG_SIMPLE_Q_TAG, 3652 /*byte2*/0, 3653 /*data_ptr*/ buf, 3654 /*dxfer_len*/ ranges * 16 + 8, 3655 /*sense_len*/SSD_FULL_SIZE, 3656 da_default_timeout * 1000); 3657 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 3658 ccb->ccb_h.flags |= CAM_UNLOCKED; 3659 cam_iosched_submit_trim(softc->cam_iosched); 3660 } 3661 3662 static void 3663 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 3664 { 3665 struct da_softc *softc = (struct da_softc *)periph->softc; 3666 struct bio *bp1; 3667 uint8_t *buf = softc->unmap_buf; 3668 uint64_t lastlba = (uint64_t)-1; 3669 uint64_t count; 3670 uint64_t lba; 3671 uint32_t lastcount = 0, c, requestcount; 3672 int ranges = 0, off, block_count; 3673 3674 bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); 3675 bp1 = bp; 3676 do { 3677 if (bp1 != bp)//XXX imp XXX 3678 bioq_insert_tail(&softc->delete_run_queue, bp1); 3679 lba = bp1->bio_pblkno; 3680 count = bp1->bio_bcount / softc->params.secsize; 3681 requestcount = count; 3682 3683 /* Try to extend the previous range. 
*/ 3684 if (lba == lastlba) { 3685 c = omin(count, ATA_DSM_RANGE_MAX - lastcount); 3686 lastcount += c; 3687 off = (ranges - 1) * 8; 3688 buf[off + 6] = lastcount & 0xff; 3689 buf[off + 7] = (lastcount >> 8) & 0xff; 3690 count -= c; 3691 lba += c; 3692 } 3693 3694 while (count > 0) { 3695 c = omin(count, ATA_DSM_RANGE_MAX); 3696 off = ranges * 8; 3697 3698 buf[off + 0] = lba & 0xff; 3699 buf[off + 1] = (lba >> 8) & 0xff; 3700 buf[off + 2] = (lba >> 16) & 0xff; 3701 buf[off + 3] = (lba >> 24) & 0xff; 3702 buf[off + 4] = (lba >> 32) & 0xff; 3703 buf[off + 5] = (lba >> 40) & 0xff; 3704 buf[off + 6] = c & 0xff; 3705 buf[off + 7] = (c >> 8) & 0xff; 3706 lba += c; 3707 ranges++; 3708 count -= c; 3709 lastcount = c; 3710 if (count != 0 && ranges == softc->trim_max_ranges) { 3711 xpt_print(periph->path, 3712 "%s issuing short delete %ld > %ld\n", 3713 da_delete_method_desc[softc->delete_method], 3714 requestcount, 3715 (softc->trim_max_ranges - ranges) * 3716 ATA_DSM_RANGE_MAX); 3717 break; 3718 } 3719 } 3720 lastlba = lba; 3721 bp1 = cam_iosched_next_trim(softc->cam_iosched); 3722 if (bp1 == NULL) 3723 break; 3724 if (bp1->bio_bcount / softc->params.secsize > 3725 (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { 3726 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 3727 break; 3728 } 3729 } while (1); 3730 3731 block_count = howmany(ranges, ATA_DSM_BLK_RANGES); 3732 scsi_ata_trim(&ccb->csio, 3733 /*retries*/da_retry_count, 3734 /*cbfcnp*/dadone, 3735 /*tag_action*/MSG_SIMPLE_Q_TAG, 3736 block_count, 3737 /*data_ptr*/buf, 3738 /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, 3739 /*sense_len*/SSD_FULL_SIZE, 3740 da_default_timeout * 1000); 3741 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 3742 ccb->ccb_h.flags |= CAM_UNLOCKED; 3743 cam_iosched_submit_trim(softc->cam_iosched); 3744 } 3745 3746 /* 3747 * We calculate ws_max_blks here based off d_delmaxsize instead 3748 * of using softc->ws_max_blks as it is absolute max for the 3749 * device not the protocol max which may 
well be lower. 3750 */ 3751 static void 3752 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 3753 { 3754 struct da_softc *softc; 3755 struct bio *bp1; 3756 uint64_t ws_max_blks; 3757 uint64_t lba; 3758 uint64_t count; /* forward compat with WS32 */ 3759 3760 softc = (struct da_softc *)periph->softc; 3761 ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; 3762 lba = bp->bio_pblkno; 3763 count = 0; 3764 bp1 = bp; 3765 do { 3766 if (bp1 != bp)//XXX imp XXX 3767 bioq_insert_tail(&softc->delete_run_queue, bp1); 3768 count += bp1->bio_bcount / softc->params.secsize; 3769 if (count > ws_max_blks) { 3770 xpt_print(periph->path, 3771 "%s issuing short delete %ld > %ld\n", 3772 da_delete_method_desc[softc->delete_method], 3773 count, ws_max_blks); 3774 count = omin(count, ws_max_blks); 3775 break; 3776 } 3777 bp1 = cam_iosched_next_trim(softc->cam_iosched); 3778 if (bp1 == NULL) 3779 break; 3780 if (lba + count != bp1->bio_pblkno || 3781 count + bp1->bio_bcount / 3782 softc->params.secsize > ws_max_blks) { 3783 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 3784 break; 3785 } 3786 } while (1); 3787 3788 scsi_write_same(&ccb->csio, 3789 /*retries*/da_retry_count, 3790 /*cbfcnp*/dadone, 3791 /*tag_action*/MSG_SIMPLE_Q_TAG, 3792 /*byte2*/softc->delete_method == 3793 DA_DELETE_ZERO ? 0 : SWS_UNMAP, 3794 softc->delete_method == DA_DELETE_WS16 ? 
16 : 10, 3795 /*lba*/lba, 3796 /*block_count*/count, 3797 /*data_ptr*/ __DECONST(void *, zero_region), 3798 /*dxfer_len*/ softc->params.secsize, 3799 /*sense_len*/SSD_FULL_SIZE, 3800 da_default_timeout * 1000); 3801 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 3802 ccb->ccb_h.flags |= CAM_UNLOCKED; 3803 cam_iosched_submit_trim(softc->cam_iosched); 3804 } 3805 3806 static int 3807 cmd6workaround(union ccb *ccb) 3808 { 3809 struct scsi_rw_6 cmd6; 3810 struct scsi_rw_10 *cmd10; 3811 struct da_softc *softc; 3812 u_int8_t *cdb; 3813 struct bio *bp; 3814 int frozen; 3815 3816 cdb = ccb->csio.cdb_io.cdb_bytes; 3817 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; 3818 3819 if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { 3820 da_delete_methods old_method = softc->delete_method; 3821 3822 /* 3823 * Typically there are two reasons for failure here 3824 * 1. Delete method was detected as supported but isn't 3825 * 2. Delete failed due to invalid params e.g. too big 3826 * 3827 * While we will attempt to choose an alternative delete method 3828 * this may result in short deletes if the existing delete 3829 * requests from geom are big for the new method chosen. 
3830 * 3831 * This method assumes that the error which triggered this 3832 * will not retry the io otherwise a panic will occur 3833 */ 3834 dadeleteflag(softc, old_method, 0); 3835 dadeletemethodchoose(softc, DA_DELETE_DISABLE); 3836 if (softc->delete_method == DA_DELETE_DISABLE) 3837 xpt_print(ccb->ccb_h.path, 3838 "%s failed, disabling BIO_DELETE\n", 3839 da_delete_method_desc[old_method]); 3840 else 3841 xpt_print(ccb->ccb_h.path, 3842 "%s failed, switching to %s BIO_DELETE\n", 3843 da_delete_method_desc[old_method], 3844 da_delete_method_desc[softc->delete_method]); 3845 3846 while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL) 3847 cam_iosched_queue_work(softc->cam_iosched, bp); 3848 cam_iosched_queue_work(softc->cam_iosched, 3849 (struct bio *)ccb->ccb_h.ccb_bp); 3850 ccb->ccb_h.ccb_bp = NULL; 3851 return (0); 3852 } 3853 3854 /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ 3855 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && 3856 (*cdb == PREVENT_ALLOW) && 3857 (softc->quirks & DA_Q_NO_PREVENT) == 0) { 3858 if (bootverbose) 3859 xpt_print(ccb->ccb_h.path, 3860 "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); 3861 softc->quirks |= DA_Q_NO_PREVENT; 3862 return (0); 3863 } 3864 3865 /* Detect unsupported SYNCHRONIZE CACHE(10). 
*/ 3866 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && 3867 (*cdb == SYNCHRONIZE_CACHE) && 3868 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { 3869 if (bootverbose) 3870 xpt_print(ccb->ccb_h.path, 3871 "SYNCHRONIZE CACHE(10) not supported.\n"); 3872 softc->quirks |= DA_Q_NO_SYNC_CACHE; 3873 softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; 3874 return (0); 3875 } 3876 3877 /* Translation only possible if CDB is an array and cmd is R/W6 */ 3878 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || 3879 (*cdb != READ_6 && *cdb != WRITE_6)) 3880 return 0; 3881 3882 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " 3883 "increasing minimum_cmd_size to 10.\n"); 3884 softc->minimum_cmd_size = 10; 3885 3886 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); 3887 cmd10 = (struct scsi_rw_10 *)cdb; 3888 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10; 3889 cmd10->byte2 = 0; 3890 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); 3891 cmd10->reserved = 0; 3892 scsi_ulto2b(cmd6.length, cmd10->length); 3893 cmd10->control = cmd6.control; 3894 ccb->csio.cdb_len = sizeof(*cmd10); 3895 3896 /* Requeue request, unfreezing queue if necessary */ 3897 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 3898 ccb->ccb_h.status = CAM_REQUEUE_REQ; 3899 xpt_action(ccb); 3900 if (frozen) { 3901 cam_release_devq(ccb->ccb_h.path, 3902 /*relsim_flags*/0, 3903 /*reduction*/0, 3904 /*timeout*/0, 3905 /*getcount_only*/0); 3906 } 3907 return (ERESTART); 3908 } 3909 3910 static void 3911 dazonedone(struct cam_periph *periph, union ccb *ccb) 3912 { 3913 struct da_softc *softc; 3914 struct bio *bp; 3915 3916 softc = periph->softc; 3917 bp = (struct bio *)ccb->ccb_h.ccb_bp; 3918 3919 switch (bp->bio_zone.zone_cmd) { 3920 case DISK_ZONE_OPEN: 3921 case DISK_ZONE_CLOSE: 3922 case DISK_ZONE_FINISH: 3923 case DISK_ZONE_RWP: 3924 break; 3925 case DISK_ZONE_REPORT_ZONES: { 3926 uint32_t avail_len; 3927 struct disk_zone_report *rep; 3928 struct scsi_report_zones_hdr *hdr; 3929 struct 
scsi_report_zones_desc *desc; 3930 struct disk_zone_rep_entry *entry; 3931 uint32_t num_alloced, hdr_len, num_avail; 3932 uint32_t num_to_fill, i; 3933 int ata; 3934 3935 rep = &bp->bio_zone.zone_params.report; 3936 avail_len = ccb->csio.dxfer_len - ccb->csio.resid; 3937 /* 3938 * Note that bio_resid isn't normally used for zone 3939 * commands, but it is used by devstat_end_transaction_bio() 3940 * to determine how much data was transferred. Because 3941 * the size of the SCSI/ATA data structures is different 3942 * than the size of the BIO interface structures, the 3943 * amount of data actually transferred from the drive will 3944 * be different than the amount of data transferred to 3945 * the user. 3946 */ 3947 bp->bio_resid = ccb->csio.resid; 3948 num_alloced = rep->entries_allocated; 3949 hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr; 3950 if (avail_len < sizeof(*hdr)) { 3951 /* 3952 * Is there a better error than EIO here? We asked 3953 * for at least the header, and we got less than 3954 * that. 3955 */ 3956 bp->bio_error = EIO; 3957 bp->bio_flags |= BIO_ERROR; 3958 bp->bio_resid = bp->bio_bcount; 3959 break; 3960 } 3961 3962 if (softc->zone_interface == DA_ZONE_IF_ATA_PASS) 3963 ata = 1; 3964 else 3965 ata = 0; 3966 3967 hdr_len = ata ? le32dec(hdr->length) : 3968 scsi_4btoul(hdr->length); 3969 if (hdr_len > 0) 3970 rep->entries_available = hdr_len / sizeof(*desc); 3971 else 3972 rep->entries_available = 0; 3973 /* 3974 * NOTE: using the same values for the BIO version of the 3975 * same field as the SCSI/ATA values. This means we could 3976 * get some additional values that aren't defined in bio.h 3977 * if more values of the same field are defined later. 3978 */ 3979 rep->header.same = hdr->byte4 & SRZ_SAME_MASK; 3980 rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) : 3981 scsi_8btou64(hdr->maximum_lba); 3982 /* 3983 * If the drive reports no entries that match the query, 3984 * we're done. 
3985 */ 3986 if (hdr_len == 0) { 3987 rep->entries_filled = 0; 3988 break; 3989 } 3990 3991 num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), 3992 hdr_len / sizeof(*desc)); 3993 /* 3994 * If the drive didn't return any data, then we're done. 3995 */ 3996 if (num_avail == 0) { 3997 rep->entries_filled = 0; 3998 break; 3999 } 4000 4001 num_to_fill = min(num_avail, rep->entries_allocated); 4002 /* 4003 * If the user didn't allocate any entries for us to fill, 4004 * we're done. 4005 */ 4006 if (num_to_fill == 0) { 4007 rep->entries_filled = 0; 4008 break; 4009 } 4010 4011 for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0]; 4012 i < num_to_fill; i++, desc++, entry++) { 4013 /* 4014 * NOTE: we're mapping the values here directly 4015 * from the SCSI/ATA bit definitions to the bio.h 4016 * definitons. There is also a warning in 4017 * disk_zone.h, but the impact is that if 4018 * additional values are added in the SCSI/ATA 4019 * specs these will be visible to consumers of 4020 * this interface. 4021 */ 4022 entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; 4023 entry->zone_condition = 4024 (desc->zone_flags & SRZ_ZONE_COND_MASK) >> 4025 SRZ_ZONE_COND_SHIFT; 4026 entry->zone_flags |= desc->zone_flags & 4027 (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); 4028 entry->zone_length = 4029 ata ? le64dec(desc->zone_length) : 4030 scsi_8btou64(desc->zone_length); 4031 entry->zone_start_lba = 4032 ata ? le64dec(desc->zone_start_lba) : 4033 scsi_8btou64(desc->zone_start_lba); 4034 entry->write_pointer_lba = 4035 ata ? le64dec(desc->write_pointer_lba) : 4036 scsi_8btou64(desc->write_pointer_lba); 4037 } 4038 rep->entries_filled = num_to_fill; 4039 break; 4040 } 4041 case DISK_ZONE_GET_PARAMS: 4042 default: 4043 /* 4044 * In theory we should not get a GET_PARAMS bio, since it 4045 * should be handled without queueing the command to the 4046 * drive. 
4047 */ 4048 panic("%s: Invalid zone command %d", __func__, 4049 bp->bio_zone.zone_cmd); 4050 break; 4051 } 4052 4053 if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) 4054 free(ccb->csio.data_ptr, M_SCSIDA); 4055 } 4056 4057 static void 4058 dadone(struct cam_periph *periph, union ccb *done_ccb) 4059 { 4060 struct da_softc *softc; 4061 struct ccb_scsiio *csio; 4062 u_int32_t priority; 4063 da_ccb_state state; 4064 4065 softc = (struct da_softc *)periph->softc; 4066 priority = done_ccb->ccb_h.pinfo.priority; 4067 4068 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n")); 4069 4070 csio = &done_ccb->csio; 4071 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 4072 if (csio->bio != NULL) 4073 biotrack(csio->bio, __func__); 4074 #endif 4075 state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; 4076 switch (state) { 4077 case DA_CCB_BUFFER_IO: 4078 case DA_CCB_DELETE: 4079 { 4080 struct bio *bp, *bp1; 4081 4082 cam_periph_lock(periph); 4083 bp = (struct bio *)done_ccb->ccb_h.ccb_bp; 4084 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 4085 int error; 4086 int sf; 4087 4088 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0) 4089 sf = SF_RETRY_UA; 4090 else 4091 sf = 0; 4092 4093 error = daerror(done_ccb, CAM_RETRY_SELTO, sf); 4094 if (error == ERESTART) { 4095 /* 4096 * A retry was scheduled, so 4097 * just return. 4098 */ 4099 cam_periph_unlock(periph); 4100 return; 4101 } 4102 bp = (struct bio *)done_ccb->ccb_h.ccb_bp; 4103 if (error != 0) { 4104 int queued_error; 4105 4106 /* 4107 * return all queued I/O with EIO, so that 4108 * the client can retry these I/Os in the 4109 * proper order should it attempt to recover. 4110 */ 4111 queued_error = EIO; 4112 4113 if (error == ENXIO 4114 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) { 4115 /* 4116 * Catastrophic error. Mark our pack as 4117 * invalid. 4118 */ 4119 /* 4120 * XXX See if this is really a media 4121 * XXX change first? 
4122 */ 4123 xpt_print(periph->path, 4124 "Invalidating pack\n"); 4125 softc->flags |= DA_FLAG_PACK_INVALID; 4126 #ifdef CAM_IO_STATS 4127 softc->invalidations++; 4128 #endif 4129 queued_error = ENXIO; 4130 } 4131 cam_iosched_flush(softc->cam_iosched, NULL, 4132 queued_error); 4133 if (bp != NULL) { 4134 bp->bio_error = error; 4135 bp->bio_resid = bp->bio_bcount; 4136 bp->bio_flags |= BIO_ERROR; 4137 } 4138 } else if (bp != NULL) { 4139 if (state == DA_CCB_DELETE) 4140 bp->bio_resid = 0; 4141 else 4142 bp->bio_resid = csio->resid; 4143 bp->bio_error = 0; 4144 if (bp->bio_resid != 0) 4145 bp->bio_flags |= BIO_ERROR; 4146 } 4147 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 4148 cam_release_devq(done_ccb->ccb_h.path, 4149 /*relsim_flags*/0, 4150 /*reduction*/0, 4151 /*timeout*/0, 4152 /*getcount_only*/0); 4153 } else if (bp != NULL) { 4154 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 4155 panic("REQ_CMP with QFRZN"); 4156 if (bp->bio_cmd == BIO_ZONE) 4157 dazonedone(periph, done_ccb); 4158 else if (state == DA_CCB_DELETE) 4159 bp->bio_resid = 0; 4160 else 4161 bp->bio_resid = csio->resid; 4162 if ((csio->resid > 0) 4163 && (bp->bio_cmd != BIO_ZONE)) 4164 bp->bio_flags |= BIO_ERROR; 4165 if (softc->error_inject != 0) { 4166 bp->bio_error = softc->error_inject; 4167 bp->bio_resid = bp->bio_bcount; 4168 bp->bio_flags |= BIO_ERROR; 4169 softc->error_inject = 0; 4170 } 4171 } 4172 4173 biotrack(bp, __func__); 4174 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); 4175 if (LIST_EMPTY(&softc->pending_ccbs)) 4176 softc->flags |= DA_FLAG_WAS_OTAG; 4177 4178 cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); 4179 xpt_release_ccb(done_ccb); 4180 if (state == DA_CCB_DELETE) { 4181 TAILQ_HEAD(, bio) queue; 4182 4183 TAILQ_INIT(&queue); 4184 TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); 4185 softc->delete_run_queue.insert_point = NULL; 4186 /* 4187 * Normally, the xpt_release_ccb() above would make sure 4188 * that when we have more work to do, 
that work would 4189 * get kicked off. However, we specifically keep 4190 * delete_running set to 0 before the call above to 4191 * allow other I/O to progress when many BIO_DELETE 4192 * requests are pushed down. We set delete_running to 0 4193 * and call daschedule again so that we don't stall if 4194 * there are no other I/Os pending apart from BIO_DELETEs. 4195 */ 4196 cam_iosched_trim_done(softc->cam_iosched); 4197 daschedule(periph); 4198 cam_periph_unlock(periph); 4199 while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { 4200 TAILQ_REMOVE(&queue, bp1, bio_queue); 4201 bp1->bio_error = bp->bio_error; 4202 if (bp->bio_flags & BIO_ERROR) { 4203 bp1->bio_flags |= BIO_ERROR; 4204 bp1->bio_resid = bp1->bio_bcount; 4205 } else 4206 bp1->bio_resid = 0; 4207 biodone(bp1); 4208 } 4209 } else { 4210 daschedule(periph); 4211 cam_periph_unlock(periph); 4212 } 4213 if (bp != NULL) 4214 biodone(bp); 4215 return; 4216 } 4217 case DA_CCB_PROBE_RC: 4218 case DA_CCB_PROBE_RC16: 4219 { 4220 struct scsi_read_capacity_data *rdcap; 4221 struct scsi_read_capacity_data_long *rcaplong; 4222 char announce_buf[80]; 4223 int lbp; 4224 4225 lbp = 0; 4226 rdcap = NULL; 4227 rcaplong = NULL; 4228 if (state == DA_CCB_PROBE_RC) 4229 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr; 4230 else 4231 rcaplong = (struct scsi_read_capacity_data_long *) 4232 csio->data_ptr; 4233 4234 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4235 struct disk_params *dp; 4236 uint32_t block_size; 4237 uint64_t maxsector; 4238 u_int lalba; /* Lowest aligned LBA. 
*/ 4239 4240 if (state == DA_CCB_PROBE_RC) { 4241 block_size = scsi_4btoul(rdcap->length); 4242 maxsector = scsi_4btoul(rdcap->addr); 4243 lalba = 0; 4244 4245 /* 4246 * According to SBC-2, if the standard 10 4247 * byte READ CAPACITY command returns 2^32, 4248 * we should issue the 16 byte version of 4249 * the command, since the device in question 4250 * has more sectors than can be represented 4251 * with the short version of the command. 4252 */ 4253 if (maxsector == 0xffffffff) { 4254 free(rdcap, M_SCSIDA); 4255 xpt_release_ccb(done_ccb); 4256 softc->state = DA_STATE_PROBE_RC16; 4257 xpt_schedule(periph, priority); 4258 return; 4259 } 4260 } else { 4261 block_size = scsi_4btoul(rcaplong->length); 4262 maxsector = scsi_8btou64(rcaplong->addr); 4263 lalba = scsi_2btoul(rcaplong->lalba_lbp); 4264 } 4265 4266 /* 4267 * Because GEOM code just will panic us if we 4268 * give them an 'illegal' value we'll avoid that 4269 * here. 4270 */ 4271 if (block_size == 0) { 4272 block_size = 512; 4273 if (maxsector == 0) 4274 maxsector = -1; 4275 } 4276 if (block_size >= MAXPHYS) { 4277 xpt_print(periph->path, 4278 "unsupportable block size %ju\n", 4279 (uintmax_t) block_size); 4280 announce_buf[0] = '\0'; 4281 cam_periph_invalidate(periph); 4282 } else { 4283 /* 4284 * We pass rcaplong into dasetgeom(), 4285 * because it will only use it if it is 4286 * non-NULL. 4287 */ 4288 dasetgeom(periph, block_size, maxsector, 4289 rcaplong, sizeof(*rcaplong)); 4290 lbp = (lalba & SRC16_LBPME_A); 4291 dp = &softc->params; 4292 snprintf(announce_buf, sizeof(announce_buf), 4293 "%juMB (%ju %u byte sectors)", 4294 ((uintmax_t)dp->secsize * dp->sectors) / 4295 (1024 * 1024), 4296 (uintmax_t)dp->sectors, dp->secsize); 4297 } 4298 } else { 4299 int error; 4300 4301 announce_buf[0] = '\0'; 4302 4303 /* 4304 * Retry any UNIT ATTENTION type errors. They 4305 * are expected at boot. 
4306 */ 4307 error = daerror(done_ccb, CAM_RETRY_SELTO, 4308 SF_RETRY_UA|SF_NO_PRINT); 4309 if (error == ERESTART) { 4310 /* 4311 * A retry was scheuled, so 4312 * just return. 4313 */ 4314 return; 4315 } else if (error != 0) { 4316 int asc, ascq; 4317 int sense_key, error_code; 4318 int have_sense; 4319 cam_status status; 4320 struct ccb_getdev cgd; 4321 4322 /* Don't wedge this device's queue */ 4323 status = done_ccb->ccb_h.status; 4324 if ((status & CAM_DEV_QFRZN) != 0) 4325 cam_release_devq(done_ccb->ccb_h.path, 4326 /*relsim_flags*/0, 4327 /*reduction*/0, 4328 /*timeout*/0, 4329 /*getcount_only*/0); 4330 4331 4332 xpt_setup_ccb(&cgd.ccb_h, 4333 done_ccb->ccb_h.path, 4334 CAM_PRIORITY_NORMAL); 4335 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 4336 xpt_action((union ccb *)&cgd); 4337 4338 if (scsi_extract_sense_ccb(done_ccb, 4339 &error_code, &sense_key, &asc, &ascq)) 4340 have_sense = TRUE; 4341 else 4342 have_sense = FALSE; 4343 4344 /* 4345 * If we tried READ CAPACITY(16) and failed, 4346 * fallback to READ CAPACITY(10). 4347 */ 4348 if ((state == DA_CCB_PROBE_RC16) && 4349 (softc->flags & DA_FLAG_CAN_RC16) && 4350 (((csio->ccb_h.status & CAM_STATUS_MASK) == 4351 CAM_REQ_INVALID) || 4352 ((have_sense) && 4353 (error_code == SSD_CURRENT_ERROR) && 4354 (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) { 4355 softc->flags &= ~DA_FLAG_CAN_RC16; 4356 free(rdcap, M_SCSIDA); 4357 xpt_release_ccb(done_ccb); 4358 softc->state = DA_STATE_PROBE_RC; 4359 xpt_schedule(periph, priority); 4360 return; 4361 } 4362 4363 /* 4364 * Attach to anything that claims to be a 4365 * direct access or optical disk device, 4366 * as long as it doesn't return a "Logical 4367 * unit not supported" (0x25) error. 4368 * "Internal Target Failure" (0x44) is also 4369 * special and typically means that the 4370 * device is a SATA drive behind a SATL 4371 * translation that's fallen into a 4372 * terminally fatal state. 
4373 */ 4374 if ((have_sense) 4375 && (asc != 0x25) && (asc != 0x44) 4376 && (error_code == SSD_CURRENT_ERROR)) { 4377 const char *sense_key_desc; 4378 const char *asc_desc; 4379 4380 dasetgeom(periph, 512, -1, NULL, 0); 4381 scsi_sense_desc(sense_key, asc, ascq, 4382 &cgd.inq_data, 4383 &sense_key_desc, 4384 &asc_desc); 4385 snprintf(announce_buf, 4386 sizeof(announce_buf), 4387 "Attempt to query device " 4388 "size failed: %s, %s", 4389 sense_key_desc, 4390 asc_desc); 4391 } else { 4392 if (have_sense) 4393 scsi_sense_print( 4394 &done_ccb->csio); 4395 else { 4396 xpt_print(periph->path, 4397 "got CAM status %#x\n", 4398 done_ccb->ccb_h.status); 4399 } 4400 4401 xpt_print(periph->path, "fatal error, " 4402 "failed to attach to device\n"); 4403 4404 /* 4405 * Free up resources. 4406 */ 4407 cam_periph_invalidate(periph); 4408 } 4409 } 4410 } 4411 free(csio->data_ptr, M_SCSIDA); 4412 if (announce_buf[0] != '\0' && 4413 ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) { 4414 /* 4415 * Create our sysctl variables, now that we know 4416 * we have successfully attached. 4417 */ 4418 /* increase the refcount */ 4419 if (cam_periph_acquire(periph) == CAM_REQ_CMP) { 4420 taskqueue_enqueue(taskqueue_thread, 4421 &softc->sysctl_task); 4422 xpt_announce_periph(periph, announce_buf); 4423 xpt_announce_quirks(periph, softc->quirks, 4424 DA_Q_BIT_STRING); 4425 } else { 4426 xpt_print(periph->path, "fatal error, " 4427 "could not acquire reference count\n"); 4428 } 4429 } 4430 4431 /* We already probed the device. */ 4432 if (softc->flags & DA_FLAG_PROBED) { 4433 daprobedone(periph, done_ccb); 4434 return; 4435 } 4436 4437 /* Ensure re-probe doesn't see old delete. 
*/ 4438 softc->delete_available = 0; 4439 dadeleteflag(softc, DA_DELETE_ZERO, 1); 4440 if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) { 4441 /* 4442 * Based on older SBC-3 spec revisions 4443 * any of the UNMAP methods "may" be 4444 * available via LBP given this flag so 4445 * we flag all of them as available and 4446 * then remove those which further 4447 * probes confirm aren't available 4448 * later. 4449 * 4450 * We could also check readcap(16) p_type 4451 * flag to exclude one or more invalid 4452 * write same (X) types here 4453 */ 4454 dadeleteflag(softc, DA_DELETE_WS16, 1); 4455 dadeleteflag(softc, DA_DELETE_WS10, 1); 4456 dadeleteflag(softc, DA_DELETE_UNMAP, 1); 4457 4458 xpt_release_ccb(done_ccb); 4459 softc->state = DA_STATE_PROBE_LBP; 4460 xpt_schedule(periph, priority); 4461 return; 4462 } 4463 4464 xpt_release_ccb(done_ccb); 4465 softc->state = DA_STATE_PROBE_BDC; 4466 xpt_schedule(periph, priority); 4467 return; 4468 } 4469 case DA_CCB_PROBE_LBP: 4470 { 4471 struct scsi_vpd_logical_block_prov *lbp; 4472 4473 lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; 4474 4475 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4476 /* 4477 * T10/1799-D Revision 31 states at least one of these 4478 * must be supported but we don't currently enforce this. 
4479 */ 4480 dadeleteflag(softc, DA_DELETE_WS16, 4481 (lbp->flags & SVPD_LBP_WS16)); 4482 dadeleteflag(softc, DA_DELETE_WS10, 4483 (lbp->flags & SVPD_LBP_WS10)); 4484 dadeleteflag(softc, DA_DELETE_UNMAP, 4485 (lbp->flags & SVPD_LBP_UNMAP)); 4486 } else { 4487 int error; 4488 error = daerror(done_ccb, CAM_RETRY_SELTO, 4489 SF_RETRY_UA|SF_NO_PRINT); 4490 if (error == ERESTART) 4491 return; 4492 else if (error != 0) { 4493 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 4494 /* Don't wedge this device's queue */ 4495 cam_release_devq(done_ccb->ccb_h.path, 4496 /*relsim_flags*/0, 4497 /*reduction*/0, 4498 /*timeout*/0, 4499 /*getcount_only*/0); 4500 } 4501 4502 /* 4503 * Failure indicates we don't support any SBC-3 4504 * delete methods with UNMAP 4505 */ 4506 } 4507 } 4508 4509 free(lbp, M_SCSIDA); 4510 xpt_release_ccb(done_ccb); 4511 softc->state = DA_STATE_PROBE_BLK_LIMITS; 4512 xpt_schedule(periph, priority); 4513 return; 4514 } 4515 case DA_CCB_PROBE_BLK_LIMITS: 4516 { 4517 struct scsi_vpd_block_limits *block_limits; 4518 4519 block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; 4520 4521 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4522 uint32_t max_txfer_len = scsi_4btoul( 4523 block_limits->max_txfer_len); 4524 uint32_t max_unmap_lba_cnt = scsi_4btoul( 4525 block_limits->max_unmap_lba_cnt); 4526 uint32_t max_unmap_blk_cnt = scsi_4btoul( 4527 block_limits->max_unmap_blk_cnt); 4528 uint32_t unmap_gran = scsi_4btoul( 4529 block_limits->opt_unmap_grain); 4530 uint32_t unmap_gran_align = scsi_4btoul( 4531 block_limits->unmap_grain_align); 4532 uint64_t ws_max_blks = scsi_8btou64( 4533 block_limits->max_write_same_length); 4534 4535 if (max_txfer_len != 0) { 4536 softc->disk->d_maxsize = MIN(softc->maxio, 4537 (off_t)max_txfer_len * softc->params.secsize); 4538 } 4539 4540 /* 4541 * We should already support UNMAP but we check lba 4542 * and block count to be sure 4543 */ 4544 if (max_unmap_lba_cnt != 0x00L && 4545 max_unmap_blk_cnt != 
0x00L) { 4546 softc->unmap_max_lba = max_unmap_lba_cnt; 4547 softc->unmap_max_ranges = min(max_unmap_blk_cnt, 4548 UNMAP_MAX_RANGES); 4549 if (unmap_gran > 1) { 4550 softc->unmap_gran = unmap_gran; 4551 if (unmap_gran_align & 0x80000000) { 4552 softc->unmap_gran_align = 4553 unmap_gran_align & 4554 0x7fffffff; 4555 } 4556 } 4557 } else { 4558 /* 4559 * Unexpected UNMAP limits which means the 4560 * device doesn't actually support UNMAP 4561 */ 4562 dadeleteflag(softc, DA_DELETE_UNMAP, 0); 4563 } 4564 4565 if (ws_max_blks != 0x00L) 4566 softc->ws_max_blks = ws_max_blks; 4567 } else { 4568 int error; 4569 error = daerror(done_ccb, CAM_RETRY_SELTO, 4570 SF_RETRY_UA|SF_NO_PRINT); 4571 if (error == ERESTART) 4572 return; 4573 else if (error != 0) { 4574 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 4575 /* Don't wedge this device's queue */ 4576 cam_release_devq(done_ccb->ccb_h.path, 4577 /*relsim_flags*/0, 4578 /*reduction*/0, 4579 /*timeout*/0, 4580 /*getcount_only*/0); 4581 } 4582 4583 /* 4584 * Failure here doesn't mean UNMAP is not 4585 * supported as this is an optional page. 4586 */ 4587 softc->unmap_max_lba = 1; 4588 softc->unmap_max_ranges = 1; 4589 } 4590 } 4591 4592 free(block_limits, M_SCSIDA); 4593 xpt_release_ccb(done_ccb); 4594 softc->state = DA_STATE_PROBE_BDC; 4595 xpt_schedule(periph, priority); 4596 return; 4597 } 4598 case DA_CCB_PROBE_BDC: 4599 { 4600 struct scsi_vpd_block_device_characteristics *bdc; 4601 4602 bdc = (struct scsi_vpd_block_device_characteristics *) 4603 csio->data_ptr; 4604 4605 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4606 uint32_t valid_len; 4607 4608 /* 4609 * Disable queue sorting for non-rotational media 4610 * by default. 
4611 */ 4612 u_int16_t old_rate = softc->disk->d_rotation_rate; 4613 4614 valid_len = csio->dxfer_len - csio->resid; 4615 if (SBDC_IS_PRESENT(bdc, valid_len, 4616 medium_rotation_rate)) { 4617 softc->disk->d_rotation_rate = 4618 scsi_2btoul(bdc->medium_rotation_rate); 4619 if (softc->disk->d_rotation_rate == 4620 SVPD_BDC_RATE_NON_ROTATING) { 4621 cam_iosched_set_sort_queue( 4622 softc->cam_iosched, 0); 4623 softc->rotating = 0; 4624 } 4625 if (softc->disk->d_rotation_rate != old_rate) { 4626 disk_attr_changed(softc->disk, 4627 "GEOM::rotation_rate", M_NOWAIT); 4628 } 4629 } 4630 if ((SBDC_IS_PRESENT(bdc, valid_len, flags)) 4631 && (softc->zone_mode == DA_ZONE_NONE)) { 4632 int ata_proto; 4633 4634 if (scsi_vpd_supported_page(periph, 4635 SVPD_ATA_INFORMATION)) 4636 ata_proto = 1; 4637 else 4638 ata_proto = 0; 4639 4640 /* 4641 * The Zoned field will only be set for 4642 * Drive Managed and Host Aware drives. If 4643 * they are Host Managed, the device type 4644 * in the standard INQUIRY data should be 4645 * set to T_ZBC_HM (0x14). 4646 */ 4647 if ((bdc->flags & SVPD_ZBC_MASK) == 4648 SVPD_HAW_ZBC) { 4649 softc->zone_mode = DA_ZONE_HOST_AWARE; 4650 softc->zone_interface = (ata_proto) ? 4651 DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; 4652 } else if ((bdc->flags & SVPD_ZBC_MASK) == 4653 SVPD_DM_ZBC) { 4654 softc->zone_mode =DA_ZONE_DRIVE_MANAGED; 4655 softc->zone_interface = (ata_proto) ? 
4656 DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; 4657 } else if ((bdc->flags & SVPD_ZBC_MASK) != 4658 SVPD_ZBC_NR) { 4659 xpt_print(periph->path, "Unknown zoned " 4660 "type %#x", 4661 bdc->flags & SVPD_ZBC_MASK); 4662 } 4663 } 4664 } else { 4665 int error; 4666 error = daerror(done_ccb, CAM_RETRY_SELTO, 4667 SF_RETRY_UA|SF_NO_PRINT); 4668 if (error == ERESTART) 4669 return; 4670 else if (error != 0) { 4671 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 4672 /* Don't wedge this device's queue */ 4673 cam_release_devq(done_ccb->ccb_h.path, 4674 /*relsim_flags*/0, 4675 /*reduction*/0, 4676 /*timeout*/0, 4677 /*getcount_only*/0); 4678 } 4679 } 4680 } 4681 4682 free(bdc, M_SCSIDA); 4683 xpt_release_ccb(done_ccb); 4684 softc->state = DA_STATE_PROBE_ATA; 4685 xpt_schedule(periph, priority); 4686 return; 4687 } 4688 case DA_CCB_PROBE_ATA: 4689 { 4690 int i; 4691 struct ata_params *ata_params; 4692 int continue_probe; 4693 int error; 4694 int16_t *ptr; 4695 4696 ata_params = (struct ata_params *)csio->data_ptr; 4697 ptr = (uint16_t *)ata_params; 4698 continue_probe = 0; 4699 error = 0; 4700 4701 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4702 uint16_t old_rate; 4703 4704 for (i = 0; i < sizeof(*ata_params) / 2; i++) 4705 ptr[i] = le16toh(ptr[i]); 4706 if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM && 4707 (softc->quirks & DA_Q_NO_UNMAP) == 0) { 4708 dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1); 4709 if (ata_params->max_dsm_blocks != 0) 4710 softc->trim_max_ranges = min( 4711 softc->trim_max_ranges, 4712 ata_params->max_dsm_blocks * 4713 ATA_DSM_BLK_RANGES); 4714 } 4715 /* 4716 * Disable queue sorting for non-rotational media 4717 * by default. 
4718 */ 4719 old_rate = softc->disk->d_rotation_rate; 4720 softc->disk->d_rotation_rate = 4721 ata_params->media_rotation_rate; 4722 if (softc->disk->d_rotation_rate == 4723 ATA_RATE_NON_ROTATING) { 4724 cam_iosched_set_sort_queue(softc->cam_iosched, 0); 4725 softc->rotating = 0; 4726 } 4727 if (softc->disk->d_rotation_rate != old_rate) { 4728 disk_attr_changed(softc->disk, 4729 "GEOM::rotation_rate", M_NOWAIT); 4730 } 4731 4732 if (ata_params->capabilities1 & ATA_SUPPORT_DMA) 4733 softc->flags |= DA_FLAG_CAN_ATA_DMA; 4734 4735 if (ata_params->support.extension & 4736 ATA_SUPPORT_GENLOG) 4737 softc->flags |= DA_FLAG_CAN_ATA_LOG; 4738 4739 /* 4740 * At this point, if we have a SATA host aware drive, 4741 * we communicate via ATA passthrough unless the 4742 * SAT layer supports ZBC -> ZAC translation. In 4743 * that case, 4744 */ 4745 /* 4746 * XXX KDM figure out how to detect a host managed 4747 * SATA drive. 4748 */ 4749 if (softc->zone_mode == DA_ZONE_NONE) { 4750 /* 4751 * Note that we don't override the zone 4752 * mode or interface if it has already been 4753 * set. This is because it has either been 4754 * set as a quirk, or when we probed the 4755 * SCSI Block Device Characteristics page, 4756 * the zoned field was set. The latter 4757 * means that the SAT layer supports ZBC to 4758 * ZAC translation, and we would prefer to 4759 * use that if it is available. 
4760 */ 4761 if ((ata_params->support3 & 4762 ATA_SUPPORT_ZONE_MASK) == 4763 ATA_SUPPORT_ZONE_HOST_AWARE) { 4764 softc->zone_mode = DA_ZONE_HOST_AWARE; 4765 softc->zone_interface = 4766 DA_ZONE_IF_ATA_PASS; 4767 } else if ((ata_params->support3 & 4768 ATA_SUPPORT_ZONE_MASK) == 4769 ATA_SUPPORT_ZONE_DEV_MANAGED) { 4770 softc->zone_mode =DA_ZONE_DRIVE_MANAGED; 4771 softc->zone_interface = 4772 DA_ZONE_IF_ATA_PASS; 4773 } 4774 } 4775 4776 } else { 4777 error = daerror(done_ccb, CAM_RETRY_SELTO, 4778 SF_RETRY_UA|SF_NO_PRINT); 4779 if (error == ERESTART) 4780 return; 4781 else if (error != 0) { 4782 if ((done_ccb->ccb_h.status & 4783 CAM_DEV_QFRZN) != 0) { 4784 /* Don't wedge this device's queue */ 4785 cam_release_devq(done_ccb->ccb_h.path, 4786 /*relsim_flags*/0, 4787 /*reduction*/0, 4788 /*timeout*/0, 4789 /*getcount_only*/0); 4790 } 4791 } 4792 } 4793 4794 free(ata_params, M_SCSIDA); 4795 if ((softc->zone_mode == DA_ZONE_HOST_AWARE) 4796 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { 4797 /* 4798 * If the ATA IDENTIFY failed, we could be talking 4799 * to a SCSI drive, although that seems unlikely, 4800 * since the drive did report that it supported the 4801 * ATA Information VPD page. If the ATA IDENTIFY 4802 * succeeded, and the SAT layer doesn't support 4803 * ZBC -> ZAC translation, continue on to get the 4804 * directory of ATA logs, and complete the rest of 4805 * the ZAC probe. If the SAT layer does support 4806 * ZBC -> ZAC translation, we want to use that, 4807 * and we'll probe the SCSI Zoned Block Device 4808 * Characteristics VPD page next. 
4809 */ 4810 if ((error == 0) 4811 && (softc->flags & DA_FLAG_CAN_ATA_LOG) 4812 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS)) 4813 softc->state = DA_STATE_PROBE_ATA_LOGDIR; 4814 else 4815 softc->state = DA_STATE_PROBE_ZONE; 4816 continue_probe = 1; 4817 } 4818 if (continue_probe != 0) { 4819 xpt_release_ccb(done_ccb); 4820 xpt_schedule(periph, priority); 4821 return; 4822 } else 4823 daprobedone(periph, done_ccb); 4824 return; 4825 } 4826 case DA_CCB_PROBE_ATA_LOGDIR: 4827 { 4828 int error; 4829 4830 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4831 error = 0; 4832 softc->valid_logdir_len = 0; 4833 bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); 4834 softc->valid_logdir_len = 4835 csio->dxfer_len - csio->resid; 4836 if (softc->valid_logdir_len > 0) 4837 bcopy(csio->data_ptr, &softc->ata_logdir, 4838 min(softc->valid_logdir_len, 4839 sizeof(softc->ata_logdir))); 4840 /* 4841 * Figure out whether the Identify Device log is 4842 * supported. The General Purpose log directory 4843 * has a header, and lists the number of pages 4844 * available for each GP log identified by the 4845 * offset into the list. 4846 */ 4847 if ((softc->valid_logdir_len >= 4848 ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) 4849 && (le16dec(softc->ata_logdir.header) == 4850 ATA_GP_LOG_DIR_VERSION) 4851 && (le16dec(&softc->ata_logdir.num_pages[ 4852 (ATA_IDENTIFY_DATA_LOG * 4853 sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){ 4854 softc->flags |= DA_FLAG_CAN_ATA_IDLOG; 4855 } else { 4856 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; 4857 } 4858 } else { 4859 error = daerror(done_ccb, CAM_RETRY_SELTO, 4860 SF_RETRY_UA|SF_NO_PRINT); 4861 if (error == ERESTART) 4862 return; 4863 else if (error != 0) { 4864 /* 4865 * If we can't get the ATA log directory, 4866 * then ATA logs are effectively not 4867 * supported even if the bit is set in the 4868 * identify data. 
4869 */ 4870 softc->flags &= ~(DA_FLAG_CAN_ATA_LOG | 4871 DA_FLAG_CAN_ATA_IDLOG); 4872 if ((done_ccb->ccb_h.status & 4873 CAM_DEV_QFRZN) != 0) { 4874 /* Don't wedge this device's queue */ 4875 cam_release_devq(done_ccb->ccb_h.path, 4876 /*relsim_flags*/0, 4877 /*reduction*/0, 4878 /*timeout*/0, 4879 /*getcount_only*/0); 4880 } 4881 } 4882 } 4883 4884 free(csio->data_ptr, M_SCSIDA); 4885 4886 if ((error == 0) 4887 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) { 4888 softc->state = DA_STATE_PROBE_ATA_IDDIR; 4889 xpt_release_ccb(done_ccb); 4890 xpt_schedule(periph, priority); 4891 return; 4892 } 4893 daprobedone(periph, done_ccb); 4894 return; 4895 } 4896 case DA_CCB_PROBE_ATA_IDDIR: 4897 { 4898 int error; 4899 4900 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4901 off_t entries_offset, max_entries; 4902 error = 0; 4903 4904 softc->valid_iddir_len = 0; 4905 bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); 4906 softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP | 4907 DA_FLAG_CAN_ATA_ZONE); 4908 softc->valid_iddir_len = 4909 csio->dxfer_len - csio->resid; 4910 if (softc->valid_iddir_len > 0) 4911 bcopy(csio->data_ptr, &softc->ata_iddir, 4912 min(softc->valid_iddir_len, 4913 sizeof(softc->ata_iddir))); 4914 4915 entries_offset = 4916 __offsetof(struct ata_identify_log_pages,entries); 4917 max_entries = softc->valid_iddir_len - entries_offset; 4918 if ((softc->valid_iddir_len > (entries_offset + 1)) 4919 && (le64dec(softc->ata_iddir.header) == 4920 ATA_IDLOG_REVISION) 4921 && (softc->ata_iddir.entry_count > 0)) { 4922 int num_entries, i; 4923 4924 num_entries = softc->ata_iddir.entry_count; 4925 num_entries = min(num_entries, 4926 softc->valid_iddir_len - entries_offset); 4927 for (i = 0; i < num_entries && 4928 i < max_entries; i++) { 4929 if (softc->ata_iddir.entries[i] == 4930 ATA_IDL_SUP_CAP) 4931 softc->flags |= 4932 DA_FLAG_CAN_ATA_SUPCAP; 4933 else if (softc->ata_iddir.entries[i]== 4934 ATA_IDL_ZDI) 4935 softc->flags |= 4936 DA_FLAG_CAN_ATA_ZONE; 4937 4938 
if ((softc->flags & 4939 DA_FLAG_CAN_ATA_SUPCAP) 4940 && (softc->flags & 4941 DA_FLAG_CAN_ATA_ZONE)) 4942 break; 4943 } 4944 } 4945 } else { 4946 error = daerror(done_ccb, CAM_RETRY_SELTO, 4947 SF_RETRY_UA|SF_NO_PRINT); 4948 if (error == ERESTART) 4949 return; 4950 else if (error != 0) { 4951 /* 4952 * If we can't get the ATA Identify Data log 4953 * directory, then it effectively isn't 4954 * supported even if the ATA Log directory 4955 * a non-zero number of pages present for 4956 * this log. 4957 */ 4958 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; 4959 if ((done_ccb->ccb_h.status & 4960 CAM_DEV_QFRZN) != 0) { 4961 /* Don't wedge this device's queue */ 4962 cam_release_devq(done_ccb->ccb_h.path, 4963 /*relsim_flags*/0, 4964 /*reduction*/0, 4965 /*timeout*/0, 4966 /*getcount_only*/0); 4967 } 4968 } 4969 } 4970 4971 free(csio->data_ptr, M_SCSIDA); 4972 4973 if ((error == 0) 4974 && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) { 4975 softc->state = DA_STATE_PROBE_ATA_SUP; 4976 xpt_release_ccb(done_ccb); 4977 xpt_schedule(periph, priority); 4978 return; 4979 } 4980 daprobedone(periph, done_ccb); 4981 return; 4982 } 4983 case DA_CCB_PROBE_ATA_SUP: 4984 { 4985 int error; 4986 4987 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4988 uint32_t valid_len; 4989 size_t needed_size; 4990 struct ata_identify_log_sup_cap *sup_cap; 4991 error = 0; 4992 4993 sup_cap = (struct ata_identify_log_sup_cap *) 4994 csio->data_ptr; 4995 valid_len = csio->dxfer_len - csio->resid; 4996 needed_size = 4997 __offsetof(struct ata_identify_log_sup_cap, 4998 sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); 4999 if (valid_len >= needed_size) { 5000 uint64_t zoned, zac_cap; 5001 5002 zoned = le64dec(sup_cap->zoned_cap); 5003 if (zoned & ATA_ZONED_VALID) { 5004 /* 5005 * This should have already been 5006 * set, because this is also in the 5007 * ATA identify data. 
5008 */ 5009 if ((zoned & ATA_ZONED_MASK) == 5010 ATA_SUPPORT_ZONE_HOST_AWARE) 5011 softc->zone_mode = 5012 DA_ZONE_HOST_AWARE; 5013 else if ((zoned & ATA_ZONED_MASK) == 5014 ATA_SUPPORT_ZONE_DEV_MANAGED) 5015 softc->zone_mode = 5016 DA_ZONE_DRIVE_MANAGED; 5017 } 5018 5019 zac_cap = le64dec(sup_cap->sup_zac_cap); 5020 if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { 5021 if (zac_cap & ATA_REPORT_ZONES_SUP) 5022 softc->zone_flags |= 5023 DA_ZONE_FLAG_RZ_SUP; 5024 if (zac_cap & ATA_ND_OPEN_ZONE_SUP) 5025 softc->zone_flags |= 5026 DA_ZONE_FLAG_OPEN_SUP; 5027 if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) 5028 softc->zone_flags |= 5029 DA_ZONE_FLAG_CLOSE_SUP; 5030 if (zac_cap & ATA_ND_FINISH_ZONE_SUP) 5031 softc->zone_flags |= 5032 DA_ZONE_FLAG_FINISH_SUP; 5033 if (zac_cap & ATA_ND_RWP_SUP) 5034 softc->zone_flags |= 5035 DA_ZONE_FLAG_RWP_SUP; 5036 } else { 5037 /* 5038 * This field was introduced in 5039 * ACS-4, r08 on April 28th, 2015. 5040 * If the drive firmware was written 5041 * to an earlier spec, it won't have 5042 * the field. So, assume all 5043 * commands are supported. 5044 */ 5045 softc->zone_flags |= 5046 DA_ZONE_FLAG_SUP_MASK; 5047 } 5048 5049 } 5050 } else { 5051 error = daerror(done_ccb, CAM_RETRY_SELTO, 5052 SF_RETRY_UA|SF_NO_PRINT); 5053 if (error == ERESTART) 5054 return; 5055 else if (error != 0) { 5056 /* 5057 * If we can't get the ATA Identify Data 5058 * Supported Capabilities page, clear the 5059 * flag... 5060 */ 5061 softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP; 5062 /* 5063 * And clear zone capabilities. 
5064 */ 5065 softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK; 5066 if ((done_ccb->ccb_h.status & 5067 CAM_DEV_QFRZN) != 0) { 5068 /* Don't wedge this device's queue */ 5069 cam_release_devq(done_ccb->ccb_h.path, 5070 /*relsim_flags*/0, 5071 /*reduction*/0, 5072 /*timeout*/0, 5073 /*getcount_only*/0); 5074 } 5075 } 5076 } 5077 5078 free(csio->data_ptr, M_SCSIDA); 5079 5080 if ((error == 0) 5081 && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) { 5082 softc->state = DA_STATE_PROBE_ATA_ZONE; 5083 xpt_release_ccb(done_ccb); 5084 xpt_schedule(periph, priority); 5085 return; 5086 } 5087 daprobedone(periph, done_ccb); 5088 return; 5089 } 5090 case DA_CCB_PROBE_ATA_ZONE: 5091 { 5092 int error; 5093 5094 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5095 struct ata_zoned_info_log *zi_log; 5096 uint32_t valid_len; 5097 size_t needed_size; 5098 5099 zi_log = (struct ata_zoned_info_log *)csio->data_ptr; 5100 5101 valid_len = csio->dxfer_len - csio->resid; 5102 needed_size = __offsetof(struct ata_zoned_info_log, 5103 version_info) + 1 + sizeof(zi_log->version_info); 5104 if (valid_len >= needed_size) { 5105 uint64_t tmpvar; 5106 5107 tmpvar = le64dec(zi_log->zoned_cap); 5108 if (tmpvar & ATA_ZDI_CAP_VALID) { 5109 if (tmpvar & ATA_ZDI_CAP_URSWRZ) 5110 softc->zone_flags |= 5111 DA_ZONE_FLAG_URSWRZ; 5112 else 5113 softc->zone_flags &= 5114 ~DA_ZONE_FLAG_URSWRZ; 5115 } 5116 tmpvar = le64dec(zi_log->optimal_seq_zones); 5117 if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { 5118 softc->zone_flags |= 5119 DA_ZONE_FLAG_OPT_SEQ_SET; 5120 softc->optimal_seq_zones = (tmpvar & 5121 ATA_ZDI_OPT_SEQ_MASK); 5122 } else { 5123 softc->zone_flags &= 5124 ~DA_ZONE_FLAG_OPT_SEQ_SET; 5125 softc->optimal_seq_zones = 0; 5126 } 5127 5128 tmpvar =le64dec(zi_log->optimal_nonseq_zones); 5129 if (tmpvar & ATA_ZDI_OPT_NS_VALID) { 5130 softc->zone_flags |= 5131 DA_ZONE_FLAG_OPT_NONSEQ_SET; 5132 softc->optimal_nonseq_zones = 5133 (tmpvar & ATA_ZDI_OPT_NS_MASK); 5134 } else { 5135 softc->zone_flags &= 5136 
~DA_ZONE_FLAG_OPT_NONSEQ_SET; 5137 softc->optimal_nonseq_zones = 0; 5138 } 5139 5140 tmpvar = le64dec(zi_log->max_seq_req_zones); 5141 if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { 5142 softc->zone_flags |= 5143 DA_ZONE_FLAG_MAX_SEQ_SET; 5144 softc->max_seq_zones = 5145 (tmpvar & ATA_ZDI_MAX_SEQ_MASK); 5146 } else { 5147 softc->zone_flags &= 5148 ~DA_ZONE_FLAG_MAX_SEQ_SET; 5149 softc->max_seq_zones = 0; 5150 } 5151 } 5152 } else { 5153 error = daerror(done_ccb, CAM_RETRY_SELTO, 5154 SF_RETRY_UA|SF_NO_PRINT); 5155 if (error == ERESTART) 5156 return; 5157 else if (error != 0) { 5158 softc->flags &= ~DA_FLAG_CAN_ATA_ZONE; 5159 softc->flags &= ~DA_ZONE_FLAG_SET_MASK; 5160 5161 if ((done_ccb->ccb_h.status & 5162 CAM_DEV_QFRZN) != 0) { 5163 /* Don't wedge this device's queue */ 5164 cam_release_devq(done_ccb->ccb_h.path, 5165 /*relsim_flags*/0, 5166 /*reduction*/0, 5167 /*timeout*/0, 5168 /*getcount_only*/0); 5169 } 5170 } 5171 5172 } 5173 free(csio->data_ptr, M_SCSIDA); 5174 5175 daprobedone(periph, done_ccb); 5176 return; 5177 } 5178 case DA_CCB_PROBE_ZONE: 5179 { 5180 int error; 5181 5182 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5183 uint32_t valid_len; 5184 size_t needed_len; 5185 struct scsi_vpd_zoned_bdc *zoned_bdc; 5186 5187 error = 0; 5188 zoned_bdc = (struct scsi_vpd_zoned_bdc *) 5189 csio->data_ptr; 5190 valid_len = csio->dxfer_len - csio->resid; 5191 needed_len = __offsetof(struct scsi_vpd_zoned_bdc, 5192 max_seq_req_zones) + 1 + 5193 sizeof(zoned_bdc->max_seq_req_zones); 5194 if ((valid_len >= needed_len) 5195 && (scsi_2btoul(zoned_bdc->page_length) >= 5196 SVPD_ZBDC_PL)) { 5197 if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ) 5198 softc->zone_flags |= 5199 DA_ZONE_FLAG_URSWRZ; 5200 else 5201 softc->zone_flags &= 5202 ~DA_ZONE_FLAG_URSWRZ; 5203 softc->optimal_seq_zones = 5204 scsi_4btoul(zoned_bdc->optimal_seq_zones); 5205 softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; 5206 softc->optimal_nonseq_zones = scsi_4btoul( 5207 zoned_bdc->optimal_nonseq_zones); 
5208 softc->zone_flags |= 5209 DA_ZONE_FLAG_OPT_NONSEQ_SET; 5210 softc->max_seq_zones = 5211 scsi_4btoul(zoned_bdc->max_seq_req_zones); 5212 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; 5213 } 5214 /* 5215 * All of the zone commands are mandatory for SCSI 5216 * devices. 5217 * 5218 * XXX KDM this is valid as of September 2015. 5219 * Re-check this assumption once the SAT spec is 5220 * updated to support SCSI ZBC to ATA ZAC mapping. 5221 * Since ATA allows zone commands to be reported 5222 * as supported or not, this may not necessarily 5223 * be true for an ATA device behind a SAT (SCSI to 5224 * ATA Translation) layer. 5225 */ 5226 softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; 5227 } else { 5228 error = daerror(done_ccb, CAM_RETRY_SELTO, 5229 SF_RETRY_UA|SF_NO_PRINT); 5230 if (error == ERESTART) 5231 return; 5232 else if (error != 0) { 5233 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5234 /* Don't wedge this device's queue */ 5235 cam_release_devq(done_ccb->ccb_h.path, 5236 /*relsim_flags*/0, 5237 /*reduction*/0, 5238 /*timeout*/0, 5239 /*getcount_only*/0); 5240 } 5241 } 5242 } 5243 daprobedone(periph, done_ccb); 5244 return; 5245 } 5246 case DA_CCB_DUMP: 5247 /* No-op. 
We're polling */ 5248 return; 5249 case DA_CCB_TUR: 5250 { 5251 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 5252 5253 if (daerror(done_ccb, CAM_RETRY_SELTO, 5254 SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == 5255 ERESTART) 5256 return; 5257 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 5258 cam_release_devq(done_ccb->ccb_h.path, 5259 /*relsim_flags*/0, 5260 /*reduction*/0, 5261 /*timeout*/0, 5262 /*getcount_only*/0); 5263 } 5264 xpt_release_ccb(done_ccb); 5265 cam_periph_release_locked(periph); 5266 return; 5267 } 5268 default: 5269 break; 5270 } 5271 xpt_release_ccb(done_ccb); 5272 } 5273 5274 static void 5275 dareprobe(struct cam_periph *periph) 5276 { 5277 struct da_softc *softc; 5278 cam_status status; 5279 5280 softc = (struct da_softc *)periph->softc; 5281 5282 /* Probe in progress; don't interfere. */ 5283 if (softc->state != DA_STATE_NORMAL) 5284 return; 5285 5286 status = cam_periph_acquire(periph); 5287 KASSERT(status == CAM_REQ_CMP, 5288 ("dareprobe: cam_periph_acquire failed")); 5289 5290 if (softc->flags & DA_FLAG_CAN_RC16) 5291 softc->state = DA_STATE_PROBE_RC16; 5292 else 5293 softc->state = DA_STATE_PROBE_RC; 5294 5295 xpt_schedule(periph, CAM_PRIORITY_DEV); 5296 } 5297 5298 static int 5299 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 5300 { 5301 struct da_softc *softc; 5302 struct cam_periph *periph; 5303 int error, error_code, sense_key, asc, ascq; 5304 5305 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 5306 if (ccb->csio.bio != NULL) 5307 biotrack(ccb->csio.bio, __func__); 5308 #endif 5309 5310 periph = xpt_path_periph(ccb->ccb_h.path); 5311 softc = (struct da_softc *)periph->softc; 5312 5313 /* 5314 * Automatically detect devices that do not support 5315 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs. 
5316 */ 5317 error = 0; 5318 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { 5319 error = cmd6workaround(ccb); 5320 } else if (scsi_extract_sense_ccb(ccb, 5321 &error_code, &sense_key, &asc, &ascq)) { 5322 if (sense_key == SSD_KEY_ILLEGAL_REQUEST) 5323 error = cmd6workaround(ccb); 5324 /* 5325 * If the target replied with CAPACITY DATA HAS CHANGED UA, 5326 * query the capacity and notify upper layers. 5327 */ 5328 else if (sense_key == SSD_KEY_UNIT_ATTENTION && 5329 asc == 0x2A && ascq == 0x09) { 5330 xpt_print(periph->path, "Capacity data has changed\n"); 5331 softc->flags &= ~DA_FLAG_PROBED; 5332 dareprobe(periph); 5333 sense_flags |= SF_NO_PRINT; 5334 } else if (sense_key == SSD_KEY_UNIT_ATTENTION && 5335 asc == 0x28 && ascq == 0x00) { 5336 softc->flags &= ~DA_FLAG_PROBED; 5337 disk_media_changed(softc->disk, M_NOWAIT); 5338 } else if (sense_key == SSD_KEY_UNIT_ATTENTION && 5339 asc == 0x3F && ascq == 0x03) { 5340 xpt_print(periph->path, "INQUIRY data has changed\n"); 5341 softc->flags &= ~DA_FLAG_PROBED; 5342 dareprobe(periph); 5343 sense_flags |= SF_NO_PRINT; 5344 } else if (sense_key == SSD_KEY_NOT_READY && 5345 asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { 5346 softc->flags |= DA_FLAG_PACK_INVALID; 5347 disk_media_gone(softc->disk, M_NOWAIT); 5348 } 5349 } 5350 if (error == ERESTART) 5351 return (ERESTART); 5352 5353 #ifdef CAM_IO_STATS 5354 switch (ccb->ccb_h.status & CAM_STATUS_MASK) { 5355 case CAM_CMD_TIMEOUT: 5356 softc->timeouts++; 5357 break; 5358 case CAM_REQ_ABORTED: 5359 case CAM_REQ_CMP_ERR: 5360 case CAM_REQ_TERMIO: 5361 case CAM_UNREC_HBA_ERROR: 5362 case CAM_DATA_RUN_ERR: 5363 softc->errors++; 5364 break; 5365 default: 5366 break; 5367 } 5368 #endif 5369 5370 /* 5371 * XXX 5372 * Until we have a better way of doing pack validation, 5373 * don't treat UAs as errors. 
5374 */ 5375 sense_flags |= SF_RETRY_UA; 5376 5377 if (softc->quirks & DA_Q_RETRY_BUSY) 5378 sense_flags |= SF_RETRY_BUSY; 5379 return(cam_periph_error(ccb, cam_flags, sense_flags, 5380 &softc->saved_ccb)); 5381 } 5382 5383 static void 5384 damediapoll(void *arg) 5385 { 5386 struct cam_periph *periph = arg; 5387 struct da_softc *softc = periph->softc; 5388 5389 if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && 5390 LIST_EMPTY(&softc->pending_ccbs)) { 5391 if (cam_periph_acquire(periph) == CAM_REQ_CMP) { 5392 cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); 5393 daschedule(periph); 5394 } 5395 } 5396 /* Queue us up again */ 5397 if (da_poll_period != 0) 5398 callout_schedule(&softc->mediapoll_c, da_poll_period * hz); 5399 } 5400 5401 static void 5402 daprevent(struct cam_periph *periph, int action) 5403 { 5404 struct da_softc *softc; 5405 union ccb *ccb; 5406 int error; 5407 5408 softc = (struct da_softc *)periph->softc; 5409 5410 if (((action == PR_ALLOW) 5411 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) 5412 || ((action == PR_PREVENT) 5413 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { 5414 return; 5415 } 5416 5417 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 5418 5419 scsi_prevent(&ccb->csio, 5420 /*retries*/1, 5421 /*cbcfp*/dadone, 5422 MSG_SIMPLE_Q_TAG, 5423 action, 5424 SSD_FULL_SIZE, 5425 5000); 5426 5427 error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO, 5428 SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat); 5429 5430 if (error == 0) { 5431 if (action == PR_ALLOW) 5432 softc->flags &= ~DA_FLAG_PACK_LOCKED; 5433 else 5434 softc->flags |= DA_FLAG_PACK_LOCKED; 5435 } 5436 5437 xpt_release_ccb(ccb); 5438 } 5439 5440 static void 5441 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, 5442 struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len) 5443 { 5444 struct ccb_calc_geometry ccg; 5445 struct da_softc *softc; 5446 struct disk_params *dp; 5447 u_int lbppbe, lalba; 5448 int 
error; 5449 5450 softc = (struct da_softc *)periph->softc; 5451 5452 dp = &softc->params; 5453 dp->secsize = block_len; 5454 dp->sectors = maxsector + 1; 5455 if (rcaplong != NULL) { 5456 lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; 5457 lalba = scsi_2btoul(rcaplong->lalba_lbp); 5458 lalba &= SRC16_LALBA_A; 5459 } else { 5460 lbppbe = 0; 5461 lalba = 0; 5462 } 5463 5464 if (lbppbe > 0) { 5465 dp->stripesize = block_len << lbppbe; 5466 dp->stripeoffset = (dp->stripesize - block_len * lalba) % 5467 dp->stripesize; 5468 } else if (softc->quirks & DA_Q_4K) { 5469 dp->stripesize = 4096; 5470 dp->stripeoffset = 0; 5471 } else if (softc->unmap_gran != 0) { 5472 dp->stripesize = block_len * softc->unmap_gran; 5473 dp->stripeoffset = (dp->stripesize - block_len * 5474 softc->unmap_gran_align) % dp->stripesize; 5475 } else { 5476 dp->stripesize = 0; 5477 dp->stripeoffset = 0; 5478 } 5479 /* 5480 * Have the controller provide us with a geometry 5481 * for this disk. The only time the geometry 5482 * matters is when we boot and the controller 5483 * is the only one knowledgeable enough to come 5484 * up with something that will make this a bootable 5485 * device. 5486 */ 5487 xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 5488 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; 5489 ccg.block_size = dp->secsize; 5490 ccg.volume_size = dp->sectors; 5491 ccg.heads = 0; 5492 ccg.secs_per_track = 0; 5493 ccg.cylinders = 0; 5494 xpt_action((union ccb*)&ccg); 5495 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 5496 /* 5497 * We don't know what went wrong here- but just pick 5498 * a geometry so we don't have nasty things like divide 5499 * by zero. 
5500 */ 5501 dp->heads = 255; 5502 dp->secs_per_track = 255; 5503 dp->cylinders = dp->sectors / (255 * 255); 5504 if (dp->cylinders == 0) { 5505 dp->cylinders = 1; 5506 } 5507 } else { 5508 dp->heads = ccg.heads; 5509 dp->secs_per_track = ccg.secs_per_track; 5510 dp->cylinders = ccg.cylinders; 5511 } 5512 5513 /* 5514 * If the user supplied a read capacity buffer, and if it is 5515 * different than the previous buffer, update the data in the EDT. 5516 * If it's the same, we don't bother. This avoids sending an 5517 * update every time someone opens this device. 5518 */ 5519 if ((rcaplong != NULL) 5520 && (bcmp(rcaplong, &softc->rcaplong, 5521 min(sizeof(softc->rcaplong), rcap_len)) != 0)) { 5522 struct ccb_dev_advinfo cdai; 5523 5524 xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 5525 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 5526 cdai.buftype = CDAI_TYPE_RCAPLONG; 5527 cdai.flags = CDAI_FLAG_STORE; 5528 cdai.bufsiz = rcap_len; 5529 cdai.buf = (uint8_t *)rcaplong; 5530 xpt_action((union ccb *)&cdai); 5531 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 5532 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 5533 if (cdai.ccb_h.status != CAM_REQ_CMP) { 5534 xpt_print(periph->path, "%s: failed to set read " 5535 "capacity advinfo\n", __func__); 5536 /* Use cam_error_print() to decode the status */ 5537 cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS, 5538 CAM_EPF_ALL); 5539 } else { 5540 bcopy(rcaplong, &softc->rcaplong, 5541 min(sizeof(softc->rcaplong), rcap_len)); 5542 } 5543 } 5544 5545 softc->disk->d_sectorsize = softc->params.secsize; 5546 softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors; 5547 softc->disk->d_stripesize = softc->params.stripesize; 5548 softc->disk->d_stripeoffset = softc->params.stripeoffset; 5549 /* XXX: these are not actually "firmware" values, so they may be wrong */ 5550 softc->disk->d_fwsectors = softc->params.secs_per_track; 5551 softc->disk->d_fwheads = softc->params.heads; 5552 
	softc->disk->d_devstat->block_size = softc->params.secsize;
	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;

	/* Tell GEOM the media may have changed size. */
	error = disk_resize(softc->disk, M_NOWAIT);
	if (error != 0)
		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
}

/*
 * Periodic callout: if ordered-tag sending is enabled and there are CCBs
 * pending on the device, request that an ordered tag accompany the next
 * transaction unless one was already sent during the previous interval
 * (DA_FLAG_WAS_OTAG).  Always re-arms itself.
 */
static void
dasendorderedtag(void *arg)
{
	struct da_softc *softc = arg;

	if (da_send_ordered) {
		if (!LIST_EMPTY(&softc->pending_ccbs)) {
			if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
				softc->flags |= DA_FLAG_NEED_OTAG;
			softc->flags &= ~DA_FLAG_WAS_OTAG;
		}
	}
	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, softc);
}

/*
 * Step through all DA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
dashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	CAM_PERIPH_FOREACH(periph, &dadriver) {
		softc = (struct da_softc *)periph->softc;
		if (SCHEDULER_STOPPED()) {
			/* If we paniced with the lock held, do not recurse. */
			if (!cam_periph_owned(periph) &&
			    (softc->flags & DA_FLAG_OPEN)) {
				dadump(softc->disk, NULL, 0, 0, 0);
			}
			continue;
		}
		cam_periph_lock(periph);

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0)
		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
			cam_periph_unlock(periph);
			continue;
		}

		/* SYNCHRONIZE CACHE over the whole disk; one hour timeout. */
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		scsi_synchronize_cache(&ccb->csio,
				       /*retries*/0,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       60 * 60 * 1000);

		/* Run synchronously; no recovery/retries during shutdown. */
		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
		    softc->disk->d_devstat);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
		xpt_release_ccb(ccb);
		cam_periph_unlock(periph);
	}
}

#else /* !_KERNEL */

/*
 * XXX These are only left out of the kernel build to silence warnings.  If,
 * for some reason these functions are used in the kernel, the ifdefs should
 * be moved so they are included both in the kernel and userland.
 */
/*
 * Build a FORMAT UNIT CDB in the CCB; data is sent out only when a
 * parameter list (dxfer_len > 0) is supplied.
 */
void
scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		 u_int32_t timeout)
{
	struct scsi_format_unit *scsi_cmd;

	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = FORMAT_UNIT;
	scsi_cmd->byte2 = byte2;
	scsi_ulto2b(ileave, scsi_cmd->interleave);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ?
				  CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

/*
 * Build a READ DEFECT DATA command.  The 10 byte CDB is used when it can
 * express the request (caller accepts a 10 byte CDB, no address descriptor
 * index, and the transfer length fits SRDD10_MAX_LENGTH); otherwise the
 * 12 byte CDB is used.
 */
void
scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  uint8_t tag_action, uint8_t list_format,
		  uint32_t addr_desc_index, uint8_t *data_ptr,
		  uint32_t dxfer_len, int minimum_cmd_size,
		  uint8_t sense_len, uint32_t timeout)
{
	uint8_t cdb_len;

	/*
	 * These conditions allow using the 10 byte command.  Otherwise we
	 * need to use the 12 byte command.
	 */
	if ((minimum_cmd_size <= 10)
	 && (addr_desc_index == 0)
	 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
		struct scsi_read_defect_data_10 *cdb10;

		cdb10 = (struct scsi_read_defect_data_10 *)
			&csio->cdb_io.cdb_bytes;

		cdb_len = sizeof(*cdb10);
		bzero(cdb10, cdb_len);
		cdb10->opcode = READ_DEFECT_DATA_10;
		cdb10->format = list_format;
		scsi_ulto2b(dxfer_len, cdb10->alloc_length);
	} else {
		struct scsi_read_defect_data_12 *cdb12;

		cdb12 = (struct scsi_read_defect_data_12 *)
			&csio->cdb_io.cdb_bytes;

		cdb_len = sizeof(*cdb12);
		bzero(cdb12, cdb_len);
		cdb12->opcode = READ_DEFECT_DATA_12;
		cdb12->format = list_format;
		scsi_ulto4b(dxfer_len, cdb12->alloc_length);
		scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
	}

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ CAM_DIR_IN,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      cdb_len,
		      timeout);
}

/*
 * Build a SANITIZE CDB; data is sent out only when a parameter list
 * (dxfer_len > 0) is supplied.
 */
void
scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
	      u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
	      u_int32_t timeout)
{
	struct scsi_sanitize *scsi_cmd;

	scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = SANITIZE;
	scsi_cmd->byte2 = byte2;
	scsi_cmd->control = control;
	scsi_ulto2b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

#endif /* _KERNEL */

/*
 * Build a ZBC OUT CDB; the caller supplies the service action, zone id and
 * zone flags.  Data is sent out only when dxfer_len > 0.
 */
void
scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
	     void (*cbfcnp)(struct cam_periph *, union ccb *),
	     uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
	     uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
	     uint8_t sense_len, uint32_t timeout)
{
	struct scsi_zbc_out *scsi_cmd;

	scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = ZBC_OUT;
	scsi_cmd->service_action = service_action;
	scsi_u64to8b(zone_id, scsi_cmd->zone_id);
	scsi_cmd->zone_flags = zone_flags;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

/*
 * Build a ZBC IN CDB; the caller supplies the service action, starting zone
 * LBA and zone options.  Data is read in only when dxfer_len > 0.
 */
void
scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
	    void (*cbfcnp)(struct cam_periph *, union ccb *),
	    uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
	    uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
	    uint8_t sense_len, uint32_t timeout)
{
	struct scsi_zbc_in *scsi_cmd;

	scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = ZBC_IN;
	scsi_cmd->service_action = service_action;
	scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
	scsi_cmd->zone_options = zone_options;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ?
				  CAM_DIR_IN : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);

}

/*
 * Build an ATA ZAC MANAGEMENT OUT command wrapped in a SCSI ATA PASS-THROUGH
 * CDB via scsi_ata_pass().  With use_ncq == 0 a plain DMA (or non-data)
 * command is built; otherwise the NCQ encoding (NCQ NON DATA or SEND FPDMA
 * QUEUED) is used.  Returns 0 on success, or 1 when the requested transfer
 * length cannot be encoded.
 */
int
scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
		      void (*cbfcnp)(struct cam_periph *, union ccb *),
		      uint8_t tag_action, int use_ncq,
		      uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
		      uint8_t *data_ptr, uint32_t dxfer_len,
		      uint8_t *cdb_storage, size_t cdb_storage_len,
		      uint8_t sense_len, uint32_t timeout)
{
	uint8_t command_out, protocol, ata_flags;
	uint16_t features_out;
	uint32_t sectors_out, auxiliary;
	int retval;

	retval = 0;

	if (use_ncq == 0) {
		/* Non-NCQ: zone action and flags travel in FEATURES. */
		command_out = ATA_ZAC_MANAGEMENT_OUT;
		features_out = (zm_action & 0xf) | (zone_flags << 8);
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			protocol = AP_PROTO_NON_DATA;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
			sectors_out = 0;
		} else {
			protocol = AP_PROTO_DMA;
			ata_flags |= AP_FLAG_TLEN_SECT_CNT |
				     AP_FLAG_TDIR_TO_DEV;
			/* Transfer length in 512 byte blocks. */
			sectors_out = ((dxfer_len >> 9) & 0xffff);
		}
		auxiliary = 0;
	} else {
		/* NCQ: zone action and flags travel in AUXILIARY. */
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			command_out = ATA_NCQ_NON_DATA;
			features_out = ATA_NCQ_ZAC_MGMT_OUT;
			/*
			 * We're assuming the SCSI to ATA translation layer
			 * will set the NCQ tag number in the tag field.
			 * That isn't clear from the SAT-4 spec (as of rev 05).
			 */
			sectors_out = 0;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
		} else {
			command_out = ATA_SEND_FPDMA_QUEUED;
			/*
			 * Note that we're defaulting to normal priority,
			 * and assuming that the SCSI to ATA translation
			 * layer will insert the NCQ tag number in the tag
			 * field.  That isn't clear in the SAT-4 spec (as
			 * of rev 05).
			 */
			sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;

			ata_flags |= AP_FLAG_TLEN_FEAT |
				     AP_FLAG_TDIR_TO_DEV;

			/*
			 * For SEND FPDMA QUEUED, the transfer length is
			 * encoded in the FEATURE register, and 0 means
			 * that 65536 512 byte blocks are to be tranferred.
			 * In practice, it seems unlikely that we'll see
			 * a transfer that large, and it may confuse the
			 * the SAT layer, because generally that means that
			 * 0 bytes should be transferred.
			 */
			if (dxfer_len == (65536 * 512)) {
				features_out = 0;
			} else if (dxfer_len <= (65535 * 512)) {
				features_out = ((dxfer_len >> 9) & 0xffff);
			} else {
				/* The transfer is too big. */
				retval = 1;
				goto bailout;
			}

		}

		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
		protocol = AP_PROTO_FPDMA;
	}

	protocol |= AP_EXTEND;

	retval = scsi_ata_pass(csio,
	    retries,
	    cbfcnp,
	    /*flags*/ (dxfer_len > 0) ?
CAM_DIR_OUT : CAM_DIR_NONE, 5890 tag_action, 5891 /*protocol*/ protocol, 5892 /*ata_flags*/ ata_flags, 5893 /*features*/ features_out, 5894 /*sector_count*/ sectors_out, 5895 /*lba*/ zone_id, 5896 /*command*/ command_out, 5897 /*device*/ 0, 5898 /*icc*/ 0, 5899 /*auxiliary*/ auxiliary, 5900 /*control*/ 0, 5901 /*data_ptr*/ data_ptr, 5902 /*dxfer_len*/ dxfer_len, 5903 /*cdb_storage*/ cdb_storage, 5904 /*cdb_storage_len*/ cdb_storage_len, 5905 /*minimum_cmd_size*/ 0, 5906 /*sense_len*/ SSD_FULL_SIZE, 5907 /*timeout*/ timeout); 5908 5909 bailout: 5910 5911 return (retval); 5912 } 5913 5914 int 5915 scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, 5916 void (*cbfcnp)(struct cam_periph *, union ccb *), 5917 uint8_t tag_action, int use_ncq, 5918 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, 5919 uint8_t *data_ptr, uint32_t dxfer_len, 5920 uint8_t *cdb_storage, size_t cdb_storage_len, 5921 uint8_t sense_len, uint32_t timeout) 5922 { 5923 uint8_t command_out, protocol; 5924 uint16_t features_out, sectors_out; 5925 uint32_t auxiliary; 5926 int ata_flags; 5927 int retval; 5928 5929 retval = 0; 5930 ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS; 5931 5932 if (use_ncq == 0) { 5933 command_out = ATA_ZAC_MANAGEMENT_IN; 5934 /* XXX KDM put a macro here */ 5935 features_out = (zm_action & 0xf) | (zone_flags << 8); 5936 sectors_out = dxfer_len >> 9; /* XXX KDM macro */ 5937 protocol = AP_PROTO_DMA; 5938 ata_flags |= AP_FLAG_TLEN_SECT_CNT; 5939 auxiliary = 0; 5940 } else { 5941 ata_flags |= AP_FLAG_TLEN_FEAT; 5942 5943 command_out = ATA_RECV_FPDMA_QUEUED; 5944 sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; 5945 5946 /* 5947 * For RECEIVE FPDMA QUEUED, the transfer length is 5948 * encoded in the FEATURE register, and 0 means 5949 * that 65536 512 byte blocks are to be tranferred. 
5950 * In practice, it seems unlikely that we'll see 5951 * a transfer that large, and it may confuse the 5952 * the SAT layer, because generally that means that 5953 * 0 bytes should be transferred. 5954 */ 5955 if (dxfer_len == (65536 * 512)) { 5956 features_out = 0; 5957 } else if (dxfer_len <= (65535 * 512)) { 5958 features_out = ((dxfer_len >> 9) & 0xffff); 5959 } else { 5960 /* The transfer is too big. */ 5961 retval = 1; 5962 goto bailout; 5963 } 5964 auxiliary = (zm_action & 0xf) | (zone_flags << 8), 5965 protocol = AP_PROTO_FPDMA; 5966 } 5967 5968 protocol |= AP_EXTEND; 5969 5970 retval = scsi_ata_pass(csio, 5971 retries, 5972 cbfcnp, 5973 /*flags*/ CAM_DIR_IN, 5974 tag_action, 5975 /*protocol*/ protocol, 5976 /*ata_flags*/ ata_flags, 5977 /*features*/ features_out, 5978 /*sector_count*/ sectors_out, 5979 /*lba*/ zone_id, 5980 /*command*/ command_out, 5981 /*device*/ 0, 5982 /*icc*/ 0, 5983 /*auxiliary*/ auxiliary, 5984 /*control*/ 0, 5985 /*data_ptr*/ data_ptr, 5986 /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */ 5987 /*cdb_storage*/ cdb_storage, 5988 /*cdb_storage_len*/ cdb_storage_len, 5989 /*minimum_cmd_size*/ 0, 5990 /*sense_len*/ SSD_FULL_SIZE, 5991 /*timeout*/ timeout); 5992 5993 bailout: 5994 return (retval); 5995 } 5996