1 /*- 2 * Implementation of SCSI Direct Access Peripheral driver for CAM. 3 * 4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 5 * 6 * Copyright (c) 1997 Justin T. Gibbs. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification, immediately at the beginning of the file. 15 * 2. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

#ifdef _KERNEL
#include "opt_da.h"
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <machine/atomic.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#ifdef _KERNEL
#include <cam/cam_xpt_internal.h>
#endif /* _KERNEL */
#include <cam/cam_sim.h>
#include <cam/cam_iosched.h>

#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>

#ifdef _KERNEL
/*
 * Note that there are probe ordering dependencies here.  The order isn't
 * controlled by this enumeration, but by explicit state transitions in
 * dastart() and dadone().  Here are some of the dependencies:
 *
 * 1. RC should come first, before RC16, unless there is evidence that RC16
 *    is supported.
 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
 * 3. The ATA probes should go in this order:
 *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
 */
typedef enum {
	DA_STATE_PROBE_WP,
	DA_STATE_PROBE_RC,
	DA_STATE_PROBE_RC16,
	DA_STATE_PROBE_LBP,
	DA_STATE_PROBE_BLK_LIMITS,
	DA_STATE_PROBE_BDC,
	DA_STATE_PROBE_ATA,
	DA_STATE_PROBE_ATA_LOGDIR,
	DA_STATE_PROBE_ATA_IDDIR,
	DA_STATE_PROBE_ATA_SUP,
	DA_STATE_PROBE_ATA_ZONE,
	DA_STATE_PROBE_ZONE,
	DA_STATE_NORMAL		/* Probing complete; normal I/O allowed */
} da_state;

/*
 * Per-device state flags (the 'flags' field of struct da_softc).
 * Note the gap at 0x000010, preserved to keep existing bit values stable.
 */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x000001,
	DA_FLAG_NEW_PACK	= 0x000002,
	DA_FLAG_PACK_LOCKED	= 0x000004,
	DA_FLAG_PACK_REMOVABLE	= 0x000008,
	DA_FLAG_NEED_OTAG	= 0x000020,
	DA_FLAG_WAS_OTAG	= 0x000040,
	DA_FLAG_RETRY_UA	= 0x000080,
	DA_FLAG_OPEN		= 0x000100,
	DA_FLAG_SCTX_INIT	= 0x000200,
	DA_FLAG_CAN_RC16	= 0x000400,
	DA_FLAG_PROBED		= 0x000800,
	DA_FLAG_DIRTY		= 0x001000,
	DA_FLAG_ANNOUNCED	= 0x002000,
	DA_FLAG_CAN_ATA_DMA	= 0x004000,
	DA_FLAG_CAN_ATA_LOG	= 0x008000,
	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,
	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,
	DA_FLAG_CAN_ATA_ZONE	= 0x040000,
	DA_FLAG_TUR_PENDING	= 0x080000
} da_flags;

/*
 * Device quirk flags, matched against inquiry data via da_quirk_table
 * entries (see struct da_quirk_entry below).
 */
typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02,
	DA_Q_NO_PREVENT		= 0x04,
	DA_Q_4K			= 0x08,
	DA_Q_NO_RC16		= 0x10,
	DA_Q_NO_UNMAP		= 0x20,
	DA_Q_RETRY_BUSY		= 0x40,
	DA_Q_SMR_DM		= 0x80,
	DA_Q_STRICT_UNMAP	= 0x100,
	DA_Q_128KB		= 0x200
} da_quirks;

/* Bit-name decode string for da_quirks (printf(9) "%b" style). */
#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"	\
	"\012128KB"

/*
 * CCB state, kept in the CCB's private area (see the ccb_state define
 * below).  The low bits (DA_CCB_TYPE_MASK) identify the request type;
 * DA_CCB_RETRY_UA is or'ed in as a flag.  0x08/0x09 are unused.
 */
typedef enum {
	DA_CCB_PROBE_RC		= 0x01,
	DA_CCB_PROBE_RC16	= 0x02,
	DA_CCB_PROBE_LBP	= 0x03,
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
	DA_CCB_PROBE_BDC	= 0x05,
	DA_CCB_PROBE_ATA	= 0x06,
	DA_CCB_BUFFER_IO	= 0x07,
	DA_CCB_DUMP		= 0x0A,
	DA_CCB_DELETE		= 0x0B,
	DA_CCB_TUR		= 0x0C,
	DA_CCB_PROBE_ZONE	= 0x0D,
	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
	DA_CCB_PROBE_ATA_SUP	= 0x10,
	DA_CCB_PROBE_ATA_ZONE	= 0x11,
	DA_CCB_PROBE_WP		= 0x12,
	DA_CCB_TYPE_MASK	= 0x1F,
	DA_CCB_RETRY_UA		= 0x20
} da_ccb_state;

/*
 * Order here is important for method choice
 *
 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes
 * using ATA_TRIM than the corresponding UNMAP results for a real world mysql
 * import taking 5mins.
 *
 */
typedef enum {
	DA_DELETE_NONE,
	DA_DELETE_DISABLE,
	DA_DELETE_ATA_TRIM,
	DA_DELETE_UNMAP,
	DA_DELETE_WS16,
	DA_DELETE_WS10,
	DA_DELETE_ZERO,
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
	DA_DELETE_MAX = DA_DELETE_ZERO
} da_delete_methods;

/*
 * For SCSI, host managed drives show up as a separate device type.  For
 * ATA, host managed drives also have a different device signature.
 * XXX KDM figure out the ATA host managed signature.
 */
typedef enum {
	DA_ZONE_NONE		= 0x00,
	DA_ZONE_DRIVE_MANAGED	= 0x01,
	DA_ZONE_HOST_AWARE	= 0x02,
	DA_ZONE_HOST_MANAGED	= 0x03
} da_zone_mode;

/*
 * We distinguish between these interface cases in addition to the drive type:
 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
 * o ATA drive behind a SCSI translation layer that does not know about
 *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
 *   case, we would need to share the ATA code with the ada(4) driver.
 * o SCSI drive.
 */
typedef enum {
	DA_ZONE_IF_SCSI,
	DA_ZONE_IF_ATA_PASS,
	DA_ZONE_IF_ATA_SAT,
} da_zone_interface;

/* Zone capabilities reported by the drive and zone parameters we have set. */
typedef enum {
	DA_ZONE_FLAG_RZ_SUP		= 0x0001,
	DA_ZONE_FLAG_OPEN_SUP		= 0x0002,
	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004,
	DA_ZONE_FLAG_FINISH_SUP		= 0x0008,
	DA_ZONE_FLAG_RWP_SUP		= 0x0010,
	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
					   DA_ZONE_FLAG_OPEN_SUP |
					   DA_ZONE_FLAG_CLOSE_SUP |
					   DA_ZONE_FLAG_FINISH_SUP |
					   DA_ZONE_FLAG_RWP_SUP),
	DA_ZONE_FLAG_URSWRZ		= 0x0020,
	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040,
	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080,
	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100,
	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
					   DA_ZONE_FLAG_MAX_SEQ_SET)
} da_zone_flags;

/* Human-readable names for the zone capability flags above. */
static struct da_zone_desc {
	da_zone_flags value;
	const char *desc;
} da_zone_desc_table[] = {
	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
};

typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
			       struct bio *bp);
static da_delete_func_t da_delete_trim;
static da_delete_func_t da_delete_unmap;
static da_delete_func_t da_delete_ws;

/*
 * Delete-method dispatch table, indexed by da_delete_methods; NULL for
 * NONE and DISABLE.  Both WS16, WS10 and ZERO share da_delete_ws.
 */
static const void * da_delete_functions[] = {
	NULL,
	NULL,
	da_delete_trim,
	da_delete_unmap,
	da_delete_ws,
	da_delete_ws,
	da_delete_ws
};

/* Parallel name/description tables, also indexed by da_delete_methods. */
static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int32_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number sectors */
	u_int     stripesize;
	u_int     stripeoffset;
};

#define UNMAP_RANGE_MAX		0xffffffff
#define UNMAP_HEAD_SIZE		8
#define UNMAP_RANGE_SIZE	16
#define UNMAP_MAX_RANGES	2048	/* Protocol Max is 4095 */
#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
				 UNMAP_HEAD_SIZE)

#define WS10_MAX_BLKS		0xffff
#define WS16_MAX_BLKS		0xffffffff
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)

#define DA_WORK_TUR		(1 << 16)

/*
 * Classes of periph references, used as indices into the ref_flags[]
 * array of struct da_softc.  Values start at 1; DA_REF_MAX sizes the
 * array and must stay last.
 */
typedef enum {
	DA_REF_OPEN = 1,
	DA_REF_OPEN_HOLD,
	DA_REF_CLOSE_HOLD,
	DA_REF_PROBE_HOLD,
	DA_REF_TUR,
	DA_REF_GEOM,
	DA_REF_SYSCTL,
	DA_REF_REPROBE,
	DA_REF_MAX		/* KEEP LAST */
} da_ref_token;

/* Per-instance state for a da peripheral. */
struct da_softc {
	struct cam_iosched_softc *cam_iosched;
	struct bio_queue_head	delete_run_queue;
	LIST_HEAD(, ccb_hdr)	pending_ccbs;
	int			refcount; /* Active xpt_action() calls */
	da_state		state;
	da_flags		flags;
	da_quirks		quirks;
	int			minimum_cmd_size;
	int			error_inject;
	int			trim_max_ranges;
	int			delete_available; /* Delete methods possibly available */
	da_zone_mode		zone_mode;
	da_zone_interface	zone_interface;
	da_zone_flags		zone_flags;
	struct ata_gp_log_dir	ata_logdir;
	int			valid_logdir_len;
	struct ata_identify_log_pages ata_iddir;
	int			valid_iddir_len;
	uint64_t		optimal_seq_zones;
	uint64_t		optimal_nonseq_zones;
	uint64_t		max_seq_zones;
	u_int			maxio;
	uint32_t		unmap_max_ranges;
	uint32_t		unmap_max_lba; /* Max LBAs in UNMAP req */
	uint32_t		unmap_gran;
	uint32_t		unmap_gran_align;
	uint64_t		ws_max_blks;
	uint64_t		trim_count;
	uint64_t		trim_ranges;
	uint64_t		trim_lbas;
	da_delete_methods	delete_method_pref;
	da_delete_methods	delete_method;
	da_delete_func_t	*delete_func;
	int			unmappedio;
	int			rotating;
	int			p_type;
	struct disk_params	params;
	struct disk		*disk;
	union ccb		saved_ccb;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;
	uint64_t		wwpn;
	uint8_t			unmap_buf[UNMAP_BUF_SIZE];
	struct scsi_read_capacity_data_long rcaplong;
	struct callout		mediapoll_c;
	int			ref_flags[DA_REF_MAX];
#ifdef CAM_IO_STATS
	struct sysctl_ctx_list	sysctl_stats_ctx;
	struct sysctl_oid	*sysctl_stats_tree;
	u_int			errors;
	u_int			timeouts;
	u_int			invalidations;
#endif
#define DA_ANNOUNCETMP_SZ 160
	char			announce_temp[DA_ANNOUNCETMP_SZ];
#define DA_ANNOUNCE_SZ 400
	char			announcebuf[DA_ANNOUNCE_SZ];
};

/*
 * Set or clear a delete-method bit in softc->delete_available.
 * NOTE(review): expands to a bare if/else (no do { } while (0) wrapper),
 * so callers must beware of dangling-else contexts.
 */
#define dadeleteflag(softc, delete_method, enable)			\
	if (enable) {							\
		softc->delete_available |= (1 << delete_method);	\
	} else {							\
		softc->delete_available &= ~(1 << delete_method);	\
	}

/* Inquiry pattern -> quirk flags mapping; see da_quirk_table below. */
struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	da_quirks quirks;
};

static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";

static struct da_quirk_entry da_quirk_table[] =
{
	/* SPI, FC devices */
	{
		/*
		 * Fujitsu M2513A MO drives.
		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
		 * Reported by: W.Scholten <whs@xs4all.nl>
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/* See above. */
		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This particular Fujitsu drive doesn't like the
		 * synchronize cache command.
413 * Reported by: Tom Jackson <toj@gorilla.net> 414 */ 415 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, 416 /*quirks*/ DA_Q_NO_SYNC_CACHE 417 }, 418 { 419 /* 420 * This drive doesn't like the synchronize cache command 421 * either. Reported by: Matthew Jacob <mjacob@feral.com> 422 * in NetBSD PR kern/6027, August 24, 1998. 423 */ 424 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, 425 /*quirks*/ DA_Q_NO_SYNC_CACHE 426 }, 427 { 428 /* 429 * This drive doesn't like the synchronize cache command 430 * either. Reported by: Hellmuth Michaelis (hm@kts.org) 431 * (PR 8882). 432 */ 433 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, 434 /*quirks*/ DA_Q_NO_SYNC_CACHE 435 }, 436 { 437 /* 438 * Doesn't like the synchronize cache command. 439 * Reported by: Blaz Zupan <blaz@gold.amis.net> 440 */ 441 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, 442 /*quirks*/ DA_Q_NO_SYNC_CACHE 443 }, 444 { 445 /* 446 * Doesn't like the synchronize cache command. 447 * Reported by: Blaz Zupan <blaz@gold.amis.net> 448 */ 449 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, 450 /*quirks*/ DA_Q_NO_SYNC_CACHE 451 }, 452 { 453 /* 454 * Doesn't like the synchronize cache command. 455 */ 456 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, 457 /*quirks*/ DA_Q_NO_SYNC_CACHE 458 }, 459 { 460 /* 461 * Doesn't like the synchronize cache command. 462 * Reported by: walter@pelissero.de 463 */ 464 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, 465 /*quirks*/ DA_Q_NO_SYNC_CACHE 466 }, 467 { 468 /* 469 * Doesn't work correctly with 6 byte reads/writes. 470 * Returns illegal request, and points to byte 9 of the 471 * 6-byte CDB. 472 * Reported by: Adam McDougall <bsdx@spawnet.com> 473 */ 474 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, 475 /*quirks*/ DA_Q_NO_6_BYTE 476 }, 477 { 478 /* See above. */ 479 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, 480 /*quirks*/ DA_Q_NO_6_BYTE 481 }, 482 { 483 /* 484 * Doesn't like the synchronize cache command. 
485 * Reported by: walter@pelissero.de 486 */ 487 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, 488 /*quirks*/ DA_Q_NO_SYNC_CACHE 489 }, 490 { 491 /* 492 * The CISS RAID controllers do not support SYNC_CACHE 493 */ 494 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, 495 /*quirks*/ DA_Q_NO_SYNC_CACHE 496 }, 497 { 498 /* 499 * The STEC SSDs sometimes hang on UNMAP. 500 */ 501 {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, 502 /*quirks*/ DA_Q_NO_UNMAP 503 }, 504 { 505 /* 506 * VMware returns BUSY status when storage has transient 507 * connectivity problems, so better wait. 508 * Also VMware returns odd errors on misaligned UNMAPs. 509 */ 510 {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, 511 /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP 512 }, 513 /* USB mass storage devices supported by umass(4) */ 514 { 515 /* 516 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player 517 * PR: kern/51675 518 */ 519 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, 520 /*quirks*/ DA_Q_NO_SYNC_CACHE 521 }, 522 { 523 /* 524 * Power Quotient Int. 
(PQI) USB flash key 525 * PR: kern/53067 526 */ 527 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", 528 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 529 }, 530 { 531 /* 532 * Creative Nomad MUVO mp3 player (USB) 533 * PR: kern/53094 534 */ 535 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, 536 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 537 }, 538 { 539 /* 540 * Jungsoft NEXDISK USB flash key 541 * PR: kern/54737 542 */ 543 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, 544 /*quirks*/ DA_Q_NO_SYNC_CACHE 545 }, 546 { 547 /* 548 * FreeDik USB Mini Data Drive 549 * PR: kern/54786 550 */ 551 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", 552 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 553 }, 554 { 555 /* 556 * Sigmatel USB Flash MP3 Player 557 * PR: kern/57046 558 */ 559 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, 560 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 561 }, 562 { 563 /* 564 * Neuros USB Digital Audio Computer 565 * PR: kern/63645 566 */ 567 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. 
audio comp.", 568 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 569 }, 570 { 571 /* 572 * SEAGRAND NP-900 MP3 Player 573 * PR: kern/64563 574 */ 575 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, 576 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 577 }, 578 { 579 /* 580 * iRiver iFP MP3 player (with UMS Firmware) 581 * PR: kern/54881, i386/63941, kern/66124 582 */ 583 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, 584 /*quirks*/ DA_Q_NO_SYNC_CACHE 585 }, 586 { 587 /* 588 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 589 * PR: kern/70158 590 */ 591 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, 592 /*quirks*/ DA_Q_NO_SYNC_CACHE 593 }, 594 { 595 /* 596 * ZICPlay USB MP3 Player with FM 597 * PR: kern/75057 598 */ 599 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, 600 /*quirks*/ DA_Q_NO_SYNC_CACHE 601 }, 602 { 603 /* 604 * TEAC USB floppy mechanisms 605 */ 606 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, 607 /*quirks*/ DA_Q_NO_SYNC_CACHE 608 }, 609 { 610 /* 611 * Kingston DataTraveler II+ USB Pen-Drive. 612 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org> 613 */ 614 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", 615 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 616 }, 617 { 618 /* 619 * USB DISK Pro PMAP 620 * Reported by: jhs 621 * PR: usb/96381 622 */ 623 {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, 624 /*quirks*/ DA_Q_NO_SYNC_CACHE 625 }, 626 { 627 /* 628 * Motorola E398 Mobile Phone (TransFlash memory card). 629 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl> 630 * PR: usb/89889 631 */ 632 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", 633 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 634 }, 635 { 636 /* 637 * Qware BeatZkey! 
Pro 638 * PR: usb/79164 639 */ 640 {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", 641 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 642 }, 643 { 644 /* 645 * Time DPA20B 1GB MP3 Player 646 * PR: usb/81846 647 */ 648 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", 649 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 650 }, 651 { 652 /* 653 * Samsung USB key 128Mb 654 * PR: usb/90081 655 */ 656 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", 657 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 658 }, 659 { 660 /* 661 * Kingston DataTraveler 2.0 USB Flash memory. 662 * PR: usb/89196 663 */ 664 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", 665 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 666 }, 667 { 668 /* 669 * Creative MUVO Slim mp3 player (USB) 670 * PR: usb/86131 671 */ 672 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", 673 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 674 }, 675 { 676 /* 677 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) 678 * PR: usb/80487 679 */ 680 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", 681 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 682 }, 683 { 684 /* 685 * SanDisk Micro Cruzer 128MB 686 * PR: usb/75970 687 */ 688 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", 689 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 690 }, 691 { 692 /* 693 * TOSHIBA TransMemory USB sticks 694 * PR: kern/94660 695 */ 696 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", 697 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 698 }, 699 { 700 /* 701 * PNY USB 3.0 Flash Drives 702 */ 703 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*", 704 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16 705 }, 706 { 707 /* 708 * PNY USB Flash keys 709 * PR: usb/75578, usb/72344, usb/65436 710 */ 711 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", 712 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 713 }, 714 { 715 /* 716 * Genesys GL3224 717 */ 718 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", 719 "120?"}, /*quirks*/ 
DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16 720 }, 721 { 722 /* 723 * Genesys 6-in-1 Card Reader 724 * PR: usb/94647 725 */ 726 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", 727 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 728 }, 729 { 730 /* 731 * Rekam Digital CAMERA 732 * PR: usb/98713 733 */ 734 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", 735 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 736 }, 737 { 738 /* 739 * iRiver H10 MP3 player 740 * PR: usb/102547 741 */ 742 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", 743 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 744 }, 745 { 746 /* 747 * iRiver U10 MP3 player 748 * PR: usb/92306 749 */ 750 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", 751 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 752 }, 753 { 754 /* 755 * X-Micro Flash Disk 756 * PR: usb/96901 757 */ 758 {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", 759 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 760 }, 761 { 762 /* 763 * EasyMP3 EM732X USB 2.0 Flash MP3 Player 764 * PR: usb/96546 765 */ 766 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", 767 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 768 }, 769 { 770 /* 771 * Denver MP3 player 772 * PR: usb/107101 773 */ 774 {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", 775 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 776 }, 777 { 778 /* 779 * Philips USB Key Audio KEY013 780 * PR: usb/68412 781 */ 782 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, 783 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 784 }, 785 { 786 /* 787 * JNC MP3 Player 788 * PR: usb/94439 789 */ 790 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", 791 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 792 }, 793 { 794 /* 795 * SAMSUNG MP0402H 796 * PR: usb/108427 797 */ 798 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, 799 /*quirks*/ DA_Q_NO_SYNC_CACHE 800 }, 801 { 802 /* 803 * I/O Magic USB flash - Giga Bank 804 * PR: usb/108810 805 */ 806 {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, 807 /*quirks*/ DA_Q_NO_SYNC_CACHE 808 }, 
809 { 810 /* 811 * JoyFly 128mb USB Flash Drive 812 * PR: 96133 813 */ 814 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", 815 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 816 }, 817 { 818 /* 819 * ChipsBnk usb stick 820 * PR: 103702 821 */ 822 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", 823 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 824 }, 825 { 826 /* 827 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A 828 * PR: 129858 829 */ 830 {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", 831 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 832 }, 833 { 834 /* 835 * Samsung YP-U3 mp3-player 836 * PR: 125398 837 */ 838 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", 839 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 840 }, 841 { 842 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", 843 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 844 }, 845 { 846 /* 847 * Sony Cyber-Shot DSC cameras 848 * PR: usb/137035 849 */ 850 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, 851 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 852 }, 853 { 854 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", 855 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT 856 }, 857 { 858 /* At least several Transcent USB sticks lie on RC16. */ 859 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", 860 "*"}, /*quirks*/ DA_Q_NO_RC16 861 }, 862 { 863 /* 864 * I-O Data USB Flash Disk 865 * PR: usb/211716 866 */ 867 {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*", 868 "*"}, /*quirks*/ DA_Q_NO_RC16 869 }, 870 { 871 /* 872 * SLC CHIPFANCIER USB drives 873 * PR: usb/234503 (RC10 right, RC16 wrong) 874 * 16GB, 32GB and 128GB confirmed to have same issue 875 */ 876 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER", 877 "*"}, /*quirks*/ DA_Q_NO_RC16 878 }, 879 /* ATA/SATA devices over SAS/USB/... 
*/ 880 { 881 /* Sandisk X400 */ 882 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" }, 883 /*quirks*/DA_Q_128KB 884 }, 885 { 886 /* Hitachi Advanced Format (4k) drives */ 887 { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, 888 /*quirks*/DA_Q_4K 889 }, 890 { 891 /* Micron Advanced Format (4k) drives */ 892 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" }, 893 /*quirks*/DA_Q_4K 894 }, 895 { 896 /* Samsung Advanced Format (4k) drives */ 897 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, 898 /*quirks*/DA_Q_4K 899 }, 900 { 901 /* Samsung Advanced Format (4k) drives */ 902 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, 903 /*quirks*/DA_Q_4K 904 }, 905 { 906 /* Samsung Advanced Format (4k) drives */ 907 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, 908 /*quirks*/DA_Q_4K 909 }, 910 { 911 /* Samsung Advanced Format (4k) drives */ 912 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, 913 /*quirks*/DA_Q_4K 914 }, 915 { 916 /* Seagate Barracuda Green Advanced Format (4k) drives */ 917 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, 918 /*quirks*/DA_Q_4K 919 }, 920 { 921 /* Seagate Barracuda Green Advanced Format (4k) drives */ 922 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, 923 /*quirks*/DA_Q_4K 924 }, 925 { 926 /* Seagate Barracuda Green Advanced Format (4k) drives */ 927 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, 928 /*quirks*/DA_Q_4K 929 }, 930 { 931 /* Seagate Barracuda Green Advanced Format (4k) drives */ 932 { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, 933 /*quirks*/DA_Q_4K 934 }, 935 { 936 /* Seagate Barracuda Green Advanced Format (4k) drives */ 937 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, 938 /*quirks*/DA_Q_4K 939 }, 940 { 941 /* Seagate Barracuda Green Advanced Format (4k) drives */ 942 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, 943 /*quirks*/DA_Q_4K 944 }, 945 { 946 /* Seagate Momentus Advanced Format (4k) 
drives */ 947 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, 948 /*quirks*/DA_Q_4K 949 }, 950 { 951 /* Seagate Momentus Advanced Format (4k) drives */ 952 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, 953 /*quirks*/DA_Q_4K 954 }, 955 { 956 /* Seagate Momentus Advanced Format (4k) drives */ 957 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, 958 /*quirks*/DA_Q_4K 959 }, 960 { 961 /* Seagate Momentus Advanced Format (4k) drives */ 962 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, 963 /*quirks*/DA_Q_4K 964 }, 965 { 966 /* Seagate Momentus Advanced Format (4k) drives */ 967 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, 968 /*quirks*/DA_Q_4K 969 }, 970 { 971 /* Seagate Momentus Advanced Format (4k) drives */ 972 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, 973 /*quirks*/DA_Q_4K 974 }, 975 { 976 /* Seagate Momentus Advanced Format (4k) drives */ 977 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, 978 /*quirks*/DA_Q_4K 979 }, 980 { 981 /* Seagate Momentus Advanced Format (4k) drives */ 982 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, 983 /*quirks*/DA_Q_4K 984 }, 985 { 986 /* Seagate Momentus Advanced Format (4k) drives */ 987 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, 988 /*quirks*/DA_Q_4K 989 }, 990 { 991 /* Seagate Momentus Advanced Format (4k) drives */ 992 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, 993 /*quirks*/DA_Q_4K 994 }, 995 { 996 /* Seagate Momentus Advanced Format (4k) drives */ 997 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, 998 /*quirks*/DA_Q_4K 999 }, 1000 { 1001 /* Seagate Momentus Advanced Format (4k) drives */ 1002 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, 1003 /*quirks*/DA_Q_4K 1004 }, 1005 { 1006 /* Seagate Momentus Advanced Format (4k) drives */ 1007 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, 1008 /*quirks*/DA_Q_4K 1009 }, 1010 { 1011 /* Seagate Momentus Advanced Format (4k) drives */ 1012 { 
T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, 1013 /*quirks*/DA_Q_4K 1014 }, 1015 { 1016 /* Seagate Momentus Thin Advanced Format (4k) drives */ 1017 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, 1018 /*quirks*/DA_Q_4K 1019 }, 1020 { 1021 /* Seagate Momentus Thin Advanced Format (4k) drives */ 1022 { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, 1023 /*quirks*/DA_Q_4K 1024 }, 1025 { 1026 /* WDC Caviar Green Advanced Format (4k) drives */ 1027 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, 1028 /*quirks*/DA_Q_4K 1029 }, 1030 { 1031 /* WDC Caviar Green Advanced Format (4k) drives */ 1032 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, 1033 /*quirks*/DA_Q_4K 1034 }, 1035 { 1036 /* WDC Caviar Green Advanced Format (4k) drives */ 1037 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, 1038 /*quirks*/DA_Q_4K 1039 }, 1040 { 1041 /* WDC Caviar Green Advanced Format (4k) drives */ 1042 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, 1043 /*quirks*/DA_Q_4K 1044 }, 1045 { 1046 /* WDC Caviar Green Advanced Format (4k) drives */ 1047 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, 1048 /*quirks*/DA_Q_4K 1049 }, 1050 { 1051 /* WDC Caviar Green Advanced Format (4k) drives */ 1052 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, 1053 /*quirks*/DA_Q_4K 1054 }, 1055 { 1056 /* WDC Caviar Green Advanced Format (4k) drives */ 1057 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, 1058 /*quirks*/DA_Q_4K 1059 }, 1060 { 1061 /* WDC Caviar Green Advanced Format (4k) drives */ 1062 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, 1063 /*quirks*/DA_Q_4K 1064 }, 1065 { 1066 /* WDC Scorpio Black Advanced Format (4k) drives */ 1067 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, 1068 /*quirks*/DA_Q_4K 1069 }, 1070 { 1071 /* WDC Scorpio Black Advanced Format (4k) drives */ 1072 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, 1073 /*quirks*/DA_Q_4K 1074 }, 1075 { 1076 
/* WDC Scorpio Black Advanced Format (4k) drives */ 1077 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, 1078 /*quirks*/DA_Q_4K 1079 }, 1080 { 1081 /* WDC Scorpio Black Advanced Format (4k) drives */ 1082 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, 1083 /*quirks*/DA_Q_4K 1084 }, 1085 { 1086 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1087 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, 1088 /*quirks*/DA_Q_4K 1089 }, 1090 { 1091 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1092 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, 1093 /*quirks*/DA_Q_4K 1094 }, 1095 { 1096 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1097 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, 1098 /*quirks*/DA_Q_4K 1099 }, 1100 { 1101 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1102 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, 1103 /*quirks*/DA_Q_4K 1104 }, 1105 { 1106 /* 1107 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1) 1108 * PR: usb/97472 1109 */ 1110 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"}, 1111 /*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE 1112 }, 1113 { 1114 /* 1115 * Olympus digital cameras (D-370) 1116 * PR: usb/97472 1117 */ 1118 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"}, 1119 /*quirks*/ DA_Q_NO_6_BYTE 1120 }, 1121 { 1122 /* 1123 * Olympus digital cameras (E-100RS, E-10). 
1124 * PR: usb/97472 1125 */ 1126 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"}, 1127 /*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE 1128 }, 1129 { 1130 /* 1131 * Olympus FE-210 camera 1132 */ 1133 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", 1134 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1135 }, 1136 { 1137 /* 1138 * Pentax Digital Camera 1139 * PR: usb/93389 1140 */ 1141 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PENTAX", "DIGITAL CAMERA", 1142 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1143 }, 1144 { 1145 /* 1146 * LG UP3S MP3 player 1147 */ 1148 {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", 1149 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1150 }, 1151 { 1152 /* 1153 * Laser MP3-2GA13 MP3 player 1154 */ 1155 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", 1156 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1157 }, 1158 { 1159 /* 1160 * LaCie external 250GB Hard drive des by Porsche 1161 * Submitted by: Ben Stuyts <ben@altesco.nl> 1162 * PR: 121474 1163 */ 1164 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, 1165 /*quirks*/ DA_Q_NO_SYNC_CACHE 1166 }, 1167 /* SATA SSDs */ 1168 { 1169 /* 1170 * Corsair Force 2 SSDs 1171 * 4k optimised & trim only works in 4k requests + 4k aligned 1172 */ 1173 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, 1174 /*quirks*/DA_Q_4K 1175 }, 1176 { 1177 /* 1178 * Corsair Force 3 SSDs 1179 * 4k optimised & trim only works in 4k requests + 4k aligned 1180 */ 1181 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, 1182 /*quirks*/DA_Q_4K 1183 }, 1184 { 1185 /* 1186 * Corsair Neutron GTX SSDs 1187 * 4k optimised & trim only works in 4k requests + 4k aligned 1188 */ 1189 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, 1190 /*quirks*/DA_Q_4K 1191 }, 1192 { 1193 /* 1194 * Corsair Force GT & GS SSDs 1195 * 4k optimised & trim only works in 4k requests + 4k aligned 1196 */ 1197 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, 1198 /*quirks*/DA_Q_4K 1199 }, 1200 { 1201 /* 1202 * Crucial M4 SSDs 
1203 * 4k optimised & trim only works in 4k requests + 4k aligned 1204 */ 1205 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, 1206 /*quirks*/DA_Q_4K 1207 }, 1208 { 1209 /* 1210 * Crucial RealSSD C300 SSDs 1211 * 4k optimised 1212 */ 1213 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", 1214 "*" }, /*quirks*/DA_Q_4K 1215 }, 1216 { 1217 /* 1218 * Intel 320 Series SSDs 1219 * 4k optimised & trim only works in 4k requests + 4k aligned 1220 */ 1221 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, 1222 /*quirks*/DA_Q_4K 1223 }, 1224 { 1225 /* 1226 * Intel 330 Series SSDs 1227 * 4k optimised & trim only works in 4k requests + 4k aligned 1228 */ 1229 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, 1230 /*quirks*/DA_Q_4K 1231 }, 1232 { 1233 /* 1234 * Intel 510 Series SSDs 1235 * 4k optimised & trim only works in 4k requests + 4k aligned 1236 */ 1237 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, 1238 /*quirks*/DA_Q_4K 1239 }, 1240 { 1241 /* 1242 * Intel 520 Series SSDs 1243 * 4k optimised & trim only works in 4k requests + 4k aligned 1244 */ 1245 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, 1246 /*quirks*/DA_Q_4K 1247 }, 1248 { 1249 /* 1250 * Intel S3610 Series SSDs 1251 * 4k optimised & trim only works in 4k requests + 4k aligned 1252 */ 1253 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" }, 1254 /*quirks*/DA_Q_4K 1255 }, 1256 { 1257 /* 1258 * Intel X25-M Series SSDs 1259 * 4k optimised & trim only works in 4k requests + 4k aligned 1260 */ 1261 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, 1262 /*quirks*/DA_Q_4K 1263 }, 1264 { 1265 /* 1266 * Kingston E100 Series SSDs 1267 * 4k optimised & trim only works in 4k requests + 4k aligned 1268 */ 1269 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, 1270 /*quirks*/DA_Q_4K 1271 }, 1272 { 1273 /* 1274 * Kingston HyperX 3k SSDs 1275 * 4k optimised & trim only works in 4k requests + 4k aligned 1276 */ 1277 { 
T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, 1278 /*quirks*/DA_Q_4K 1279 }, 1280 { 1281 /* 1282 * Marvell SSDs (entry taken from OpenSolaris) 1283 * 4k optimised & trim only works in 4k requests + 4k aligned 1284 */ 1285 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, 1286 /*quirks*/DA_Q_4K 1287 }, 1288 { 1289 /* 1290 * OCZ Agility 2 SSDs 1291 * 4k optimised & trim only works in 4k requests + 4k aligned 1292 */ 1293 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, 1294 /*quirks*/DA_Q_4K 1295 }, 1296 { 1297 /* 1298 * OCZ Agility 3 SSDs 1299 * 4k optimised & trim only works in 4k requests + 4k aligned 1300 */ 1301 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, 1302 /*quirks*/DA_Q_4K 1303 }, 1304 { 1305 /* 1306 * OCZ Deneva R Series SSDs 1307 * 4k optimised & trim only works in 4k requests + 4k aligned 1308 */ 1309 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, 1310 /*quirks*/DA_Q_4K 1311 }, 1312 { 1313 /* 1314 * OCZ Vertex 2 SSDs (inc pro series) 1315 * 4k optimised & trim only works in 4k requests + 4k aligned 1316 */ 1317 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, 1318 /*quirks*/DA_Q_4K 1319 }, 1320 { 1321 /* 1322 * OCZ Vertex 3 SSDs 1323 * 4k optimised & trim only works in 4k requests + 4k aligned 1324 */ 1325 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, 1326 /*quirks*/DA_Q_4K 1327 }, 1328 { 1329 /* 1330 * OCZ Vertex 4 SSDs 1331 * 4k optimised & trim only works in 4k requests + 4k aligned 1332 */ 1333 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, 1334 /*quirks*/DA_Q_4K 1335 }, 1336 { 1337 /* 1338 * Samsung 750 Series SSDs 1339 * 4k optimised & trim only works in 4k requests + 4k aligned 1340 */ 1341 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" }, 1342 /*quirks*/DA_Q_4K 1343 }, 1344 { 1345 /* 1346 * Samsung 830 Series SSDs 1347 * 4k optimised & trim only works in 4k requests + 4k aligned 1348 */ 1349 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", 
"SAMSUNG SSD 830 Series*", "*" }, 1350 /*quirks*/DA_Q_4K 1351 }, 1352 { 1353 /* 1354 * Samsung 840 SSDs 1355 * 4k optimised & trim only works in 4k requests + 4k aligned 1356 */ 1357 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, 1358 /*quirks*/DA_Q_4K 1359 }, 1360 { 1361 /* 1362 * Samsung 845 SSDs 1363 * 4k optimised & trim only works in 4k requests + 4k aligned 1364 */ 1365 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" }, 1366 /*quirks*/DA_Q_4K 1367 }, 1368 { 1369 /* 1370 * Samsung 850 SSDs 1371 * 4k optimised & trim only works in 4k requests + 4k aligned 1372 */ 1373 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, 1374 /*quirks*/DA_Q_4K 1375 }, 1376 { 1377 /* 1378 * Samsung 843T Series SSDs (MZ7WD*) 1379 * Samsung PM851 Series SSDs (MZ7TE*) 1380 * Samsung PM853T Series SSDs (MZ7GE*) 1381 * Samsung SM863 Series SSDs (MZ7KM*) 1382 * 4k optimised 1383 */ 1384 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, 1385 /*quirks*/DA_Q_4K 1386 }, 1387 { 1388 /* 1389 * Same as for SAMSUNG MZ7* but enable the quirks for SSD 1390 * starting with MZ7* too 1391 */ 1392 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" }, 1393 /*quirks*/DA_Q_4K 1394 }, 1395 { 1396 /* 1397 * SuperTalent TeraDrive CT SSDs 1398 * 4k optimised & trim only works in 4k requests + 4k aligned 1399 */ 1400 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, 1401 /*quirks*/DA_Q_4K 1402 }, 1403 { 1404 /* 1405 * XceedIOPS SATA SSDs 1406 * 4k optimised 1407 */ 1408 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, 1409 /*quirks*/DA_Q_4K 1410 }, 1411 { 1412 /* 1413 * Hama Innostor USB-Stick 1414 */ 1415 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, 1416 /*quirks*/DA_Q_NO_RC16 1417 }, 1418 { 1419 /* 1420 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) 1421 * Drive Managed SATA hard drive. This drive doesn't report 1422 * in firmware that it is a drive managed SMR drive. 
1423 */ 1424 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" }, 1425 /*quirks*/DA_Q_SMR_DM 1426 }, 1427 { 1428 /* 1429 * MX-ES USB Drive by Mach Xtreme 1430 */ 1431 { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"}, 1432 /*quirks*/DA_Q_NO_RC16 1433 }, 1434 }; 1435 1436 static disk_strategy_t dastrategy; 1437 static dumper_t dadump; 1438 static periph_init_t dainit; 1439 static void daasync(void *callback_arg, u_int32_t code, 1440 struct cam_path *path, void *arg); 1441 static void dasysctlinit(void *context, int pending); 1442 static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS); 1443 static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); 1444 static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); 1445 static int dazonemodesysctl(SYSCTL_HANDLER_ARGS); 1446 static int dazonesupsysctl(SYSCTL_HANDLER_ARGS); 1447 static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); 1448 static void dadeletemethodset(struct da_softc *softc, 1449 da_delete_methods delete_method); 1450 static off_t dadeletemaxsize(struct da_softc *softc, 1451 da_delete_methods delete_method); 1452 static void dadeletemethodchoose(struct da_softc *softc, 1453 da_delete_methods default_method); 1454 static void daprobedone(struct cam_periph *periph, union ccb *ccb); 1455 1456 static periph_ctor_t daregister; 1457 static periph_dtor_t dacleanup; 1458 static periph_start_t dastart; 1459 static periph_oninv_t daoninvalidate; 1460 static void dazonedone(struct cam_periph *periph, union ccb *ccb); 1461 static void dadone(struct cam_periph *periph, 1462 union ccb *done_ccb); 1463 static void dadone_probewp(struct cam_periph *periph, 1464 union ccb *done_ccb); 1465 static void dadone_proberc(struct cam_periph *periph, 1466 union ccb *done_ccb); 1467 static void dadone_probelbp(struct cam_periph *periph, 1468 union ccb *done_ccb); 1469 static void dadone_probeblklimits(struct cam_periph *periph, 1470 union ccb *done_ccb); 1471 static void dadone_probebdc(struct cam_periph *periph, 1472 union ccb *done_ccb); 
static void	dadone_probeata(struct cam_periph *periph,
			   union ccb *done_ccb);
static void	dadone_probeatalogdir(struct cam_periph *periph,
			   union ccb *done_ccb);
static void	dadone_probeataiddir(struct cam_periph *periph,
			   union ccb *done_ccb);
static void	dadone_probeatasup(struct cam_periph *periph,
			   union ccb *done_ccb);
static void	dadone_probeatazone(struct cam_periph *periph,
			   union ccb *done_ccb);
static void	dadone_probezone(struct cam_periph *periph,
			   union ccb *done_ccb);
static void	dadone_tur(struct cam_periph *periph,
			   union ccb *done_ccb);
static  int	daerror(union ccb *ccb, u_int32_t cam_flags,
			u_int32_t sense_flags);
static void	daprevent(struct cam_periph *periph, int action);
static void	dareprobe(struct cam_periph *periph);
static void	dasetgeom(struct cam_periph *periph, uint32_t block_len,
			  uint64_t maxsector,
			  struct scsi_read_capacity_data_long *rcaplong,
			  size_t rcap_size);
static callout_func_t	dasendorderedtag;
static void	dashutdown(void *arg, int howto);
static callout_func_t	damediapoll;

/*
 * Driver-wide defaults.  Each may be overridden at kernel build time by
 * defining the macro before this file is compiled.
 */
#ifndef	DA_DEFAULT_POLL_PERIOD
#define	DA_DEFAULT_POLL_PERIOD	3
#endif

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

#ifndef DA_DEFAULT_SOFTTIMEOUT
#define DA_DEFAULT_SOFTTIMEOUT	0
#endif

#ifndef	DA_DEFAULT_RETRY
#define	DA_DEFAULT_RETRY	4
#endif

#ifndef	DA_DEFAULT_SEND_ORDERED
#define	DA_DEFAULT_SEND_ORDERED	1
#endif

/* Runtime copies of the defaults, exported below as kern.cam.da.* sysctls. */
static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
static int da_retry_count = DA_DEFAULT_RETRY;
static int da_default_timeout = DA_DEFAULT_TIMEOUT;
static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
static int da_disable_wp_detection = 0;

static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
           &da_poll_period, 0, "Media polling period in seconds");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
           &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
           &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
           &da_send_ordered, 0, "Send Ordered Tags");
SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN,
           &da_disable_wp_detection, 0,
	   "Disable detection of write-protected disks");

/* Soft timeout goes through a handler (dasysctlsofttimeout) because the
 * backing variable is an sbintime_t, not a plain int. */
SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I",
    "Soft I/O timeout (ms)");
TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

/* Registration glue for the CAM peripheral framework. */
static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");

/*
 * This driver takes out references / holds in well defined pairs, never
 * recursively.  These macros / inline functions enforce those rules.  They
 * are only enabled with DA_TRACK_REFS or INVARIANTS.  If DA_TRACK_REFS is
 * defined to be 2 or larger, the tracking also includes debug printfs.
 */
#if defined(DA_TRACK_REFS) || defined(INVARIANTS)

#ifndef DA_TRACK_REFS
#define DA_TRACK_REFS 1
#endif

#if DA_TRACK_REFS > 1
/* Human-readable name for each da_ref_token, used only in debug printfs. */
static const char *da_ref_text[] = {
	"bogus",
	"open",
	"open hold",
	"close hold",
	"reprobe hold",
	"Test Unit Ready",
	"Geom",
	"sysctl",
	"reprobe",
	"max -- also bogus"
};

#define DA_PERIPH_PRINT(periph, msg, args...)		\
	CAM_PERIPH_PRINT(periph, msg, ##args)
#else
#define DA_PERIPH_PRINT(periph, msg, args...)
#endif	/* DA_TRACK_REFS > 1 */

/* Panic if a reference token is outside the tracked range. */
static inline void
token_sanity(da_ref_token token)
{
	if ((unsigned)token >= DA_REF_MAX)
		panic("Bad token value passed in %d\n", token);
}

/*
 * Hold the periph via cam_periph_hold() and record the reason (token) in
 * softc->ref_flags.  Holding twice for the same reason is a bug: panic.
 */
static inline int
da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token)
{
	int err = cam_periph_hold(periph, priority);

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n",
	    da_ref_text[token], token, err);
	if (err == 0) {
		int cnt;
		struct da_softc *softc = periph->softc;

		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
		if (cnt != 0)
			panic("Re-holding for reason %d, cnt = %d", token, cnt);
	}
	return (err);
}

/* Drop a hold previously taken via da_periph_hold() with the same token. */
static inline void
da_periph_unhold(struct cam_periph *periph, da_ref_token token)
{
	int cnt;
	struct da_softc *softc = periph->softc;

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n",
	    da_ref_text[token], token);
	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
	if (cnt != 1)
		panic("Unholding %d with cnt = %d", token, cnt);
	cam_periph_unhold(periph);
}

/* Acquire a reference on the periph and record the reason (token). */
static inline int
da_periph_acquire(struct cam_periph *periph, da_ref_token token)
{
	int err = cam_periph_acquire(periph);

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n",
	    da_ref_text[token], token, err);
	if (err == 0) {
		int cnt;
		struct da_softc *softc = periph->softc;

		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
		if (cnt != 0)
			panic("Re-refing for reason %d, cnt = %d", token, cnt);
	}
	return (err);
}

/* Release a reference previously taken via da_periph_acquire(). */
static inline void
da_periph_release(struct cam_periph *periph, da_ref_token token)
{
	int cnt;
	struct da_softc *softc = periph->softc;

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n",
	    da_ref_text[token], token);
	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
	if (cnt != 1)
		panic("Releasing %d with cnt = %d", token, cnt);
	cam_periph_release(periph);
}

/* As da_periph_release(), but for callers already holding the periph lock. */
static inline void
da_periph_release_locked(struct cam_periph *periph, da_ref_token token)
{
	int cnt;
	struct da_softc *softc = periph->softc;

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n",
	    da_ref_text[token], token);
	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
	if (cnt != 1)
		panic("releasing (locked) %d with cnt = %d", token, cnt);
	cam_periph_release_locked(periph);
}

/*
 * Poison the raw cam_periph_* names so any direct use in the rest of this
 * file fails to compile; everything must go through the tracked wrappers.
 */
#define cam_periph_hold POISON
#define cam_periph_unhold POISON
#define cam_periph_acquire POISON
#define cam_periph_release POISON
#define cam_periph_release_locked POISON

#else
/* No tracking: the wrappers collapse to the plain CAM periph calls. */
#define da_periph_hold(periph, prio, token)	cam_periph_hold((periph), (prio))
#define da_periph_unhold(periph, token)		cam_periph_unhold((periph))
#define da_periph_acquire(periph, token)	cam_periph_acquire((periph))
#define da_periph_release(periph, token)	cam_periph_release((periph))
#define da_periph_release_locked(periph, token)	cam_periph_release_locked((periph))
#endif

/*
 * GEOM disk open method: take an OPEN reference and a hold, reprobe the
 * device, and wait for the capacity update before declaring it open.
 * Returns 0 on success or an errno (ENXIO if the periph is going away).
 */
static int
daopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (da_periph_acquire(periph, DA_REF_OPEN) != 0) {
		return (ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) {
		cam_periph_unlock(periph);
		da_periph_release(periph, DA_REF_OPEN);
		return (error);
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daopen\n"));

	softc = (struct da_softc *)periph->softc;
	dareprobe(periph);

	/* Wait for the disk size update. */
	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
	    "dareprobe", 0);
	if (error != 0)
		xpt_print(periph->path, "unable to retrieve capacity data\n");

	if (periph->flags & CAM_PERIPH_INVALID)
		error = ENXIO;

	/* Lock removable media in place while the device is open. */
	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
		daprevent(periph, PR_PREVENT);

	if (error == 0) {
		softc->flags &= ~DA_FLAG_PACK_INVALID;
		softc->flags |= DA_FLAG_OPEN;
	}

	da_periph_unhold(periph, DA_REF_OPEN_HOLD);
	cam_periph_unlock(periph);

	/* On failure the OPEN reference is dropped; on success daclose drops it. */
	if (error != 0)
		da_periph_release(periph, DA_REF_OPEN);

	return (error);
}

/*
 * GEOM disk close method: flush the write cache if the pack is dirty,
 * re-allow medium removal, then drop the OPEN reference taken in daopen().
 */
static int
daclose(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daclose\n"));

	/* Best effort: if the hold fails we still finish closing below. */
	if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) {

		/* Flush disk cache. */
		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
			    /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG,
			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
			    5 * 60 * 1000);
			cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
			    softc->disk->d_devstat);
			softc->flags &= ~DA_FLAG_DIRTY;
			xpt_release_ccb(ccb);
		}

		/* Allow medium removal.
 */
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);

		da_periph_unhold(periph, DA_REF_CLOSE_HOLD);
	}

	/*
	 * If we've got removable media, mark the blocksize as
	 * unavailable, since it could change when new media is
	 * inserted.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;

	softc->flags &= ~DA_FLAG_OPEN;
	/* Wait for any in-flight CCB users of the softc to finish. */
	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
	cam_periph_unlock(periph);
	da_periph_release(periph, DA_REF_OPEN);
	return (0);
}

/*
 * Kick the I/O scheduler so it dispatches more work, but only once the
 * periph has finished probing (DA_STATE_NORMAL).
 */
static void
daschedule(struct cam_periph *periph)
{
	struct da_softc *softc = (struct da_softc *)periph->softc;

	if (softc->state != DA_STATE_NORMAL)
		return;

	cam_iosched_schedule(softc->cam_iosched, periph);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
dastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	softc = (struct da_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));

	/*
	 * Zone commands must be ordered, because they can depend on the
	 * effects of previously issued commands, and they may affect
	 * commands after them.
	 */
	if (bp->bio_cmd == BIO_ZONE)
		bp->bio_flags |= BIO_ORDERED;

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	cam_iosched_queue_work(softc->cam_iosched, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	daschedule(periph);
	cam_periph_unlock(periph);

	return;
}

/*
 * Kernel crash-dump method.  With length > 0, writes 'length' bytes at
 * 'offset' using a synchronously-run CCB (no retries, no recovery); a
 * zero-length call instead flushes the drive's cache.  Returns errno.
 */
static int
dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int secsize;
	struct ccb_scsiio csio;
	struct disk *dp;
	int error = 0;

	dp = arg;
	periph = dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	secsize = softc->params.secsize;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
		return (ENXIO);

	memset(&csio, 0, sizeof(csio));
	if (length > 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/0,
				/*cbfcnp*/NULL,
				MSG_ORDERED_Q_TAG,
				/*read*/SCSI_RW_WRITE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				offset / secsize,
				length / secsize,
				/*data_ptr*/(u_int8_t *) virtual,
				/*dxfer_len*/length,
				/*sense_len*/SSD_FULL_SIZE,
				da_default_timeout * 1000);
		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if (error != 0)
			printf("Aborting dump due to I/O error.\n");
		return (error);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/0,
				       /*cbfcnp*/NULL,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 1000);
		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
	}
	return (error);
}

/*
 * GEOM attribute query: forward the request to the transport layer and,
 * on success, mark the whole bio as completed.
 */
static int
dagetattr(struct bio *bp)
{
	int ret;
	struct cam_periph *periph;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	cam_periph_lock(periph);
	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
	    periph->path);
	cam_periph_unlock(periph);
	if (ret == 0)
		bp->bio_completed = bp->bio_length;
	return ret;
}

/*
 * One-time periph driver initialization: register the global async
 * callback and, if ordered tags are enabled, the shutdown handler.
 */
static void
dainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (da_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("dainit: shutdown event registration failed!\n");
	}
}

/*
 * Callback from GEOM, called when it has finished cleaning up its
 * resources.
 */
static void
dadiskgonecb(struct disk *dp)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)dp->d_drv1;
	/* Drop the GEOM reference now that GEOM is done with the disk. */
	da_periph_release(periph, DA_REF_GEOM);
}

/*
 * Periph invalidation hook: stop receiving events, fail all queued I/O,
 * and ask GEOM to retire the disk (dadiskgonecb fires when it's done).
 * Called with the periph lock held.
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	struct da_softc *softc;

	cam_periph_assert(periph, MA_OWNED);
	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, daasync, periph, periph->path);

	softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
	softc->invalidations++;
#endif

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);

	/*
	 * Tell GEOM that we've gone away, we'll get a callback when it is
	 * done cleaning up its resources.
	 */
	disk_gone(softc->disk);
}

/*
 * Final periph destructor: tear down the sysctl trees, callouts, disk,
 * and free the softc.  Called with the periph lock held; it is dropped
 * around the teardown and re-taken before returning.
 */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_unlock(periph);

	cam_iosched_fini(softc->cam_iosched);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
#ifdef CAM_IO_STATS
		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl stats context\n");
#endif
		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl context\n");
	}

	callout_drain(&softc->mediapoll_c);
	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

/*
 * Async event handler: device arrival (creates the periph), advanced-info
 * changes, unit attentions, AENs, resets, and INQUIRY changes.  Locking
 * varies per event; see the per-case comments.
 */
static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:	/* callback to create periph, no locking yet */
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/* Only attach to connected SCSI direct-access-like units. */
		if (cgd->protocol != PROTO_SCSI)
			break;
		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
			break;
		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		    && SID_TYPE(&cgd->inq_data) != T_RBC
		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL
		    && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		return;
	}
	case AC_ADVINFO_CHANGED:	/* Doesn't touch periph */
	{
		uintptr_t buftype;

		buftype = (uintptr_t)arg;
		if (buftype == CDAI_TYPE_PHYS_PATH) {
			struct da_softc *softc;

			softc = periph->softc;
			disk_attr_changed(softc->disk, "GEOM::physpath",
					  M_NOWAIT);
		}
		break;
	}
	case AC_UNIT_ATTENTION:
	{
		union ccb *ccb;
		int error_code, sense_key, asc, ascq;

		softc = (struct da_softc *)periph->softc;
		ccb = (union ccb *)arg;

		/*
		 * Handle all UNIT ATTENTIONs except our own, as they will be
		 * handled by daerror().  Since this comes from a different
		 * periph, that periph's lock is held, not ours, so we have to
		 * take ours out to touch the softc flags.
		 */
		if (xpt_path_periph(ccb->ccb_h.path) != periph &&
		    scsi_extract_sense_ccb(ccb,
		     &error_code, &sense_key, &asc, &ascq)) {
			if (asc == 0x2A && ascq == 0x09) {
				/* 2A/09: capacity data changed -> reprobe. */
				xpt_print(ccb->ccb_h.path,
				    "Capacity data has changed\n");
				cam_periph_lock(periph);
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
				cam_periph_unlock(periph);
			} else if (asc == 0x28 && ascq == 0x00) {
				/* 28/00: medium may have changed -> tell GEOM. */
				cam_periph_lock(periph);
				softc->flags &= ~DA_FLAG_PROBED;
				cam_periph_unlock(periph);
				disk_media_changed(softc->disk, M_NOWAIT);
			} else if (asc == 0x3F && ascq == 0x03) {
				/* 3F/03: INQUIRY data changed -> reprobe. */
				xpt_print(ccb->ccb_h.path,
				    "INQUIRY data has changed\n");
				cam_periph_lock(periph);
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
				cam_periph_unlock(periph);
			}
		}
		break;
	}
	case AC_SCSI_AEN:		/* Called for this path: periph locked */
		/*
		 * Appears to be currently unused for SCSI devices, only ata SIMs
		 * generate this.
		 */
		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
		    (softc->flags & DA_FLAG_TUR_PENDING) == 0) {
			if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
				cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
				daschedule(periph);
			}
		}
		/* FALLTHROUGH */
	case AC_SENT_BDR:		/* Called for this path: periph locked */
	case AC_BUS_RESET:		/* Called for this path: periph locked */
	{
		struct ccb_hdr *ccbh;

		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		/* Mark every outstanding CCB so daerror() retries its UA. */
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		break;
	}
	case AC_INQ_CHANGED:		/* Called for this path: periph locked */
		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		softc->flags &= ~DA_FLAG_PROBED;
		dareprobe(periph);
		break;
	default:
		break;
	}
	/* Give the generic periph layer a look at the event as well. */
	cam_periph_async(periph, code, path, arg);
}

/*
 * Taskqueue handler that builds this unit's sysctl tree under
 * kern.cam.da.<unit>.  The DA_REF_SYSCTL reference taken by the enqueuer
 * is released on every exit path.
 */
static void
dasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	char tmpstr[32], tmpstr2[16];
	struct ccb_trans_settings cts;

	periph = (struct cam_periph *)context;
	/*
	 * periph was held for us when this task was enqueued
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}

	softc = (struct da_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	cam_periph_lock(periph);
	softc->flags |= DA_FLAG_SCTX_INIT;
	cam_periph_unlock(periph);
	softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
	    CTLFLAG_RD, 0, tmpstr, "device_index");
	if (softc->sysctl_tree == NULL) {
		printf("dasysctlinit: unable to allocate sysctl tree\n");
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}

	/*
	 * Now register the sysctl handler, so the user can change the value on
	 * the fly.
	 */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN,
		softc, 0, dadeletemethodsysctl, "A",
		"BIO_DELETE execution method");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
		softc, 0, dadeletemaxsysctl, "Q",
		"Maximum BIO_DELETE size");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
		"Minimum CDB size");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_count", CTLFLAG_RD, &softc->trim_count,
		"Total number of unmap/dsm commands sent");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_ranges", CTLFLAG_RD, &softc->trim_ranges,
		"Total number of ranges in unmap/dsm commands");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_lbas", CTLFLAG_RD, &softc->trim_lbas,
		"Total lbas in the unmap/dsm commands sent");

	/* Zoned-device (SMR) reporting leaves. */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonemodesysctl, "A",
		"Zone Mode");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonesupsysctl, "A",
		"Zone Support");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
		"Optimal Number of Open Sequential Write Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_nonseq_zones", CTLFLAG_RD,
		&softc->optimal_nonseq_zones,
		"Optimal Number of Non-Sequentially Written Sequential Write "
		"Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
		"Maximum Number of Open Sequential Write Required Zones");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "error_inject",
		       CTLFLAG_RW,
		       &softc->error_inject,
		       0,
		       "error_inject leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "unmapped_io",
		       CTLFLAG_RD,
		       &softc->unmappedio,
		       0,
		       "Unmapped I/O support");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "rotating",
		       CTLFLAG_RD,
		       &softc->rotating,
		       0,
		       "Rotating media");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "p_type",
		       CTLFLAG_RD,
		       &softc->p_type,
		       0,
		       "DIF protection type");

#ifdef CAM_TEST_FAILURE
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
		periph, 0, cam_periph_invalidate_sysctl, "I",
		"Write 1 to invalidate the drive immediately");
#endif

	/*
	 * Add some addressing info.
	 */
	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cam_periph_lock(periph);
	xpt_action((union ccb *)&cts);
	cam_periph_unlock(periph);
	if (cts.ccb_h.status != CAM_REQ_CMP) {
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}
	/* On Fibre Channel transports, expose the WWPN if the SIM reports one. */
	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWPN) {
			softc->wwpn = fc->wwpn;
			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
			    SYSCTL_CHILDREN(softc->sysctl_tree),
			    OID_AUTO, "wwpn", CTLFLAG_RD,
			    &softc->wwpn, "World Wide Port Name");
		}
	}

#ifdef CAM_IO_STATS
	/*
	 * Now add some useful stats.
	 * XXX These should live in cam_periph and be common to all periphs
	 */
	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
	    CTLFLAG_RD, 0, "Statistics");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "errors",
		       CTLFLAG_RD,
		       &softc->errors,
		       0,
		       "Transport errors reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "timeouts",
		       CTLFLAG_RD,
		       &softc->timeouts,
		       0,
		       "Device timeouts reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "pack_invalidations",
		       CTLFLAG_RD,
		       &softc->invalidations,
		       0,
		       "Device pack invalidations");
#endif

	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
	    softc->sysctl_tree);

	da_periph_release(periph, DA_REF_SYSCTL);
}

static int

dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) 2393 { 2394 int error; 2395 uint64_t value; 2396 struct da_softc *softc; 2397 2398 softc = (struct da_softc *)arg1; 2399 2400 value = softc->disk->d_delmaxsize; 2401 error = sysctl_handle_64(oidp, &value, 0, req); 2402 if ((error != 0) || (req->newptr == NULL)) 2403 return (error); 2404 2405 /* only accept values smaller than the calculated value */ 2406 if (value > dadeletemaxsize(softc, softc->delete_method)) { 2407 return (EINVAL); 2408 } 2409 softc->disk->d_delmaxsize = value; 2410 2411 return (0); 2412 } 2413 2414 static int 2415 dacmdsizesysctl(SYSCTL_HANDLER_ARGS) 2416 { 2417 int error, value; 2418 2419 value = *(int *)arg1; 2420 2421 error = sysctl_handle_int(oidp, &value, 0, req); 2422 2423 if ((error != 0) 2424 || (req->newptr == NULL)) 2425 return (error); 2426 2427 /* 2428 * Acceptable values here are 6, 10, 12 or 16. 2429 */ 2430 if (value < 6) 2431 value = 6; 2432 else if ((value > 6) 2433 && (value <= 10)) 2434 value = 10; 2435 else if ((value > 10) 2436 && (value <= 12)) 2437 value = 12; 2438 else if (value > 12) 2439 value = 16; 2440 2441 *(int *)arg1 = value; 2442 2443 return (0); 2444 } 2445 2446 static int 2447 dasysctlsofttimeout(SYSCTL_HANDLER_ARGS) 2448 { 2449 sbintime_t value; 2450 int error; 2451 2452 value = da_default_softtimeout / SBT_1MS; 2453 2454 error = sysctl_handle_int(oidp, (int *)&value, 0, req); 2455 if ((error != 0) || (req->newptr == NULL)) 2456 return (error); 2457 2458 /* XXX Should clip this to a reasonable level */ 2459 if (value > da_default_timeout * 1000) 2460 return (EINVAL); 2461 2462 da_default_softtimeout = value * SBT_1MS; 2463 return (0); 2464 } 2465 2466 static void 2467 dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) 2468 { 2469 2470 softc->delete_method = delete_method; 2471 softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); 2472 softc->delete_func = da_delete_functions[delete_method]; 2473 2474 if (softc->delete_method > 
	    DA_DELETE_DISABLE)
		softc->disk->d_flags |= DISKFLAG_CANDELETE;
	else
		softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
}

/*
 * Return the maximum BIO_DELETE size, in bytes, for the given delete
 * method, clipped to the size of the device.  Returns 0 for methods
 * with no limit information (e.g. DISABLE/NONE).
 */
static off_t
dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
{
	off_t sectors;

	switch(delete_method) {
	case DA_DELETE_UNMAP:
		sectors = (off_t)softc->unmap_max_lba;
		break;
	case DA_DELETE_ATA_TRIM:
		sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
		break;
	case DA_DELETE_WS16:
		sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
		break;
	case DA_DELETE_ZERO:
	case DA_DELETE_WS10:
		sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
		break;
	default:
		return 0;
	}

	return (off_t)softc->params.secsize *
	    omin(sectors, softc->params.sectors);
}

/*
 * Complete the probe state machine: pick a delete method, announce the
 * device (once), release the probe CCB and drop the probe hold or
 * reprobe reference.  Called with the periph lock held.
 */
static void
daprobedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	dadeletemethodchoose(softc, DA_DELETE_NONE);

	/* One-time verbose announcement of the available delete methods. */
	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		char buf[80];
		int i, sep;

		snprintf(buf, sizeof(buf), "Delete methods: <");
		sep = 0;
		for (i = 0; i <= DA_DELETE_MAX; i++) {
			if ((softc->delete_available & (1 << i)) == 0 &&
			    i != softc->delete_method)
				continue;
			if (sep)
				strlcat(buf, ",", sizeof(buf));
			strlcat(buf, da_delete_method_names[i],
			    sizeof(buf));
			/* The active method is tagged with "(*)". */
			if (i == softc->delete_method)
				strlcat(buf, "(*)", sizeof(buf));
			sep = 1;
		}
		strlcat(buf, ">", sizeof(buf));
		printf("%s%d: %s\n", periph->periph_name,
		    periph->unit_number, buf);
	}
	if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 &&
	    (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		printf("%s%d: Write Protected\n", periph->periph_name,
		    periph->unit_number);
	}

	/*
	 * Since our peripheral may be invalidated by an error
	 * above or an external event, we must release our CCB
	 * before releasing the probe lock on the peripheral.
	 * The peripheral will only go away once the last lock
	 * is removed, and we need it around for the CCB release
	 * operation.
	 */
	xpt_release_ccb(ccb);
	softc->state = DA_STATE_NORMAL;
	softc->flags |= DA_FLAG_PROBED;
	daschedule(periph);
	/* Wake anyone sleeping for the media size to become valid. */
	wakeup(&softc->disk->d_mediasize);
	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		softc->flags |= DA_FLAG_ANNOUNCED;
		da_periph_unhold(periph, DA_REF_PROBE_HOLD);
	} else
		da_periph_release_locked(periph, DA_REF_REPROBE);
}

/*
 * Select the BIO_DELETE method: the user's preference if available,
 * otherwise the best available method in the pre-defined order,
 * otherwise default_method.
 */
static void
dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
{
	int i, methods;

	/* If available, prefer the method requested by user. */
	i = softc->delete_method_pref;
	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
	if (methods & (1 << i)) {
		dadeletemethodset(softc, i);
		return;
	}

	/* Use the pre-defined order to choose the best performing delete. */
	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
		/* ZERO is a last-resort method; never auto-selected here. */
		if (i == DA_DELETE_ZERO)
			continue;
		if (softc->delete_available & (1 << i)) {
			dadeletemethodset(softc, i);
			return;
		}
	}

	/* Fallback to default.
*/ 2590 dadeletemethodset(softc, default_method); 2591 } 2592 2593 static int 2594 dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) 2595 { 2596 char buf[16]; 2597 const char *p; 2598 struct da_softc *softc; 2599 int i, error, value; 2600 2601 softc = (struct da_softc *)arg1; 2602 2603 value = softc->delete_method; 2604 if (value < 0 || value > DA_DELETE_MAX) 2605 p = "UNKNOWN"; 2606 else 2607 p = da_delete_method_names[value]; 2608 strncpy(buf, p, sizeof(buf)); 2609 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 2610 if (error != 0 || req->newptr == NULL) 2611 return (error); 2612 for (i = 0; i <= DA_DELETE_MAX; i++) { 2613 if (strcmp(buf, da_delete_method_names[i]) == 0) 2614 break; 2615 } 2616 if (i > DA_DELETE_MAX) 2617 return (EINVAL); 2618 softc->delete_method_pref = i; 2619 dadeletemethodchoose(softc, DA_DELETE_NONE); 2620 return (0); 2621 } 2622 2623 static int 2624 dazonemodesysctl(SYSCTL_HANDLER_ARGS) 2625 { 2626 char tmpbuf[40]; 2627 struct da_softc *softc; 2628 int error; 2629 2630 softc = (struct da_softc *)arg1; 2631 2632 switch (softc->zone_mode) { 2633 case DA_ZONE_DRIVE_MANAGED: 2634 snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); 2635 break; 2636 case DA_ZONE_HOST_AWARE: 2637 snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); 2638 break; 2639 case DA_ZONE_HOST_MANAGED: 2640 snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); 2641 break; 2642 case DA_ZONE_NONE: 2643 default: 2644 snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); 2645 break; 2646 } 2647 2648 error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); 2649 2650 return (error); 2651 } 2652 2653 static int 2654 dazonesupsysctl(SYSCTL_HANDLER_ARGS) 2655 { 2656 char tmpbuf[180]; 2657 struct da_softc *softc; 2658 struct sbuf sb; 2659 int error, first; 2660 unsigned int i; 2661 2662 softc = (struct da_softc *)arg1; 2663 2664 error = 0; 2665 first = 1; 2666 sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); 2667 2668 for (i = 0; i < sizeof(da_zone_desc_table) / 2669 
	    sizeof(da_zone_desc_table[0]); i++) {
		if (softc->zone_flags & da_zone_desc_table[i].value) {
			if (first == 0)
				sbuf_printf(&sb, ", ");
			else
				first = 0;
			sbuf_cat(&sb, da_zone_desc_table[i].desc);
		}
	}

	if (first == 1)
		sbuf_printf(&sb, "None");

	sbuf_finish(&sb);

	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

	return (error);
}

/*
 * CAM registration entry point for a new da(4) unit: allocate and
 * initialize the softc, apply quirks and tunables, set up zone mode,
 * register the GEOM disk, and kick off the probe state machine.
 * Returns CAM_REQ_CMP on success, CAM_REQ_CMP_ERR otherwise.
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	struct da_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;
	int quirks;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}

	/* Default limits; refined later by the probe state machine. */
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE_WP;
	bioq_init(&softc->delete_run_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
	softc->unmap_max_lba = UNMAP_RANGE_MAX;
	softc->unmap_gran = 0;
	softc->unmap_gran_align = 0;
	softc->ws_max_blks = WS16_MAX_BLKS;
	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
	softc->rotating = 1;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       nitems(da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	xpt_path_inq(&cpi, periph->path);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	/* Override quirks if tunable is set */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.quirks",
		 periph->unit_number);
	quirks = softc->quirks;
	TUNABLE_INT_FETCH(tmpstr, &quirks);
	softc->quirks = quirks;

	/* Determine the zoning model from the device type and quirks. */
	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
		softc->zone_mode = DA_ZONE_HOST_MANAGED;
	else if (softc->quirks & DA_Q_SMR_DM)
		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
	else
		softc->zone_mode = DA_ZONE_NONE;

	if (softc->zone_mode != DA_ZONE_NONE) {
		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
			else
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
		} else
			softc->zone_interface = DA_ZONE_IF_SCSI;
	}

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);

	/*
	 * Take an exclusive section lock on the periph while dastart is called
	 * to finish the probe.  The lock will be dropped in dadone at the end
	 * of probe. This locks out daopen and daclose from racing with the
	 * probe.
	 *
	 * XXX if cam_periph_hold returns an error, we don't hold a refcount.
	 */
	(void)da_periph_hold(periph, PRIBIO, DA_REF_PROBE_HOLD);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, periph);

	cam_periph_unlock(periph);
	/*
	 * RBC devices don't have to support READ(6), only READ(10).
	 */
	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Load the user's default, if any.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
		 periph->unit_number);
	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);

	/*
	 * 6, 10, 12 and 16 are the currently permissible values.
	 */
	if (softc->minimum_cmd_size > 12)
		softc->minimum_cmd_size = 16;
	else if (softc->minimum_cmd_size > 10)
		softc->minimum_cmd_size = 12;
	else if (softc->minimum_cmd_size > 6)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/* Predict whether device may support READ CAPACITY(16). */
	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
	    (softc->quirks & DA_Q_NO_RC16) == 0) {
		softc->flags |= DA_FLAG_CAN_RC16;
	}

	/*
	 * Register this media as a disk.
	 */
	softc->disk = disk_alloc();
	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) |
			  XPORT_DEVSTAT_TYPE(cpi.transport),
			  DEVSTAT_PRIORITY_DISK);
	softc->disk->d_open = daopen;
	softc->disk->d_close = daclose;
	softc->disk->d_strategy = dastrategy;
	softc->disk->d_dump = dadump;
	softc->disk->d_getattr = dagetattr;
	softc->disk->d_gone = dadiskgonecb;
	softc->disk->d_name = "da";
	softc->disk->d_drv1 = periph;
	if (cpi.maxio == 0)
		softc->maxio = DFLTPHYS;	/* traditional default */
	else if (cpi.maxio > MAXPHYS)
		softc->maxio = MAXPHYS;		/* for safety */
	else
		softc->maxio = cpi.maxio;
	if (softc->quirks & DA_Q_128KB)
		softc->maxio = min(softc->maxio, 128 * 1024);
	softc->disk->d_maxsize = softc->maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
		softc->unmappedio = 1;
		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
	}
	/* Description is "<vendor> <product>" from the inquiry data. */
	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
	softc->disk->d_hba_vendor = cpi.hba_vendor;
	softc->disk->d_hba_device = cpi.hba_device;
	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
	snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment),
	    "%s%d", cpi.dev_name, cpi.unit_number);

	/*
	 * Acquire a reference to the periph before we register with GEOM.
	 * We'll release this reference once GEOM calls us back (via
	 * dadiskgonecb()) telling us that our provider has been freed.
	 */
	if (da_periph_acquire(periph, DA_REF_GEOM) != 0) {
		xpt_print(periph->path, "%s: lost periph during "
			  "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}

	disk_create(softc->disk, DISK_VERSION);
	cam_periph_lock(periph);

	/*
	 * Add async callbacks for events of interest.
	 * I don't bother checking if this fails as,
	 * in most cases, the system will function just
	 * fine without them and the only alternative
	 * would be to not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
	    AC_INQ_CHANGED, daasync, periph, periph->path);

	/*
	 * Emit an attribute changed notification just in case
	 * physical path information arrived before our async
	 * event handler was registered, but after anyone attaching
	 * to our disk device polled it.
	 */
	disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);

	/*
	 * Schedule periodic media polling events.
2917 */ 2918 callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); 2919 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && 2920 (cgd->inq_flags & SID_AEN) == 0 && 2921 da_poll_period != 0) 2922 callout_reset(&softc->mediapoll_c, da_poll_period * hz, 2923 damediapoll, periph); 2924 2925 xpt_schedule(periph, CAM_PRIORITY_DEV); 2926 2927 return(CAM_REQ_CMP); 2928 } 2929 2930 static int 2931 da_zone_bio_to_scsi(int disk_zone_cmd) 2932 { 2933 switch (disk_zone_cmd) { 2934 case DISK_ZONE_OPEN: 2935 return ZBC_OUT_SA_OPEN; 2936 case DISK_ZONE_CLOSE: 2937 return ZBC_OUT_SA_CLOSE; 2938 case DISK_ZONE_FINISH: 2939 return ZBC_OUT_SA_FINISH; 2940 case DISK_ZONE_RWP: 2941 return ZBC_OUT_SA_RWP; 2942 } 2943 2944 return -1; 2945 } 2946 2947 static int 2948 da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, 2949 int *queue_ccb) 2950 { 2951 struct da_softc *softc; 2952 int error; 2953 2954 error = 0; 2955 2956 if (bp->bio_cmd != BIO_ZONE) { 2957 error = EINVAL; 2958 goto bailout; 2959 } 2960 2961 softc = periph->softc; 2962 2963 switch (bp->bio_zone.zone_cmd) { 2964 case DISK_ZONE_OPEN: 2965 case DISK_ZONE_CLOSE: 2966 case DISK_ZONE_FINISH: 2967 case DISK_ZONE_RWP: { 2968 int zone_flags; 2969 int zone_sa; 2970 uint64_t lba; 2971 2972 zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd); 2973 if (zone_sa == -1) { 2974 xpt_print(periph->path, "Cannot translate zone " 2975 "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd); 2976 error = EINVAL; 2977 goto bailout; 2978 } 2979 2980 zone_flags = 0; 2981 lba = bp->bio_zone.zone_params.rwp.id; 2982 2983 if (bp->bio_zone.zone_params.rwp.flags & 2984 DISK_ZONE_RWP_FLAG_ALL) 2985 zone_flags |= ZBC_OUT_ALL; 2986 2987 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { 2988 scsi_zbc_out(&ccb->csio, 2989 /*retries*/ da_retry_count, 2990 /*cbfcnp*/ dadone, 2991 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2992 /*service_action*/ zone_sa, 2993 /*zone_id*/ lba, 2994 /*zone_flags*/ zone_flags, 2995 /*data_ptr*/ NULL, 2996 /*dxfer_len*/ 0, 
2997 /*sense_len*/ SSD_FULL_SIZE, 2998 /*timeout*/ da_default_timeout * 1000); 2999 } else { 3000 /* 3001 * Note that in this case, even though we can 3002 * technically use NCQ, we don't bother for several 3003 * reasons: 3004 * 1. It hasn't been tested on a SAT layer that 3005 * supports it. This is new as of SAT-4. 3006 * 2. Even when there is a SAT layer that supports 3007 * it, that SAT layer will also probably support 3008 * ZBC -> ZAC translation, since they are both 3009 * in the SAT-4 spec. 3010 * 3. Translation will likely be preferable to ATA 3011 * passthrough. LSI / Avago at least single 3012 * steps ATA passthrough commands in the HBA, 3013 * regardless of protocol, so unless that 3014 * changes, there is a performance penalty for 3015 * doing ATA passthrough no matter whether 3016 * you're using NCQ/FPDMA, DMA or PIO. 3017 * 4. It requires a 32-byte CDB, which at least at 3018 * this point in CAM requires a CDB pointer, which 3019 * would require us to allocate an additional bit 3020 * of storage separate from the CCB. 
3021 */ 3022 error = scsi_ata_zac_mgmt_out(&ccb->csio, 3023 /*retries*/ da_retry_count, 3024 /*cbfcnp*/ dadone, 3025 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3026 /*use_ncq*/ 0, 3027 /*zm_action*/ zone_sa, 3028 /*zone_id*/ lba, 3029 /*zone_flags*/ zone_flags, 3030 /*data_ptr*/ NULL, 3031 /*dxfer_len*/ 0, 3032 /*cdb_storage*/ NULL, 3033 /*cdb_storage_len*/ 0, 3034 /*sense_len*/ SSD_FULL_SIZE, 3035 /*timeout*/ da_default_timeout * 1000); 3036 if (error != 0) { 3037 error = EINVAL; 3038 xpt_print(periph->path, 3039 "scsi_ata_zac_mgmt_out() returned an " 3040 "error!"); 3041 goto bailout; 3042 } 3043 } 3044 *queue_ccb = 1; 3045 3046 break; 3047 } 3048 case DISK_ZONE_REPORT_ZONES: { 3049 uint8_t *rz_ptr; 3050 uint32_t num_entries, alloc_size; 3051 struct disk_zone_report *rep; 3052 3053 rep = &bp->bio_zone.zone_params.report; 3054 3055 num_entries = rep->entries_allocated; 3056 if (num_entries == 0) { 3057 xpt_print(periph->path, "No entries allocated for " 3058 "Report Zones request\n"); 3059 error = EINVAL; 3060 goto bailout; 3061 } 3062 alloc_size = sizeof(struct scsi_report_zones_hdr) + 3063 (sizeof(struct scsi_report_zones_desc) * num_entries); 3064 alloc_size = min(alloc_size, softc->disk->d_maxsize); 3065 rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO); 3066 if (rz_ptr == NULL) { 3067 xpt_print(periph->path, "Unable to allocate memory " 3068 "for Report Zones request\n"); 3069 error = ENOMEM; 3070 goto bailout; 3071 } 3072 3073 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { 3074 scsi_zbc_in(&ccb->csio, 3075 /*retries*/ da_retry_count, 3076 /*cbcfnp*/ dadone, 3077 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3078 /*service_action*/ ZBC_IN_SA_REPORT_ZONES, 3079 /*zone_start_lba*/ rep->starting_id, 3080 /*zone_options*/ rep->rep_options, 3081 /*data_ptr*/ rz_ptr, 3082 /*dxfer_len*/ alloc_size, 3083 /*sense_len*/ SSD_FULL_SIZE, 3084 /*timeout*/ da_default_timeout * 1000); 3085 } else { 3086 /* 3087 * Note that in this case, even though we can 3088 * technically use NCQ, we 
don't bother for several 3089 * reasons: 3090 * 1. It hasn't been tested on a SAT layer that 3091 * supports it. This is new as of SAT-4. 3092 * 2. Even when there is a SAT layer that supports 3093 * it, that SAT layer will also probably support 3094 * ZBC -> ZAC translation, since they are both 3095 * in the SAT-4 spec. 3096 * 3. Translation will likely be preferable to ATA 3097 * passthrough. LSI / Avago at least single 3098 * steps ATA passthrough commands in the HBA, 3099 * regardless of protocol, so unless that 3100 * changes, there is a performance penalty for 3101 * doing ATA passthrough no matter whether 3102 * you're using NCQ/FPDMA, DMA or PIO. 3103 * 4. It requires a 32-byte CDB, which at least at 3104 * this point in CAM requires a CDB pointer, which 3105 * would require us to allocate an additional bit 3106 * of storage separate from the CCB. 3107 */ 3108 error = scsi_ata_zac_mgmt_in(&ccb->csio, 3109 /*retries*/ da_retry_count, 3110 /*cbcfnp*/ dadone, 3111 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3112 /*use_ncq*/ 0, 3113 /*zm_action*/ ATA_ZM_REPORT_ZONES, 3114 /*zone_id*/ rep->starting_id, 3115 /*zone_flags*/ rep->rep_options, 3116 /*data_ptr*/ rz_ptr, 3117 /*dxfer_len*/ alloc_size, 3118 /*cdb_storage*/ NULL, 3119 /*cdb_storage_len*/ 0, 3120 /*sense_len*/ SSD_FULL_SIZE, 3121 /*timeout*/ da_default_timeout * 1000); 3122 if (error != 0) { 3123 error = EINVAL; 3124 xpt_print(periph->path, 3125 "scsi_ata_zac_mgmt_in() returned an " 3126 "error!"); 3127 goto bailout; 3128 } 3129 } 3130 3131 /* 3132 * For BIO_ZONE, this isn't normally needed. However, it 3133 * is used by devstat_end_transaction_bio() to determine 3134 * how much data was transferred. 3135 */ 3136 /* 3137 * XXX KDM we have a problem. But I'm not sure how to fix 3138 * it. devstat uses bio_bcount - bio_resid to calculate 3139 * the amount of data transferred. The GEOM disk code 3140 * uses bio_length - bio_resid to calculate the amount of 3141 * data in bio_completed. 
We have different structure 3142 * sizes above and below the ada(4) driver. So, if we 3143 * use the sizes above, the amount transferred won't be 3144 * quite accurate for devstat. If we use different sizes 3145 * for bio_bcount and bio_length (above and below 3146 * respectively), then the residual needs to match one or 3147 * the other. Everything is calculated after the bio 3148 * leaves the driver, so changing the values around isn't 3149 * really an option. For now, just set the count to the 3150 * passed in length. This means that the calculations 3151 * above (e.g. bio_completed) will be correct, but the 3152 * amount of data reported to devstat will be slightly 3153 * under or overstated. 3154 */ 3155 bp->bio_bcount = bp->bio_length; 3156 3157 *queue_ccb = 1; 3158 3159 break; 3160 } 3161 case DISK_ZONE_GET_PARAMS: { 3162 struct disk_zone_disk_params *params; 3163 3164 params = &bp->bio_zone.zone_params.disk_params; 3165 bzero(params, sizeof(*params)); 3166 3167 switch (softc->zone_mode) { 3168 case DA_ZONE_DRIVE_MANAGED: 3169 params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; 3170 break; 3171 case DA_ZONE_HOST_AWARE: 3172 params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; 3173 break; 3174 case DA_ZONE_HOST_MANAGED: 3175 params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; 3176 break; 3177 default: 3178 case DA_ZONE_NONE: 3179 params->zone_mode = DISK_ZONE_MODE_NONE; 3180 break; 3181 } 3182 3183 if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ) 3184 params->flags |= DISK_ZONE_DISK_URSWRZ; 3185 3186 if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) { 3187 params->optimal_seq_zones = softc->optimal_seq_zones; 3188 params->flags |= DISK_ZONE_OPT_SEQ_SET; 3189 } 3190 3191 if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) { 3192 params->optimal_nonseq_zones = 3193 softc->optimal_nonseq_zones; 3194 params->flags |= DISK_ZONE_OPT_NONSEQ_SET; 3195 } 3196 3197 if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) { 3198 params->max_seq_zones = softc->max_seq_zones; 3199 
params->flags |= DISK_ZONE_MAX_SEQ_SET; 3200 } 3201 if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP) 3202 params->flags |= DISK_ZONE_RZ_SUP; 3203 3204 if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP) 3205 params->flags |= DISK_ZONE_OPEN_SUP; 3206 3207 if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP) 3208 params->flags |= DISK_ZONE_CLOSE_SUP; 3209 3210 if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP) 3211 params->flags |= DISK_ZONE_FINISH_SUP; 3212 3213 if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP) 3214 params->flags |= DISK_ZONE_RWP_SUP; 3215 break; 3216 } 3217 default: 3218 break; 3219 } 3220 bailout: 3221 return (error); 3222 } 3223 3224 static void 3225 dastart(struct cam_periph *periph, union ccb *start_ccb) 3226 { 3227 struct da_softc *softc; 3228 3229 cam_periph_assert(periph, MA_OWNED); 3230 softc = (struct da_softc *)periph->softc; 3231 3232 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); 3233 3234 skipstate: 3235 switch (softc->state) { 3236 case DA_STATE_NORMAL: 3237 { 3238 struct bio *bp; 3239 uint8_t tag_code; 3240 3241 more: 3242 bp = cam_iosched_next_bio(softc->cam_iosched); 3243 if (bp == NULL) { 3244 if (cam_iosched_has_work_flags(softc->cam_iosched, 3245 DA_WORK_TUR)) { 3246 softc->flags |= DA_FLAG_TUR_PENDING; 3247 cam_iosched_clr_work_flags(softc->cam_iosched, 3248 DA_WORK_TUR); 3249 scsi_test_unit_ready(&start_ccb->csio, 3250 /*retries*/ da_retry_count, 3251 dadone_tur, 3252 MSG_SIMPLE_Q_TAG, 3253 SSD_FULL_SIZE, 3254 da_default_timeout * 1000); 3255 start_ccb->ccb_h.ccb_bp = NULL; 3256 start_ccb->ccb_h.ccb_state = DA_CCB_TUR; 3257 xpt_action(start_ccb); 3258 } else 3259 xpt_release_ccb(start_ccb); 3260 break; 3261 } 3262 3263 if (bp->bio_cmd == BIO_DELETE) { 3264 if (softc->delete_func != NULL) { 3265 softc->delete_func(periph, start_ccb, bp); 3266 goto out; 3267 } else { 3268 /* 3269 * Not sure this is possible, but failsafe by 3270 * lying and saying "sure, done." 
3271 */ 3272 biofinish(bp, NULL, 0); 3273 goto more; 3274 } 3275 } 3276 3277 if (cam_iosched_has_work_flags(softc->cam_iosched, 3278 DA_WORK_TUR)) { 3279 cam_iosched_clr_work_flags(softc->cam_iosched, 3280 DA_WORK_TUR); 3281 da_periph_release_locked(periph, DA_REF_TUR); 3282 } 3283 3284 if ((bp->bio_flags & BIO_ORDERED) != 0 || 3285 (softc->flags & DA_FLAG_NEED_OTAG) != 0) { 3286 softc->flags &= ~DA_FLAG_NEED_OTAG; 3287 softc->flags |= DA_FLAG_WAS_OTAG; 3288 tag_code = MSG_ORDERED_Q_TAG; 3289 } else { 3290 tag_code = MSG_SIMPLE_Q_TAG; 3291 } 3292 3293 switch (bp->bio_cmd) { 3294 case BIO_WRITE: 3295 case BIO_READ: 3296 { 3297 void *data_ptr; 3298 int rw_op; 3299 3300 biotrack(bp, __func__); 3301 3302 if (bp->bio_cmd == BIO_WRITE) { 3303 softc->flags |= DA_FLAG_DIRTY; 3304 rw_op = SCSI_RW_WRITE; 3305 } else { 3306 rw_op = SCSI_RW_READ; 3307 } 3308 3309 data_ptr = bp->bio_data; 3310 if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { 3311 rw_op |= SCSI_RW_BIO; 3312 data_ptr = bp; 3313 } 3314 3315 scsi_read_write(&start_ccb->csio, 3316 /*retries*/da_retry_count, 3317 /*cbfcnp*/dadone, 3318 /*tag_action*/tag_code, 3319 rw_op, 3320 /*byte2*/0, 3321 softc->minimum_cmd_size, 3322 /*lba*/bp->bio_pblkno, 3323 /*block_count*/bp->bio_bcount / 3324 softc->params.secsize, 3325 data_ptr, 3326 /*dxfer_len*/ bp->bio_bcount, 3327 /*sense_len*/SSD_FULL_SIZE, 3328 da_default_timeout * 1000); 3329 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 3330 start_ccb->csio.bio = bp; 3331 #endif 3332 break; 3333 } 3334 case BIO_FLUSH: 3335 /* 3336 * If we don't support sync cache, or the disk 3337 * isn't dirty, FLUSH is a no-op. Use the 3338 * allocated CCB for the next bio if one is 3339 * available. 
3340 */ 3341 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 || 3342 (softc->flags & DA_FLAG_DIRTY) == 0) { 3343 biodone(bp); 3344 goto skipstate; 3345 } 3346 3347 /* 3348 * BIO_FLUSH doesn't currently communicate 3349 * range data, so we synchronize the cache 3350 * over the whole disk. 3351 */ 3352 scsi_synchronize_cache(&start_ccb->csio, 3353 /*retries*/1, 3354 /*cbfcnp*/dadone, 3355 /*tag_action*/tag_code, 3356 /*begin_lba*/0, 3357 /*lb_count*/0, 3358 SSD_FULL_SIZE, 3359 da_default_timeout*1000); 3360 /* 3361 * Clear the dirty flag before sending the command. 3362 * Either this sync cache will be successful, or it 3363 * will fail after a retry. If it fails, it is 3364 * unlikely to be successful if retried later, so 3365 * we'll save ourselves time by just marking the 3366 * device clean. 3367 */ 3368 softc->flags &= ~DA_FLAG_DIRTY; 3369 break; 3370 case BIO_ZONE: { 3371 int error, queue_ccb; 3372 3373 queue_ccb = 0; 3374 3375 error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb); 3376 if ((error != 0) 3377 || (queue_ccb == 0)) { 3378 biofinish(bp, NULL, error); 3379 xpt_release_ccb(start_ccb); 3380 return; 3381 } 3382 break; 3383 } 3384 default: 3385 biofinish(bp, NULL, EOPNOTSUPP); 3386 xpt_release_ccb(start_ccb); 3387 return; 3388 } 3389 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; 3390 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 3391 start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); 3392 3393 out: 3394 LIST_INSERT_HEAD(&softc->pending_ccbs, 3395 &start_ccb->ccb_h, periph_links.le); 3396 3397 /* We expect a unit attention from this device */ 3398 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { 3399 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; 3400 softc->flags &= ~DA_FLAG_RETRY_UA; 3401 } 3402 3403 start_ccb->ccb_h.ccb_bp = bp; 3404 softc->refcount++; 3405 cam_periph_unlock(periph); 3406 xpt_action(start_ccb); 3407 cam_periph_lock(periph); 3408 3409 /* May have more work to do, so ensure we stay scheduled */ 3410 daschedule(periph); 3411 break; 
3412 } 3413 case DA_STATE_PROBE_WP: 3414 { 3415 void *mode_buf; 3416 int mode_buf_len; 3417 3418 if (da_disable_wp_detection) { 3419 if ((softc->flags & DA_FLAG_CAN_RC16) != 0) 3420 softc->state = DA_STATE_PROBE_RC16; 3421 else 3422 softc->state = DA_STATE_PROBE_RC; 3423 goto skipstate; 3424 } 3425 mode_buf_len = 192; 3426 mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT); 3427 if (mode_buf == NULL) { 3428 xpt_print(periph->path, "Unable to send mode sense - " 3429 "malloc failure\n"); 3430 if ((softc->flags & DA_FLAG_CAN_RC16) != 0) 3431 softc->state = DA_STATE_PROBE_RC16; 3432 else 3433 softc->state = DA_STATE_PROBE_RC; 3434 goto skipstate; 3435 } 3436 scsi_mode_sense_len(&start_ccb->csio, 3437 /*retries*/ da_retry_count, 3438 /*cbfcnp*/ dadone_probewp, 3439 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3440 /*dbd*/ FALSE, 3441 /*pc*/ SMS_PAGE_CTRL_CURRENT, 3442 /*page*/ SMS_ALL_PAGES_PAGE, 3443 /*param_buf*/ mode_buf, 3444 /*param_len*/ mode_buf_len, 3445 /*minimum_cmd_size*/ softc->minimum_cmd_size, 3446 /*sense_len*/ SSD_FULL_SIZE, 3447 /*timeout*/ da_default_timeout * 1000); 3448 start_ccb->ccb_h.ccb_bp = NULL; 3449 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP; 3450 xpt_action(start_ccb); 3451 break; 3452 } 3453 case DA_STATE_PROBE_RC: 3454 { 3455 struct scsi_read_capacity_data *rcap; 3456 3457 rcap = (struct scsi_read_capacity_data *) 3458 malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); 3459 if (rcap == NULL) { 3460 printf("dastart: Couldn't malloc read_capacity data\n"); 3461 /* da_free_periph??? 
*/ 3462 break; 3463 } 3464 scsi_read_capacity(&start_ccb->csio, 3465 /*retries*/da_retry_count, 3466 dadone_proberc, 3467 MSG_SIMPLE_Q_TAG, 3468 rcap, 3469 SSD_FULL_SIZE, 3470 /*timeout*/5000); 3471 start_ccb->ccb_h.ccb_bp = NULL; 3472 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; 3473 xpt_action(start_ccb); 3474 break; 3475 } 3476 case DA_STATE_PROBE_RC16: 3477 { 3478 struct scsi_read_capacity_data_long *rcaplong; 3479 3480 rcaplong = (struct scsi_read_capacity_data_long *) 3481 malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); 3482 if (rcaplong == NULL) { 3483 printf("dastart: Couldn't malloc read_capacity data\n"); 3484 /* da_free_periph??? */ 3485 break; 3486 } 3487 scsi_read_capacity_16(&start_ccb->csio, 3488 /*retries*/ da_retry_count, 3489 /*cbfcnp*/ dadone_proberc, 3490 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3491 /*lba*/ 0, 3492 /*reladr*/ 0, 3493 /*pmi*/ 0, 3494 /*rcap_buf*/ (uint8_t *)rcaplong, 3495 /*rcap_buf_len*/ sizeof(*rcaplong), 3496 /*sense_len*/ SSD_FULL_SIZE, 3497 /*timeout*/ da_default_timeout * 1000); 3498 start_ccb->ccb_h.ccb_bp = NULL; 3499 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; 3500 xpt_action(start_ccb); 3501 break; 3502 } 3503 case DA_STATE_PROBE_LBP: 3504 { 3505 struct scsi_vpd_logical_block_prov *lbp; 3506 3507 if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { 3508 /* 3509 * If we get here we don't support any SBC-3 delete 3510 * methods with UNMAP as the Logical Block Provisioning 3511 * VPD page support is required for devices which 3512 * support it according to T10/1799-D Revision 31 3513 * however older revisions of the spec don't mandate 3514 * this so we currently don't remove these methods 3515 * from the available set. 3516 */ 3517 softc->state = DA_STATE_PROBE_BLK_LIMITS; 3518 goto skipstate; 3519 } 3520 3521 lbp = (struct scsi_vpd_logical_block_prov *) 3522 malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); 3523 3524 if (lbp == NULL) { 3525 printf("dastart: Couldn't malloc lbp data\n"); 3526 /* da_free_periph??? 
*/ 3527 break; 3528 } 3529 3530 scsi_inquiry(&start_ccb->csio, 3531 /*retries*/da_retry_count, 3532 /*cbfcnp*/dadone_probelbp, 3533 /*tag_action*/MSG_SIMPLE_Q_TAG, 3534 /*inq_buf*/(u_int8_t *)lbp, 3535 /*inq_len*/sizeof(*lbp), 3536 /*evpd*/TRUE, 3537 /*page_code*/SVPD_LBP, 3538 /*sense_len*/SSD_MIN_SIZE, 3539 /*timeout*/da_default_timeout * 1000); 3540 start_ccb->ccb_h.ccb_bp = NULL; 3541 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; 3542 xpt_action(start_ccb); 3543 break; 3544 } 3545 case DA_STATE_PROBE_BLK_LIMITS: 3546 { 3547 struct scsi_vpd_block_limits *block_limits; 3548 3549 if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { 3550 /* Not supported skip to next probe */ 3551 softc->state = DA_STATE_PROBE_BDC; 3552 goto skipstate; 3553 } 3554 3555 block_limits = (struct scsi_vpd_block_limits *) 3556 malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); 3557 3558 if (block_limits == NULL) { 3559 printf("dastart: Couldn't malloc block_limits data\n"); 3560 /* da_free_periph??? */ 3561 break; 3562 } 3563 3564 scsi_inquiry(&start_ccb->csio, 3565 /*retries*/da_retry_count, 3566 /*cbfcnp*/dadone_probeblklimits, 3567 /*tag_action*/MSG_SIMPLE_Q_TAG, 3568 /*inq_buf*/(u_int8_t *)block_limits, 3569 /*inq_len*/sizeof(*block_limits), 3570 /*evpd*/TRUE, 3571 /*page_code*/SVPD_BLOCK_LIMITS, 3572 /*sense_len*/SSD_MIN_SIZE, 3573 /*timeout*/da_default_timeout * 1000); 3574 start_ccb->ccb_h.ccb_bp = NULL; 3575 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; 3576 xpt_action(start_ccb); 3577 break; 3578 } 3579 case DA_STATE_PROBE_BDC: 3580 { 3581 struct scsi_vpd_block_characteristics *bdc; 3582 3583 if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { 3584 softc->state = DA_STATE_PROBE_ATA; 3585 goto skipstate; 3586 } 3587 3588 bdc = (struct scsi_vpd_block_characteristics *) 3589 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); 3590 3591 if (bdc == NULL) { 3592 printf("dastart: Couldn't malloc bdc data\n"); 3593 /* da_free_periph??? 
*/ 3594 break; 3595 } 3596 3597 scsi_inquiry(&start_ccb->csio, 3598 /*retries*/da_retry_count, 3599 /*cbfcnp*/dadone_probebdc, 3600 /*tag_action*/MSG_SIMPLE_Q_TAG, 3601 /*inq_buf*/(u_int8_t *)bdc, 3602 /*inq_len*/sizeof(*bdc), 3603 /*evpd*/TRUE, 3604 /*page_code*/SVPD_BDC, 3605 /*sense_len*/SSD_MIN_SIZE, 3606 /*timeout*/da_default_timeout * 1000); 3607 start_ccb->ccb_h.ccb_bp = NULL; 3608 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; 3609 xpt_action(start_ccb); 3610 break; 3611 } 3612 case DA_STATE_PROBE_ATA: 3613 { 3614 struct ata_params *ata_params; 3615 3616 if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { 3617 if ((softc->zone_mode == DA_ZONE_HOST_AWARE) 3618 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { 3619 /* 3620 * Note that if the ATA VPD page isn't 3621 * supported, we aren't talking to an ATA 3622 * device anyway. Support for that VPD 3623 * page is mandatory for SCSI to ATA (SAT) 3624 * translation layers. 3625 */ 3626 softc->state = DA_STATE_PROBE_ZONE; 3627 goto skipstate; 3628 } 3629 daprobedone(periph, start_ccb); 3630 break; 3631 } 3632 3633 ata_params = &periph->path->device->ident_data; 3634 3635 scsi_ata_identify(&start_ccb->csio, 3636 /*retries*/da_retry_count, 3637 /*cbfcnp*/dadone_probeata, 3638 /*tag_action*/MSG_SIMPLE_Q_TAG, 3639 /*data_ptr*/(u_int8_t *)ata_params, 3640 /*dxfer_len*/sizeof(*ata_params), 3641 /*sense_len*/SSD_FULL_SIZE, 3642 /*timeout*/da_default_timeout * 1000); 3643 start_ccb->ccb_h.ccb_bp = NULL; 3644 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; 3645 xpt_action(start_ccb); 3646 break; 3647 } 3648 case DA_STATE_PROBE_ATA_LOGDIR: 3649 { 3650 struct ata_gp_log_dir *log_dir; 3651 int retval; 3652 3653 retval = 0; 3654 3655 if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) { 3656 /* 3657 * If we don't have log support, not much point in 3658 * trying to probe zone support. 
3659 */ 3660 daprobedone(periph, start_ccb); 3661 break; 3662 } 3663 3664 /* 3665 * If we have an ATA device (the SCSI ATA Information VPD 3666 * page should be present and the ATA identify should have 3667 * succeeded) and it supports logs, ask for the log directory. 3668 */ 3669 3670 log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO); 3671 if (log_dir == NULL) { 3672 xpt_print(periph->path, "Couldn't malloc log_dir " 3673 "data\n"); 3674 daprobedone(periph, start_ccb); 3675 break; 3676 } 3677 3678 retval = scsi_ata_read_log(&start_ccb->csio, 3679 /*retries*/ da_retry_count, 3680 /*cbfcnp*/ dadone_probeatalogdir, 3681 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3682 /*log_address*/ ATA_LOG_DIRECTORY, 3683 /*page_number*/ 0, 3684 /*block_count*/ 1, 3685 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3686 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3687 /*data_ptr*/ (uint8_t *)log_dir, 3688 /*dxfer_len*/ sizeof(*log_dir), 3689 /*sense_len*/ SSD_FULL_SIZE, 3690 /*timeout*/ da_default_timeout * 1000); 3691 3692 if (retval != 0) { 3693 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3694 free(log_dir, M_SCSIDA); 3695 daprobedone(periph, start_ccb); 3696 break; 3697 } 3698 start_ccb->ccb_h.ccb_bp = NULL; 3699 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR; 3700 xpt_action(start_ccb); 3701 break; 3702 } 3703 case DA_STATE_PROBE_ATA_IDDIR: 3704 { 3705 struct ata_identify_log_pages *id_dir; 3706 int retval; 3707 3708 retval = 0; 3709 3710 /* 3711 * Check here to see whether the Identify Device log is 3712 * supported in the directory of logs. If so, continue 3713 * with requesting the log of identify device pages. 
3714 */ 3715 if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) { 3716 daprobedone(periph, start_ccb); 3717 break; 3718 } 3719 3720 id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO); 3721 if (id_dir == NULL) { 3722 xpt_print(periph->path, "Couldn't malloc id_dir " 3723 "data\n"); 3724 daprobedone(periph, start_ccb); 3725 break; 3726 } 3727 3728 retval = scsi_ata_read_log(&start_ccb->csio, 3729 /*retries*/ da_retry_count, 3730 /*cbfcnp*/ dadone_probeataiddir, 3731 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3732 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3733 /*page_number*/ ATA_IDL_PAGE_LIST, 3734 /*block_count*/ 1, 3735 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3736 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3737 /*data_ptr*/ (uint8_t *)id_dir, 3738 /*dxfer_len*/ sizeof(*id_dir), 3739 /*sense_len*/ SSD_FULL_SIZE, 3740 /*timeout*/ da_default_timeout * 1000); 3741 3742 if (retval != 0) { 3743 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3744 free(id_dir, M_SCSIDA); 3745 daprobedone(periph, start_ccb); 3746 break; 3747 } 3748 start_ccb->ccb_h.ccb_bp = NULL; 3749 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR; 3750 xpt_action(start_ccb); 3751 break; 3752 } 3753 case DA_STATE_PROBE_ATA_SUP: 3754 { 3755 struct ata_identify_log_sup_cap *sup_cap; 3756 int retval; 3757 3758 retval = 0; 3759 3760 /* 3761 * Check here to see whether the Supported Capabilities log 3762 * is in the list of Identify Device logs. 
3763 */ 3764 if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) { 3765 daprobedone(periph, start_ccb); 3766 break; 3767 } 3768 3769 sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO); 3770 if (sup_cap == NULL) { 3771 xpt_print(periph->path, "Couldn't malloc sup_cap " 3772 "data\n"); 3773 daprobedone(periph, start_ccb); 3774 break; 3775 } 3776 3777 retval = scsi_ata_read_log(&start_ccb->csio, 3778 /*retries*/ da_retry_count, 3779 /*cbfcnp*/ dadone_probeatasup, 3780 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3781 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3782 /*page_number*/ ATA_IDL_SUP_CAP, 3783 /*block_count*/ 1, 3784 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3785 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3786 /*data_ptr*/ (uint8_t *)sup_cap, 3787 /*dxfer_len*/ sizeof(*sup_cap), 3788 /*sense_len*/ SSD_FULL_SIZE, 3789 /*timeout*/ da_default_timeout * 1000); 3790 3791 if (retval != 0) { 3792 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3793 free(sup_cap, M_SCSIDA); 3794 daprobedone(periph, start_ccb); 3795 break; 3796 3797 } 3798 3799 start_ccb->ccb_h.ccb_bp = NULL; 3800 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP; 3801 xpt_action(start_ccb); 3802 break; 3803 } 3804 case DA_STATE_PROBE_ATA_ZONE: 3805 { 3806 struct ata_zoned_info_log *ata_zone; 3807 int retval; 3808 3809 retval = 0; 3810 3811 /* 3812 * Check here to see whether the zoned device information 3813 * page is supported. If so, continue on to request it. 3814 * If not, skip to DA_STATE_PROBE_LOG or done. 
3815 */ 3816 if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) { 3817 daprobedone(periph, start_ccb); 3818 break; 3819 } 3820 ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA, 3821 M_NOWAIT|M_ZERO); 3822 if (ata_zone == NULL) { 3823 xpt_print(periph->path, "Couldn't malloc ata_zone " 3824 "data\n"); 3825 daprobedone(periph, start_ccb); 3826 break; 3827 } 3828 3829 retval = scsi_ata_read_log(&start_ccb->csio, 3830 /*retries*/ da_retry_count, 3831 /*cbfcnp*/ dadone_probeatazone, 3832 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3833 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3834 /*page_number*/ ATA_IDL_ZDI, 3835 /*block_count*/ 1, 3836 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3837 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3838 /*data_ptr*/ (uint8_t *)ata_zone, 3839 /*dxfer_len*/ sizeof(*ata_zone), 3840 /*sense_len*/ SSD_FULL_SIZE, 3841 /*timeout*/ da_default_timeout * 1000); 3842 3843 if (retval != 0) { 3844 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3845 free(ata_zone, M_SCSIDA); 3846 daprobedone(periph, start_ccb); 3847 break; 3848 } 3849 start_ccb->ccb_h.ccb_bp = NULL; 3850 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE; 3851 xpt_action(start_ccb); 3852 3853 break; 3854 } 3855 case DA_STATE_PROBE_ZONE: 3856 { 3857 struct scsi_vpd_zoned_bdc *bdc; 3858 3859 /* 3860 * Note that this page will be supported for SCSI protocol 3861 * devices that support ZBC (SMR devices), as well as ATA 3862 * protocol devices that are behind a SAT (SCSI to ATA 3863 * Translation) layer that supports converting ZBC commands 3864 * to their ZAC equivalents. 
3865 */ 3866 if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) { 3867 daprobedone(periph, start_ccb); 3868 break; 3869 } 3870 bdc = (struct scsi_vpd_zoned_bdc *) 3871 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); 3872 3873 if (bdc == NULL) { 3874 xpt_release_ccb(start_ccb); 3875 xpt_print(periph->path, "Couldn't malloc zone VPD " 3876 "data\n"); 3877 break; 3878 } 3879 scsi_inquiry(&start_ccb->csio, 3880 /*retries*/da_retry_count, 3881 /*cbfcnp*/dadone_probezone, 3882 /*tag_action*/MSG_SIMPLE_Q_TAG, 3883 /*inq_buf*/(u_int8_t *)bdc, 3884 /*inq_len*/sizeof(*bdc), 3885 /*evpd*/TRUE, 3886 /*page_code*/SVPD_ZONED_BDC, 3887 /*sense_len*/SSD_FULL_SIZE, 3888 /*timeout*/da_default_timeout * 1000); 3889 start_ccb->ccb_h.ccb_bp = NULL; 3890 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE; 3891 xpt_action(start_ccb); 3892 break; 3893 } 3894 } 3895 } 3896 3897 /* 3898 * In each of the methods below, while its the caller's 3899 * responsibility to ensure the request will fit into a 3900 * single device request, we might have changed the delete 3901 * method due to the device incorrectly advertising either 3902 * its supported methods or limits. 3903 * 3904 * To prevent this causing further issues we validate the 3905 * against the methods limits, and warn which would 3906 * otherwise be unnecessary. 3907 */ 3908 static void 3909 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 3910 { 3911 struct da_softc *softc = (struct da_softc *)periph->softc;; 3912 struct bio *bp1; 3913 uint8_t *buf = softc->unmap_buf; 3914 struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE]; 3915 uint64_t lba, lastlba = (uint64_t)-1; 3916 uint64_t totalcount = 0; 3917 uint64_t count; 3918 uint32_t c, lastcount = 0, ranges = 0; 3919 3920 /* 3921 * Currently this doesn't take the UNMAP 3922 * Granularity and Granularity Alignment 3923 * fields into account. 
3924 * 3925 * This could result in both unoptimal unmap 3926 * requests as as well as UNMAP calls unmapping 3927 * fewer LBA's than requested. 3928 */ 3929 3930 bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); 3931 bp1 = bp; 3932 do { 3933 /* 3934 * Note: ada and da are different in how they store the 3935 * pending bp's in a trim. ada stores all of them in the 3936 * trim_req.bps. da stores all but the first one in the 3937 * delete_run_queue. ada then completes all the bps in 3938 * its adadone() loop. da completes all the bps in the 3939 * delete_run_queue in dadone, and relies on the biodone 3940 * after to complete. This should be reconciled since there's 3941 * no real reason to do it differently. XXX 3942 */ 3943 if (bp1 != bp) 3944 bioq_insert_tail(&softc->delete_run_queue, bp1); 3945 lba = bp1->bio_pblkno; 3946 count = bp1->bio_bcount / softc->params.secsize; 3947 3948 /* Try to extend the previous range. */ 3949 if (lba == lastlba) { 3950 c = omin(count, UNMAP_RANGE_MAX - lastcount); 3951 lastlba += c; 3952 lastcount += c; 3953 scsi_ulto4b(lastcount, d[ranges - 1].length); 3954 count -= c; 3955 lba += c; 3956 totalcount += c; 3957 } else if ((softc->quirks & DA_Q_STRICT_UNMAP) && 3958 softc->unmap_gran != 0) { 3959 /* Align length of the previous range. */ 3960 if ((c = lastcount % softc->unmap_gran) != 0) { 3961 if (lastcount <= c) { 3962 totalcount -= lastcount; 3963 lastlba = (uint64_t)-1; 3964 lastcount = 0; 3965 ranges--; 3966 } else { 3967 totalcount -= c; 3968 lastlba -= c; 3969 lastcount -= c; 3970 scsi_ulto4b(lastcount, 3971 d[ranges - 1].length); 3972 } 3973 } 3974 /* Align beginning of the new range. 
*/ 3975 c = (lba - softc->unmap_gran_align) % softc->unmap_gran; 3976 if (c != 0) { 3977 c = softc->unmap_gran - c; 3978 if (count <= c) { 3979 count = 0; 3980 } else { 3981 lba += c; 3982 count -= c; 3983 } 3984 } 3985 } 3986 3987 while (count > 0) { 3988 c = omin(count, UNMAP_RANGE_MAX); 3989 if (totalcount + c > softc->unmap_max_lba || 3990 ranges >= softc->unmap_max_ranges) { 3991 xpt_print(periph->path, 3992 "%s issuing short delete %ld > %ld" 3993 "|| %d >= %d", 3994 da_delete_method_desc[softc->delete_method], 3995 totalcount + c, softc->unmap_max_lba, 3996 ranges, softc->unmap_max_ranges); 3997 break; 3998 } 3999 scsi_u64to8b(lba, d[ranges].lba); 4000 scsi_ulto4b(c, d[ranges].length); 4001 lba += c; 4002 totalcount += c; 4003 ranges++; 4004 count -= c; 4005 lastlba = lba; 4006 lastcount = c; 4007 } 4008 bp1 = cam_iosched_next_trim(softc->cam_iosched); 4009 if (bp1 == NULL) 4010 break; 4011 if (ranges >= softc->unmap_max_ranges || 4012 totalcount + bp1->bio_bcount / 4013 softc->params.secsize > softc->unmap_max_lba) { 4014 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 4015 break; 4016 } 4017 } while (1); 4018 4019 /* Align length of the last range. 
*/ 4020 if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 && 4021 (c = lastcount % softc->unmap_gran) != 0) { 4022 if (lastcount <= c) 4023 ranges--; 4024 else 4025 scsi_ulto4b(lastcount - c, d[ranges - 1].length); 4026 } 4027 4028 scsi_ulto2b(ranges * 16 + 6, &buf[0]); 4029 scsi_ulto2b(ranges * 16, &buf[2]); 4030 4031 scsi_unmap(&ccb->csio, 4032 /*retries*/da_retry_count, 4033 /*cbfcnp*/dadone, 4034 /*tag_action*/MSG_SIMPLE_Q_TAG, 4035 /*byte2*/0, 4036 /*data_ptr*/ buf, 4037 /*dxfer_len*/ ranges * 16 + 8, 4038 /*sense_len*/SSD_FULL_SIZE, 4039 da_default_timeout * 1000); 4040 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 4041 ccb->ccb_h.flags |= CAM_UNLOCKED; 4042 softc->trim_count++; 4043 softc->trim_ranges += ranges; 4044 softc->trim_lbas += totalcount; 4045 cam_iosched_submit_trim(softc->cam_iosched); 4046 } 4047 4048 static void 4049 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 4050 { 4051 struct da_softc *softc = (struct da_softc *)periph->softc; 4052 struct bio *bp1; 4053 uint8_t *buf = softc->unmap_buf; 4054 uint64_t lastlba = (uint64_t)-1; 4055 uint64_t count; 4056 uint64_t lba; 4057 uint32_t lastcount = 0, c, requestcount; 4058 int ranges = 0, off, block_count; 4059 4060 bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); 4061 bp1 = bp; 4062 do { 4063 if (bp1 != bp)//XXX imp XXX 4064 bioq_insert_tail(&softc->delete_run_queue, bp1); 4065 lba = bp1->bio_pblkno; 4066 count = bp1->bio_bcount / softc->params.secsize; 4067 requestcount = count; 4068 4069 /* Try to extend the previous range. 
*/ 4070 if (lba == lastlba) { 4071 c = omin(count, ATA_DSM_RANGE_MAX - lastcount); 4072 lastcount += c; 4073 off = (ranges - 1) * 8; 4074 buf[off + 6] = lastcount & 0xff; 4075 buf[off + 7] = (lastcount >> 8) & 0xff; 4076 count -= c; 4077 lba += c; 4078 } 4079 4080 while (count > 0) { 4081 c = omin(count, ATA_DSM_RANGE_MAX); 4082 off = ranges * 8; 4083 4084 buf[off + 0] = lba & 0xff; 4085 buf[off + 1] = (lba >> 8) & 0xff; 4086 buf[off + 2] = (lba >> 16) & 0xff; 4087 buf[off + 3] = (lba >> 24) & 0xff; 4088 buf[off + 4] = (lba >> 32) & 0xff; 4089 buf[off + 5] = (lba >> 40) & 0xff; 4090 buf[off + 6] = c & 0xff; 4091 buf[off + 7] = (c >> 8) & 0xff; 4092 lba += c; 4093 ranges++; 4094 count -= c; 4095 lastcount = c; 4096 if (count != 0 && ranges == softc->trim_max_ranges) { 4097 xpt_print(periph->path, 4098 "%s issuing short delete %ld > %ld\n", 4099 da_delete_method_desc[softc->delete_method], 4100 requestcount, 4101 (softc->trim_max_ranges - ranges) * 4102 ATA_DSM_RANGE_MAX); 4103 break; 4104 } 4105 } 4106 lastlba = lba; 4107 bp1 = cam_iosched_next_trim(softc->cam_iosched); 4108 if (bp1 == NULL) 4109 break; 4110 if (bp1->bio_bcount / softc->params.secsize > 4111 (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { 4112 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 4113 break; 4114 } 4115 } while (1); 4116 4117 block_count = howmany(ranges, ATA_DSM_BLK_RANGES); 4118 scsi_ata_trim(&ccb->csio, 4119 /*retries*/da_retry_count, 4120 /*cbfcnp*/dadone, 4121 /*tag_action*/MSG_SIMPLE_Q_TAG, 4122 block_count, 4123 /*data_ptr*/buf, 4124 /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, 4125 /*sense_len*/SSD_FULL_SIZE, 4126 da_default_timeout * 1000); 4127 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 4128 ccb->ccb_h.flags |= CAM_UNLOCKED; 4129 cam_iosched_submit_trim(softc->cam_iosched); 4130 } 4131 4132 /* 4133 * We calculate ws_max_blks here based off d_delmaxsize instead 4134 * of using softc->ws_max_blks as it is absolute max for the 4135 * device not the protocol max which may 
well be lower. 4136 */ 4137 static void 4138 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 4139 { 4140 struct da_softc *softc; 4141 struct bio *bp1; 4142 uint64_t ws_max_blks; 4143 uint64_t lba; 4144 uint64_t count; /* forward compat with WS32 */ 4145 4146 softc = (struct da_softc *)periph->softc; 4147 ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; 4148 lba = bp->bio_pblkno; 4149 count = 0; 4150 bp1 = bp; 4151 do { 4152 if (bp1 != bp)//XXX imp XXX 4153 bioq_insert_tail(&softc->delete_run_queue, bp1); 4154 count += bp1->bio_bcount / softc->params.secsize; 4155 if (count > ws_max_blks) { 4156 xpt_print(periph->path, 4157 "%s issuing short delete %ld > %ld\n", 4158 da_delete_method_desc[softc->delete_method], 4159 count, ws_max_blks); 4160 count = omin(count, ws_max_blks); 4161 break; 4162 } 4163 bp1 = cam_iosched_next_trim(softc->cam_iosched); 4164 if (bp1 == NULL) 4165 break; 4166 if (lba + count != bp1->bio_pblkno || 4167 count + bp1->bio_bcount / 4168 softc->params.secsize > ws_max_blks) { 4169 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 4170 break; 4171 } 4172 } while (1); 4173 4174 scsi_write_same(&ccb->csio, 4175 /*retries*/da_retry_count, 4176 /*cbfcnp*/dadone, 4177 /*tag_action*/MSG_SIMPLE_Q_TAG, 4178 /*byte2*/softc->delete_method == 4179 DA_DELETE_ZERO ? 0 : SWS_UNMAP, 4180 softc->delete_method == DA_DELETE_WS16 ? 
16 : 10, 4181 /*lba*/lba, 4182 /*block_count*/count, 4183 /*data_ptr*/ __DECONST(void *, zero_region), 4184 /*dxfer_len*/ softc->params.secsize, 4185 /*sense_len*/SSD_FULL_SIZE, 4186 da_default_timeout * 1000); 4187 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 4188 ccb->ccb_h.flags |= CAM_UNLOCKED; 4189 cam_iosched_submit_trim(softc->cam_iosched); 4190 } 4191 4192 static int 4193 cmd6workaround(union ccb *ccb) 4194 { 4195 struct scsi_rw_6 cmd6; 4196 struct scsi_rw_10 *cmd10; 4197 struct da_softc *softc; 4198 u_int8_t *cdb; 4199 struct bio *bp; 4200 int frozen; 4201 4202 cdb = ccb->csio.cdb_io.cdb_bytes; 4203 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; 4204 4205 if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { 4206 da_delete_methods old_method = softc->delete_method; 4207 4208 /* 4209 * Typically there are two reasons for failure here 4210 * 1. Delete method was detected as supported but isn't 4211 * 2. Delete failed due to invalid params e.g. too big 4212 * 4213 * While we will attempt to choose an alternative delete method 4214 * this may result in short deletes if the existing delete 4215 * requests from geom are big for the new method chosen. 
4216 * 4217 * This method assumes that the error which triggered this 4218 * will not retry the io otherwise a panic will occur 4219 */ 4220 dadeleteflag(softc, old_method, 0); 4221 dadeletemethodchoose(softc, DA_DELETE_DISABLE); 4222 if (softc->delete_method == DA_DELETE_DISABLE) 4223 xpt_print(ccb->ccb_h.path, 4224 "%s failed, disabling BIO_DELETE\n", 4225 da_delete_method_desc[old_method]); 4226 else 4227 xpt_print(ccb->ccb_h.path, 4228 "%s failed, switching to %s BIO_DELETE\n", 4229 da_delete_method_desc[old_method], 4230 da_delete_method_desc[softc->delete_method]); 4231 4232 while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL) 4233 cam_iosched_queue_work(softc->cam_iosched, bp); 4234 cam_iosched_queue_work(softc->cam_iosched, 4235 (struct bio *)ccb->ccb_h.ccb_bp); 4236 ccb->ccb_h.ccb_bp = NULL; 4237 return (0); 4238 } 4239 4240 /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ 4241 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && 4242 (*cdb == PREVENT_ALLOW) && 4243 (softc->quirks & DA_Q_NO_PREVENT) == 0) { 4244 if (bootverbose) 4245 xpt_print(ccb->ccb_h.path, 4246 "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); 4247 softc->quirks |= DA_Q_NO_PREVENT; 4248 return (0); 4249 } 4250 4251 /* Detect unsupported SYNCHRONIZE CACHE(10). 
*/ 4252 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && 4253 (*cdb == SYNCHRONIZE_CACHE) && 4254 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { 4255 if (bootverbose) 4256 xpt_print(ccb->ccb_h.path, 4257 "SYNCHRONIZE CACHE(10) not supported.\n"); 4258 softc->quirks |= DA_Q_NO_SYNC_CACHE; 4259 softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; 4260 return (0); 4261 } 4262 4263 /* Translation only possible if CDB is an array and cmd is R/W6 */ 4264 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || 4265 (*cdb != READ_6 && *cdb != WRITE_6)) 4266 return 0; 4267 4268 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " 4269 "increasing minimum_cmd_size to 10.\n"); 4270 softc->minimum_cmd_size = 10; 4271 4272 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); 4273 cmd10 = (struct scsi_rw_10 *)cdb; 4274 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10; 4275 cmd10->byte2 = 0; 4276 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); 4277 cmd10->reserved = 0; 4278 scsi_ulto2b(cmd6.length, cmd10->length); 4279 cmd10->control = cmd6.control; 4280 ccb->csio.cdb_len = sizeof(*cmd10); 4281 4282 /* Requeue request, unfreezing queue if necessary */ 4283 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 4284 ccb->ccb_h.status = CAM_REQUEUE_REQ; 4285 xpt_action(ccb); 4286 if (frozen) { 4287 cam_release_devq(ccb->ccb_h.path, 4288 /*relsim_flags*/0, 4289 /*reduction*/0, 4290 /*timeout*/0, 4291 /*getcount_only*/0); 4292 } 4293 return (ERESTART); 4294 } 4295 4296 static void 4297 dazonedone(struct cam_periph *periph, union ccb *ccb) 4298 { 4299 struct da_softc *softc; 4300 struct bio *bp; 4301 4302 softc = periph->softc; 4303 bp = (struct bio *)ccb->ccb_h.ccb_bp; 4304 4305 switch (bp->bio_zone.zone_cmd) { 4306 case DISK_ZONE_OPEN: 4307 case DISK_ZONE_CLOSE: 4308 case DISK_ZONE_FINISH: 4309 case DISK_ZONE_RWP: 4310 break; 4311 case DISK_ZONE_REPORT_ZONES: { 4312 uint32_t avail_len; 4313 struct disk_zone_report *rep; 4314 struct scsi_report_zones_hdr *hdr; 4315 struct 
scsi_report_zones_desc *desc;
		struct disk_zone_rep_entry *entry;
		uint32_t hdr_len, num_avail;
		uint32_t num_to_fill, i;
		int ata;

		rep = &bp->bio_zone.zone_params.report;
		avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
		/*
		 * Note that bio_resid isn't normally used for zone
		 * commands, but it is used by devstat_end_transaction_bio()
		 * to determine how much data was transferred. Because
		 * the size of the SCSI/ATA data structures is different
		 * than the size of the BIO interface structures, the
		 * amount of data actually transferred from the drive will
		 * be different than the amount of data transferred to
		 * the user.
		 */
		bp->bio_resid = ccb->csio.resid;
		hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
		if (avail_len < sizeof(*hdr)) {
			/*
			 * Is there a better error than EIO here? We asked
			 * for at least the header, and we got less than
			 * that.
			 */
			bp->bio_error = EIO;
			bp->bio_flags |= BIO_ERROR;
			bp->bio_resid = bp->bio_bcount;
			break;
		}

		/* ATA passthrough responses are little-endian; SCSI big. */
		if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
			ata = 1;
		else
			ata = 0;

		hdr_len = ata ? le32dec(hdr->length) :
		    scsi_4btoul(hdr->length);
		if (hdr_len > 0)
			rep->entries_available = hdr_len / sizeof(*desc);
		else
			rep->entries_available = 0;
		/*
		 * NOTE: using the same values for the BIO version of the
		 * same field as the SCSI/ATA values. This means we could
		 * get some additional values that aren't defined in bio.h
		 * if more values of the same field are defined later.
		 */
		rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
		rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) :
		    scsi_8btou64(hdr->maximum_lba);
		/*
		 * If the drive reports no entries that match the query,
		 * we're done.
		 */
		if (hdr_len == 0) {
			rep->entries_filled = 0;
			break;
		}

		num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
		    hdr_len / sizeof(*desc));
		/*
		 * If the drive didn't return any data, then we're done.
		 */
		if (num_avail == 0) {
			rep->entries_filled = 0;
			break;
		}

		num_to_fill = min(num_avail, rep->entries_allocated);
		/*
		 * If the user didn't allocate any entries for us to fill,
		 * we're done.
		 */
		if (num_to_fill == 0) {
			rep->entries_filled = 0;
			break;
		}

		for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
		     i < num_to_fill; i++, desc++, entry++) {
			/*
			 * NOTE: we're mapping the values here directly
			 * from the SCSI/ATA bit definitions to the bio.h
			 * definitions. There is also a warning in
			 * disk_zone.h, but the impact is that if
			 * additional values are added in the SCSI/ATA
			 * specs these will be visible to consumers of
			 * this interface.
			 */
			entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
			entry->zone_condition =
			    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
			    SRZ_ZONE_COND_SHIFT;
			entry->zone_flags |= desc->zone_flags &
			    (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
			entry->zone_length =
			    ata ? le64dec(desc->zone_length) :
			    scsi_8btou64(desc->zone_length);
			entry->zone_start_lba =
			    ata ? le64dec(desc->zone_start_lba) :
			    scsi_8btou64(desc->zone_start_lba);
			entry->write_pointer_lba =
			    ata ? le64dec(desc->write_pointer_lba) :
			    scsi_8btou64(desc->write_pointer_lba);
		}
		rep->entries_filled = num_to_fill;
		break;
	}
	case DISK_ZONE_GET_PARAMS:
	default:
		/*
		 * In theory we should not get a GET_PARAMS bio, since it
		 * should be handled without queueing the command to the
		 * drive.
		 */
		panic("%s: Invalid zone command %d", __func__,
		    bp->bio_zone.zone_cmd);
		break;
	}

	/* The REPORT ZONES buffer was allocated by the submit path; free it. */
	if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
		free(ccb->csio.data_ptr, M_SCSIDA);
}

/*
 * Completion handler for normal buffer I/O, BIO_FLUSH, BIO_ZONE and
 * delete (trim/unmap/write-same) CCBs issued from dastart().  Handles
 * error recovery via daerror()/cmd6workaround(), releases the CCB and
 * the per-CCB softc reference, and completes the bio(s) — for deletes,
 * every bio accumulated on delete_run_queue is finished with the same
 * status as the lead bio.
 */
static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct bio *bp, *bp1;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	u_int32_t priority;
	da_ccb_state state;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;

	/* I/O CCBs are issued CAM_UNLOCKED, so take the periph lock here. */
	cam_periph_lock(periph);
	bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		int error;
		int sf;

		if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
			sf = SF_RETRY_UA;
		else
			sf = 0;

		error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
		if (error == ERESTART) {
			/* A retry was scheduled, so just return. */
			cam_periph_unlock(periph);
			return;
		}
		/* daerror() may have changed ccb_bp (e.g. cmd6workaround). */
		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if (error != 0) {
			int queued_error;

			/*
			 * return all queued I/O with EIO, so that
			 * the client can retry these I/Os in the
			 * proper order should it attempt to recover.
			 */
			queued_error = EIO;

			if (error == ENXIO
			 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
				/*
				 * Catastrophic error. Mark our pack as
				 * invalid.
				 *
				 * XXX See if this is really a media
				 * XXX change first?
				 */
				xpt_print(periph->path, "Invalidating pack\n");
				softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
				softc->invalidations++;
#endif
				queued_error = ENXIO;
			}
			cam_iosched_flush(softc->cam_iosched, NULL,
			    queued_error);
			if (bp != NULL) {
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			}
		} else if (bp != NULL) {
			if (state == DA_CCB_DELETE)
				bp->bio_resid = 0;
			else
				bp->bio_resid = csio->resid;
			bp->bio_error = 0;
			if (bp->bio_resid != 0)
				bp->bio_flags |= BIO_ERROR;
		}
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(done_ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	} else if (bp != NULL) {
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			panic("REQ_CMP with QFRZN");
		if (bp->bio_cmd == BIO_ZONE)
			dazonedone(periph, done_ccb);
		else if (state == DA_CCB_DELETE)
			bp->bio_resid = 0;
		else
			bp->bio_resid = csio->resid;
		/* A short transfer on success still counts as an error. */
		if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE))
			bp->bio_flags |= BIO_ERROR;
		if (softc->error_inject != 0) {
			bp->bio_error = softc->error_inject;
			bp->bio_resid = bp->bio_bcount;
			bp->bio_flags |= BIO_ERROR;
			softc->error_inject = 0;
		}
	}

	if (bp != NULL)
		biotrack(bp, __func__);
	LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
	if (LIST_EMPTY(&softc->pending_ccbs))
		softc->flags |= DA_FLAG_WAS_OTAG;

	/*
	 * We need to call cam_iosched before we call biodone so that we don't
	 * measure any activity that happens in the completion routine, which in
	 * the case of sendfile can be quite extensive. Release the periph
	 * refcount taken in dastart() for each CCB.
	 */
	cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
	xpt_release_ccb(done_ccb);
	KASSERT(softc->refcount >= 1, ("dadone softc %p refcount %d", softc, softc->refcount));
	softc->refcount--;
	if (state == DA_CCB_DELETE) {
		TAILQ_HEAD(, bio) queue;

		/* Take the whole coalesced delete run for completion below. */
		TAILQ_INIT(&queue);
		TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
		softc->delete_run_queue.insert_point = NULL;
		/*
		 * Normally, the xpt_release_ccb() above would make sure
		 * that when we have more work to do, that work would
		 * get kicked off. However, we specifically keep
		 * delete_running set to 0 before the call above to
		 * allow other I/O to progress when many BIO_DELETE
		 * requests are pushed down. We set delete_running to 0
		 * and call daschedule again so that we don't stall if
		 * there are no other I/Os pending apart from BIO_DELETEs.
		 * (NOTE(review): "delete_running" looks historical — the
		 * flag now lives inside cam_iosched; confirm and update.)
		 */
		cam_iosched_trim_done(softc->cam_iosched);
		daschedule(periph);
		cam_periph_unlock(periph);
		/* Propagate the lead bio's status to every merged bio. */
		while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
			TAILQ_REMOVE(&queue, bp1, bio_queue);
			bp1->bio_error = bp->bio_error;
			if (bp->bio_flags & BIO_ERROR) {
				bp1->bio_flags |= BIO_ERROR;
				bp1->bio_resid = bp1->bio_bcount;
			} else
				bp1->bio_resid = 0;
			biodone(bp1);
		}
	} else {
		daschedule(periph);
		cam_periph_unlock(periph);
	}
	if (bp != NULL)
		biodone(bp);
	return;
}

/*
 * Completion handler for the MODE SENSE issued from DA_STATE_PROBE_WP
 * (write-protect detection via the device-specific parameter byte).
 */
static void
dadone_probewp(struct cam_periph *periph, union ccb *done_ccb)
{
	struct scsi_mode_header_6 *mode_hdr6;
	struct scsi_mode_header_10 *mode_hdr10;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	u_int32_t priority;
	uint8_t dev_spec;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probewp\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph,
	    MA_OWNED);

	KASSERT(softc->state == DA_STATE_PROBE_WP,
	    ("State (%d) not PROBE_WP in dadone_probewp, periph %p ccb %p",
		softc->state, periph, done_ccb));
	KASSERT((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) == DA_CCB_PROBE_WP,
	    ("CCB State (%lu) not PROBE_WP in dadone_probewp, periph %p ccb %p",
		(unsigned long)csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK, periph,
		done_ccb));

	/*
	 * The mode header layout depends on which MODE SENSE (6 vs. 10) was
	 * issued, which follows minimum_cmd_size.
	 *
	 * NOTE(review): data_ptr is parsed before the completion status is
	 * checked; dev_spec is only consumed in the success branch below, so
	 * stale buffer contents on failure are harmless here.
	 */
	if (softc->minimum_cmd_size > 6) {
		mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr;
		dev_spec = mode_hdr10->dev_spec;
	} else {
		mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr;
		dev_spec = mode_hdr6->dev_spec;
	}
	if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
		/* Bit 7 of the device-specific parameter is write protect. */
		if ((dev_spec & 0x80) != 0)
			softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT;
		else
			softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT;
	} else {
		int error;

		error = daerror(done_ccb, CAM_RETRY_SELTO,
				SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);
	/* Next probe step: READ CAPACITY(16) if believed supported, else (10). */
	if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
		softc->state = DA_STATE_PROBE_RC16;
	else
		softc->state = DA_STATE_PROBE_RC;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}

/*
 * Completion handler for both READ CAPACITY(10) (DA_CCB_PROBE_RC) and
 * READ CAPACITY(16) (DA_CCB_PROBE_RC16).  On success it sets the disk
 * geometry and builds the announcement string; on failure it falls back
 * from RC16 to RC10 where appropriate or invalidates the periph.  It then
 * advances the probe state machine (LBP probe if logical block
 * provisioning is indicated, otherwise BDC).
 */
static void
dadone_proberc(struct cam_periph *periph, union ccb *done_ccb)
{
	struct scsi_read_capacity_data *rdcap;
	struct scsi_read_capacity_data_long *rcaplong;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	da_ccb_state state;
	char *announce_buf;
	u_int32_t priority;
	int lbp, n;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("dadone_proberc\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;

	KASSERT(softc->state == DA_STATE_PROBE_RC || softc->state == DA_STATE_PROBE_RC16,
	    ("State (%d) not PROBE_RC* in dadone_proberc, periph %p ccb %p",
		softc->state, periph, done_ccb));
	KASSERT(state == DA_CCB_PROBE_RC || state == DA_CCB_PROBE_RC16,
	    ("CCB State (%lu) not PROBE_RC* in dadone_probewp, periph %p ccb %p",
		(unsigned long)state, periph, done_ccb));

	lbp = 0;
	rdcap = NULL;
	rcaplong = NULL;
	/* XXX TODO: can this be a malloc? */
	announce_buf = softc->announce_temp;
	bzero(announce_buf, DA_ANNOUNCETMP_SZ);

	if (state == DA_CCB_PROBE_RC)
		rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
	else
		rcaplong = (struct scsi_read_capacity_data_long *)
			csio->data_ptr;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		struct disk_params *dp;
		uint32_t block_size;
		uint64_t maxsector;
		u_int lalba;	/* Lowest aligned LBA. */

		if (state == DA_CCB_PROBE_RC) {
			block_size = scsi_4btoul(rdcap->length);
			maxsector = scsi_4btoul(rdcap->addr);
			lalba = 0;

			/*
			 * According to SBC-2, if the standard 10
			 * byte READ CAPACITY command returns 2^32,
			 * we should issue the 16 byte version of
			 * the command, since the device in question
			 * has more sectors than can be represented
			 * with the short version of the command.
			 */
			if (maxsector == 0xffffffff) {
				free(rdcap, M_SCSIDA);
				softc->state = DA_STATE_PROBE_RC16;
				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}
		} else {
			block_size = scsi_4btoul(rcaplong->length);
			maxsector = scsi_8btou64(rcaplong->addr);
			lalba = scsi_2btoul(rcaplong->lalba_lbp);
		}

		/*
		 * Because GEOM code just will panic us if we
		 * give them an 'illegal' value we'll avoid that
		 * here.
		 */
		if (block_size == 0) {
			block_size = 512;
			if (maxsector == 0)
				maxsector = -1;
		}
		if (block_size >= MAXPHYS) {
			xpt_print(periph->path,
			    "unsupportable block size %ju\n",
			    (uintmax_t) block_size);
			announce_buf = NULL;
			cam_periph_invalidate(periph);
		} else {
			/*
			 * We pass rcaplong into dasetgeom(),
			 * because it will only use it if it is
			 * non-NULL.
			 */
			dasetgeom(periph, block_size, maxsector,
			    rcaplong, sizeof(*rcaplong));
			lbp = (lalba & SRC16_LBPME_A);
			dp = &softc->params;
			n = snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
			    "%juMB (%ju %u byte sectors",
			    ((uintmax_t)dp->secsize * dp->sectors) /
			     (1024 * 1024),
			    (uintmax_t)dp->sectors, dp->secsize);
			if (softc->p_type != 0) {
				n += snprintf(announce_buf + n,
				    DA_ANNOUNCETMP_SZ - n,
				    ", DIF type %d", softc->p_type);
			}
			snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ")");
		}
	} else {
		int error;

		/*
		 * Retry any UNIT ATTENTION type errors.  They
		 * are expected at boot.
		 */
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART) {
			/*
			 * A retry was scheduled, so
			 * just return.
			 */
			return;
		} else if (error != 0) {
			int asc, ascq;
			int sense_key, error_code;
			int have_sense;
			cam_status status;
			struct ccb_getdev cgd;

			/* Don't wedge this device's queue */
			status = done_ccb->ccb_h.status;
			if ((status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);


			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
			    CAM_PRIORITY_NORMAL);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);

			if (scsi_extract_sense_ccb(done_ccb,
			    &error_code, &sense_key, &asc, &ascq))
				have_sense = TRUE;
			else
				have_sense = FALSE;

			/*
			 * If we tried READ CAPACITY(16) and failed,
			 * fallback to READ CAPACITY(10).
			 */
			if ((state == DA_CCB_PROBE_RC16) &&
			    (softc->flags & DA_FLAG_CAN_RC16) &&
			    (((csio->ccb_h.status & CAM_STATUS_MASK) ==
				CAM_REQ_INVALID) ||
			     ((have_sense) &&
			      (error_code == SSD_CURRENT_ERROR ||
			       error_code == SSD_DESC_CURRENT_ERROR) &&
			      (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
				cam_periph_assert(periph, MA_OWNED);
				softc->flags &= ~DA_FLAG_CAN_RC16;
				free(rdcap, M_SCSIDA);
				softc->state = DA_STATE_PROBE_RC;
				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}

			/*
			 * Attach to anything that claims to be a
			 * direct access or optical disk device,
			 * as long as it doesn't return a "Logical
			 * unit not supported" (0x25) error.
			 * "Internal Target Failure" (0x44) is also
			 * special and typically means that the
			 * device is a SATA drive behind a SATL
			 * translation that's fallen into a
			 * terminally fatal state.
			 */
			if ((have_sense)
			 && (asc != 0x25) && (asc != 0x44)
			 && (error_code == SSD_CURRENT_ERROR
			  || error_code == SSD_DESC_CURRENT_ERROR)) {
				const char *sense_key_desc;
				const char *asc_desc;

				dasetgeom(periph, 512, -1, NULL, 0);
				scsi_sense_desc(sense_key, asc, ascq,
				    &cgd.inq_data, &sense_key_desc,
				    &asc_desc);
				snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
				    "Attempt to query device "
				    "size failed: %s, %s",
				    sense_key_desc, asc_desc);
			} else {
				if (have_sense)
					scsi_sense_print(&done_ccb->csio);
				else {
					xpt_print(periph->path,
					    "got CAM status %#x\n",
					    done_ccb->ccb_h.status);
				}

				xpt_print(periph->path, "fatal error, "
				    "failed to attach to device\n");

				announce_buf = NULL;

				/*
				 * Free up resources.
				 */
				cam_periph_invalidate(periph);
			}
		}
	}
	free(csio->data_ptr, M_SCSIDA);
	if (announce_buf != NULL &&
	    ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
		struct sbuf sb;

		sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ,
		    SBUF_FIXEDLEN);
		xpt_announce_periph_sbuf(periph, &sb, announce_buf);
		xpt_announce_quirks_sbuf(periph, &sb, softc->quirks,
		    DA_Q_BIT_STRING);
		sbuf_finish(&sb);
		sbuf_putbuf(&sb);

		/*
		 * Create our sysctl variables, now that we know
		 * we have successfully attached.
		 */
		/* increase the refcount */
		if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) {
			taskqueue_enqueue(taskqueue_thread,
					  &softc->sysctl_task);
		} else {
			/* XXX This message is useless! */
			xpt_print(periph->path, "fatal error, "
			    "could not acquire reference count\n");
		}
	}

	/* We already probed the device. */
	if (softc->flags & DA_FLAG_PROBED) {
		daprobedone(periph, done_ccb);
		return;
	}

	/* Ensure re-probe doesn't see old delete.
	 */
	softc->delete_available = 0;
	dadeleteflag(softc, DA_DELETE_ZERO, 1);
	if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
		/*
		 * Based on older SBC-3 spec revisions
		 * any of the UNMAP methods "may" be
		 * available via LBP given this flag so
		 * we flag all of them as available and
		 * then remove those which further
		 * probes confirm aren't available
		 * later.
		 *
		 * We could also check readcap(16) p_type
		 * flag to exclude one or more invalid
		 * write same (X) types here
		 */
		dadeleteflag(softc, DA_DELETE_WS16, 1);
		dadeleteflag(softc, DA_DELETE_WS10, 1);
		dadeleteflag(softc, DA_DELETE_UNMAP, 1);

		softc->state = DA_STATE_PROBE_LBP;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}

	softc->state = DA_STATE_PROBE_BDC;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}

/*
 * Completion handler for the Logical Block Provisioning VPD page inquiry
 * (DA_STATE_PROBE_LBP).  On success it narrows the delete methods flagged
 * as available by dadone_proberc() to the ones the page actually reports
 * (WRITE SAME(16), WRITE SAME(10), UNMAP).  Always advances the probe
 * state machine to the Block Limits VPD probe.
 */
static void
dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb)
{
	struct scsi_vpd_logical_block_prov *lbp;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	u_int32_t priority;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probelbp\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		/*
		 * T10/1799-D Revision 31 states at least one of these
		 * must be supported but we don't currently enforce this.
		 */
		dadeleteflag(softc, DA_DELETE_WS16,
		    (lbp->flags & SVPD_LBP_WS16));
		dadeleteflag(softc, DA_DELETE_WS10,
		    (lbp->flags & SVPD_LBP_WS10));
		dadeleteflag(softc, DA_DELETE_UNMAP,
		    (lbp->flags & SVPD_LBP_UNMAP));
	} else {
		int error;
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}

			/*
			 * Failure indicates we don't support any SBC-3
			 * delete methods with UNMAP
			 */
		}
	}

	free(lbp, M_SCSIDA);
	softc->state = DA_STATE_PROBE_BLK_LIMITS;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}

/*
 * Completion handler for the Block Limits VPD page inquiry
 * (DA_STATE_PROBE_BLK_LIMITS).  On success it records the maximum
 * transfer length, UNMAP limits/granularity and the WRITE SAME maximum
 * length; on failure it installs conservative UNMAP defaults.  Always
 * advances the probe state machine to the BDC probe.
 */
static void
dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb)
{
	struct scsi_vpd_block_limits *block_limits;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	u_int32_t priority;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeblklimits\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t max_txfer_len = scsi_4btoul(
			block_limits->max_txfer_len);
		uint32_t max_unmap_lba_cnt = scsi_4btoul(
			block_limits->max_unmap_lba_cnt);
		uint32_t max_unmap_blk_cnt = scsi_4btoul(
			block_limits->max_unmap_blk_cnt);
		uint32_t unmap_gran = scsi_4btoul(
			block_limits->opt_unmap_grain);
		uint32_t unmap_gran_align = scsi_4btoul(
			block_limits->unmap_grain_align);
		uint64_t ws_max_blks = scsi_8btou64(
			block_limits->max_write_same_length);

		if (max_txfer_len != 0) {
			softc->disk->d_maxsize = MIN(softc->maxio,
			    (off_t)max_txfer_len * softc->params.secsize);
		}

		/*
		 * We should already support UNMAP but we check lba
		 * and block count to be sure
		 */
		if (max_unmap_lba_cnt != 0x00L &&
		    max_unmap_blk_cnt != 0x00L) {
			softc->unmap_max_lba = max_unmap_lba_cnt;
			softc->unmap_max_ranges = min(max_unmap_blk_cnt,
				UNMAP_MAX_RANGES);
			if (unmap_gran > 1) {
				softc->unmap_gran = unmap_gran;
				/*
				 * The top bit of the alignment field is the
				 * "valid" indicator; the low 31 bits carry
				 * the alignment itself.
				 */
				if (unmap_gran_align & 0x80000000) {
					softc->unmap_gran_align =
					    unmap_gran_align & 0x7fffffff;
				}
			}
		} else {
			/*
			 * Unexpected UNMAP limits which means the
			 * device doesn't actually support UNMAP
			 */
			dadeleteflag(softc, DA_DELETE_UNMAP, 0);
		}

		if (ws_max_blks != 0x00L)
			softc->ws_max_blks = ws_max_blks;
	} else {
		int error;
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}

			/*
			 * Failure here doesn't mean UNMAP is not
			 * supported as this is an optional page.
			 */
			softc->unmap_max_lba = 1;
			softc->unmap_max_ranges = 1;
		}
	}

	free(block_limits, M_SCSIDA);
	softc->state = DA_STATE_PROBE_BDC;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}

/*
 * Completion handler for the Block Device Characteristics VPD page
 * inquiry (DA_STATE_PROBE_BDC).  On success it records the medium
 * rotation rate (disabling iosched queue sorting for non-rotating media)
 * and, if the page's Zoned field is set, the zone mode/interface.  Always
 * advances the probe state machine to the ATA IDENTIFY probe.
 */
static void
dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb)
{
	struct scsi_vpd_block_device_characteristics *bdc;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	u_int32_t priority;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probebdc\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	bdc = (struct scsi_vpd_block_device_characteristics *)csio->data_ptr;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t valid_len;

		/*
		 * Disable queue sorting for non-rotational media
		 * by default.
		 */
		u_int16_t old_rate = softc->disk->d_rotation_rate;

		valid_len = csio->dxfer_len - csio->resid;
		if (SBDC_IS_PRESENT(bdc, valid_len,
		    medium_rotation_rate)) {
			softc->disk->d_rotation_rate =
				scsi_2btoul(bdc->medium_rotation_rate);
			if (softc->disk->d_rotation_rate ==
			    SVPD_BDC_RATE_NON_ROTATING) {
				cam_iosched_set_sort_queue(
				    softc->cam_iosched, 0);
				softc->rotating = 0;
			}
			if (softc->disk->d_rotation_rate != old_rate) {
				disk_attr_changed(softc->disk,
				    "GEOM::rotation_rate", M_NOWAIT);
			}
		}
		if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
		 && (softc->zone_mode == DA_ZONE_NONE)) {
			int ata_proto;

			if (scsi_vpd_supported_page(periph,
			    SVPD_ATA_INFORMATION))
				ata_proto = 1;
			else
				ata_proto = 0;

			/*
			 * The Zoned field will only be set for
			 * Drive Managed and Host Aware drives.  If
			 * they are Host Managed, the device type
			 * in the standard INQUIRY data should be
			 * set to T_ZBC_HM (0x14).
			 */
			if ((bdc->flags & SVPD_ZBC_MASK) ==
			    SVPD_HAW_ZBC) {
				softc->zone_mode = DA_ZONE_HOST_AWARE;
				softc->zone_interface = (ata_proto) ?
				   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
			} else if ((bdc->flags & SVPD_ZBC_MASK) ==
			     SVPD_DM_ZBC) {
				softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
				softc->zone_interface = (ata_proto) ?
				   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
			} else if ((bdc->flags & SVPD_ZBC_MASK) !=
				  SVPD_ZBC_NR) {
				xpt_print(periph->path, "Unknown zoned "
				    "type %#x",
				    bdc->flags & SVPD_ZBC_MASK);
			}
		}
	} else {
		int error;
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(bdc, M_SCSIDA);
	softc->state = DA_STATE_PROBE_ATA;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}

/*
 * Completion handler for the ATA IDENTIFY data fetched via SAT
 * (DA_STATE_PROBE_ATA).  On success it records TRIM support/limits, the
 * media rotation rate, DMA and general-purpose-log capabilities, and (if
 * not already set) the zone mode from the IDENTIFY data.  For zoned
 * drives it continues the probe with either the ATA log directory or the
 * SCSI Zoned VPD page; otherwise the probe is finished.
 */
static void
dadone_probeata(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ata_params *ata_params;
	struct ccb_scsiio *csio;
	struct da_softc *softc;
	u_int32_t priority;
	int continue_probe;
	int error;
	int16_t *ptr;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeata\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	ata_params = (struct ata_params *)csio->data_ptr;
	/*
	 * NOTE(review): ptr is declared int16_t * but assigned a
	 * uint16_t * cast, and is not referenced again in this function —
	 * looks like dead code; candidate for cleanup.
	 */
	ptr = (uint16_t *)ata_params;
	continue_probe = 0;
	error = 0;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status &
	    CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint16_t old_rate;

		ata_param_fixup(ata_params);
		if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
		    (softc->quirks & DA_Q_NO_UNMAP) == 0) {
			dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
			if (ata_params->max_dsm_blocks != 0)
				softc->trim_max_ranges = min(
				  softc->trim_max_ranges,
				  ata_params->max_dsm_blocks *
				  ATA_DSM_BLK_RANGES);
		}
		/*
		 * Disable queue sorting for non-rotational media
		 * by default.
		 */
		old_rate = softc->disk->d_rotation_rate;
		softc->disk->d_rotation_rate = ata_params->media_rotation_rate;
		if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) {
			cam_iosched_set_sort_queue(softc->cam_iosched, 0);
			softc->rotating = 0;
		}
		if (softc->disk->d_rotation_rate != old_rate) {
			disk_attr_changed(softc->disk,
			    "GEOM::rotation_rate", M_NOWAIT);
		}

		cam_periph_assert(periph, MA_OWNED);
		if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
			softc->flags |= DA_FLAG_CAN_ATA_DMA;

		if (ata_params->support.extension & ATA_SUPPORT_GENLOG)
			softc->flags |= DA_FLAG_CAN_ATA_LOG;

		/*
		 * At this point, if we have a SATA host aware drive,
		 * we communicate via ATA passthrough unless the
		 * SAT layer supports ZBC -> ZAC translation.  In
		 * that case,
		 *
		 * XXX KDM figure out how to detect a host managed
		 * SATA drive.
		 */
		if (softc->zone_mode == DA_ZONE_NONE) {
			/*
			 * Note that we don't override the zone
			 * mode or interface if it has already been
			 * set.  This is because it has either been
			 * set as a quirk, or when we probed the
			 * SCSI Block Device Characteristics page,
			 * the zoned field was set.  The latter
			 * means that the SAT layer supports ZBC to
			 * ZAC translation, and we would prefer to
			 * use that if it is available.
			 */
			if ((ata_params->support3 &
			    ATA_SUPPORT_ZONE_MASK) ==
			    ATA_SUPPORT_ZONE_HOST_AWARE) {
				softc->zone_mode = DA_ZONE_HOST_AWARE;
				softc->zone_interface =
				    DA_ZONE_IF_ATA_PASS;
			} else if ((ata_params->support3 &
				ATA_SUPPORT_ZONE_MASK) ==
				ATA_SUPPORT_ZONE_DEV_MANAGED) {
				softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
			}
		}

	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
	 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
		/*
		 * If the ATA IDENTIFY failed, we could be talking
		 * to a SCSI drive, although that seems unlikely,
		 * since the drive did report that it supported the
		 * ATA Information VPD page.  If the ATA IDENTIFY
		 * succeeded, and the SAT layer doesn't support
		 * ZBC -> ZAC translation, continue on to get the
		 * directory of ATA logs, and complete the rest of
		 * the ZAC probe.  If the SAT layer does support
		 * ZBC -> ZAC translation, we want to use that,
		 * and we'll probe the SCSI Zoned Block Device
		 * Characteristics VPD page next.
		 */
		if ((error == 0)
		 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
		 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
			softc->state = DA_STATE_PROBE_ATA_LOGDIR;
		else
			softc->state = DA_STATE_PROBE_ZONE;
		continue_probe = 1;
	}
	if (continue_probe != 0) {
		xpt_schedule(periph, priority);
		xpt_release_ccb(done_ccb);
		return;
	} else
		daprobedone(periph, done_ccb);
	return;
}

/*
 * Completion handler for the ATA General Purpose log directory read
 * (DA_STATE_PROBE_ATA_LOGDIR).  On success it caches the directory and
 * sets DA_FLAG_CAN_ATA_IDLOG if the Identify Device Data log is listed
 * with a non-zero page count; on failure ATA log support is cleared.
 * Continues with the Identify Data log directory probe if available,
 * otherwise finishes the probe.
 */
static void
dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	u_int32_t priority;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatalogdir\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);
	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		error = 0;
		softc->valid_logdir_len = 0;
		bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
		softc->valid_logdir_len = csio->dxfer_len - csio->resid;
		if (softc->valid_logdir_len > 0)
			bcopy(csio->data_ptr, &softc->ata_logdir,
			    min(softc->valid_logdir_len,
				sizeof(softc->ata_logdir)));
		/*
		 * Figure out whether the Identify Device log is
		 * supported.  The General Purpose log directory
		 * has a header, and lists the number of pages
		 * available for each GP log identified by the
		 * offset into the list.
		 */
		if ((softc->valid_logdir_len >=
		    ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
		 && (le16dec(softc->ata_logdir.header) ==
		     ATA_GP_LOG_DIR_VERSION)
		 && (le16dec(&softc->ata_logdir.num_pages[
		    (ATA_IDENTIFY_DATA_LOG *
		    sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
			softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
		} else {
			softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			/*
			 * If we can't get the ATA log directory,
			 * then ATA logs are effectively not
			 * supported even if the bit is set in the
			 * identify data.
			 */
			softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
					  DA_FLAG_CAN_ATA_IDLOG);
			if ((done_ccb->ccb_h.status &
			     CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	if ((error == 0)
	 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
		softc->state = DA_STATE_PROBE_ATA_IDDIR;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	daprobedone(periph, done_ccb);
	return;
}

/*
 * Completion handler for the ATA Identify Device Data log directory read
 * (DA_STATE_PROBE_ATA_IDDIR).  On success it caches the directory and
 * scans its entries for the Supported Capabilities and Zoned Device
 * Information pages, setting DA_FLAG_CAN_ATA_SUPCAP / DA_FLAG_CAN_ATA_ZONE
 * accordingly.  Continues with the Supported Capabilities probe if
 * available, otherwise finishes the probe.
 */
static void
dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	u_int32_t priority;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeataiddir\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		off_t entries_offset, max_entries;
		error = 0;

softc->valid_iddir_len = 0; 5455 bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); 5456 softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP | 5457 DA_FLAG_CAN_ATA_ZONE); 5458 softc->valid_iddir_len = csio->dxfer_len - csio->resid; 5459 if (softc->valid_iddir_len > 0) 5460 bcopy(csio->data_ptr, &softc->ata_iddir, 5461 min(softc->valid_iddir_len, 5462 sizeof(softc->ata_iddir))); 5463 5464 entries_offset = 5465 __offsetof(struct ata_identify_log_pages,entries); 5466 max_entries = softc->valid_iddir_len - entries_offset; 5467 if ((softc->valid_iddir_len > (entries_offset + 1)) 5468 && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION) 5469 && (softc->ata_iddir.entry_count > 0)) { 5470 int num_entries, i; 5471 5472 num_entries = softc->ata_iddir.entry_count; 5473 num_entries = min(num_entries, 5474 softc->valid_iddir_len - entries_offset); 5475 for (i = 0; i < num_entries && i < max_entries; i++) { 5476 if (softc->ata_iddir.entries[i] == 5477 ATA_IDL_SUP_CAP) 5478 softc->flags |= DA_FLAG_CAN_ATA_SUPCAP; 5479 else if (softc->ata_iddir.entries[i] == 5480 ATA_IDL_ZDI) 5481 softc->flags |= DA_FLAG_CAN_ATA_ZONE; 5482 5483 if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) 5484 && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) 5485 break; 5486 } 5487 } 5488 } else { 5489 error = daerror(done_ccb, CAM_RETRY_SELTO, 5490 SF_RETRY_UA|SF_NO_PRINT); 5491 if (error == ERESTART) 5492 return; 5493 else if (error != 0) { 5494 /* 5495 * If we can't get the ATA Identify Data log 5496 * directory, then it effectively isn't 5497 * supported even if the ATA Log directory 5498 * a non-zero number of pages present for 5499 * this log. 
5500 */ 5501 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; 5502 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5503 /* Don't wedge this device's queue */ 5504 cam_release_devq(done_ccb->ccb_h.path, 5505 /*relsim_flags*/0, 5506 /*reduction*/0, 5507 /*timeout*/0, 5508 /*getcount_only*/0); 5509 } 5510 } 5511 } 5512 5513 free(csio->data_ptr, M_SCSIDA); 5514 5515 if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) { 5516 softc->state = DA_STATE_PROBE_ATA_SUP; 5517 xpt_release_ccb(done_ccb); 5518 xpt_schedule(periph, priority); 5519 return; 5520 } 5521 daprobedone(periph, done_ccb); 5522 return; 5523 } 5524 5525 static void 5526 dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb) 5527 { 5528 struct da_softc *softc; 5529 struct ccb_scsiio *csio; 5530 u_int32_t priority; 5531 int error; 5532 5533 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatasup\n")); 5534 5535 softc = (struct da_softc *)periph->softc; 5536 priority = done_ccb->ccb_h.pinfo.priority; 5537 csio = &done_ccb->csio; 5538 5539 cam_periph_assert(periph, MA_OWNED); 5540 5541 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5542 uint32_t valid_len; 5543 size_t needed_size; 5544 struct ata_identify_log_sup_cap *sup_cap; 5545 error = 0; 5546 5547 sup_cap = (struct ata_identify_log_sup_cap *)csio->data_ptr; 5548 valid_len = csio->dxfer_len - csio->resid; 5549 needed_size = __offsetof(struct ata_identify_log_sup_cap, 5550 sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); 5551 if (valid_len >= needed_size) { 5552 uint64_t zoned, zac_cap; 5553 5554 zoned = le64dec(sup_cap->zoned_cap); 5555 if (zoned & ATA_ZONED_VALID) { 5556 /* 5557 * This should have already been 5558 * set, because this is also in the 5559 * ATA identify data. 
				 */
				if ((zoned & ATA_ZONED_MASK) ==
				    ATA_SUPPORT_ZONE_HOST_AWARE)
					softc->zone_mode = DA_ZONE_HOST_AWARE;
				else if ((zoned & ATA_ZONED_MASK) ==
				    ATA_SUPPORT_ZONE_DEV_MANAGED)
					softc->zone_mode =
					    DA_ZONE_DRIVE_MANAGED;
			}

			/* Record which ZAC management commands are valid. */
			zac_cap = le64dec(sup_cap->sup_zac_cap);
			if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
				if (zac_cap & ATA_REPORT_ZONES_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_RZ_SUP;
				if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_OPEN_SUP;
				if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_CLOSE_SUP;
				if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_FINISH_SUP;
				if (zac_cap & ATA_ND_RWP_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_RWP_SUP;
			} else {
				/*
				 * This field was introduced in
				 * ACS-4, r08 on April 28th, 2015.
				 * If the drive firmware was written
				 * to an earlier spec, it won't have
				 * the field.  So, assume all
				 * commands are supported.
				 */
				softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
			}
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			/*
			 * If we can't get the ATA Identify Data
			 * Supported Capabilities page, clear the
			 * flag...
			 */
			softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
			/*
			 * And clear zone capabilities.
			 */
			softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
				    /*relsim_flags*/0,
				    /*reduction*/0,
				    /*timeout*/0,
				    /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
		softc->state = DA_STATE_PROBE_ATA_ZONE;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	daprobedone(periph, done_ccb);
	return;
}

/*
 * Completion handler for the ATA Zoned Device Information log page
 * probe.  Parses the zone capability bits and the optimal/maximum
 * zone counts into the softc.
 */
static void
dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatazone\n"));

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		struct ata_zoned_info_log *zi_log;
		uint32_t valid_len;
		size_t needed_size;

		zi_log = (struct ata_zoned_info_log *)csio->data_ptr;

		valid_len = csio->dxfer_len - csio->resid;
		needed_size = __offsetof(struct ata_zoned_info_log,
		    version_info) + 1 + sizeof(zi_log->version_info);
		if (valid_len >= needed_size) {
			uint64_t tmpvar;

			tmpvar = le64dec(zi_log->zoned_cap);
			if (tmpvar & ATA_ZDI_CAP_VALID) {
				if (tmpvar & ATA_ZDI_CAP_URSWRZ)
					softc->zone_flags |=
					    DA_ZONE_FLAG_URSWRZ;
				else
					softc->zone_flags &=
					    ~DA_ZONE_FLAG_URSWRZ;
			}
			tmpvar = le64dec(zi_log->optimal_seq_zones);
			if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
				softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
				softc->optimal_seq_zones = (tmpvar &
				    ATA_ZDI_OPT_SEQ_MASK);
			} else {
				softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET;
softc->optimal_seq_zones = 0; 5682 } 5683 5684 tmpvar =le64dec(zi_log->optimal_nonseq_zones); 5685 if (tmpvar & ATA_ZDI_OPT_NS_VALID) { 5686 softc->zone_flags |= 5687 DA_ZONE_FLAG_OPT_NONSEQ_SET; 5688 softc->optimal_nonseq_zones = 5689 (tmpvar & ATA_ZDI_OPT_NS_MASK); 5690 } else { 5691 softc->zone_flags &= 5692 ~DA_ZONE_FLAG_OPT_NONSEQ_SET; 5693 softc->optimal_nonseq_zones = 0; 5694 } 5695 5696 tmpvar = le64dec(zi_log->max_seq_req_zones); 5697 if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { 5698 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; 5699 softc->max_seq_zones = 5700 (tmpvar & ATA_ZDI_MAX_SEQ_MASK); 5701 } else { 5702 softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET; 5703 softc->max_seq_zones = 0; 5704 } 5705 } 5706 } else { 5707 error = daerror(done_ccb, CAM_RETRY_SELTO, 5708 SF_RETRY_UA|SF_NO_PRINT); 5709 if (error == ERESTART) 5710 return; 5711 else if (error != 0) { 5712 softc->flags &= ~DA_FLAG_CAN_ATA_ZONE; 5713 softc->flags &= ~DA_ZONE_FLAG_SET_MASK; 5714 5715 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5716 /* Don't wedge this device's queue */ 5717 cam_release_devq(done_ccb->ccb_h.path, 5718 /*relsim_flags*/0, 5719 /*reduction*/0, 5720 /*timeout*/0, 5721 /*getcount_only*/0); 5722 } 5723 } 5724 5725 } 5726 5727 free(csio->data_ptr, M_SCSIDA); 5728 5729 daprobedone(periph, done_ccb); 5730 return; 5731 } 5732 5733 static void 5734 dadone_probezone(struct cam_periph *periph, union ccb *done_ccb) 5735 { 5736 struct da_softc *softc; 5737 struct ccb_scsiio *csio; 5738 int error; 5739 5740 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probezone\n")); 5741 5742 softc = (struct da_softc *)periph->softc; 5743 csio = &done_ccb->csio; 5744 5745 cam_periph_assert(periph, MA_OWNED); 5746 5747 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5748 uint32_t valid_len; 5749 size_t needed_len; 5750 struct scsi_vpd_zoned_bdc *zoned_bdc; 5751 5752 error = 0; 5753 zoned_bdc = (struct scsi_vpd_zoned_bdc *)csio->data_ptr; 5754 valid_len = csio->dxfer_len - 
csio->resid; 5755 needed_len = __offsetof(struct scsi_vpd_zoned_bdc, 5756 max_seq_req_zones) + 1 + 5757 sizeof(zoned_bdc->max_seq_req_zones); 5758 if ((valid_len >= needed_len) 5759 && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) { 5760 if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ) 5761 softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; 5762 else 5763 softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; 5764 softc->optimal_seq_zones = 5765 scsi_4btoul(zoned_bdc->optimal_seq_zones); 5766 softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; 5767 softc->optimal_nonseq_zones = scsi_4btoul( 5768 zoned_bdc->optimal_nonseq_zones); 5769 softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; 5770 softc->max_seq_zones = 5771 scsi_4btoul(zoned_bdc->max_seq_req_zones); 5772 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; 5773 } 5774 /* 5775 * All of the zone commands are mandatory for SCSI 5776 * devices. 5777 * 5778 * XXX KDM this is valid as of September 2015. 5779 * Re-check this assumption once the SAT spec is 5780 * updated to support SCSI ZBC to ATA ZAC mapping. 5781 * Since ATA allows zone commands to be reported 5782 * as supported or not, this may not necessarily 5783 * be true for an ATA device behind a SAT (SCSI to 5784 * ATA Translation) layer. 
		 */
		softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
				    /*relsim_flags*/0,
				    /*reduction*/0,
				    /*timeout*/0,
				    /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	daprobedone(periph, done_ccb);
	return;
}

/*
 * Completion handler for the periodic TEST UNIT READY issued by the
 * media poll.  Errors are processed without recovery so that media
 * change unit attentions are noticed; the DA_REF_TUR reference taken
 * when the TUR was scheduled is dropped here.
 */
static void
dadone_tur(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_tur\n"));

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

		if (daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART)
			return;	/* Will complete again, keep reference */
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(done_ccb->ccb_h.path,
			    /*relsim_flags*/0,
			    /*reduction*/0,
			    /*timeout*/0,
			    /*getcount_only*/0);
	}
	softc->flags &= ~DA_FLAG_TUR_PENDING;
	xpt_release_ccb(done_ccb);
	da_periph_release_locked(periph, DA_REF_TUR);
	return;
}

/*
 * Restart the probe state machine (starting with the write protect
 * probe) for a device whose metadata may have changed.  Does nothing
 * if a probe is already running.
 */
static void
dareprobe(struct cam_periph *periph)
{
	struct da_softc *softc;
	int status;

	softc = (struct da_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	/* Probe in progress; don't interfere. */
	if (softc->state != DA_STATE_NORMAL)
		return;

	status = da_periph_acquire(periph, DA_REF_REPROBE);
	KASSERT(status == 0, ("dareprobe: cam_periph_acquire failed"));

	softc->state = DA_STATE_PROBE_WP;
	xpt_schedule(periph, CAM_PRIORITY_DEV);
}

/*
 * Common error handler for da CCBs.  Returns ERESTART when the CCB
 * should be requeued; otherwise handles unit attentions that signal
 * capacity, INQUIRY, or media changes and hands the CCB to
 * cam_periph_error() for generic recovery.
 */
static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc *softc;
	struct cam_periph *periph;
	int error, error_code, sense_key, asc, ascq;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	/*
	 * Automatically detect devices that do not support
	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
	 */
	error = 0;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
		error = cmd6workaround(ccb);
	} else if (scsi_extract_sense_ccb(ccb,
	    &error_code, &sense_key, &asc, &ascq)) {
		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
			error = cmd6workaround(ccb);
		/*
		 * If the target replied with CAPACITY DATA HAS CHANGED UA,
		 * query the capacity and notify upper layers.
		 */
		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x2A && ascq == 0x09) {
			xpt_print(periph->path, "Capacity data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x28 && ascq == 0x00) {
			/* 28h/00h: not ready to ready change (new media) */
			softc->flags &= ~DA_FLAG_PROBED;
			disk_media_changed(softc->disk, M_NOWAIT);
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x3F && ascq == 0x03) {
			xpt_print(periph->path, "INQUIRY data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_NOT_READY &&
		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			/* 3Ah: medium not present */
			softc->flags |= DA_FLAG_PACK_INVALID;
			disk_media_gone(softc->disk, M_NOWAIT);
		}
	}
	if (error == ERESTART)
		return (ERESTART);

#ifdef CAM_IO_STATS
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		softc->timeouts++;
		break;
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
		softc->errors++;
		break;
	default:
		break;
	}
#endif

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;

	if (softc->quirks & DA_Q_RETRY_BUSY)
		sense_flags |= SF_RETRY_BUSY;
	return(cam_periph_error(ccb, cam_flags, sense_flags));
}

/*
 * Periodic media poll callout.  Schedules a TEST UNIT READY when the
 * device is idle (no queued work, no pending TUR, normal state), then
 * re-arms itself while polling is enabled.
 */
static void
damediapoll(void *arg)
{
	struct cam_periph *periph = arg;
	struct da_softc *softc = periph->softc;

	if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
	    (softc->flags & DA_FLAG_TUR_PENDING) == 0 &&
	    softc->state == DA_STATE_NORMAL &&
	    LIST_EMPTY(&softc->pending_ccbs)) {
		if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
			cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
			daschedule(periph);
		}
	}
	/* Queue us up again */
	if (da_poll_period != 0)
		callout_schedule(&softc->mediapoll_c, da_poll_period * hz);
}

/*
 * Synchronously issue a PREVENT ALLOW MEDIUM REMOVAL command and track
 * the resulting lock state in the softc.  No-op when the pack is
 * already in the requested state.
 */
static void
daprevent(struct cam_periph *periph, int action)
{
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	cam_periph_assert(periph, MA_OWNED);
	softc = (struct da_softc *)periph->softc;

	if (((action == PR_ALLOW)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
	 || ((action == PR_PREVENT)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
		return;
	}

	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);

	scsi_prevent(&ccb->csio,
		     /*retries*/1,
		     /*cbcfp*/NULL,
		     MSG_SIMPLE_Q_TAG,
		     action,
		     SSD_FULL_SIZE,
		     5000);

	error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
	    SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);

	if (error == 0) {
		if (action == PR_ALLOW)
			softc->flags &= ~DA_FLAG_PACK_LOCKED;
		else
			softc->flags |= DA_FLAG_PACK_LOCKED;
	}

	xpt_release_ccb(ccb);
}

/*
 * Record the device's block size and capacity, derive stripe geometry,
 * ask the controller for a boot geometry, push any changed long read
 * capacity data into the EDT, and propagate the new parameters to the
 * disk(9) layer.
 */
static void
dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
	  struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
{
	struct
	    ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;
	u_int lbppbe, lalba;
	int error;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = block_len;
	dp->sectors = maxsector + 1;
	if (rcaplong != NULL) {
		lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
		lalba = scsi_2btoul(rcaplong->lalba_lbp);
		lalba &= SRC16_LALBA_A;
		/* Protection type 1/2/3 is encoded as P_TYPE + 1 when enabled. */
		if (rcaplong->prot & SRC16_PROT_EN)
			softc->p_type = ((rcaplong->prot & SRC16_P_TYPE) >>
			    SRC16_P_TYPE_SHIFT) + 1;
		else
			softc->p_type = 0;
	} else {
		lbppbe = 0;
		lalba = 0;
		softc->p_type = 0;
	}

	if (lbppbe > 0) {
		/* Physical blocks larger than logical: use them as the stripe. */
		dp->stripesize = block_len << lbppbe;
		dp->stripeoffset = (dp->stripesize - block_len * lalba) %
		    dp->stripesize;
	} else if (softc->quirks & DA_Q_4K) {
		dp->stripesize = 4096;
		dp->stripeoffset = 0;
	} else if (softc->unmap_gran != 0) {
		/* Fall back to the unmap granularity as a stripe hint. */
		dp->stripesize = block_len * softc->unmap_gran;
		dp->stripeoffset = (dp->stripesize - block_len *
		    softc->unmap_gran_align) % dp->stripesize;
	} else {
		dp->stripesize = 0;
		dp->stripeoffset = 0;
	}
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb*)&ccg);
	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/*
		 * We don't know what went wrong here- but just pick
		 * a geometry so we don't have nasty things like divide
		 * by zero.
		 */
		dp->heads = 255;
		dp->secs_per_track = 255;
		dp->cylinders = dp->sectors / (255 * 255);
		if (dp->cylinders == 0) {
			dp->cylinders = 1;
		}
	} else {
		dp->heads = ccg.heads;
		dp->secs_per_track = ccg.secs_per_track;
		dp->cylinders = ccg.cylinders;
	}

	/*
	 * If the user supplied a read capacity buffer, and if it is
	 * different than the previous buffer, update the data in the EDT.
	 * If it's the same, we don't bother.  This avoids sending an
	 * update every time someone opens this device.
	 */
	if ((rcaplong != NULL)
	 && (bcmp(rcaplong, &softc->rcaplong,
	    min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
		struct ccb_dev_advinfo cdai;

		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_STORE;
		cdai.bufsiz = rcap_len;
		cdai.buf = (uint8_t *)rcaplong;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
		if (cdai.ccb_h.status != CAM_REQ_CMP) {
			xpt_print(periph->path, "%s: failed to set read "
			    "capacity advinfo\n", __func__);
			/* Use cam_error_print() to decode the status */
			cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
			    CAM_EPF_ALL);
		} else {
			/* Cache the data we just stored in the EDT. */
			bcopy(rcaplong, &softc->rcaplong,
			    min(sizeof(softc->rcaplong), rcap_len));
		}
	}

	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
	softc->disk->d_stripesize = softc->params.stripesize;
	softc->disk->d_stripeoffset = softc->params.stripeoffset;
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
	softc->disk->d_devstat->block_size = softc->params.secsize;
	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;

	error = disk_resize(softc->disk, M_NOWAIT);
	if (error != 0)
		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
}

/*
 * Periodic callout that marks the device as needing an ordered tag
 * once per interval, so tagged queueing cannot starve an outstanding
 * command indefinitely.
 */
static void
dasendorderedtag(void *arg)
{
	struct cam_periph *periph = arg;
	struct da_softc *softc = periph->softc;

	cam_periph_assert(periph, MA_OWNED);
	if (da_send_ordered) {
		if (!LIST_EMPTY(&softc->pending_ccbs)) {
			if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
				softc->flags |= DA_FLAG_NEED_OTAG;
			softc->flags &= ~DA_FLAG_WAS_OTAG;
		}
	}

	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, periph);
}

/*
 * Step through all DA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
dashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	CAM_PERIPH_FOREACH(periph, &dadriver) {
		softc = (struct da_softc *)periph->softc;
		if (SCHEDULER_STOPPED()) {
			/* If we panicked with the lock held, do not recurse. */
			if (!cam_periph_owned(periph) &&
			    (softc->flags & DA_FLAG_OPEN)) {
				dadump(softc->disk, NULL, 0, 0, 0);
			}
			continue;
		}
		cam_periph_lock(periph);

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0)
		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
			cam_periph_unlock(periph);
			continue;
		}

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		scsi_synchronize_cache(&ccb->csio,
		    /*retries*/0,
		    /*cbfcnp*/NULL,
		    MSG_SIMPLE_Q_TAG,
		    /*begin_lba*/0, /* whole disk */
		    /*lb_count*/0,
		    SSD_FULL_SIZE,
		    60 * 60 * 1000);

		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
		    softc->disk->d_devstat);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
		xpt_release_ccb(ccb);
		cam_periph_unlock(periph);
	}
}

#else /* !_KERNEL */

/*
 * XXX These are only left out of the kernel build to silence warnings.  If,
 * for some reason these functions are used in the kernel, the ifdefs should
 * be moved so they are included both in the kernel and userland.
 */

/*
 * Fill out a CCB for a FORMAT UNIT command.
 */
void
scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		 u_int32_t timeout)
{
	struct scsi_format_unit *scsi_cmd;

	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = FORMAT_UNIT;
	scsi_cmd->byte2 = byte2;
	scsi_ulto2b(ileave, scsi_cmd->interleave);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ?
				CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

/*
 * Fill out a CCB for a READ DEFECT DATA (10) or (12) command.  The
 * 10 byte CDB is used when possible; the 12 byte CDB is required for
 * a non-zero address descriptor index, large transfers, or when the
 * caller requests a larger minimum CDB size.
 */
void
scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  uint8_t tag_action, uint8_t list_format,
		  uint32_t addr_desc_index, uint8_t *data_ptr,
		  uint32_t dxfer_len, int minimum_cmd_size,
		  uint8_t sense_len, uint32_t timeout)
{
	uint8_t cdb_len;

	/*
	 * These conditions allow using the 10 byte command.  Otherwise we
	 * need to use the 12 byte command.
	 */
	if ((minimum_cmd_size <= 10)
	 && (addr_desc_index == 0)
	 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
		struct scsi_read_defect_data_10 *cdb10;

		cdb10 = (struct scsi_read_defect_data_10 *)
			&csio->cdb_io.cdb_bytes;

		cdb_len = sizeof(*cdb10);
		bzero(cdb10, cdb_len);
		cdb10->opcode = READ_DEFECT_DATA_10;
		cdb10->format = list_format;
		scsi_ulto2b(dxfer_len, cdb10->alloc_length);
	} else {
		struct scsi_read_defect_data_12 *cdb12;

		cdb12 = (struct scsi_read_defect_data_12 *)
			&csio->cdb_io.cdb_bytes;

		cdb_len = sizeof(*cdb12);
		bzero(cdb12, cdb_len);
		cdb12->opcode = READ_DEFECT_DATA_12;
		cdb12->format = list_format;
		scsi_ulto4b(dxfer_len, cdb12->alloc_length);
		scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
	}

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ CAM_DIR_IN,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      cdb_len,
		      timeout);
}

/*
 * Fill out a CCB for a SANITIZE command.
 */
void
scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
	      u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
	      u_int32_t timeout)
{
	struct scsi_sanitize *scsi_cmd;

	scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = SANITIZE;
	scsi_cmd->byte2 = byte2;
	scsi_cmd->control = control;
	scsi_ulto2b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

#endif /* _KERNEL */

/*
 * Fill out a CCB for a ZBC OUT (zone management) command.
 */
void
scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
	     void (*cbfcnp)(struct cam_periph *, union ccb *),
	     uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
	     uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
	     uint8_t sense_len, uint32_t timeout)
{
	struct scsi_zbc_out *scsi_cmd;

	scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = ZBC_OUT;
	scsi_cmd->service_action = service_action;
	scsi_u64to8b(zone_id, scsi_cmd->zone_id);
	scsi_cmd->zone_flags = zone_flags;

	cam_fill_csio(csio,
	    retries,
	    cbfcnp,
	    /*flags*/ (dxfer_len > 0) ?
				CAM_DIR_OUT : CAM_DIR_NONE,
	    tag_action,
	    data_ptr,
	    dxfer_len,
	    sense_len,
	    sizeof(*scsi_cmd),
	    timeout);
}

/*
 * Fill out a CCB for a ZBC IN (zone reporting) command.
 */
void
scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
	    void (*cbfcnp)(struct cam_periph *, union ccb *),
	    uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
	    uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
	    uint8_t sense_len, uint32_t timeout)
{
	struct scsi_zbc_in *scsi_cmd;

	scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = ZBC_IN;
	scsi_cmd->service_action = service_action;
	scsi_ulto4b(dxfer_len, scsi_cmd->length);
	scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
	scsi_cmd->zone_options = zone_options;

	cam_fill_csio(csio,
	    retries,
	    cbfcnp,
	    /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
	    tag_action,
	    data_ptr,
	    dxfer_len,
	    sense_len,
	    sizeof(*scsi_cmd),
	    timeout);

}

/*
 * Build an ATA PASS-THROUGH CCB wrapping a ZAC MANAGEMENT OUT command
 * (or, when use_ncq is set, NCQ NON-DATA / SEND FPDMA QUEUED).
 * Returns 0 on success or 1 when the transfer length cannot be
 * encoded.
 */
int
scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
		      void (*cbfcnp)(struct cam_periph *, union ccb *),
		      uint8_t tag_action, int use_ncq,
		      uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
		      uint8_t *data_ptr, uint32_t dxfer_len,
		      uint8_t *cdb_storage, size_t cdb_storage_len,
		      uint8_t sense_len, uint32_t timeout)
{
	uint8_t command_out, protocol, ata_flags;
	uint16_t features_out;
	uint32_t sectors_out, auxiliary;
	int retval;

	retval = 0;

	if (use_ncq == 0) {
		command_out = ATA_ZAC_MANAGEMENT_OUT;
		features_out = (zm_action & 0xf) | (zone_flags << 8);
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			protocol = AP_PROTO_NON_DATA;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
			sectors_out = 0;
		} else {
			protocol = AP_PROTO_DMA;
			ata_flags |= AP_FLAG_TLEN_SECT_CNT |
				     AP_FLAG_TDIR_TO_DEV;
			sectors_out = ((dxfer_len >> 9) & 0xffff);
		}
		auxiliary = 0;
	} else {
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			command_out = ATA_NCQ_NON_DATA;
			features_out = ATA_NCQ_ZAC_MGMT_OUT;
			/*
			 * We're assuming the SCSI to ATA translation layer
			 * will set the NCQ tag number in the tag field.
			 * That isn't clear from the SAT-4 spec (as of rev 05).
			 */
			sectors_out = 0;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
		} else {
			command_out = ATA_SEND_FPDMA_QUEUED;
			/*
			 * Note that we're defaulting to normal priority,
			 * and assuming that the SCSI to ATA translation
			 * layer will insert the NCQ tag number in the tag
			 * field.  That isn't clear in the SAT-4 spec (as
			 * of rev 05).
			 */
			sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;

			ata_flags |= AP_FLAG_TLEN_FEAT |
				     AP_FLAG_TDIR_TO_DEV;

			/*
			 * For SEND FPDMA QUEUED, the transfer length is
			 * encoded in the FEATURE register, and 0 means
			 * that 65536 512 byte blocks are to be transferred.
			 * In practice, it seems unlikely that we'll see
			 * a transfer that large, and it may confuse the
			 * SAT layer, because generally that means that
			 * 0 bytes should be transferred.
			 */
			if (dxfer_len == (65536 * 512)) {
				features_out = 0;
			} else if (dxfer_len <= (65535 * 512)) {
				features_out = ((dxfer_len >> 9) & 0xffff);
			} else {
				/* The transfer is too big. */
				retval = 1;
				goto bailout;
			}

		}

		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
		protocol = AP_PROTO_FPDMA;
	}

	protocol |= AP_EXTEND;

	retval = scsi_ata_pass(csio,
	    retries,
	    cbfcnp,
	    /*flags*/ (dxfer_len > 0) ?
CAM_DIR_OUT : CAM_DIR_NONE, 6468 tag_action, 6469 /*protocol*/ protocol, 6470 /*ata_flags*/ ata_flags, 6471 /*features*/ features_out, 6472 /*sector_count*/ sectors_out, 6473 /*lba*/ zone_id, 6474 /*command*/ command_out, 6475 /*device*/ 0, 6476 /*icc*/ 0, 6477 /*auxiliary*/ auxiliary, 6478 /*control*/ 0, 6479 /*data_ptr*/ data_ptr, 6480 /*dxfer_len*/ dxfer_len, 6481 /*cdb_storage*/ cdb_storage, 6482 /*cdb_storage_len*/ cdb_storage_len, 6483 /*minimum_cmd_size*/ 0, 6484 /*sense_len*/ SSD_FULL_SIZE, 6485 /*timeout*/ timeout); 6486 6487 bailout: 6488 6489 return (retval); 6490 } 6491 6492 int 6493 scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, 6494 void (*cbfcnp)(struct cam_periph *, union ccb *), 6495 uint8_t tag_action, int use_ncq, 6496 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, 6497 uint8_t *data_ptr, uint32_t dxfer_len, 6498 uint8_t *cdb_storage, size_t cdb_storage_len, 6499 uint8_t sense_len, uint32_t timeout) 6500 { 6501 uint8_t command_out, protocol; 6502 uint16_t features_out, sectors_out; 6503 uint32_t auxiliary; 6504 int ata_flags; 6505 int retval; 6506 6507 retval = 0; 6508 ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS; 6509 6510 if (use_ncq == 0) { 6511 command_out = ATA_ZAC_MANAGEMENT_IN; 6512 /* XXX KDM put a macro here */ 6513 features_out = (zm_action & 0xf) | (zone_flags << 8); 6514 sectors_out = dxfer_len >> 9; /* XXX KDM macro */ 6515 protocol = AP_PROTO_DMA; 6516 ata_flags |= AP_FLAG_TLEN_SECT_CNT; 6517 auxiliary = 0; 6518 } else { 6519 ata_flags |= AP_FLAG_TLEN_FEAT; 6520 6521 command_out = ATA_RECV_FPDMA_QUEUED; 6522 sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; 6523 6524 /* 6525 * For RECEIVE FPDMA QUEUED, the transfer length is 6526 * encoded in the FEATURE register, and 0 means 6527 * that 65536 512 byte blocks are to be tranferred. 
6528 * In practice, it seems unlikely that we'll see 6529 * a transfer that large, and it may confuse the 6530 * the SAT layer, because generally that means that 6531 * 0 bytes should be transferred. 6532 */ 6533 if (dxfer_len == (65536 * 512)) { 6534 features_out = 0; 6535 } else if (dxfer_len <= (65535 * 512)) { 6536 features_out = ((dxfer_len >> 9) & 0xffff); 6537 } else { 6538 /* The transfer is too big. */ 6539 retval = 1; 6540 goto bailout; 6541 } 6542 auxiliary = (zm_action & 0xf) | (zone_flags << 8), 6543 protocol = AP_PROTO_FPDMA; 6544 } 6545 6546 protocol |= AP_EXTEND; 6547 6548 retval = scsi_ata_pass(csio, 6549 retries, 6550 cbfcnp, 6551 /*flags*/ CAM_DIR_IN, 6552 tag_action, 6553 /*protocol*/ protocol, 6554 /*ata_flags*/ ata_flags, 6555 /*features*/ features_out, 6556 /*sector_count*/ sectors_out, 6557 /*lba*/ zone_id, 6558 /*command*/ command_out, 6559 /*device*/ 0, 6560 /*icc*/ 0, 6561 /*auxiliary*/ auxiliary, 6562 /*control*/ 0, 6563 /*data_ptr*/ data_ptr, 6564 /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */ 6565 /*cdb_storage*/ cdb_storage, 6566 /*cdb_storage_len*/ cdb_storage_len, 6567 /*minimum_cmd_size*/ 0, 6568 /*sense_len*/ SSD_FULL_SIZE, 6569 /*timeout*/ timeout); 6570 6571 bailout: 6572 return (retval); 6573 } 6574