1 /*- 2 * Implementation of SCSI Direct Access Peripheral driver for CAM. 3 * 4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 5 * 6 * Copyright (c) 1997 Justin T. Gibbs. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification, immediately at the beginning of the file. 15 * 2. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 36 #ifdef _KERNEL 37 #include "opt_da.h" 38 #include <sys/systm.h> 39 #include <sys/kernel.h> 40 #include <sys/bio.h> 41 #include <sys/sysctl.h> 42 #include <sys/taskqueue.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/conf.h> 46 #include <sys/devicestat.h> 47 #include <sys/eventhandler.h> 48 #include <sys/malloc.h> 49 #include <sys/cons.h> 50 #include <sys/endian.h> 51 #include <sys/proc.h> 52 #include <sys/sbuf.h> 53 #include <geom/geom.h> 54 #include <geom/geom_disk.h> 55 #include <machine/atomic.h> 56 #endif /* _KERNEL */ 57 58 #ifndef _KERNEL 59 #include <stdio.h> 60 #include <string.h> 61 #endif /* _KERNEL */ 62 63 #include <cam/cam.h> 64 #include <cam/cam_ccb.h> 65 #include <cam/cam_periph.h> 66 #include <cam/cam_xpt_periph.h> 67 #ifdef _KERNEL 68 #include <cam/cam_xpt_internal.h> 69 #endif /* _KERNEL */ 70 #include <cam/cam_sim.h> 71 #include <cam/cam_iosched.h> 72 73 #include <cam/scsi/scsi_message.h> 74 #include <cam/scsi/scsi_da.h> 75 76 #ifdef _KERNEL 77 /* 78 * Note that there are probe ordering dependencies here. The order isn't 79 * controlled by this enumeration, but by explicit state transitions in 80 * dastart() and dadone(). Here are some of the dependencies: 81 * 82 * 1. RC should come first, before RC16, unless there is evidence that RC16 83 * is supported. 84 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe. 85 * 3. 
The ATA probes should go in this order:
 *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
 */
/*
 * Probe-sequencing / run-time state for a da(4) instance.  One probe step
 * per state; DA_STATE_NORMAL means probing is complete and regular I/O is
 * being serviced.  (Ordering dependencies are described in the comment
 * block above; the actual transitions live in dastart()/dadone().)
 */
typedef enum {
	DA_STATE_PROBE_WP,
	DA_STATE_PROBE_RC,
	DA_STATE_PROBE_RC16,
	DA_STATE_PROBE_LBP,
	DA_STATE_PROBE_BLK_LIMITS,
	DA_STATE_PROBE_BDC,
	DA_STATE_PROBE_ATA,
	DA_STATE_PROBE_ATA_LOGDIR,
	DA_STATE_PROBE_ATA_IDDIR,
	DA_STATE_PROBE_ATA_SUP,
	DA_STATE_PROBE_ATA_ZONE,
	DA_STATE_PROBE_ZONE,
	DA_STATE_NORMAL
} da_state;

/*
 * Per-instance flag bits (kept in the softc).  Note that bit 0x000010 is
 * unused.
 */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x000001,
	DA_FLAG_NEW_PACK	= 0x000002,
	DA_FLAG_PACK_LOCKED	= 0x000004,
	DA_FLAG_PACK_REMOVABLE	= 0x000008,
	DA_FLAG_NEED_OTAG	= 0x000020,
	DA_FLAG_WAS_OTAG	= 0x000040,
	DA_FLAG_RETRY_UA	= 0x000080,
	DA_FLAG_OPEN		= 0x000100,
	DA_FLAG_SCTX_INIT	= 0x000200,
	DA_FLAG_CAN_RC16	= 0x000400,
	DA_FLAG_PROBED		= 0x000800,
	DA_FLAG_DIRTY		= 0x001000,
	DA_FLAG_ANNOUNCED	= 0x002000,
	DA_FLAG_CAN_ATA_DMA	= 0x004000,
	DA_FLAG_CAN_ATA_LOG	= 0x008000,
	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,
	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,
	DA_FLAG_CAN_ATA_ZONE	= 0x040000,
	DA_FLAG_TUR_PENDING	= 0x080000
} da_flags;

/* Per-device quirk bits (see da_quirk_table for the devices they apply to). */
typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02,
	DA_Q_NO_PREVENT		= 0x04,
	DA_Q_4K			= 0x08,
	DA_Q_NO_RC16		= 0x10,
	DA_Q_NO_UNMAP		= 0x20,
	DA_Q_RETRY_BUSY		= 0x40,
	DA_Q_SMR_DM		= 0x80,
	DA_Q_STRICT_UNMAP	= 0x100,
	DA_Q_128KB		= 0x200
} da_quirks;

/*
 * %b-style bit description string for da_quirks.  The \0NN octal prefixes
 * are 1-based bit positions and must stay in sync with the da_quirks values
 * above.
 */
#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"	\
	"\012128KB"

/*
 * Tags stored in a CCB's private area to identify what the CCB was issued
 * for.  The low bits (DA_CCB_TYPE_MASK) encode the type; DA_CCB_RETRY_UA is
 * a flag OR'ed on top of the type.
 */
typedef enum {
	DA_CCB_PROBE_RC		= 0x01,
	DA_CCB_PROBE_RC16	= 0x02,
	DA_CCB_PROBE_LBP	= 0x03,
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
	DA_CCB_PROBE_BDC	= 0x05,
	DA_CCB_PROBE_ATA	= 0x06,
	DA_CCB_BUFFER_IO	= 0x07,
	DA_CCB_DUMP		= 0x0A,
	DA_CCB_DELETE		= 0x0B,
	DA_CCB_TUR		= 0x0C,
	DA_CCB_PROBE_ZONE	= 0x0D,
	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
	DA_CCB_PROBE_ATA_SUP	= 0x10,
	DA_CCB_PROBE_ATA_ZONE	= 0x11,
	DA_CCB_PROBE_WP		= 0x12,
	DA_CCB_TYPE_MASK	= 0x1F,
	DA_CCB_RETRY_UA		= 0x20
} da_ccb_state;

/*
 * Order here is important for method choice
 *
 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker
 * deletes using ATA_TRIM than the corresponding UNMAP results for a real
 * world mysql import taking 5mins.
 *
 */
typedef enum {
	DA_DELETE_NONE,
	DA_DELETE_DISABLE,
	DA_DELETE_ATA_TRIM,
	DA_DELETE_UNMAP,
	DA_DELETE_WS16,
	DA_DELETE_WS10,
	DA_DELETE_ZERO,
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
	DA_DELETE_MAX = DA_DELETE_ZERO
} da_delete_methods;

/*
 * For SCSI, host managed drives show up as a separate device type.  For
 * ATA, host managed drives also have a different device signature.
 * XXX KDM figure out the ATA host managed signature.
 */
typedef enum {
	DA_ZONE_NONE		= 0x00,
	DA_ZONE_DRIVE_MANAGED	= 0x01,
	DA_ZONE_HOST_AWARE	= 0x02,
	DA_ZONE_HOST_MANAGED	= 0x03
} da_zone_mode;

/*
 * We distinguish between these interface cases in addition to the drive type:
 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
 * o ATA drive behind a SCSI translation layer that does not know about
 *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
 *   case, we would need to share the ATA code with the ada(4) driver.
 * o SCSI drive.
 */
typedef enum {
	DA_ZONE_IF_SCSI,
	DA_ZONE_IF_ATA_PASS,
	DA_ZONE_IF_ATA_SAT,
} da_zone_interface;

/*
 * Zoned-device feature flags.  The *_SUP bits record which zone commands
 * the device supports; the *_SET bits record which of the optimal/max zone
 * count fields in the softc have been filled in.
 */
typedef enum {
	DA_ZONE_FLAG_RZ_SUP		= 0x0001,
	DA_ZONE_FLAG_OPEN_SUP		= 0x0002,
	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004,
	DA_ZONE_FLAG_FINISH_SUP		= 0x0008,
	DA_ZONE_FLAG_RWP_SUP		= 0x0010,
	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
					   DA_ZONE_FLAG_OPEN_SUP |
					   DA_ZONE_FLAG_CLOSE_SUP |
					   DA_ZONE_FLAG_FINISH_SUP |
					   DA_ZONE_FLAG_RWP_SUP),
	DA_ZONE_FLAG_URSWRZ		= 0x0020,
	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040,
	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080,
	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100,
	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
					   DA_ZONE_FLAG_MAX_SEQ_SET)
} da_zone_flags;

/* Human-readable names for the DA_ZONE_FLAG_*_SUP capability bits. */
static struct da_zone_desc {
	da_zone_flags value;
	const char *desc;
} da_zone_desc_table[] = {
	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
};

/* Signature of the per-method BIO_DELETE issue routines. */
typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
			      struct bio *bp);
static da_delete_func_t da_delete_trim;
static da_delete_func_t da_delete_unmap;
static da_delete_func_t da_delete_ws;

/*
 * Delete-method dispatch table, indexed by da_delete_methods; NULL entries
 * correspond to DA_DELETE_NONE/DA_DELETE_DISABLE.
 * NOTE(review): function pointers are stored as const void *; converting
 * between function and object pointers is not sanctioned by ISO C, although
 * it works on the platforms this driver targets — verify before reusing
 * this pattern elsewhere.
 */
static const void * da_delete_functions[] = {
	NULL,
	NULL,
	da_delete_trim,
	da_delete_unmap,
	da_delete_ws,
	da_delete_ws,
	da_delete_ws
};

/* Short names and descriptions, also indexed by da_delete_methods. */
static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

/* Geometry and capacity parameters for the disk. */
struct disk_params {
	u_int8_t  heads;
	u_int32_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number sectors */
	u_int     stripesize;
	u_int     stripeoffset;
};

#define UNMAP_RANGE_MAX		0xffffffff
#define UNMAP_HEAD_SIZE		8
#define UNMAP_RANGE_SIZE	16
#define UNMAP_MAX_RANGES	2048 /* Protocol Max is 4095 */
#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
				 UNMAP_HEAD_SIZE)

#define WS10_MAX_BLKS		0xffff
#define WS16_MAX_BLKS		0xffffffff
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)

#define DA_WORK_TUR		(1 << 16)

/*
 * Reference-count classes; each class gets its own slot in the softc's
 * ref_flags[] array.  DA_REF_MAX sizes that array — keep it last.
 */
typedef enum {
	DA_REF_OPEN = 1,
	DA_REF_OPEN_HOLD,
	DA_REF_CLOSE_HOLD,
	DA_REF_PROBE_HOLD,
	DA_REF_TUR,
	DA_REF_GEOM,
	DA_REF_SYSCTL,
	DA_REF_REPROBE,
	DA_REF_MAX		/* KEEP LAST */
} da_ref_token;

/* Per-instance softc for the da(4) peripheral driver. */
struct da_softc {
	struct   cam_iosched_softc *cam_iosched;
	struct	 bio_queue_head delete_run_queue;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	int	 refcount;		/* Active xpt_action() calls */
	da_state state;			/* Current probe/normal state */
	da_flags flags;
	da_quirks quirks;
	int	 minimum_cmd_size;
	int	 error_inject;
	int	 trim_max_ranges;
	int	 delete_available;	/* Delete methods possibly available */
	da_zone_mode			zone_mode;
	da_zone_interface		zone_interface;
	da_zone_flags			zone_flags;
	struct ata_gp_log_dir		ata_logdir;
	int				valid_logdir_len;
	struct ata_identify_log_pages	ata_iddir;
	int				valid_iddir_len;
	uint64_t			optimal_seq_zones;
	uint64_t			optimal_nonseq_zones;
	uint64_t			max_seq_zones;
	u_int			maxio;
	uint32_t		unmap_max_ranges;
	uint32_t		unmap_max_lba; /* Max LBAs in UNMAP req */
	uint32_t		unmap_gran;
	uint32_t		unmap_gran_align;
	uint64_t		ws_max_blks;
	uint64_t		trim_count;
	uint64_t		trim_ranges;
	uint64_t		trim_lbas;
	da_delete_methods	delete_method_pref;
	da_delete_methods	delete_method;
	da_delete_func_t	*delete_func;
	int			unmappedio;
	int			rotating;
	int			p_type;
	struct	 disk_params params;
	struct	 disk *disk;
	union	 ccb saved_ccb;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;
	uint64_t wwpn;
	uint8_t	 unmap_buf[UNMAP_BUF_SIZE];
	struct scsi_read_capacity_data_long rcaplong;
	struct callout		mediapoll_c;
	int			ref_flags[DA_REF_MAX];	/* See da_ref_token */
#ifdef CAM_IO_STATS
	struct sysctl_ctx_list	sysctl_stats_ctx;
	struct sysctl_oid	*sysctl_stats_tree;
	u_int	errors;
	u_int	timeouts;
	u_int	invalidations;
#endif
#define DA_ANNOUNCETMP_SZ 160
	char			announce_temp[DA_ANNOUNCETMP_SZ];
#define DA_ANNOUNCE_SZ 400
	char			announcebuf[DA_ANNOUNCE_SZ];
};

/*
 * Set or clear one delete-method bit in softc->delete_available.
 * NOTE(review): this expands to a bare if/else rather than a
 * do { ... } while (0) statement, so it is not safe to use in an unbraced
 * if/else at a call site.
 */
#define dadeleteflag(softc, delete_method, enable)			\
	if (enable) {							\
		softc->delete_available |= (1 << delete_method);	\
	} else {							\
		softc->delete_available &= ~(1 << delete_method);	\
	}

/* Inquiry pattern -> quirk bits mapping for da_quirk_table below. */
struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	da_quirks quirks;
};

static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";

static struct da_quirk_entry da_quirk_table[] =
{
	/* SPI, FC devices */
	{
		/*
		 * Fujitsu M2513A MO drives.
		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
		 * Reported by: W.Scholten <whs@xs4all.nl>
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/* See above. */
		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This particular Fujitsu drive doesn't like the
		 * synchronize cache command.
413 * Reported by: Tom Jackson <toj@gorilla.net> 414 */ 415 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, 416 /*quirks*/ DA_Q_NO_SYNC_CACHE 417 }, 418 { 419 /* 420 * This drive doesn't like the synchronize cache command 421 * either. Reported by: Matthew Jacob <mjacob@feral.com> 422 * in NetBSD PR kern/6027, August 24, 1998. 423 */ 424 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, 425 /*quirks*/ DA_Q_NO_SYNC_CACHE 426 }, 427 { 428 /* 429 * This drive doesn't like the synchronize cache command 430 * either. Reported by: Hellmuth Michaelis (hm@kts.org) 431 * (PR 8882). 432 */ 433 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, 434 /*quirks*/ DA_Q_NO_SYNC_CACHE 435 }, 436 { 437 /* 438 * Doesn't like the synchronize cache command. 439 * Reported by: Blaz Zupan <blaz@gold.amis.net> 440 */ 441 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, 442 /*quirks*/ DA_Q_NO_SYNC_CACHE 443 }, 444 { 445 /* 446 * Doesn't like the synchronize cache command. 447 * Reported by: Blaz Zupan <blaz@gold.amis.net> 448 */ 449 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, 450 /*quirks*/ DA_Q_NO_SYNC_CACHE 451 }, 452 { 453 /* 454 * Doesn't like the synchronize cache command. 455 */ 456 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, 457 /*quirks*/ DA_Q_NO_SYNC_CACHE 458 }, 459 { 460 /* 461 * Doesn't like the synchronize cache command. 462 * Reported by: walter@pelissero.de 463 */ 464 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, 465 /*quirks*/ DA_Q_NO_SYNC_CACHE 466 }, 467 { 468 /* 469 * Doesn't work correctly with 6 byte reads/writes. 470 * Returns illegal request, and points to byte 9 of the 471 * 6-byte CDB. 472 * Reported by: Adam McDougall <bsdx@spawnet.com> 473 */ 474 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, 475 /*quirks*/ DA_Q_NO_6_BYTE 476 }, 477 { 478 /* See above. */ 479 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, 480 /*quirks*/ DA_Q_NO_6_BYTE 481 }, 482 { 483 /* 484 * Doesn't like the synchronize cache command. 
485 * Reported by: walter@pelissero.de 486 */ 487 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, 488 /*quirks*/ DA_Q_NO_SYNC_CACHE 489 }, 490 { 491 /* 492 * The CISS RAID controllers do not support SYNC_CACHE 493 */ 494 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, 495 /*quirks*/ DA_Q_NO_SYNC_CACHE 496 }, 497 { 498 /* 499 * The STEC SSDs sometimes hang on UNMAP. 500 */ 501 {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, 502 /*quirks*/ DA_Q_NO_UNMAP 503 }, 504 { 505 /* 506 * VMware returns BUSY status when storage has transient 507 * connectivity problems, so better wait. 508 * Also VMware returns odd errors on misaligned UNMAPs. 509 */ 510 {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, 511 /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP 512 }, 513 /* USB mass storage devices supported by umass(4) */ 514 { 515 /* 516 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player 517 * PR: kern/51675 518 */ 519 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, 520 /*quirks*/ DA_Q_NO_SYNC_CACHE 521 }, 522 { 523 /* 524 * Power Quotient Int. 
(PQI) USB flash key 525 * PR: kern/53067 526 */ 527 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", 528 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 529 }, 530 { 531 /* 532 * Creative Nomad MUVO mp3 player (USB) 533 * PR: kern/53094 534 */ 535 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, 536 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 537 }, 538 { 539 /* 540 * Jungsoft NEXDISK USB flash key 541 * PR: kern/54737 542 */ 543 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, 544 /*quirks*/ DA_Q_NO_SYNC_CACHE 545 }, 546 { 547 /* 548 * FreeDik USB Mini Data Drive 549 * PR: kern/54786 550 */ 551 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", 552 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 553 }, 554 { 555 /* 556 * Sigmatel USB Flash MP3 Player 557 * PR: kern/57046 558 */ 559 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, 560 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 561 }, 562 { 563 /* 564 * Neuros USB Digital Audio Computer 565 * PR: kern/63645 566 */ 567 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. 
audio comp.", 568 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 569 }, 570 { 571 /* 572 * SEAGRAND NP-900 MP3 Player 573 * PR: kern/64563 574 */ 575 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, 576 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 577 }, 578 { 579 /* 580 * iRiver iFP MP3 player (with UMS Firmware) 581 * PR: kern/54881, i386/63941, kern/66124 582 */ 583 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, 584 /*quirks*/ DA_Q_NO_SYNC_CACHE 585 }, 586 { 587 /* 588 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 589 * PR: kern/70158 590 */ 591 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, 592 /*quirks*/ DA_Q_NO_SYNC_CACHE 593 }, 594 { 595 /* 596 * ZICPlay USB MP3 Player with FM 597 * PR: kern/75057 598 */ 599 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, 600 /*quirks*/ DA_Q_NO_SYNC_CACHE 601 }, 602 { 603 /* 604 * TEAC USB floppy mechanisms 605 */ 606 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, 607 /*quirks*/ DA_Q_NO_SYNC_CACHE 608 }, 609 { 610 /* 611 * Kingston DataTraveler II+ USB Pen-Drive. 612 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org> 613 */ 614 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", 615 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 616 }, 617 { 618 /* 619 * USB DISK Pro PMAP 620 * Reported by: jhs 621 * PR: usb/96381 622 */ 623 {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, 624 /*quirks*/ DA_Q_NO_SYNC_CACHE 625 }, 626 { 627 /* 628 * Motorola E398 Mobile Phone (TransFlash memory card). 629 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl> 630 * PR: usb/89889 631 */ 632 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", 633 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 634 }, 635 { 636 /* 637 * Qware BeatZkey! 
Pro 638 * PR: usb/79164 639 */ 640 {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", 641 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 642 }, 643 { 644 /* 645 * Time DPA20B 1GB MP3 Player 646 * PR: usb/81846 647 */ 648 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", 649 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 650 }, 651 { 652 /* 653 * Samsung USB key 128Mb 654 * PR: usb/90081 655 */ 656 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", 657 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 658 }, 659 { 660 /* 661 * Kingston DataTraveler 2.0 USB Flash memory. 662 * PR: usb/89196 663 */ 664 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", 665 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 666 }, 667 { 668 /* 669 * Creative MUVO Slim mp3 player (USB) 670 * PR: usb/86131 671 */ 672 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", 673 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 674 }, 675 { 676 /* 677 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) 678 * PR: usb/80487 679 */ 680 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", 681 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 682 }, 683 { 684 /* 685 * SanDisk Micro Cruzer 128MB 686 * PR: usb/75970 687 */ 688 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", 689 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 690 }, 691 { 692 /* 693 * TOSHIBA TransMemory USB sticks 694 * PR: kern/94660 695 */ 696 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", 697 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 698 }, 699 { 700 /* 701 * PNY USB 3.0 Flash Drives 702 */ 703 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*", 704 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16 705 }, 706 { 707 /* 708 * PNY USB Flash keys 709 * PR: usb/75578, usb/72344, usb/65436 710 */ 711 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", 712 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 713 }, 714 { 715 /* 716 * Genesys GL3224 717 */ 718 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", 719 "120?"}, /*quirks*/ 
DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16 720 }, 721 { 722 /* 723 * Genesys 6-in-1 Card Reader 724 * PR: usb/94647 725 */ 726 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", 727 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 728 }, 729 { 730 /* 731 * Rekam Digital CAMERA 732 * PR: usb/98713 733 */ 734 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", 735 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 736 }, 737 { 738 /* 739 * iRiver H10 MP3 player 740 * PR: usb/102547 741 */ 742 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", 743 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 744 }, 745 { 746 /* 747 * iRiver U10 MP3 player 748 * PR: usb/92306 749 */ 750 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", 751 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 752 }, 753 { 754 /* 755 * X-Micro Flash Disk 756 * PR: usb/96901 757 */ 758 {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", 759 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 760 }, 761 { 762 /* 763 * EasyMP3 EM732X USB 2.0 Flash MP3 Player 764 * PR: usb/96546 765 */ 766 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", 767 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 768 }, 769 { 770 /* 771 * Denver MP3 player 772 * PR: usb/107101 773 */ 774 {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", 775 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 776 }, 777 { 778 /* 779 * Philips USB Key Audio KEY013 780 * PR: usb/68412 781 */ 782 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, 783 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 784 }, 785 { 786 /* 787 * JNC MP3 Player 788 * PR: usb/94439 789 */ 790 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", 791 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 792 }, 793 { 794 /* 795 * SAMSUNG MP0402H 796 * PR: usb/108427 797 */ 798 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, 799 /*quirks*/ DA_Q_NO_SYNC_CACHE 800 }, 801 { 802 /* 803 * I/O Magic USB flash - Giga Bank 804 * PR: usb/108810 805 */ 806 {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, 807 /*quirks*/ DA_Q_NO_SYNC_CACHE 808 }, 
809 { 810 /* 811 * JoyFly 128mb USB Flash Drive 812 * PR: 96133 813 */ 814 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", 815 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 816 }, 817 { 818 /* 819 * ChipsBnk usb stick 820 * PR: 103702 821 */ 822 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", 823 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 824 }, 825 { 826 /* 827 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A 828 * PR: 129858 829 */ 830 {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", 831 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 832 }, 833 { 834 /* 835 * Samsung YP-U3 mp3-player 836 * PR: 125398 837 */ 838 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", 839 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 840 }, 841 { 842 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", 843 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 844 }, 845 { 846 /* 847 * Sony Cyber-Shot DSC cameras 848 * PR: usb/137035 849 */ 850 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, 851 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 852 }, 853 { 854 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", 855 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT 856 }, 857 { 858 /* At least several Transcent USB sticks lie on RC16. */ 859 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", 860 "*"}, /*quirks*/ DA_Q_NO_RC16 861 }, 862 { 863 /* 864 * I-O Data USB Flash Disk 865 * PR: usb/211716 866 */ 867 {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*", 868 "*"}, /*quirks*/ DA_Q_NO_RC16 869 }, 870 { 871 /* 872 * SLC CHIPFANCIER USB drives 873 * PR: usb/234503 (RC10 right, RC16 wrong) 874 * 16GB, 32GB and 128GB confirmed to have same issue 875 */ 876 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER", 877 "*"}, /*quirks*/ DA_Q_NO_RC16 878 }, 879 /* ATA/SATA devices over SAS/USB/... 
*/ 880 { 881 /* Sandisk X400 */ 882 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" }, 883 /*quirks*/DA_Q_128KB 884 }, 885 { 886 /* Hitachi Advanced Format (4k) drives */ 887 { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, 888 /*quirks*/DA_Q_4K 889 }, 890 { 891 /* Micron Advanced Format (4k) drives */ 892 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" }, 893 /*quirks*/DA_Q_4K 894 }, 895 { 896 /* Samsung Advanced Format (4k) drives */ 897 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, 898 /*quirks*/DA_Q_4K 899 }, 900 { 901 /* Samsung Advanced Format (4k) drives */ 902 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, 903 /*quirks*/DA_Q_4K 904 }, 905 { 906 /* Samsung Advanced Format (4k) drives */ 907 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, 908 /*quirks*/DA_Q_4K 909 }, 910 { 911 /* Samsung Advanced Format (4k) drives */ 912 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, 913 /*quirks*/DA_Q_4K 914 }, 915 { 916 /* Seagate Barracuda Green Advanced Format (4k) drives */ 917 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, 918 /*quirks*/DA_Q_4K 919 }, 920 { 921 /* Seagate Barracuda Green Advanced Format (4k) drives */ 922 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, 923 /*quirks*/DA_Q_4K 924 }, 925 { 926 /* Seagate Barracuda Green Advanced Format (4k) drives */ 927 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, 928 /*quirks*/DA_Q_4K 929 }, 930 { 931 /* Seagate Barracuda Green Advanced Format (4k) drives */ 932 { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, 933 /*quirks*/DA_Q_4K 934 }, 935 { 936 /* Seagate Barracuda Green Advanced Format (4k) drives */ 937 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, 938 /*quirks*/DA_Q_4K 939 }, 940 { 941 /* Seagate Barracuda Green Advanced Format (4k) drives */ 942 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, 943 /*quirks*/DA_Q_4K 944 }, 945 { 946 /* Seagate Momentus Advanced Format (4k) 
drives */ 947 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, 948 /*quirks*/DA_Q_4K 949 }, 950 { 951 /* Seagate Momentus Advanced Format (4k) drives */ 952 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, 953 /*quirks*/DA_Q_4K 954 }, 955 { 956 /* Seagate Momentus Advanced Format (4k) drives */ 957 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, 958 /*quirks*/DA_Q_4K 959 }, 960 { 961 /* Seagate Momentus Advanced Format (4k) drives */ 962 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, 963 /*quirks*/DA_Q_4K 964 }, 965 { 966 /* Seagate Momentus Advanced Format (4k) drives */ 967 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, 968 /*quirks*/DA_Q_4K 969 }, 970 { 971 /* Seagate Momentus Advanced Format (4k) drives */ 972 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, 973 /*quirks*/DA_Q_4K 974 }, 975 { 976 /* Seagate Momentus Advanced Format (4k) drives */ 977 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, 978 /*quirks*/DA_Q_4K 979 }, 980 { 981 /* Seagate Momentus Advanced Format (4k) drives */ 982 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, 983 /*quirks*/DA_Q_4K 984 }, 985 { 986 /* Seagate Momentus Advanced Format (4k) drives */ 987 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, 988 /*quirks*/DA_Q_4K 989 }, 990 { 991 /* Seagate Momentus Advanced Format (4k) drives */ 992 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, 993 /*quirks*/DA_Q_4K 994 }, 995 { 996 /* Seagate Momentus Advanced Format (4k) drives */ 997 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, 998 /*quirks*/DA_Q_4K 999 }, 1000 { 1001 /* Seagate Momentus Advanced Format (4k) drives */ 1002 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, 1003 /*quirks*/DA_Q_4K 1004 }, 1005 { 1006 /* Seagate Momentus Advanced Format (4k) drives */ 1007 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, 1008 /*quirks*/DA_Q_4K 1009 }, 1010 { 1011 /* Seagate Momentus Advanced Format (4k) drives */ 1012 { 
T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, 1013 /*quirks*/DA_Q_4K 1014 }, 1015 { 1016 /* Seagate Momentus Thin Advanced Format (4k) drives */ 1017 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, 1018 /*quirks*/DA_Q_4K 1019 }, 1020 { 1021 /* Seagate Momentus Thin Advanced Format (4k) drives */ 1022 { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, 1023 /*quirks*/DA_Q_4K 1024 }, 1025 { 1026 /* WDC Caviar Green Advanced Format (4k) drives */ 1027 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, 1028 /*quirks*/DA_Q_4K 1029 }, 1030 { 1031 /* WDC Caviar Green Advanced Format (4k) drives */ 1032 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, 1033 /*quirks*/DA_Q_4K 1034 }, 1035 { 1036 /* WDC Caviar Green Advanced Format (4k) drives */ 1037 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, 1038 /*quirks*/DA_Q_4K 1039 }, 1040 { 1041 /* WDC Caviar Green Advanced Format (4k) drives */ 1042 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, 1043 /*quirks*/DA_Q_4K 1044 }, 1045 { 1046 /* WDC Caviar Green Advanced Format (4k) drives */ 1047 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, 1048 /*quirks*/DA_Q_4K 1049 }, 1050 { 1051 /* WDC Caviar Green Advanced Format (4k) drives */ 1052 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, 1053 /*quirks*/DA_Q_4K 1054 }, 1055 { 1056 /* WDC Caviar Green Advanced Format (4k) drives */ 1057 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, 1058 /*quirks*/DA_Q_4K 1059 }, 1060 { 1061 /* WDC Caviar Green Advanced Format (4k) drives */ 1062 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, 1063 /*quirks*/DA_Q_4K 1064 }, 1065 { 1066 /* WDC Scorpio Black Advanced Format (4k) drives */ 1067 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, 1068 /*quirks*/DA_Q_4K 1069 }, 1070 { 1071 /* WDC Scorpio Black Advanced Format (4k) drives */ 1072 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, 1073 /*quirks*/DA_Q_4K 1074 }, 1075 { 1076 
/* WDC Scorpio Black Advanced Format (4k) drives */ 1077 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, 1078 /*quirks*/DA_Q_4K 1079 }, 1080 { 1081 /* WDC Scorpio Black Advanced Format (4k) drives */ 1082 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, 1083 /*quirks*/DA_Q_4K 1084 }, 1085 { 1086 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1087 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, 1088 /*quirks*/DA_Q_4K 1089 }, 1090 { 1091 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1092 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, 1093 /*quirks*/DA_Q_4K 1094 }, 1095 { 1096 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1097 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, 1098 /*quirks*/DA_Q_4K 1099 }, 1100 { 1101 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1102 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, 1103 /*quirks*/DA_Q_4K 1104 }, 1105 { 1106 /* 1107 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1) 1108 * PR: usb/97472 1109 */ 1110 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"}, 1111 /*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE 1112 }, 1113 { 1114 /* 1115 * Olympus digital cameras (D-370) 1116 * PR: usb/97472 1117 */ 1118 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"}, 1119 /*quirks*/ DA_Q_NO_6_BYTE 1120 }, 1121 { 1122 /* 1123 * Olympus digital cameras (E-100RS, E-10). 
1124 * PR: usb/97472 1125 */ 1126 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"}, 1127 /*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE 1128 }, 1129 { 1130 /* 1131 * Olympus FE-210 camera 1132 */ 1133 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", 1134 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1135 }, 1136 { 1137 /* 1138 * Pentax Digital Camera 1139 * PR: usb/93389 1140 */ 1141 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PENTAX", "DIGITAL CAMERA", 1142 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1143 }, 1144 { 1145 /* 1146 * LG UP3S MP3 player 1147 */ 1148 {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", 1149 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1150 }, 1151 { 1152 /* 1153 * Laser MP3-2GA13 MP3 player 1154 */ 1155 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", 1156 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1157 }, 1158 { 1159 /* 1160 * LaCie external 250GB Hard drive des by Porsche 1161 * Submitted by: Ben Stuyts <ben@altesco.nl> 1162 * PR: 121474 1163 */ 1164 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, 1165 /*quirks*/ DA_Q_NO_SYNC_CACHE 1166 }, 1167 /* SATA SSDs */ 1168 { 1169 /* 1170 * Corsair Force 2 SSDs 1171 * 4k optimised & trim only works in 4k requests + 4k aligned 1172 */ 1173 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, 1174 /*quirks*/DA_Q_4K 1175 }, 1176 { 1177 /* 1178 * Corsair Force 3 SSDs 1179 * 4k optimised & trim only works in 4k requests + 4k aligned 1180 */ 1181 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, 1182 /*quirks*/DA_Q_4K 1183 }, 1184 { 1185 /* 1186 * Corsair Neutron GTX SSDs 1187 * 4k optimised & trim only works in 4k requests + 4k aligned 1188 */ 1189 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, 1190 /*quirks*/DA_Q_4K 1191 }, 1192 { 1193 /* 1194 * Corsair Force GT & GS SSDs 1195 * 4k optimised & trim only works in 4k requests + 4k aligned 1196 */ 1197 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, 1198 /*quirks*/DA_Q_4K 1199 }, 1200 { 1201 /* 1202 * Crucial M4 SSDs 
1203 * 4k optimised & trim only works in 4k requests + 4k aligned 1204 */ 1205 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, 1206 /*quirks*/DA_Q_4K 1207 }, 1208 { 1209 /* 1210 * Crucial RealSSD C300 SSDs 1211 * 4k optimised 1212 */ 1213 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", 1214 "*" }, /*quirks*/DA_Q_4K 1215 }, 1216 { 1217 /* 1218 * Intel 320 Series SSDs 1219 * 4k optimised & trim only works in 4k requests + 4k aligned 1220 */ 1221 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, 1222 /*quirks*/DA_Q_4K 1223 }, 1224 { 1225 /* 1226 * Intel 330 Series SSDs 1227 * 4k optimised & trim only works in 4k requests + 4k aligned 1228 */ 1229 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, 1230 /*quirks*/DA_Q_4K 1231 }, 1232 { 1233 /* 1234 * Intel 510 Series SSDs 1235 * 4k optimised & trim only works in 4k requests + 4k aligned 1236 */ 1237 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, 1238 /*quirks*/DA_Q_4K 1239 }, 1240 { 1241 /* 1242 * Intel 520 Series SSDs 1243 * 4k optimised & trim only works in 4k requests + 4k aligned 1244 */ 1245 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, 1246 /*quirks*/DA_Q_4K 1247 }, 1248 { 1249 /* 1250 * Intel S3610 Series SSDs 1251 * 4k optimised & trim only works in 4k requests + 4k aligned 1252 */ 1253 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" }, 1254 /*quirks*/DA_Q_4K 1255 }, 1256 { 1257 /* 1258 * Intel X25-M Series SSDs 1259 * 4k optimised & trim only works in 4k requests + 4k aligned 1260 */ 1261 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, 1262 /*quirks*/DA_Q_4K 1263 }, 1264 { 1265 /* 1266 * Kingston E100 Series SSDs 1267 * 4k optimised & trim only works in 4k requests + 4k aligned 1268 */ 1269 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, 1270 /*quirks*/DA_Q_4K 1271 }, 1272 { 1273 /* 1274 * Kingston HyperX 3k SSDs 1275 * 4k optimised & trim only works in 4k requests + 4k aligned 1276 */ 1277 { 
T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, 1278 /*quirks*/DA_Q_4K 1279 }, 1280 { 1281 /* 1282 * Marvell SSDs (entry taken from OpenSolaris) 1283 * 4k optimised & trim only works in 4k requests + 4k aligned 1284 */ 1285 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, 1286 /*quirks*/DA_Q_4K 1287 }, 1288 { 1289 /* 1290 * OCZ Agility 2 SSDs 1291 * 4k optimised & trim only works in 4k requests + 4k aligned 1292 */ 1293 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, 1294 /*quirks*/DA_Q_4K 1295 }, 1296 { 1297 /* 1298 * OCZ Agility 3 SSDs 1299 * 4k optimised & trim only works in 4k requests + 4k aligned 1300 */ 1301 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, 1302 /*quirks*/DA_Q_4K 1303 }, 1304 { 1305 /* 1306 * OCZ Deneva R Series SSDs 1307 * 4k optimised & trim only works in 4k requests + 4k aligned 1308 */ 1309 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, 1310 /*quirks*/DA_Q_4K 1311 }, 1312 { 1313 /* 1314 * OCZ Vertex 2 SSDs (inc pro series) 1315 * 4k optimised & trim only works in 4k requests + 4k aligned 1316 */ 1317 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, 1318 /*quirks*/DA_Q_4K 1319 }, 1320 { 1321 /* 1322 * OCZ Vertex 3 SSDs 1323 * 4k optimised & trim only works in 4k requests + 4k aligned 1324 */ 1325 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, 1326 /*quirks*/DA_Q_4K 1327 }, 1328 { 1329 /* 1330 * OCZ Vertex 4 SSDs 1331 * 4k optimised & trim only works in 4k requests + 4k aligned 1332 */ 1333 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, 1334 /*quirks*/DA_Q_4K 1335 }, 1336 { 1337 /* 1338 * Samsung 750 Series SSDs 1339 * 4k optimised & trim only works in 4k requests + 4k aligned 1340 */ 1341 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" }, 1342 /*quirks*/DA_Q_4K 1343 }, 1344 { 1345 /* 1346 * Samsung 830 Series SSDs 1347 * 4k optimised & trim only works in 4k requests + 4k aligned 1348 */ 1349 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", 
"SAMSUNG SSD 830 Series*", "*" }, 1350 /*quirks*/DA_Q_4K 1351 }, 1352 { 1353 /* 1354 * Samsung 840 SSDs 1355 * 4k optimised & trim only works in 4k requests + 4k aligned 1356 */ 1357 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, 1358 /*quirks*/DA_Q_4K 1359 }, 1360 { 1361 /* 1362 * Samsung 845 SSDs 1363 * 4k optimised & trim only works in 4k requests + 4k aligned 1364 */ 1365 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" }, 1366 /*quirks*/DA_Q_4K 1367 }, 1368 { 1369 /* 1370 * Samsung 850 SSDs 1371 * 4k optimised & trim only works in 4k requests + 4k aligned 1372 */ 1373 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, 1374 /*quirks*/DA_Q_4K 1375 }, 1376 { 1377 /* 1378 * Samsung 843T Series SSDs (MZ7WD*) 1379 * Samsung PM851 Series SSDs (MZ7TE*) 1380 * Samsung PM853T Series SSDs (MZ7GE*) 1381 * Samsung SM863 Series SSDs (MZ7KM*) 1382 * 4k optimised 1383 */ 1384 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, 1385 /*quirks*/DA_Q_4K 1386 }, 1387 { 1388 /* 1389 * Same as for SAMSUNG MZ7* but enable the quirks for SSD 1390 * starting with MZ7* too 1391 */ 1392 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" }, 1393 /*quirks*/DA_Q_4K 1394 }, 1395 { 1396 /* 1397 * SuperTalent TeraDrive CT SSDs 1398 * 4k optimised & trim only works in 4k requests + 4k aligned 1399 */ 1400 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, 1401 /*quirks*/DA_Q_4K 1402 }, 1403 { 1404 /* 1405 * XceedIOPS SATA SSDs 1406 * 4k optimised 1407 */ 1408 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, 1409 /*quirks*/DA_Q_4K 1410 }, 1411 { 1412 /* 1413 * Hama Innostor USB-Stick 1414 */ 1415 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, 1416 /*quirks*/DA_Q_NO_RC16 1417 }, 1418 { 1419 /* 1420 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) 1421 * Drive Managed SATA hard drive. This drive doesn't report 1422 * in firmware that it is a drive managed SMR drive. 
1423 */ 1424 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" }, 1425 /*quirks*/DA_Q_SMR_DM 1426 }, 1427 { 1428 /* 1429 * MX-ES USB Drive by Mach Xtreme 1430 */ 1431 { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"}, 1432 /*quirks*/DA_Q_NO_RC16 1433 }, 1434 }; 1435 1436 static disk_strategy_t dastrategy; 1437 static dumper_t dadump; 1438 static periph_init_t dainit; 1439 static void daasync(void *callback_arg, u_int32_t code, 1440 struct cam_path *path, void *arg); 1441 static void dasysctlinit(void *context, int pending); 1442 static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS); 1443 static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); 1444 static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); 1445 static int dazonemodesysctl(SYSCTL_HANDLER_ARGS); 1446 static int dazonesupsysctl(SYSCTL_HANDLER_ARGS); 1447 static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); 1448 static void dadeletemethodset(struct da_softc *softc, 1449 da_delete_methods delete_method); 1450 static off_t dadeletemaxsize(struct da_softc *softc, 1451 da_delete_methods delete_method); 1452 static void dadeletemethodchoose(struct da_softc *softc, 1453 da_delete_methods default_method); 1454 static void daprobedone(struct cam_periph *periph, union ccb *ccb); 1455 1456 static periph_ctor_t daregister; 1457 static periph_dtor_t dacleanup; 1458 static periph_start_t dastart; 1459 static periph_oninv_t daoninvalidate; 1460 static void dazonedone(struct cam_periph *periph, union ccb *ccb); 1461 static void dadone(struct cam_periph *periph, 1462 union ccb *done_ccb); 1463 static void dadone_probewp(struct cam_periph *periph, 1464 union ccb *done_ccb); 1465 static void dadone_proberc(struct cam_periph *periph, 1466 union ccb *done_ccb); 1467 static void dadone_probelbp(struct cam_periph *periph, 1468 union ccb *done_ccb); 1469 static void dadone_probeblklimits(struct cam_periph *periph, 1470 union ccb *done_ccb); 1471 static void dadone_probebdc(struct cam_periph *periph, 1472 union ccb *done_ccb); 
1473 static void dadone_probeata(struct cam_periph *periph, 1474 union ccb *done_ccb); 1475 static void dadone_probeatalogdir(struct cam_periph *periph, 1476 union ccb *done_ccb); 1477 static void dadone_probeataiddir(struct cam_periph *periph, 1478 union ccb *done_ccb); 1479 static void dadone_probeatasup(struct cam_periph *periph, 1480 union ccb *done_ccb); 1481 static void dadone_probeatazone(struct cam_periph *periph, 1482 union ccb *done_ccb); 1483 static void dadone_probezone(struct cam_periph *periph, 1484 union ccb *done_ccb); 1485 static void dadone_tur(struct cam_periph *periph, 1486 union ccb *done_ccb); 1487 static int daerror(union ccb *ccb, u_int32_t cam_flags, 1488 u_int32_t sense_flags); 1489 static void daprevent(struct cam_periph *periph, int action); 1490 static void dareprobe(struct cam_periph *periph); 1491 static void dasetgeom(struct cam_periph *periph, uint32_t block_len, 1492 uint64_t maxsector, 1493 struct scsi_read_capacity_data_long *rcaplong, 1494 size_t rcap_size); 1495 static callout_func_t dasendorderedtag; 1496 static void dashutdown(void *arg, int howto); 1497 static callout_func_t damediapoll; 1498 1499 #ifndef DA_DEFAULT_POLL_PERIOD 1500 #define DA_DEFAULT_POLL_PERIOD 3 1501 #endif 1502 1503 #ifndef DA_DEFAULT_TIMEOUT 1504 #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ 1505 #endif 1506 1507 #ifndef DA_DEFAULT_SOFTTIMEOUT 1508 #define DA_DEFAULT_SOFTTIMEOUT 0 1509 #endif 1510 1511 #ifndef DA_DEFAULT_RETRY 1512 #define DA_DEFAULT_RETRY 4 1513 #endif 1514 1515 #ifndef DA_DEFAULT_SEND_ORDERED 1516 #define DA_DEFAULT_SEND_ORDERED 1 1517 #endif 1518 1519 static int da_poll_period = DA_DEFAULT_POLL_PERIOD; 1520 static int da_retry_count = DA_DEFAULT_RETRY; 1521 static int da_default_timeout = DA_DEFAULT_TIMEOUT; 1522 static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT; 1523 static int da_send_ordered = DA_DEFAULT_SEND_ORDERED; 1524 static int da_disable_wp_detection = 0; 1525 1526 static SYSCTL_NODE(_kern_cam, 
OID_AUTO, da, CTLFLAG_RD, 0, 1527 "CAM Direct Access Disk driver"); 1528 SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, 1529 &da_poll_period, 0, "Media polling period in seconds"); 1530 SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN, 1531 &da_retry_count, 0, "Normal I/O retry count"); 1532 SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN, 1533 &da_default_timeout, 0, "Normal I/O timeout (in seconds)"); 1534 SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN, 1535 &da_send_ordered, 0, "Send Ordered Tags"); 1536 SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN, 1537 &da_disable_wp_detection, 0, 1538 "Disable detection of write-protected disks"); 1539 1540 SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout, 1541 CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I", 1542 "Soft I/O timeout (ms)"); 1543 TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout); 1544 1545 /* 1546 * DA_ORDEREDTAG_INTERVAL determines how often, relative 1547 * to the default timeout, we check to see whether an ordered 1548 * tagged transaction is appropriate to prevent simple tag 1549 * starvation. Since we'd like to ensure that there is at least 1550 * 1/2 of the timeout length left for a starved transaction to 1551 * complete after we've sent an ordered tag, we must poll at least 1552 * four times in every timeout period. This takes care of the worst 1553 * case where a starved transaction starts during an interval that 1554 * meets the requirement "don't send an ordered tag" test so it takes 1555 * us two intervals to determine that a tag must be sent. 
1556 */ 1557 #ifndef DA_ORDEREDTAG_INTERVAL 1558 #define DA_ORDEREDTAG_INTERVAL 4 1559 #endif 1560 1561 static struct periph_driver dadriver = 1562 { 1563 dainit, "da", 1564 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 1565 }; 1566 1567 PERIPHDRIVER_DECLARE(da, dadriver); 1568 1569 static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers"); 1570 1571 /* 1572 * This driver takes out references / holds in well defined pairs, never 1573 * recursively. These macros / inline functions enforce those rules. They 1574 * are only enabled with DA_TRACK_REFS or INVARIANTS. If DA_TRACK_REFS is 1575 * defined to be 2 or larger, the tracking also includes debug printfs. 1576 */ 1577 #if defined(DA_TRACK_REFS) || defined(INVARIANTS) 1578 1579 #ifndef DA_TRACK_REFS 1580 #define DA_TRACK_REFS 1 1581 #endif 1582 1583 #if DA_TRACK_REFS > 1 1584 static const char *da_ref_text[] = { 1585 "bogus", 1586 "open", 1587 "open hold", 1588 "close hold", 1589 "reprobe hold", 1590 "Test Unit Ready", 1591 "Geom", 1592 "sysctl", 1593 "reprobe", 1594 "max -- also bogus" 1595 }; 1596 1597 #define DA_PERIPH_PRINT(periph, msg, args...) \ 1598 CAM_PERIPH_PRINT(periph, msg, ##args) 1599 #else 1600 #define DA_PERIPH_PRINT(periph, msg, args...) 
#endif

/*
 * Range-check a reference token; any value outside [0, DA_REF_MAX) is a
 * driver bug, so panic rather than index outside ref_flags[].
 */
static inline void
token_sanity(da_ref_token token)
{
	if ((unsigned)token >= DA_REF_MAX)
		panic("Bad token value passed in %d\n", token);
}

/*
 * Tracked wrapper around cam_periph_hold().  On success the hold is
 * recorded under 'token' in softc->ref_flags[]; holding the same token
 * twice panics, enforcing the "well defined pairs, never recursively"
 * rule stated above.
 */
static inline int
da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token)
{
	int err = cam_periph_hold(periph, priority);

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n",
	    da_ref_text[token], token, err);
	if (err == 0) {
		int cnt;
		struct da_softc *softc = periph->softc;

		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
		if (cnt != 0)
			panic("Re-holding for reason %d, cnt = %d", token, cnt);
	}
	return (err);
}

/*
 * Tracked wrapper around cam_periph_unhold(); panics unless exactly one
 * hold is outstanding for 'token'.
 */
static inline void
da_periph_unhold(struct cam_periph *periph, da_ref_token token)
{
	int cnt;
	struct da_softc *softc = periph->softc;

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n",
	    da_ref_text[token], token);
	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
	if (cnt != 1)
		panic("Unholding %d with cnt = %d", token, cnt);
	cam_periph_unhold(periph);
}

/*
 * Tracked wrapper around cam_periph_acquire(); same pairing rules as
 * da_periph_hold(), but for references rather than holds.
 */
static inline int
da_periph_acquire(struct cam_periph *periph, da_ref_token token)
{
	int err = cam_periph_acquire(periph);

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n",
	    da_ref_text[token], token, err);
	if (err == 0) {
		int cnt;
		struct da_softc *softc = periph->softc;

		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
		if (cnt != 0)
			panic("Re-refing for reason %d, cnt = %d", token, cnt);
	}
	return (err);
}

/*
 * Tracked wrapper around cam_periph_release(); panics unless exactly one
 * reference is outstanding for 'token'.
 */
static inline void
da_periph_release(struct cam_periph *periph, da_ref_token token)
{
	int cnt;
	struct da_softc *softc = periph->softc;

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n",
	    da_ref_text[token], token);
	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
	if (cnt != 1)
		panic("Releasing %d with cnt = %d", token, cnt);
	cam_periph_release(periph);
}

/*
 * As da_periph_release(), but callable with the periph lock already held.
 */
static inline void
da_periph_release_locked(struct cam_periph *periph, da_ref_token token)
{
	int cnt;
	struct da_softc *softc = periph->softc;

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n",
	    da_ref_text[token], token);
	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
	if (cnt != 1)
		panic("releasing (locked) %d with cnt = %d", token, cnt);
	cam_periph_release_locked(periph);
}

/*
 * Poison the raw cam_periph_* names so any direct use elsewhere in this
 * file fails to compile; the tracked wrappers above must be used instead.
 */
#define cam_periph_hold POISON
#define cam_periph_unhold POISON
#define cam_periph_acquire POISON
#define cam_periph_release POISON
#define cam_periph_release_locked POISON

#else
/* Tracking disabled: the wrappers collapse to the raw CAM calls. */
#define da_periph_hold(periph, prio, token)	cam_periph_hold((periph), (prio))
#define da_periph_unhold(periph, token)		cam_periph_unhold((periph))
#define da_periph_acquire(periph, token)	cam_periph_acquire((periph))
#define da_periph_release(periph, token)	cam_periph_release((periph))
#define da_periph_release_locked(periph, token)	cam_periph_release_locked((periph))
#endif

/*
 * GEOM disk d_open method.  Takes a DA_REF_OPEN reference that is dropped
 * either on failure below or later in daclose(), then kicks off a reprobe
 * so the media parameters are current before the open completes.
 */
static int
daopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (da_periph_acquire(periph, DA_REF_OPEN) != 0) {
		return (ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) {
		cam_periph_unlock(periph);
		da_periph_release(periph, DA_REF_OPEN);
		return (error);
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daopen\n"));

	softc = (struct da_softc *)periph->softc;
	dareprobe(periph);

	/* Wait for the disk size update triggered by the reprobe above. */
	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
	    "dareprobe", 0);
	if (error != 0)
		xpt_print(periph->path, "unable to retrieve capacity data\n");

	/* The periph may have been invalidated while we slept. */
	if (periph->flags & CAM_PERIPH_INVALID)
		error = ENXIO;

	/* Lock removable media into the drive while it is open. */
	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
		daprevent(periph, PR_PREVENT);

	if (error == 0) {
		softc->flags &= ~DA_FLAG_PACK_INVALID;
		softc->flags |= DA_FLAG_OPEN;
	}

	da_periph_unhold(periph, DA_REF_OPEN_HOLD);
	cam_periph_unlock(periph);

	/* On failure, drop the reference taken at the top of daopen(). */
	if (error != 0)
		da_periph_release(periph, DA_REF_OPEN);

	return (error);
}

/*
 * GEOM disk d_close method.  Flushes a dirty write cache, re-allows medium
 * removal, waits for softc->refcount to drain, and drops the DA_REF_OPEN
 * reference taken in daopen().  Always succeeds: cache-flush or prevent
 * failures are not reported to the caller.
 */
static int
daclose(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daclose\n"));

	if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) {

		/* Flush disk cache. */
		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
			    /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG,
			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
			    5 * 60 * 1000);
			cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
			    softc->disk->d_devstat);
			softc->flags &= ~DA_FLAG_DIRTY;
			xpt_release_ccb(ccb);
		}

		/* Allow medium removal.
		 */
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);

		da_periph_unhold(periph, DA_REF_CLOSE_HOLD);
	}

	/*
	 * If we've got removable media, mark the blocksize as
	 * unavailable, since it could change when new media is
	 * inserted.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;

	softc->flags &= ~DA_FLAG_OPEN;
	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
	cam_periph_unlock(periph);
	da_periph_release(periph, DA_REF_OPEN);
	return (0);
}

/*
 * Ask the I/O scheduler to dispatch work, but only once the periph has
 * finished probing and is in its normal running state.
 */
static void
daschedule(struct cam_periph *periph)
{
	struct da_softc *softc = (struct da_softc *)periph->softc;

	if (softc->state != DA_STATE_NORMAL)
		return;

	cam_iosched_schedule(softc->cam_iosched, periph);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
dastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	softc = (struct da_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));

	/*
	 * Zone commands must be ordered, because they can depend on the
	 * effects of previously issued commands, and they may affect
	 * commands after them.
	 */
	if (bp->bio_cmd == BIO_ZONE)
		bp->bio_flags |= BIO_ORDERED;

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	cam_iosched_queue_work(softc->cam_iosched, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	daschedule(periph);
	cam_periph_unlock(periph);

	return;
}

/*
 * Kernel crash-dump method.  Runs with the system wedged, so each request
 * is issued synchronously through cam_periph_runccb() with no retries and
 * no error recovery (SF_NO_RECOVERY | SF_NO_RETRY).  A non-zero 'length'
 * writes that many bytes at byte 'offset'; a zero-length call flushes the
 * drive's cache (unless the DA_Q_NO_SYNC_CACHE quirk is set).
 */
static int
dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int secsize;
	struct ccb_scsiio csio;
	struct disk *dp;
	int error = 0;

	dp = arg;
	periph = dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	secsize = softc->params.secsize;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
		return (ENXIO);

	memset(&csio, 0, sizeof(csio));
	if (length > 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/0,
				/*cbfcnp*/NULL,
				MSG_ORDERED_Q_TAG,
				/*read*/SCSI_RW_WRITE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				offset / secsize,
				length / secsize,
				/*data_ptr*/(u_int8_t *) virtual,
				/*dxfer_len*/length,
				/*sense_len*/SSD_FULL_SIZE,
				da_default_timeout * 1000);
		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if (error != 0)
			printf("Aborting dump due to I/O error.\n");
		return (error);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/0,
				       /*cbfcnp*/NULL,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 1000);
		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
	}
	return (error);
}

/*
 * GEOM attribute method: forward BIO_GETATTR requests to the transport
 * layer via xpt_getattr() under the periph lock.
 */
static int
dagetattr(struct bio *bp)
{
	int ret;
	struct cam_periph *periph;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	cam_periph_lock(periph);
	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
	    periph->path);
	cam_periph_unlock(periph);
	if (ret == 0)
		bp->bio_completed = bp->bio_length;
	return ret;
}

/*
 * Periph driver initialization: hook device arrival notifications and,
 * when ordered tags are enabled, the shutdown-time cache flush.
 */
static void
dainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (da_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("dainit: shutdown event registration failed!\n");
	}
}

/*
 * Callback from GEOM, called when it has finished cleaning up its
 * resources.
 */
static void
dadiskgonecb(struct disk *dp)
{
	struct cam_periph *periph;

	/* GEOM is done with the disk; drop the reference it was holding. */
	periph = (struct cam_periph *)dp->d_drv1;
	da_periph_release(periph, DA_REF_GEOM);
}

/*
 * Periph invalidation hook: stop new work, fail queued I/O, and start
 * GEOM teardown.  Called with the periph lock held.
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	struct da_softc *softc;

	cam_periph_assert(periph, MA_OWNED);
	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, daasync, periph, periph->path);

	softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
	softc->invalidations++;
#endif

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);

	/*
	 * Tell GEOM that we've gone away, we'll get a callback when it is
	 * done cleaning up its resources.
	 */
	disk_gone(softc->disk);
}

/*
 * Periph destructor: tear down the I/O scheduler, sysctl trees, callouts
 * and disk, then free the softc.  The periph lock is dropped while the
 * potentially-sleeping teardown work runs and retaken before returning.
 */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_unlock(periph);

	cam_iosched_fini(softc->cam_iosched);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
#ifdef CAM_IO_STATS
		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl stats context\n");
#endif
		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl context\n");
	}

	callout_drain(&softc->mediapoll_c);
	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

/*
 * Async event dispatcher for the da driver.  Locking varies by event
 * code; see the per-case comments.
 */
static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:	/* callback to create periph, no locking yet */
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/* Only attach to connected SCSI direct-access-class LUNs. */
		if (cgd->protocol != PROTO_SCSI)
			break;
		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
			break;
		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		    && SID_TYPE(&cgd->inq_data) != T_RBC
		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL
		    && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
2087 */ 2088 status = cam_periph_alloc(daregister, daoninvalidate, 2089 dacleanup, dastart, 2090 "da", CAM_PERIPH_BIO, 2091 path, daasync, 2092 AC_FOUND_DEVICE, cgd); 2093 2094 if (status != CAM_REQ_CMP 2095 && status != CAM_REQ_INPROG) 2096 printf("daasync: Unable to attach to new device " 2097 "due to status 0x%x\n", status); 2098 return; 2099 } 2100 case AC_ADVINFO_CHANGED: /* Doesn't touch periph */ 2101 { 2102 uintptr_t buftype; 2103 2104 buftype = (uintptr_t)arg; 2105 if (buftype == CDAI_TYPE_PHYS_PATH) { 2106 struct da_softc *softc; 2107 2108 softc = periph->softc; 2109 disk_attr_changed(softc->disk, "GEOM::physpath", 2110 M_NOWAIT); 2111 } 2112 break; 2113 } 2114 case AC_UNIT_ATTENTION: 2115 { 2116 union ccb *ccb; 2117 int error_code, sense_key, asc, ascq; 2118 2119 softc = (struct da_softc *)periph->softc; 2120 ccb = (union ccb *)arg; 2121 2122 /* 2123 * Handle all UNIT ATTENTIONs except our own, as they will be 2124 * handled by daerror(). Since this comes from a different periph, 2125 * that periph's lock is held, not ours, so we have to take it ours 2126 * out to touch softc flags. 
2127 */ 2128 if (xpt_path_periph(ccb->ccb_h.path) != periph && 2129 scsi_extract_sense_ccb(ccb, 2130 &error_code, &sense_key, &asc, &ascq)) { 2131 if (asc == 0x2A && ascq == 0x09) { 2132 xpt_print(ccb->ccb_h.path, 2133 "Capacity data has changed\n"); 2134 cam_periph_lock(periph); 2135 softc->flags &= ~DA_FLAG_PROBED; 2136 dareprobe(periph); 2137 cam_periph_unlock(periph); 2138 } else if (asc == 0x28 && ascq == 0x00) { 2139 cam_periph_lock(periph); 2140 softc->flags &= ~DA_FLAG_PROBED; 2141 cam_periph_unlock(periph); 2142 disk_media_changed(softc->disk, M_NOWAIT); 2143 } else if (asc == 0x3F && ascq == 0x03) { 2144 xpt_print(ccb->ccb_h.path, 2145 "INQUIRY data has changed\n"); 2146 cam_periph_lock(periph); 2147 softc->flags &= ~DA_FLAG_PROBED; 2148 dareprobe(periph); 2149 cam_periph_unlock(periph); 2150 } 2151 } 2152 break; 2153 } 2154 case AC_SCSI_AEN: /* Called for this path: periph locked */ 2155 /* 2156 * Appears to be currently unused for SCSI devices, only ata SIMs 2157 * generate this. 2158 */ 2159 cam_periph_assert(periph, MA_OWNED); 2160 softc = (struct da_softc *)periph->softc; 2161 if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && 2162 (softc->flags & DA_FLAG_TUR_PENDING) == 0) { 2163 if (da_periph_acquire(periph, DA_REF_TUR) == 0) { 2164 cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); 2165 daschedule(periph); 2166 } 2167 } 2168 /* FALLTHROUGH */ 2169 case AC_SENT_BDR: /* Called for this path: periph locked */ 2170 case AC_BUS_RESET: /* Called for this path: periph locked */ 2171 { 2172 struct ccb_hdr *ccbh; 2173 2174 cam_periph_assert(periph, MA_OWNED); 2175 softc = (struct da_softc *)periph->softc; 2176 /* 2177 * Don't fail on the expected unit attention 2178 * that will occur. 
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		break;
	}
	case AC_INQ_CHANGED:		/* Called for this path: periph locked */
		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		softc->flags &= ~DA_FLAG_PROBED;
		dareprobe(periph);
		break;
	default:
		break;
	}
	/* Always give the generic periph layer a chance at the event. */
	cam_periph_async(periph, code, path, arg);
}

/*
 * Taskqueue handler that builds this unit's kern.cam.da.N sysctl tree.
 * Runs without the periph lock; drops the DA_REF_SYSCTL reference (taken
 * by whoever enqueued the task) on every exit path.
 */
static void
dasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	char tmpstr[32], tmpstr2[16];
	struct ccb_trans_settings cts;

	periph = (struct cam_periph *)context;
	/*
	 * periph was held for us when this task was enqueued
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}

	softc = (struct da_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	cam_periph_lock(periph);
	softc->flags |= DA_FLAG_SCTX_INIT;
	cam_periph_unlock(periph);
	softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
	    CTLFLAG_RD, 0, tmpstr, "device_index");
	if (softc->sysctl_tree == NULL) {
		printf("dasysctlinit: unable to allocate sysctl tree\n");
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}

	/*
	 * Now register the sysctl handler, so the user can change the value on
	 * the fly.
	 */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN,
		softc, 0, dadeletemethodsysctl, "A",
		"BIO_DELETE execution method");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
		softc, 0, dadeletemaxsysctl, "Q",
		"Maximum BIO_DELETE size");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
		"Minimum CDB size");
	/* TRIM/UNMAP accounting counters. */
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_count", CTLFLAG_RD, &softc->trim_count,
		"Total number of unmap/dsm commands sent");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_ranges", CTLFLAG_RD, &softc->trim_ranges,
		"Total number of ranges in unmap/dsm commands");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_lbas", CTLFLAG_RD, &softc->trim_lbas,
		"Total lbas in the unmap/dsm commands sent");

	/* Zoned (SMR) device information. */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonemodesysctl, "A",
		"Zone Mode");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonesupsysctl, "A",
		"Zone Support");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
		"Optimal Number of Open Sequential Write Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_nonseq_zones", CTLFLAG_RD,
		&softc->optimal_nonseq_zones,
		"Optimal Number of Non-Sequentially Written Sequential Write "
		"Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
		"Maximum Number of Open Sequential Write Required Zones");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "error_inject",
		       CTLFLAG_RW,
		       &softc->error_inject,
		       0,
		       "error_inject leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "unmapped_io",
		       CTLFLAG_RD,
		       &softc->unmappedio,
		       0,
		       "Unmapped I/O support");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "rotating",
		       CTLFLAG_RD,
		       &softc->rotating,
		       0,
		       "Rotating media");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "p_type",
		       CTLFLAG_RD,
		       &softc->p_type,
		       0,
		       "DIF protection type");

#ifdef CAM_TEST_FAILURE
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
		periph, 0, cam_periph_invalidate_sysctl, "I",
		"Write 1 to invalidate the drive immediately");
#endif

	/*
	 * Add some addressing info.
	 */
	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cam_periph_lock(periph);
	xpt_action((union ccb *)&cts);
	cam_periph_unlock(periph);
	if (cts.ccb_h.status != CAM_REQ_CMP) {
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}
	/* Export the WWPN for Fibre Channel attached units. */
	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWPN) {
			softc->wwpn = fc->wwpn;
			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
			    SYSCTL_CHILDREN(softc->sysctl_tree),
			    OID_AUTO, "wwpn", CTLFLAG_RD,
			    &softc->wwpn, "World Wide Port Name");
		}
	}

#ifdef CAM_IO_STATS
	/*
	 * Now add some useful stats.
	 * XXX These should live in cam_periph and be common to all periphs
	 */
	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
	    CTLFLAG_RD, 0, "Statistics");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "errors",
		       CTLFLAG_RD,
		       &softc->errors,
		       0,
		       "Transport errors reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "timeouts",
		       CTLFLAG_RD,
		       &softc->timeouts,
		       0,
		       "Device timeouts reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "pack_invalidations",
		       CTLFLAG_RD,
		       &softc->invalidations,
		       0,
		       "Device pack invalidations");
#endif

	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
	    softc->sysctl_tree);

	da_periph_release(periph, DA_REF_SYSCTL);
}

static int
dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
{
	/*
	 * Sysctl handler for "delete_max": the maximum size of a single
	 * BIO_DELETE.  New values are rejected unless they are at or
	 * below the limit computed for the current delete method.
	 */
	int error;
	uint64_t value;
	struct da_softc *softc;

	softc = (struct da_softc *)arg1;

	value = softc->disk->d_delmaxsize;
	error = sysctl_handle_64(oidp, &value, 0, req);
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	/* only accept values smaller than the calculated value */
	if (value > dadeletemaxsize(softc, softc->delete_method)) {
		return (EINVAL);
	}
	softc->disk->d_delmaxsize = value;

	return (0);
}

/*
 * Sysctl handler for "minimum_cmd_size".  Any written value is rounded
 * up to the nearest permissible CDB size: 6, 10, 12 or 16 bytes.
 */
static int
dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;

	error = sysctl_handle_int(oidp, &value, 0, req);

	if ((error != 0)
	 || (req->newptr == NULL))
		return (error);

	/*
	 * Acceptable values here are 6, 10, 12 or 16.
	 */
	if (value < 6)
		value = 6;
	else if ((value > 6)
	      && (value <= 10))
		value = 10;
	else if ((value > 10)
	      && (value <= 12))
		value = 12;
	else if (value > 12)
		value = 16;

	*(int *)arg1 = value;

	return (0);
}

/*
 * Sysctl handler for the global soft timeout, expressed to userland in
 * milliseconds and stored internally as an sbintime_t.
 */
static int
dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
{
	sbintime_t value;
	int error;

	value = da_default_softtimeout / SBT_1MS;

	/*
	 * NOTE(review): passing (int *)&value hands sysctl_handle_int()
	 * only part of the 64-bit sbintime_t; this appears to rely on
	 * the millisecond value fitting in an int and on little-endian
	 * layout -- confirm before reusing this pattern.
	 */
	error = sysctl_handle_int(oidp, (int *)&value, 0, req);
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	/* XXX Should clip this to a reasonable level */
	if (value > da_default_timeout * 1000)
		return (EINVAL);

	da_default_softtimeout = value * SBT_1MS;
	return (0);
}

/*
 * Install a delete method: record it, recompute the maximum delete
 * size, hook up the delete function, and update the GEOM CANDELETE
 * flag accordingly.
 */
static void
dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
{

	softc->delete_method = delete_method;
	softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
	softc->delete_func = da_delete_functions[delete_method];

	if (softc->delete_method > DA_DELETE_DISABLE)
		softc->disk->d_flags |= DISKFLAG_CANDELETE;
	else
		softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
}

/*
 * Return the maximum delete request size, in bytes, for the given
 * method; capped by the device's total sector count.
 */
static off_t
dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
{
	off_t sectors;

	switch(delete_method) {
	case DA_DELETE_UNMAP:
		sectors = (off_t)softc->unmap_max_lba;
		break;
	case DA_DELETE_ATA_TRIM:
		sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
		break;
	case DA_DELETE_WS16:
		sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
		break;
	case DA_DELETE_ZERO:
	case DA_DELETE_WS10:
		sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
		break;
	default:
		return 0;
	}

	return (off_t)softc->params.secsize *
	    omin(sectors, softc->params.sectors);
}

/*
 * Finish the probe sequence: pick a delete method, optionally announce
 * the supported methods and write-protect status, then transition to
 * normal operation and drop the probe hold/reference.
 */
static void
daprobedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	dadeletemethodchoose(softc, DA_DELETE_NONE);

	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		char buf[80];
		int i, sep;

		snprintf(buf, sizeof(buf), "Delete methods: <");
		sep = 0;
		for (i = 0; i <= DA_DELETE_MAX; i++) {
			if ((softc->delete_available & (1 << i)) == 0 &&
			    i != softc->delete_method)
				continue;
			if (sep)
				strlcat(buf, ",", sizeof(buf));
			strlcat(buf, da_delete_method_names[i],
			    sizeof(buf));
			/* Mark the currently-selected method. */
			if (i == softc->delete_method)
				strlcat(buf, "(*)", sizeof(buf));
			sep = 1;
		}
		strlcat(buf, ">", sizeof(buf));
		printf("%s%d: %s\n", periph->periph_name,
		    periph->unit_number, buf);
	}
	if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 &&
	    (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		printf("%s%d: Write Protected\n", periph->periph_name,
		    periph->unit_number);
	}

	/*
Since our peripheral may be invalidated by an error 2548 * above or an external event, we must release our CCB 2549 * before releasing the probe lock on the peripheral. 2550 * The peripheral will only go away once the last lock 2551 * is removed, and we need it around for the CCB release 2552 * operation. 2553 */ 2554 xpt_release_ccb(ccb); 2555 softc->state = DA_STATE_NORMAL; 2556 softc->flags |= DA_FLAG_PROBED; 2557 daschedule(periph); 2558 wakeup(&softc->disk->d_mediasize); 2559 if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) { 2560 softc->flags |= DA_FLAG_ANNOUNCED; 2561 da_periph_unhold(periph, DA_REF_PROBE_HOLD); 2562 } else 2563 da_periph_release_locked(periph, DA_REF_REPROBE); 2564 } 2565 2566 static void 2567 dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method) 2568 { 2569 int i, methods; 2570 2571 /* If available, prefer the method requested by user. */ 2572 i = softc->delete_method_pref; 2573 methods = softc->delete_available | (1 << DA_DELETE_DISABLE); 2574 if (methods & (1 << i)) { 2575 dadeletemethodset(softc, i); 2576 return; 2577 } 2578 2579 /* Use the pre-defined order to choose the best performing delete. */ 2580 for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) { 2581 if (i == DA_DELETE_ZERO) 2582 continue; 2583 if (softc->delete_available & (1 << i)) { 2584 dadeletemethodset(softc, i); 2585 return; 2586 } 2587 } 2588 2589 /* Fallback to default. 
*/ 2590 dadeletemethodset(softc, default_method); 2591 } 2592 2593 static int 2594 dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) 2595 { 2596 char buf[16]; 2597 const char *p; 2598 struct da_softc *softc; 2599 int i, error, value; 2600 2601 softc = (struct da_softc *)arg1; 2602 2603 value = softc->delete_method; 2604 if (value < 0 || value > DA_DELETE_MAX) 2605 p = "UNKNOWN"; 2606 else 2607 p = da_delete_method_names[value]; 2608 strncpy(buf, p, sizeof(buf)); 2609 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 2610 if (error != 0 || req->newptr == NULL) 2611 return (error); 2612 for (i = 0; i <= DA_DELETE_MAX; i++) { 2613 if (strcmp(buf, da_delete_method_names[i]) == 0) 2614 break; 2615 } 2616 if (i > DA_DELETE_MAX) 2617 return (EINVAL); 2618 softc->delete_method_pref = i; 2619 dadeletemethodchoose(softc, DA_DELETE_NONE); 2620 return (0); 2621 } 2622 2623 static int 2624 dazonemodesysctl(SYSCTL_HANDLER_ARGS) 2625 { 2626 char tmpbuf[40]; 2627 struct da_softc *softc; 2628 int error; 2629 2630 softc = (struct da_softc *)arg1; 2631 2632 switch (softc->zone_mode) { 2633 case DA_ZONE_DRIVE_MANAGED: 2634 snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); 2635 break; 2636 case DA_ZONE_HOST_AWARE: 2637 snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); 2638 break; 2639 case DA_ZONE_HOST_MANAGED: 2640 snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); 2641 break; 2642 case DA_ZONE_NONE: 2643 default: 2644 snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); 2645 break; 2646 } 2647 2648 error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); 2649 2650 return (error); 2651 } 2652 2653 static int 2654 dazonesupsysctl(SYSCTL_HANDLER_ARGS) 2655 { 2656 char tmpbuf[180]; 2657 struct da_softc *softc; 2658 struct sbuf sb; 2659 int error, first; 2660 unsigned int i; 2661 2662 softc = (struct da_softc *)arg1; 2663 2664 error = 0; 2665 first = 1; 2666 sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); 2667 2668 for (i = 0; i < sizeof(da_zone_desc_table) / 2669 
	    sizeof(da_zone_desc_table[0]); i++) {
		if (softc->zone_flags & da_zone_desc_table[i].value) {
			if (first == 0)
				sbuf_printf(&sb, ", ");
			else
				first = 0;
			sbuf_cat(&sb, da_zone_desc_table[i].desc);
		}
	}

	if (first == 1)
		sbuf_printf(&sb, "None");

	sbuf_finish(&sb);

	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

	return (error);
}

/*
 * CAM registration entry point for a new direct-access device.
 * Allocates and initializes the softc, applies quirks, sets up the
 * probe state machine, registers the GEOM disk, and schedules the
 * first probe step.  Called with the periph lock held; briefly drops
 * it around disk_create().
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	struct da_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;
	int quirks;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}

	/* Defaults; the probe sequence refines these from device data. */
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE_WP;
	bioq_init(&softc->delete_run_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
	softc->unmap_max_lba = UNMAP_RANGE_MAX;
	softc->unmap_gran = 0;
	softc->unmap_gran_align = 0;
	softc->ws_max_blks = WS16_MAX_BLKS;
	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
	softc->rotating = 1;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       nitems(da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	xpt_path_inq(&cpi, periph->path);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	/* Override quirks if tunable is set */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.quirks",
		 periph->unit_number);
	quirks = softc->quirks;
	TUNABLE_INT_FETCH(tmpstr, &quirks);
	softc->quirks = quirks;

	/* Host-managed ZBC devices have their own device type. */
	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
		softc->zone_mode = DA_ZONE_HOST_MANAGED;
	else if (softc->quirks & DA_Q_SMR_DM)
		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
	else
		softc->zone_mode = DA_ZONE_NONE;

	if (softc->zone_mode != DA_ZONE_NONE) {
		/*
		 * Pick the zone command interface: SAT translation when
		 * the device presents ATA info plus a Zoned BDC page,
		 * ATA passthrough when only ATA info is present, plain
		 * SCSI (ZBC) otherwise.
		 */
		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
			else
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
		} else
			softc->zone_interface = DA_ZONE_IF_SCSI;
	}

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);

	/*
	 * Take an exclusive section lock on the periph while dastart is called
	 * to finish the probe.  The lock will be dropped in dadone at the end
	 * of probe. This locks out daopen and daclose from racing with the
	 * probe.
	 *
	 * XXX if cam_periph_hold returns an error, we don't hold a refcount.
	 */
	(void)da_periph_hold(periph, PRIBIO, DA_REF_PROBE_HOLD);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, periph);

	cam_periph_unlock(periph);
	/*
	 * RBC devices don't have to support READ(6), only READ(10).
	 */
	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Load the user's default, if any.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
		 periph->unit_number);
	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);

	/*
	 * 6, 10, 12 and 16 are the currently permissible values.
	 */
	if (softc->minimum_cmd_size > 12)
		softc->minimum_cmd_size = 16;
	else if (softc->minimum_cmd_size > 10)
		softc->minimum_cmd_size = 12;
	else if (softc->minimum_cmd_size > 6)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/* Predict whether device may support READ CAPACITY(16). */
	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
	    (softc->quirks & DA_Q_NO_RC16) == 0) {
		softc->flags |= DA_FLAG_CAN_RC16;
	}

	/*
	 * Register this media as a disk.
	 */
	softc->disk = disk_alloc();
	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) |
			  XPORT_DEVSTAT_TYPE(cpi.transport),
			  DEVSTAT_PRIORITY_DISK);
	softc->disk->d_open = daopen;
	softc->disk->d_close = daclose;
	softc->disk->d_strategy = dastrategy;
	softc->disk->d_dump = dadump;
	softc->disk->d_getattr = dagetattr;
	softc->disk->d_gone = dadiskgonecb;
	softc->disk->d_name = "da";
	softc->disk->d_drv1 = periph;
	if (cpi.maxio == 0)
		softc->maxio = DFLTPHYS;	/* traditional default */
	else if (cpi.maxio > MAXPHYS)
		softc->maxio = MAXPHYS;		/* for safety */
	else
		softc->maxio = cpi.maxio;
	if (softc->quirks & DA_Q_128KB)
		softc->maxio = min(softc->maxio, 128 * 1024);
	softc->disk->d_maxsize = softc->maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
		softc->unmappedio = 1;
		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
	}
	/* Description is "<vendor> <product>" from the inquiry data. */
	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
	softc->disk->d_hba_vendor = cpi.hba_vendor;
	softc->disk->d_hba_device = cpi.hba_device;
	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
	snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment),
	    "%s%d", cpi.dev_name, cpi.unit_number);

	/*
	 * Acquire a reference to the periph before we register with GEOM.
	 * We'll release this reference once GEOM calls us back (via
	 * dadiskgonecb()) telling us that our provider has been freed.
	 */
	if (da_periph_acquire(periph, DA_REF_GEOM) != 0) {
		xpt_print(periph->path, "%s: lost periph during "
			  "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}

	disk_create(softc->disk, DISK_VERSION);
	cam_periph_lock(periph);

	/*
	 * Add async callbacks for events of interest.
	 * I don't bother checking if this fails as,
	 * in most cases, the system will function just
	 * fine without them and the only alternative
	 * would be to not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
	    AC_INQ_CHANGED, daasync, periph, periph->path);

	/*
	 * Emit an attribute changed notification just in case
	 * physical path information arrived before our async
	 * event handler was registered, but after anyone attaching
	 * to our disk device polled it.
	 */
	disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);

	/*
	 * Schedule a periodic media polling events.
2917 */ 2918 callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); 2919 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && 2920 (cgd->inq_flags & SID_AEN) == 0 && 2921 da_poll_period != 0) 2922 callout_reset(&softc->mediapoll_c, da_poll_period * hz, 2923 damediapoll, periph); 2924 2925 xpt_schedule(periph, CAM_PRIORITY_DEV); 2926 2927 return(CAM_REQ_CMP); 2928 } 2929 2930 static int 2931 da_zone_bio_to_scsi(int disk_zone_cmd) 2932 { 2933 switch (disk_zone_cmd) { 2934 case DISK_ZONE_OPEN: 2935 return ZBC_OUT_SA_OPEN; 2936 case DISK_ZONE_CLOSE: 2937 return ZBC_OUT_SA_CLOSE; 2938 case DISK_ZONE_FINISH: 2939 return ZBC_OUT_SA_FINISH; 2940 case DISK_ZONE_RWP: 2941 return ZBC_OUT_SA_RWP; 2942 } 2943 2944 return -1; 2945 } 2946 2947 static int 2948 da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, 2949 int *queue_ccb) 2950 { 2951 struct da_softc *softc; 2952 int error; 2953 2954 error = 0; 2955 2956 if (bp->bio_cmd != BIO_ZONE) { 2957 error = EINVAL; 2958 goto bailout; 2959 } 2960 2961 softc = periph->softc; 2962 2963 switch (bp->bio_zone.zone_cmd) { 2964 case DISK_ZONE_OPEN: 2965 case DISK_ZONE_CLOSE: 2966 case DISK_ZONE_FINISH: 2967 case DISK_ZONE_RWP: { 2968 int zone_flags; 2969 int zone_sa; 2970 uint64_t lba; 2971 2972 zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd); 2973 if (zone_sa == -1) { 2974 xpt_print(periph->path, "Cannot translate zone " 2975 "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd); 2976 error = EINVAL; 2977 goto bailout; 2978 } 2979 2980 zone_flags = 0; 2981 lba = bp->bio_zone.zone_params.rwp.id; 2982 2983 if (bp->bio_zone.zone_params.rwp.flags & 2984 DISK_ZONE_RWP_FLAG_ALL) 2985 zone_flags |= ZBC_OUT_ALL; 2986 2987 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { 2988 scsi_zbc_out(&ccb->csio, 2989 /*retries*/ da_retry_count, 2990 /*cbfcnp*/ dadone, 2991 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2992 /*service_action*/ zone_sa, 2993 /*zone_id*/ lba, 2994 /*zone_flags*/ zone_flags, 2995 /*data_ptr*/ NULL, 2996 /*dxfer_len*/ 0, 
2997 /*sense_len*/ SSD_FULL_SIZE, 2998 /*timeout*/ da_default_timeout * 1000); 2999 } else { 3000 /* 3001 * Note that in this case, even though we can 3002 * technically use NCQ, we don't bother for several 3003 * reasons: 3004 * 1. It hasn't been tested on a SAT layer that 3005 * supports it. This is new as of SAT-4. 3006 * 2. Even when there is a SAT layer that supports 3007 * it, that SAT layer will also probably support 3008 * ZBC -> ZAC translation, since they are both 3009 * in the SAT-4 spec. 3010 * 3. Translation will likely be preferable to ATA 3011 * passthrough. LSI / Avago at least single 3012 * steps ATA passthrough commands in the HBA, 3013 * regardless of protocol, so unless that 3014 * changes, there is a performance penalty for 3015 * doing ATA passthrough no matter whether 3016 * you're using NCQ/FPDMA, DMA or PIO. 3017 * 4. It requires a 32-byte CDB, which at least at 3018 * this point in CAM requires a CDB pointer, which 3019 * would require us to allocate an additional bit 3020 * of storage separate from the CCB. 
3021 */ 3022 error = scsi_ata_zac_mgmt_out(&ccb->csio, 3023 /*retries*/ da_retry_count, 3024 /*cbfcnp*/ dadone, 3025 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3026 /*use_ncq*/ 0, 3027 /*zm_action*/ zone_sa, 3028 /*zone_id*/ lba, 3029 /*zone_flags*/ zone_flags, 3030 /*data_ptr*/ NULL, 3031 /*dxfer_len*/ 0, 3032 /*cdb_storage*/ NULL, 3033 /*cdb_storage_len*/ 0, 3034 /*sense_len*/ SSD_FULL_SIZE, 3035 /*timeout*/ da_default_timeout * 1000); 3036 if (error != 0) { 3037 error = EINVAL; 3038 xpt_print(periph->path, 3039 "scsi_ata_zac_mgmt_out() returned an " 3040 "error!"); 3041 goto bailout; 3042 } 3043 } 3044 *queue_ccb = 1; 3045 3046 break; 3047 } 3048 case DISK_ZONE_REPORT_ZONES: { 3049 uint8_t *rz_ptr; 3050 uint32_t num_entries, alloc_size; 3051 struct disk_zone_report *rep; 3052 3053 rep = &bp->bio_zone.zone_params.report; 3054 3055 num_entries = rep->entries_allocated; 3056 if (num_entries == 0) { 3057 xpt_print(periph->path, "No entries allocated for " 3058 "Report Zones request\n"); 3059 error = EINVAL; 3060 goto bailout; 3061 } 3062 alloc_size = sizeof(struct scsi_report_zones_hdr) + 3063 (sizeof(struct scsi_report_zones_desc) * num_entries); 3064 alloc_size = min(alloc_size, softc->disk->d_maxsize); 3065 rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO); 3066 if (rz_ptr == NULL) { 3067 xpt_print(periph->path, "Unable to allocate memory " 3068 "for Report Zones request\n"); 3069 error = ENOMEM; 3070 goto bailout; 3071 } 3072 3073 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { 3074 scsi_zbc_in(&ccb->csio, 3075 /*retries*/ da_retry_count, 3076 /*cbcfnp*/ dadone, 3077 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3078 /*service_action*/ ZBC_IN_SA_REPORT_ZONES, 3079 /*zone_start_lba*/ rep->starting_id, 3080 /*zone_options*/ rep->rep_options, 3081 /*data_ptr*/ rz_ptr, 3082 /*dxfer_len*/ alloc_size, 3083 /*sense_len*/ SSD_FULL_SIZE, 3084 /*timeout*/ da_default_timeout * 1000); 3085 } else { 3086 /* 3087 * Note that in this case, even though we can 3088 * technically use NCQ, we 
don't bother for several 3089 * reasons: 3090 * 1. It hasn't been tested on a SAT layer that 3091 * supports it. This is new as of SAT-4. 3092 * 2. Even when there is a SAT layer that supports 3093 * it, that SAT layer will also probably support 3094 * ZBC -> ZAC translation, since they are both 3095 * in the SAT-4 spec. 3096 * 3. Translation will likely be preferable to ATA 3097 * passthrough. LSI / Avago at least single 3098 * steps ATA passthrough commands in the HBA, 3099 * regardless of protocol, so unless that 3100 * changes, there is a performance penalty for 3101 * doing ATA passthrough no matter whether 3102 * you're using NCQ/FPDMA, DMA or PIO. 3103 * 4. It requires a 32-byte CDB, which at least at 3104 * this point in CAM requires a CDB pointer, which 3105 * would require us to allocate an additional bit 3106 * of storage separate from the CCB. 3107 */ 3108 error = scsi_ata_zac_mgmt_in(&ccb->csio, 3109 /*retries*/ da_retry_count, 3110 /*cbcfnp*/ dadone, 3111 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3112 /*use_ncq*/ 0, 3113 /*zm_action*/ ATA_ZM_REPORT_ZONES, 3114 /*zone_id*/ rep->starting_id, 3115 /*zone_flags*/ rep->rep_options, 3116 /*data_ptr*/ rz_ptr, 3117 /*dxfer_len*/ alloc_size, 3118 /*cdb_storage*/ NULL, 3119 /*cdb_storage_len*/ 0, 3120 /*sense_len*/ SSD_FULL_SIZE, 3121 /*timeout*/ da_default_timeout * 1000); 3122 if (error != 0) { 3123 error = EINVAL; 3124 xpt_print(periph->path, 3125 "scsi_ata_zac_mgmt_in() returned an " 3126 "error!"); 3127 goto bailout; 3128 } 3129 } 3130 3131 /* 3132 * For BIO_ZONE, this isn't normally needed. However, it 3133 * is used by devstat_end_transaction_bio() to determine 3134 * how much data was transferred. 3135 */ 3136 /* 3137 * XXX KDM we have a problem. But I'm not sure how to fix 3138 * it. devstat uses bio_bcount - bio_resid to calculate 3139 * the amount of data transferred. The GEOM disk code 3140 * uses bio_length - bio_resid to calculate the amount of 3141 * data in bio_completed. 
We have different structure 3142 * sizes above and below the ada(4) driver. So, if we 3143 * use the sizes above, the amount transferred won't be 3144 * quite accurate for devstat. If we use different sizes 3145 * for bio_bcount and bio_length (above and below 3146 * respectively), then the residual needs to match one or 3147 * the other. Everything is calculated after the bio 3148 * leaves the driver, so changing the values around isn't 3149 * really an option. For now, just set the count to the 3150 * passed in length. This means that the calculations 3151 * above (e.g. bio_completed) will be correct, but the 3152 * amount of data reported to devstat will be slightly 3153 * under or overstated. 3154 */ 3155 bp->bio_bcount = bp->bio_length; 3156 3157 *queue_ccb = 1; 3158 3159 break; 3160 } 3161 case DISK_ZONE_GET_PARAMS: { 3162 struct disk_zone_disk_params *params; 3163 3164 params = &bp->bio_zone.zone_params.disk_params; 3165 bzero(params, sizeof(*params)); 3166 3167 switch (softc->zone_mode) { 3168 case DA_ZONE_DRIVE_MANAGED: 3169 params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; 3170 break; 3171 case DA_ZONE_HOST_AWARE: 3172 params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; 3173 break; 3174 case DA_ZONE_HOST_MANAGED: 3175 params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; 3176 break; 3177 default: 3178 case DA_ZONE_NONE: 3179 params->zone_mode = DISK_ZONE_MODE_NONE; 3180 break; 3181 } 3182 3183 if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ) 3184 params->flags |= DISK_ZONE_DISK_URSWRZ; 3185 3186 if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) { 3187 params->optimal_seq_zones = softc->optimal_seq_zones; 3188 params->flags |= DISK_ZONE_OPT_SEQ_SET; 3189 } 3190 3191 if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) { 3192 params->optimal_nonseq_zones = 3193 softc->optimal_nonseq_zones; 3194 params->flags |= DISK_ZONE_OPT_NONSEQ_SET; 3195 } 3196 3197 if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) { 3198 params->max_seq_zones = softc->max_seq_zones; 3199 
params->flags |= DISK_ZONE_MAX_SEQ_SET; 3200 } 3201 if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP) 3202 params->flags |= DISK_ZONE_RZ_SUP; 3203 3204 if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP) 3205 params->flags |= DISK_ZONE_OPEN_SUP; 3206 3207 if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP) 3208 params->flags |= DISK_ZONE_CLOSE_SUP; 3209 3210 if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP) 3211 params->flags |= DISK_ZONE_FINISH_SUP; 3212 3213 if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP) 3214 params->flags |= DISK_ZONE_RWP_SUP; 3215 break; 3216 } 3217 default: 3218 break; 3219 } 3220 bailout: 3221 return (error); 3222 } 3223 3224 static void 3225 dastart(struct cam_periph *periph, union ccb *start_ccb) 3226 { 3227 struct da_softc *softc; 3228 3229 cam_periph_assert(periph, MA_OWNED); 3230 softc = (struct da_softc *)periph->softc; 3231 3232 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); 3233 3234 skipstate: 3235 switch (softc->state) { 3236 case DA_STATE_NORMAL: 3237 { 3238 struct bio *bp; 3239 uint8_t tag_code; 3240 3241 more: 3242 bp = cam_iosched_next_bio(softc->cam_iosched); 3243 if (bp == NULL) { 3244 if (cam_iosched_has_work_flags(softc->cam_iosched, 3245 DA_WORK_TUR)) { 3246 softc->flags |= DA_FLAG_TUR_PENDING; 3247 cam_iosched_clr_work_flags(softc->cam_iosched, 3248 DA_WORK_TUR); 3249 scsi_test_unit_ready(&start_ccb->csio, 3250 /*retries*/ da_retry_count, 3251 dadone_tur, 3252 MSG_SIMPLE_Q_TAG, 3253 SSD_FULL_SIZE, 3254 da_default_timeout * 1000); 3255 start_ccb->ccb_h.ccb_bp = NULL; 3256 start_ccb->ccb_h.ccb_state = DA_CCB_TUR; 3257 xpt_action(start_ccb); 3258 } else 3259 xpt_release_ccb(start_ccb); 3260 break; 3261 } 3262 3263 if (bp->bio_cmd == BIO_DELETE) { 3264 if (softc->delete_func != NULL) { 3265 softc->delete_func(periph, start_ccb, bp); 3266 goto out; 3267 } else { 3268 /* 3269 * Not sure this is possible, but failsafe by 3270 * lying and saying "sure, done." 
3271 */ 3272 biofinish(bp, NULL, 0); 3273 goto more; 3274 } 3275 } 3276 3277 if (cam_iosched_has_work_flags(softc->cam_iosched, 3278 DA_WORK_TUR)) { 3279 cam_iosched_clr_work_flags(softc->cam_iosched, 3280 DA_WORK_TUR); 3281 da_periph_release_locked(periph, DA_REF_TUR); 3282 } 3283 3284 if ((bp->bio_flags & BIO_ORDERED) != 0 || 3285 (softc->flags & DA_FLAG_NEED_OTAG) != 0) { 3286 softc->flags &= ~DA_FLAG_NEED_OTAG; 3287 softc->flags |= DA_FLAG_WAS_OTAG; 3288 tag_code = MSG_ORDERED_Q_TAG; 3289 } else { 3290 tag_code = MSG_SIMPLE_Q_TAG; 3291 } 3292 3293 switch (bp->bio_cmd) { 3294 case BIO_WRITE: 3295 case BIO_READ: 3296 { 3297 void *data_ptr; 3298 int rw_op; 3299 3300 biotrack(bp, __func__); 3301 3302 if (bp->bio_cmd == BIO_WRITE) { 3303 softc->flags |= DA_FLAG_DIRTY; 3304 rw_op = SCSI_RW_WRITE; 3305 } else { 3306 rw_op = SCSI_RW_READ; 3307 } 3308 3309 data_ptr = bp->bio_data; 3310 if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { 3311 rw_op |= SCSI_RW_BIO; 3312 data_ptr = bp; 3313 } 3314 3315 scsi_read_write(&start_ccb->csio, 3316 /*retries*/da_retry_count, 3317 /*cbfcnp*/dadone, 3318 /*tag_action*/tag_code, 3319 rw_op, 3320 /*byte2*/0, 3321 softc->minimum_cmd_size, 3322 /*lba*/bp->bio_pblkno, 3323 /*block_count*/bp->bio_bcount / 3324 softc->params.secsize, 3325 data_ptr, 3326 /*dxfer_len*/ bp->bio_bcount, 3327 /*sense_len*/SSD_FULL_SIZE, 3328 da_default_timeout * 1000); 3329 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 3330 start_ccb->csio.bio = bp; 3331 #endif 3332 break; 3333 } 3334 case BIO_FLUSH: 3335 /* 3336 * If we don't support sync cache, or the disk 3337 * isn't dirty, FLUSH is a no-op. Use the 3338 * allocated CCB for the next bio if one is 3339 * available. 
3340 */ 3341 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 || 3342 (softc->flags & DA_FLAG_DIRTY) == 0) { 3343 biodone(bp); 3344 goto skipstate; 3345 } 3346 3347 /* 3348 * BIO_FLUSH doesn't currently communicate 3349 * range data, so we synchronize the cache 3350 * over the whole disk. 3351 */ 3352 scsi_synchronize_cache(&start_ccb->csio, 3353 /*retries*/1, 3354 /*cbfcnp*/dadone, 3355 /*tag_action*/tag_code, 3356 /*begin_lba*/0, 3357 /*lb_count*/0, 3358 SSD_FULL_SIZE, 3359 da_default_timeout*1000); 3360 /* 3361 * Clear the dirty flag before sending the command. 3362 * Either this sync cache will be successful, or it 3363 * will fail after a retry. If it fails, it is 3364 * unlikely to be successful if retried later, so 3365 * we'll save ourselves time by just marking the 3366 * device clean. 3367 */ 3368 softc->flags &= ~DA_FLAG_DIRTY; 3369 break; 3370 case BIO_ZONE: { 3371 int error, queue_ccb; 3372 3373 queue_ccb = 0; 3374 3375 error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb); 3376 if ((error != 0) 3377 || (queue_ccb == 0)) { 3378 biofinish(bp, NULL, error); 3379 xpt_release_ccb(start_ccb); 3380 return; 3381 } 3382 break; 3383 } 3384 } 3385 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; 3386 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 3387 start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); 3388 3389 out: 3390 LIST_INSERT_HEAD(&softc->pending_ccbs, 3391 &start_ccb->ccb_h, periph_links.le); 3392 3393 /* We expect a unit attention from this device */ 3394 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { 3395 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; 3396 softc->flags &= ~DA_FLAG_RETRY_UA; 3397 } 3398 3399 start_ccb->ccb_h.ccb_bp = bp; 3400 softc->refcount++; 3401 cam_periph_unlock(periph); 3402 xpt_action(start_ccb); 3403 cam_periph_lock(periph); 3404 3405 /* May have more work to do, so ensure we stay scheduled */ 3406 daschedule(periph); 3407 break; 3408 } 3409 case DA_STATE_PROBE_WP: 3410 { 3411 void *mode_buf; 3412 int mode_buf_len; 3413 3414 if 
(da_disable_wp_detection) { 3415 if ((softc->flags & DA_FLAG_CAN_RC16) != 0) 3416 softc->state = DA_STATE_PROBE_RC16; 3417 else 3418 softc->state = DA_STATE_PROBE_RC; 3419 goto skipstate; 3420 } 3421 mode_buf_len = 192; 3422 mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT); 3423 if (mode_buf == NULL) { 3424 xpt_print(periph->path, "Unable to send mode sense - " 3425 "malloc failure\n"); 3426 if ((softc->flags & DA_FLAG_CAN_RC16) != 0) 3427 softc->state = DA_STATE_PROBE_RC16; 3428 else 3429 softc->state = DA_STATE_PROBE_RC; 3430 goto skipstate; 3431 } 3432 scsi_mode_sense_len(&start_ccb->csio, 3433 /*retries*/ da_retry_count, 3434 /*cbfcnp*/ dadone_probewp, 3435 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3436 /*dbd*/ FALSE, 3437 /*pc*/ SMS_PAGE_CTRL_CURRENT, 3438 /*page*/ SMS_ALL_PAGES_PAGE, 3439 /*param_buf*/ mode_buf, 3440 /*param_len*/ mode_buf_len, 3441 /*minimum_cmd_size*/ softc->minimum_cmd_size, 3442 /*sense_len*/ SSD_FULL_SIZE, 3443 /*timeout*/ da_default_timeout * 1000); 3444 start_ccb->ccb_h.ccb_bp = NULL; 3445 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP; 3446 xpt_action(start_ccb); 3447 break; 3448 } 3449 case DA_STATE_PROBE_RC: 3450 { 3451 struct scsi_read_capacity_data *rcap; 3452 3453 rcap = (struct scsi_read_capacity_data *) 3454 malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); 3455 if (rcap == NULL) { 3456 printf("dastart: Couldn't malloc read_capacity data\n"); 3457 /* da_free_periph??? 
*/ 3458 break; 3459 } 3460 scsi_read_capacity(&start_ccb->csio, 3461 /*retries*/da_retry_count, 3462 dadone_proberc, 3463 MSG_SIMPLE_Q_TAG, 3464 rcap, 3465 SSD_FULL_SIZE, 3466 /*timeout*/5000); 3467 start_ccb->ccb_h.ccb_bp = NULL; 3468 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; 3469 xpt_action(start_ccb); 3470 break; 3471 } 3472 case DA_STATE_PROBE_RC16: 3473 { 3474 struct scsi_read_capacity_data_long *rcaplong; 3475 3476 rcaplong = (struct scsi_read_capacity_data_long *) 3477 malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); 3478 if (rcaplong == NULL) { 3479 printf("dastart: Couldn't malloc read_capacity data\n"); 3480 /* da_free_periph??? */ 3481 break; 3482 } 3483 scsi_read_capacity_16(&start_ccb->csio, 3484 /*retries*/ da_retry_count, 3485 /*cbfcnp*/ dadone_proberc, 3486 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3487 /*lba*/ 0, 3488 /*reladr*/ 0, 3489 /*pmi*/ 0, 3490 /*rcap_buf*/ (uint8_t *)rcaplong, 3491 /*rcap_buf_len*/ sizeof(*rcaplong), 3492 /*sense_len*/ SSD_FULL_SIZE, 3493 /*timeout*/ da_default_timeout * 1000); 3494 start_ccb->ccb_h.ccb_bp = NULL; 3495 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; 3496 xpt_action(start_ccb); 3497 break; 3498 } 3499 case DA_STATE_PROBE_LBP: 3500 { 3501 struct scsi_vpd_logical_block_prov *lbp; 3502 3503 if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { 3504 /* 3505 * If we get here we don't support any SBC-3 delete 3506 * methods with UNMAP as the Logical Block Provisioning 3507 * VPD page support is required for devices which 3508 * support it according to T10/1799-D Revision 31 3509 * however older revisions of the spec don't mandate 3510 * this so we currently don't remove these methods 3511 * from the available set. 3512 */ 3513 softc->state = DA_STATE_PROBE_BLK_LIMITS; 3514 goto skipstate; 3515 } 3516 3517 lbp = (struct scsi_vpd_logical_block_prov *) 3518 malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); 3519 3520 if (lbp == NULL) { 3521 printf("dastart: Couldn't malloc lbp data\n"); 3522 /* da_free_periph??? 
*/ 3523 break; 3524 } 3525 3526 scsi_inquiry(&start_ccb->csio, 3527 /*retries*/da_retry_count, 3528 /*cbfcnp*/dadone_probelbp, 3529 /*tag_action*/MSG_SIMPLE_Q_TAG, 3530 /*inq_buf*/(u_int8_t *)lbp, 3531 /*inq_len*/sizeof(*lbp), 3532 /*evpd*/TRUE, 3533 /*page_code*/SVPD_LBP, 3534 /*sense_len*/SSD_MIN_SIZE, 3535 /*timeout*/da_default_timeout * 1000); 3536 start_ccb->ccb_h.ccb_bp = NULL; 3537 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; 3538 xpt_action(start_ccb); 3539 break; 3540 } 3541 case DA_STATE_PROBE_BLK_LIMITS: 3542 { 3543 struct scsi_vpd_block_limits *block_limits; 3544 3545 if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { 3546 /* Not supported skip to next probe */ 3547 softc->state = DA_STATE_PROBE_BDC; 3548 goto skipstate; 3549 } 3550 3551 block_limits = (struct scsi_vpd_block_limits *) 3552 malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); 3553 3554 if (block_limits == NULL) { 3555 printf("dastart: Couldn't malloc block_limits data\n"); 3556 /* da_free_periph??? */ 3557 break; 3558 } 3559 3560 scsi_inquiry(&start_ccb->csio, 3561 /*retries*/da_retry_count, 3562 /*cbfcnp*/dadone_probeblklimits, 3563 /*tag_action*/MSG_SIMPLE_Q_TAG, 3564 /*inq_buf*/(u_int8_t *)block_limits, 3565 /*inq_len*/sizeof(*block_limits), 3566 /*evpd*/TRUE, 3567 /*page_code*/SVPD_BLOCK_LIMITS, 3568 /*sense_len*/SSD_MIN_SIZE, 3569 /*timeout*/da_default_timeout * 1000); 3570 start_ccb->ccb_h.ccb_bp = NULL; 3571 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; 3572 xpt_action(start_ccb); 3573 break; 3574 } 3575 case DA_STATE_PROBE_BDC: 3576 { 3577 struct scsi_vpd_block_characteristics *bdc; 3578 3579 if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { 3580 softc->state = DA_STATE_PROBE_ATA; 3581 goto skipstate; 3582 } 3583 3584 bdc = (struct scsi_vpd_block_characteristics *) 3585 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); 3586 3587 if (bdc == NULL) { 3588 printf("dastart: Couldn't malloc bdc data\n"); 3589 /* da_free_periph??? 
*/ 3590 break; 3591 } 3592 3593 scsi_inquiry(&start_ccb->csio, 3594 /*retries*/da_retry_count, 3595 /*cbfcnp*/dadone_probebdc, 3596 /*tag_action*/MSG_SIMPLE_Q_TAG, 3597 /*inq_buf*/(u_int8_t *)bdc, 3598 /*inq_len*/sizeof(*bdc), 3599 /*evpd*/TRUE, 3600 /*page_code*/SVPD_BDC, 3601 /*sense_len*/SSD_MIN_SIZE, 3602 /*timeout*/da_default_timeout * 1000); 3603 start_ccb->ccb_h.ccb_bp = NULL; 3604 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; 3605 xpt_action(start_ccb); 3606 break; 3607 } 3608 case DA_STATE_PROBE_ATA: 3609 { 3610 struct ata_params *ata_params; 3611 3612 if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { 3613 if ((softc->zone_mode == DA_ZONE_HOST_AWARE) 3614 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { 3615 /* 3616 * Note that if the ATA VPD page isn't 3617 * supported, we aren't talking to an ATA 3618 * device anyway. Support for that VPD 3619 * page is mandatory for SCSI to ATA (SAT) 3620 * translation layers. 3621 */ 3622 softc->state = DA_STATE_PROBE_ZONE; 3623 goto skipstate; 3624 } 3625 daprobedone(periph, start_ccb); 3626 break; 3627 } 3628 3629 ata_params = &periph->path->device->ident_data; 3630 3631 scsi_ata_identify(&start_ccb->csio, 3632 /*retries*/da_retry_count, 3633 /*cbfcnp*/dadone_probeata, 3634 /*tag_action*/MSG_SIMPLE_Q_TAG, 3635 /*data_ptr*/(u_int8_t *)ata_params, 3636 /*dxfer_len*/sizeof(*ata_params), 3637 /*sense_len*/SSD_FULL_SIZE, 3638 /*timeout*/da_default_timeout * 1000); 3639 start_ccb->ccb_h.ccb_bp = NULL; 3640 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; 3641 xpt_action(start_ccb); 3642 break; 3643 } 3644 case DA_STATE_PROBE_ATA_LOGDIR: 3645 { 3646 struct ata_gp_log_dir *log_dir; 3647 int retval; 3648 3649 retval = 0; 3650 3651 if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) { 3652 /* 3653 * If we don't have log support, not much point in 3654 * trying to probe zone support. 
3655 */ 3656 daprobedone(periph, start_ccb); 3657 break; 3658 } 3659 3660 /* 3661 * If we have an ATA device (the SCSI ATA Information VPD 3662 * page should be present and the ATA identify should have 3663 * succeeded) and it supports logs, ask for the log directory. 3664 */ 3665 3666 log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO); 3667 if (log_dir == NULL) { 3668 xpt_print(periph->path, "Couldn't malloc log_dir " 3669 "data\n"); 3670 daprobedone(periph, start_ccb); 3671 break; 3672 } 3673 3674 retval = scsi_ata_read_log(&start_ccb->csio, 3675 /*retries*/ da_retry_count, 3676 /*cbfcnp*/ dadone_probeatalogdir, 3677 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3678 /*log_address*/ ATA_LOG_DIRECTORY, 3679 /*page_number*/ 0, 3680 /*block_count*/ 1, 3681 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3682 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3683 /*data_ptr*/ (uint8_t *)log_dir, 3684 /*dxfer_len*/ sizeof(*log_dir), 3685 /*sense_len*/ SSD_FULL_SIZE, 3686 /*timeout*/ da_default_timeout * 1000); 3687 3688 if (retval != 0) { 3689 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3690 free(log_dir, M_SCSIDA); 3691 daprobedone(periph, start_ccb); 3692 break; 3693 } 3694 start_ccb->ccb_h.ccb_bp = NULL; 3695 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR; 3696 xpt_action(start_ccb); 3697 break; 3698 } 3699 case DA_STATE_PROBE_ATA_IDDIR: 3700 { 3701 struct ata_identify_log_pages *id_dir; 3702 int retval; 3703 3704 retval = 0; 3705 3706 /* 3707 * Check here to see whether the Identify Device log is 3708 * supported in the directory of logs. If so, continue 3709 * with requesting the log of identify device pages. 
3710 */ 3711 if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) { 3712 daprobedone(periph, start_ccb); 3713 break; 3714 } 3715 3716 id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO); 3717 if (id_dir == NULL) { 3718 xpt_print(periph->path, "Couldn't malloc id_dir " 3719 "data\n"); 3720 daprobedone(periph, start_ccb); 3721 break; 3722 } 3723 3724 retval = scsi_ata_read_log(&start_ccb->csio, 3725 /*retries*/ da_retry_count, 3726 /*cbfcnp*/ dadone_probeataiddir, 3727 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3728 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3729 /*page_number*/ ATA_IDL_PAGE_LIST, 3730 /*block_count*/ 1, 3731 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3732 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3733 /*data_ptr*/ (uint8_t *)id_dir, 3734 /*dxfer_len*/ sizeof(*id_dir), 3735 /*sense_len*/ SSD_FULL_SIZE, 3736 /*timeout*/ da_default_timeout * 1000); 3737 3738 if (retval != 0) { 3739 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3740 free(id_dir, M_SCSIDA); 3741 daprobedone(periph, start_ccb); 3742 break; 3743 } 3744 start_ccb->ccb_h.ccb_bp = NULL; 3745 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR; 3746 xpt_action(start_ccb); 3747 break; 3748 } 3749 case DA_STATE_PROBE_ATA_SUP: 3750 { 3751 struct ata_identify_log_sup_cap *sup_cap; 3752 int retval; 3753 3754 retval = 0; 3755 3756 /* 3757 * Check here to see whether the Supported Capabilities log 3758 * is in the list of Identify Device logs. 
3759 */ 3760 if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) { 3761 daprobedone(periph, start_ccb); 3762 break; 3763 } 3764 3765 sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO); 3766 if (sup_cap == NULL) { 3767 xpt_print(periph->path, "Couldn't malloc sup_cap " 3768 "data\n"); 3769 daprobedone(periph, start_ccb); 3770 break; 3771 } 3772 3773 retval = scsi_ata_read_log(&start_ccb->csio, 3774 /*retries*/ da_retry_count, 3775 /*cbfcnp*/ dadone_probeatasup, 3776 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3777 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3778 /*page_number*/ ATA_IDL_SUP_CAP, 3779 /*block_count*/ 1, 3780 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3781 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3782 /*data_ptr*/ (uint8_t *)sup_cap, 3783 /*dxfer_len*/ sizeof(*sup_cap), 3784 /*sense_len*/ SSD_FULL_SIZE, 3785 /*timeout*/ da_default_timeout * 1000); 3786 3787 if (retval != 0) { 3788 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3789 free(sup_cap, M_SCSIDA); 3790 daprobedone(periph, start_ccb); 3791 break; 3792 3793 } 3794 3795 start_ccb->ccb_h.ccb_bp = NULL; 3796 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP; 3797 xpt_action(start_ccb); 3798 break; 3799 } 3800 case DA_STATE_PROBE_ATA_ZONE: 3801 { 3802 struct ata_zoned_info_log *ata_zone; 3803 int retval; 3804 3805 retval = 0; 3806 3807 /* 3808 * Check here to see whether the zoned device information 3809 * page is supported. If so, continue on to request it. 3810 * If not, skip to DA_STATE_PROBE_LOG or done. 
3811 */ 3812 if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) { 3813 daprobedone(periph, start_ccb); 3814 break; 3815 } 3816 ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA, 3817 M_NOWAIT|M_ZERO); 3818 if (ata_zone == NULL) { 3819 xpt_print(periph->path, "Couldn't malloc ata_zone " 3820 "data\n"); 3821 daprobedone(periph, start_ccb); 3822 break; 3823 } 3824 3825 retval = scsi_ata_read_log(&start_ccb->csio, 3826 /*retries*/ da_retry_count, 3827 /*cbfcnp*/ dadone_probeatazone, 3828 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3829 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3830 /*page_number*/ ATA_IDL_ZDI, 3831 /*block_count*/ 1, 3832 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3833 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3834 /*data_ptr*/ (uint8_t *)ata_zone, 3835 /*dxfer_len*/ sizeof(*ata_zone), 3836 /*sense_len*/ SSD_FULL_SIZE, 3837 /*timeout*/ da_default_timeout * 1000); 3838 3839 if (retval != 0) { 3840 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3841 free(ata_zone, M_SCSIDA); 3842 daprobedone(periph, start_ccb); 3843 break; 3844 } 3845 start_ccb->ccb_h.ccb_bp = NULL; 3846 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE; 3847 xpt_action(start_ccb); 3848 3849 break; 3850 } 3851 case DA_STATE_PROBE_ZONE: 3852 { 3853 struct scsi_vpd_zoned_bdc *bdc; 3854 3855 /* 3856 * Note that this page will be supported for SCSI protocol 3857 * devices that support ZBC (SMR devices), as well as ATA 3858 * protocol devices that are behind a SAT (SCSI to ATA 3859 * Translation) layer that supports converting ZBC commands 3860 * to their ZAC equivalents. 
3861 */ 3862 if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) { 3863 daprobedone(periph, start_ccb); 3864 break; 3865 } 3866 bdc = (struct scsi_vpd_zoned_bdc *) 3867 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); 3868 3869 if (bdc == NULL) { 3870 xpt_release_ccb(start_ccb); 3871 xpt_print(periph->path, "Couldn't malloc zone VPD " 3872 "data\n"); 3873 break; 3874 } 3875 scsi_inquiry(&start_ccb->csio, 3876 /*retries*/da_retry_count, 3877 /*cbfcnp*/dadone_probezone, 3878 /*tag_action*/MSG_SIMPLE_Q_TAG, 3879 /*inq_buf*/(u_int8_t *)bdc, 3880 /*inq_len*/sizeof(*bdc), 3881 /*evpd*/TRUE, 3882 /*page_code*/SVPD_ZONED_BDC, 3883 /*sense_len*/SSD_FULL_SIZE, 3884 /*timeout*/da_default_timeout * 1000); 3885 start_ccb->ccb_h.ccb_bp = NULL; 3886 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE; 3887 xpt_action(start_ccb); 3888 break; 3889 } 3890 } 3891 } 3892 3893 /* 3894 * In each of the methods below, while its the caller's 3895 * responsibility to ensure the request will fit into a 3896 * single device request, we might have changed the delete 3897 * method due to the device incorrectly advertising either 3898 * its supported methods or limits. 3899 * 3900 * To prevent this causing further issues we validate the 3901 * against the methods limits, and warn which would 3902 * otherwise be unnecessary. 3903 */ 3904 static void 3905 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 3906 { 3907 struct da_softc *softc = (struct da_softc *)periph->softc;; 3908 struct bio *bp1; 3909 uint8_t *buf = softc->unmap_buf; 3910 struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE]; 3911 uint64_t lba, lastlba = (uint64_t)-1; 3912 uint64_t totalcount = 0; 3913 uint64_t count; 3914 uint32_t c, lastcount = 0, ranges = 0; 3915 3916 /* 3917 * Currently this doesn't take the UNMAP 3918 * Granularity and Granularity Alignment 3919 * fields into account. 
3920 * 3921 * This could result in both unoptimal unmap 3922 * requests as as well as UNMAP calls unmapping 3923 * fewer LBA's than requested. 3924 */ 3925 3926 bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); 3927 bp1 = bp; 3928 do { 3929 /* 3930 * Note: ada and da are different in how they store the 3931 * pending bp's in a trim. ada stores all of them in the 3932 * trim_req.bps. da stores all but the first one in the 3933 * delete_run_queue. ada then completes all the bps in 3934 * its adadone() loop. da completes all the bps in the 3935 * delete_run_queue in dadone, and relies on the biodone 3936 * after to complete. This should be reconciled since there's 3937 * no real reason to do it differently. XXX 3938 */ 3939 if (bp1 != bp) 3940 bioq_insert_tail(&softc->delete_run_queue, bp1); 3941 lba = bp1->bio_pblkno; 3942 count = bp1->bio_bcount / softc->params.secsize; 3943 3944 /* Try to extend the previous range. */ 3945 if (lba == lastlba) { 3946 c = omin(count, UNMAP_RANGE_MAX - lastcount); 3947 lastlba += c; 3948 lastcount += c; 3949 scsi_ulto4b(lastcount, d[ranges - 1].length); 3950 count -= c; 3951 lba += c; 3952 totalcount += c; 3953 } else if ((softc->quirks & DA_Q_STRICT_UNMAP) && 3954 softc->unmap_gran != 0) { 3955 /* Align length of the previous range. */ 3956 if ((c = lastcount % softc->unmap_gran) != 0) { 3957 if (lastcount <= c) { 3958 totalcount -= lastcount; 3959 lastlba = (uint64_t)-1; 3960 lastcount = 0; 3961 ranges--; 3962 } else { 3963 totalcount -= c; 3964 lastlba -= c; 3965 lastcount -= c; 3966 scsi_ulto4b(lastcount, 3967 d[ranges - 1].length); 3968 } 3969 } 3970 /* Align beginning of the new range. 
*/ 3971 c = (lba - softc->unmap_gran_align) % softc->unmap_gran; 3972 if (c != 0) { 3973 c = softc->unmap_gran - c; 3974 if (count <= c) { 3975 count = 0; 3976 } else { 3977 lba += c; 3978 count -= c; 3979 } 3980 } 3981 } 3982 3983 while (count > 0) { 3984 c = omin(count, UNMAP_RANGE_MAX); 3985 if (totalcount + c > softc->unmap_max_lba || 3986 ranges >= softc->unmap_max_ranges) { 3987 xpt_print(periph->path, 3988 "%s issuing short delete %ld > %ld" 3989 "|| %d >= %d", 3990 da_delete_method_desc[softc->delete_method], 3991 totalcount + c, softc->unmap_max_lba, 3992 ranges, softc->unmap_max_ranges); 3993 break; 3994 } 3995 scsi_u64to8b(lba, d[ranges].lba); 3996 scsi_ulto4b(c, d[ranges].length); 3997 lba += c; 3998 totalcount += c; 3999 ranges++; 4000 count -= c; 4001 lastlba = lba; 4002 lastcount = c; 4003 } 4004 bp1 = cam_iosched_next_trim(softc->cam_iosched); 4005 if (bp1 == NULL) 4006 break; 4007 if (ranges >= softc->unmap_max_ranges || 4008 totalcount + bp1->bio_bcount / 4009 softc->params.secsize > softc->unmap_max_lba) { 4010 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 4011 break; 4012 } 4013 } while (1); 4014 4015 /* Align length of the last range. 
*/ 4016 if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 && 4017 (c = lastcount % softc->unmap_gran) != 0) { 4018 if (lastcount <= c) 4019 ranges--; 4020 else 4021 scsi_ulto4b(lastcount - c, d[ranges - 1].length); 4022 } 4023 4024 scsi_ulto2b(ranges * 16 + 6, &buf[0]); 4025 scsi_ulto2b(ranges * 16, &buf[2]); 4026 4027 scsi_unmap(&ccb->csio, 4028 /*retries*/da_retry_count, 4029 /*cbfcnp*/dadone, 4030 /*tag_action*/MSG_SIMPLE_Q_TAG, 4031 /*byte2*/0, 4032 /*data_ptr*/ buf, 4033 /*dxfer_len*/ ranges * 16 + 8, 4034 /*sense_len*/SSD_FULL_SIZE, 4035 da_default_timeout * 1000); 4036 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 4037 ccb->ccb_h.flags |= CAM_UNLOCKED; 4038 softc->trim_count++; 4039 softc->trim_ranges += ranges; 4040 softc->trim_lbas += totalcount; 4041 cam_iosched_submit_trim(softc->cam_iosched); 4042 } 4043 4044 static void 4045 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 4046 { 4047 struct da_softc *softc = (struct da_softc *)periph->softc; 4048 struct bio *bp1; 4049 uint8_t *buf = softc->unmap_buf; 4050 uint64_t lastlba = (uint64_t)-1; 4051 uint64_t count; 4052 uint64_t lba; 4053 uint32_t lastcount = 0, c, requestcount; 4054 int ranges = 0, off, block_count; 4055 4056 bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); 4057 bp1 = bp; 4058 do { 4059 if (bp1 != bp)//XXX imp XXX 4060 bioq_insert_tail(&softc->delete_run_queue, bp1); 4061 lba = bp1->bio_pblkno; 4062 count = bp1->bio_bcount / softc->params.secsize; 4063 requestcount = count; 4064 4065 /* Try to extend the previous range. 
*/ 4066 if (lba == lastlba) { 4067 c = omin(count, ATA_DSM_RANGE_MAX - lastcount); 4068 lastcount += c; 4069 off = (ranges - 1) * 8; 4070 buf[off + 6] = lastcount & 0xff; 4071 buf[off + 7] = (lastcount >> 8) & 0xff; 4072 count -= c; 4073 lba += c; 4074 } 4075 4076 while (count > 0) { 4077 c = omin(count, ATA_DSM_RANGE_MAX); 4078 off = ranges * 8; 4079 4080 buf[off + 0] = lba & 0xff; 4081 buf[off + 1] = (lba >> 8) & 0xff; 4082 buf[off + 2] = (lba >> 16) & 0xff; 4083 buf[off + 3] = (lba >> 24) & 0xff; 4084 buf[off + 4] = (lba >> 32) & 0xff; 4085 buf[off + 5] = (lba >> 40) & 0xff; 4086 buf[off + 6] = c & 0xff; 4087 buf[off + 7] = (c >> 8) & 0xff; 4088 lba += c; 4089 ranges++; 4090 count -= c; 4091 lastcount = c; 4092 if (count != 0 && ranges == softc->trim_max_ranges) { 4093 xpt_print(periph->path, 4094 "%s issuing short delete %ld > %ld\n", 4095 da_delete_method_desc[softc->delete_method], 4096 requestcount, 4097 (softc->trim_max_ranges - ranges) * 4098 ATA_DSM_RANGE_MAX); 4099 break; 4100 } 4101 } 4102 lastlba = lba; 4103 bp1 = cam_iosched_next_trim(softc->cam_iosched); 4104 if (bp1 == NULL) 4105 break; 4106 if (bp1->bio_bcount / softc->params.secsize > 4107 (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { 4108 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 4109 break; 4110 } 4111 } while (1); 4112 4113 block_count = howmany(ranges, ATA_DSM_BLK_RANGES); 4114 scsi_ata_trim(&ccb->csio, 4115 /*retries*/da_retry_count, 4116 /*cbfcnp*/dadone, 4117 /*tag_action*/MSG_SIMPLE_Q_TAG, 4118 block_count, 4119 /*data_ptr*/buf, 4120 /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, 4121 /*sense_len*/SSD_FULL_SIZE, 4122 da_default_timeout * 1000); 4123 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 4124 ccb->ccb_h.flags |= CAM_UNLOCKED; 4125 cam_iosched_submit_trim(softc->cam_iosched); 4126 } 4127 4128 /* 4129 * We calculate ws_max_blks here based off d_delmaxsize instead 4130 * of using softc->ws_max_blks as it is absolute max for the 4131 * device not the protocol max which may 
well be lower. 4132 */ 4133 static void 4134 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 4135 { 4136 struct da_softc *softc; 4137 struct bio *bp1; 4138 uint64_t ws_max_blks; 4139 uint64_t lba; 4140 uint64_t count; /* forward compat with WS32 */ 4141 4142 softc = (struct da_softc *)periph->softc; 4143 ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; 4144 lba = bp->bio_pblkno; 4145 count = 0; 4146 bp1 = bp; 4147 do { 4148 if (bp1 != bp)//XXX imp XXX 4149 bioq_insert_tail(&softc->delete_run_queue, bp1); 4150 count += bp1->bio_bcount / softc->params.secsize; 4151 if (count > ws_max_blks) { 4152 xpt_print(periph->path, 4153 "%s issuing short delete %ld > %ld\n", 4154 da_delete_method_desc[softc->delete_method], 4155 count, ws_max_blks); 4156 count = omin(count, ws_max_blks); 4157 break; 4158 } 4159 bp1 = cam_iosched_next_trim(softc->cam_iosched); 4160 if (bp1 == NULL) 4161 break; 4162 if (lba + count != bp1->bio_pblkno || 4163 count + bp1->bio_bcount / 4164 softc->params.secsize > ws_max_blks) { 4165 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 4166 break; 4167 } 4168 } while (1); 4169 4170 scsi_write_same(&ccb->csio, 4171 /*retries*/da_retry_count, 4172 /*cbfcnp*/dadone, 4173 /*tag_action*/MSG_SIMPLE_Q_TAG, 4174 /*byte2*/softc->delete_method == 4175 DA_DELETE_ZERO ? 0 : SWS_UNMAP, 4176 softc->delete_method == DA_DELETE_WS16 ? 
16 : 10, 4177 /*lba*/lba, 4178 /*block_count*/count, 4179 /*data_ptr*/ __DECONST(void *, zero_region), 4180 /*dxfer_len*/ softc->params.secsize, 4181 /*sense_len*/SSD_FULL_SIZE, 4182 da_default_timeout * 1000); 4183 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 4184 ccb->ccb_h.flags |= CAM_UNLOCKED; 4185 cam_iosched_submit_trim(softc->cam_iosched); 4186 } 4187 4188 static int 4189 cmd6workaround(union ccb *ccb) 4190 { 4191 struct scsi_rw_6 cmd6; 4192 struct scsi_rw_10 *cmd10; 4193 struct da_softc *softc; 4194 u_int8_t *cdb; 4195 struct bio *bp; 4196 int frozen; 4197 4198 cdb = ccb->csio.cdb_io.cdb_bytes; 4199 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; 4200 4201 if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { 4202 da_delete_methods old_method = softc->delete_method; 4203 4204 /* 4205 * Typically there are two reasons for failure here 4206 * 1. Delete method was detected as supported but isn't 4207 * 2. Delete failed due to invalid params e.g. too big 4208 * 4209 * While we will attempt to choose an alternative delete method 4210 * this may result in short deletes if the existing delete 4211 * requests from geom are big for the new method chosen. 
4212 * 4213 * This method assumes that the error which triggered this 4214 * will not retry the io otherwise a panic will occur 4215 */ 4216 dadeleteflag(softc, old_method, 0); 4217 dadeletemethodchoose(softc, DA_DELETE_DISABLE); 4218 if (softc->delete_method == DA_DELETE_DISABLE) 4219 xpt_print(ccb->ccb_h.path, 4220 "%s failed, disabling BIO_DELETE\n", 4221 da_delete_method_desc[old_method]); 4222 else 4223 xpt_print(ccb->ccb_h.path, 4224 "%s failed, switching to %s BIO_DELETE\n", 4225 da_delete_method_desc[old_method], 4226 da_delete_method_desc[softc->delete_method]); 4227 4228 while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL) 4229 cam_iosched_queue_work(softc->cam_iosched, bp); 4230 cam_iosched_queue_work(softc->cam_iosched, 4231 (struct bio *)ccb->ccb_h.ccb_bp); 4232 ccb->ccb_h.ccb_bp = NULL; 4233 return (0); 4234 } 4235 4236 /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ 4237 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && 4238 (*cdb == PREVENT_ALLOW) && 4239 (softc->quirks & DA_Q_NO_PREVENT) == 0) { 4240 if (bootverbose) 4241 xpt_print(ccb->ccb_h.path, 4242 "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); 4243 softc->quirks |= DA_Q_NO_PREVENT; 4244 return (0); 4245 } 4246 4247 /* Detect unsupported SYNCHRONIZE CACHE(10). 
*/ 4248 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && 4249 (*cdb == SYNCHRONIZE_CACHE) && 4250 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { 4251 if (bootverbose) 4252 xpt_print(ccb->ccb_h.path, 4253 "SYNCHRONIZE CACHE(10) not supported.\n"); 4254 softc->quirks |= DA_Q_NO_SYNC_CACHE; 4255 softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; 4256 return (0); 4257 } 4258 4259 /* Translation only possible if CDB is an array and cmd is R/W6 */ 4260 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || 4261 (*cdb != READ_6 && *cdb != WRITE_6)) 4262 return 0; 4263 4264 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " 4265 "increasing minimum_cmd_size to 10.\n"); 4266 softc->minimum_cmd_size = 10; 4267 4268 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); 4269 cmd10 = (struct scsi_rw_10 *)cdb; 4270 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10; 4271 cmd10->byte2 = 0; 4272 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); 4273 cmd10->reserved = 0; 4274 scsi_ulto2b(cmd6.length, cmd10->length); 4275 cmd10->control = cmd6.control; 4276 ccb->csio.cdb_len = sizeof(*cmd10); 4277 4278 /* Requeue request, unfreezing queue if necessary */ 4279 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 4280 ccb->ccb_h.status = CAM_REQUEUE_REQ; 4281 xpt_action(ccb); 4282 if (frozen) { 4283 cam_release_devq(ccb->ccb_h.path, 4284 /*relsim_flags*/0, 4285 /*reduction*/0, 4286 /*timeout*/0, 4287 /*getcount_only*/0); 4288 } 4289 return (ERESTART); 4290 } 4291 4292 static void 4293 dazonedone(struct cam_periph *periph, union ccb *ccb) 4294 { 4295 struct da_softc *softc; 4296 struct bio *bp; 4297 4298 softc = periph->softc; 4299 bp = (struct bio *)ccb->ccb_h.ccb_bp; 4300 4301 switch (bp->bio_zone.zone_cmd) { 4302 case DISK_ZONE_OPEN: 4303 case DISK_ZONE_CLOSE: 4304 case DISK_ZONE_FINISH: 4305 case DISK_ZONE_RWP: 4306 break; 4307 case DISK_ZONE_REPORT_ZONES: { 4308 uint32_t avail_len; 4309 struct disk_zone_report *rep; 4310 struct scsi_report_zones_hdr *hdr; 4311 struct 
scsi_report_zones_desc *desc; 4312 struct disk_zone_rep_entry *entry; 4313 uint32_t hdr_len, num_avail; 4314 uint32_t num_to_fill, i; 4315 int ata; 4316 4317 rep = &bp->bio_zone.zone_params.report; 4318 avail_len = ccb->csio.dxfer_len - ccb->csio.resid; 4319 /* 4320 * Note that bio_resid isn't normally used for zone 4321 * commands, but it is used by devstat_end_transaction_bio() 4322 * to determine how much data was transferred. Because 4323 * the size of the SCSI/ATA data structures is different 4324 * than the size of the BIO interface structures, the 4325 * amount of data actually transferred from the drive will 4326 * be different than the amount of data transferred to 4327 * the user. 4328 */ 4329 bp->bio_resid = ccb->csio.resid; 4330 hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr; 4331 if (avail_len < sizeof(*hdr)) { 4332 /* 4333 * Is there a better error than EIO here? We asked 4334 * for at least the header, and we got less than 4335 * that. 4336 */ 4337 bp->bio_error = EIO; 4338 bp->bio_flags |= BIO_ERROR; 4339 bp->bio_resid = bp->bio_bcount; 4340 break; 4341 } 4342 4343 if (softc->zone_interface == DA_ZONE_IF_ATA_PASS) 4344 ata = 1; 4345 else 4346 ata = 0; 4347 4348 hdr_len = ata ? le32dec(hdr->length) : 4349 scsi_4btoul(hdr->length); 4350 if (hdr_len > 0) 4351 rep->entries_available = hdr_len / sizeof(*desc); 4352 else 4353 rep->entries_available = 0; 4354 /* 4355 * NOTE: using the same values for the BIO version of the 4356 * same field as the SCSI/ATA values. This means we could 4357 * get some additional values that aren't defined in bio.h 4358 * if more values of the same field are defined later. 4359 */ 4360 rep->header.same = hdr->byte4 & SRZ_SAME_MASK; 4361 rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) : 4362 scsi_8btou64(hdr->maximum_lba); 4363 /* 4364 * If the drive reports no entries that match the query, 4365 * we're done. 
4366 */ 4367 if (hdr_len == 0) { 4368 rep->entries_filled = 0; 4369 break; 4370 } 4371 4372 num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), 4373 hdr_len / sizeof(*desc)); 4374 /* 4375 * If the drive didn't return any data, then we're done. 4376 */ 4377 if (num_avail == 0) { 4378 rep->entries_filled = 0; 4379 break; 4380 } 4381 4382 num_to_fill = min(num_avail, rep->entries_allocated); 4383 /* 4384 * If the user didn't allocate any entries for us to fill, 4385 * we're done. 4386 */ 4387 if (num_to_fill == 0) { 4388 rep->entries_filled = 0; 4389 break; 4390 } 4391 4392 for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0]; 4393 i < num_to_fill; i++, desc++, entry++) { 4394 /* 4395 * NOTE: we're mapping the values here directly 4396 * from the SCSI/ATA bit definitions to the bio.h 4397 * definitons. There is also a warning in 4398 * disk_zone.h, but the impact is that if 4399 * additional values are added in the SCSI/ATA 4400 * specs these will be visible to consumers of 4401 * this interface. 4402 */ 4403 entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; 4404 entry->zone_condition = 4405 (desc->zone_flags & SRZ_ZONE_COND_MASK) >> 4406 SRZ_ZONE_COND_SHIFT; 4407 entry->zone_flags |= desc->zone_flags & 4408 (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); 4409 entry->zone_length = 4410 ata ? le64dec(desc->zone_length) : 4411 scsi_8btou64(desc->zone_length); 4412 entry->zone_start_lba = 4413 ata ? le64dec(desc->zone_start_lba) : 4414 scsi_8btou64(desc->zone_start_lba); 4415 entry->write_pointer_lba = 4416 ata ? le64dec(desc->write_pointer_lba) : 4417 scsi_8btou64(desc->write_pointer_lba); 4418 } 4419 rep->entries_filled = num_to_fill; 4420 break; 4421 } 4422 case DISK_ZONE_GET_PARAMS: 4423 default: 4424 /* 4425 * In theory we should not get a GET_PARAMS bio, since it 4426 * should be handled without queueing the command to the 4427 * drive. 
4428 */ 4429 panic("%s: Invalid zone command %d", __func__, 4430 bp->bio_zone.zone_cmd); 4431 break; 4432 } 4433 4434 if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) 4435 free(ccb->csio.data_ptr, M_SCSIDA); 4436 } 4437 4438 static void 4439 dadone(struct cam_periph *periph, union ccb *done_ccb) 4440 { 4441 struct bio *bp, *bp1; 4442 struct da_softc *softc; 4443 struct ccb_scsiio *csio; 4444 u_int32_t priority; 4445 da_ccb_state state; 4446 4447 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n")); 4448 4449 softc = (struct da_softc *)periph->softc; 4450 priority = done_ccb->ccb_h.pinfo.priority; 4451 csio = &done_ccb->csio; 4452 4453 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 4454 if (csio->bio != NULL) 4455 biotrack(csio->bio, __func__); 4456 #endif 4457 state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; 4458 4459 cam_periph_lock(periph); 4460 bp = (struct bio *)done_ccb->ccb_h.ccb_bp; 4461 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 4462 int error; 4463 int sf; 4464 4465 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0) 4466 sf = SF_RETRY_UA; 4467 else 4468 sf = 0; 4469 4470 error = daerror(done_ccb, CAM_RETRY_SELTO, sf); 4471 if (error == ERESTART) { 4472 /* A retry was scheduled, so just return. */ 4473 cam_periph_unlock(periph); 4474 return; 4475 } 4476 bp = (struct bio *)done_ccb->ccb_h.ccb_bp; 4477 if (error != 0) { 4478 int queued_error; 4479 4480 /* 4481 * return all queued I/O with EIO, so that 4482 * the client can retry these I/Os in the 4483 * proper order should it attempt to recover. 4484 */ 4485 queued_error = EIO; 4486 4487 if (error == ENXIO 4488 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) { 4489 /* 4490 * Catastrophic error. Mark our pack as 4491 * invalid. 4492 * 4493 * XXX See if this is really a media 4494 * XXX change first? 
4495 */ 4496 xpt_print(periph->path, "Invalidating pack\n"); 4497 softc->flags |= DA_FLAG_PACK_INVALID; 4498 #ifdef CAM_IO_STATS 4499 softc->invalidations++; 4500 #endif 4501 queued_error = ENXIO; 4502 } 4503 cam_iosched_flush(softc->cam_iosched, NULL, 4504 queued_error); 4505 if (bp != NULL) { 4506 bp->bio_error = error; 4507 bp->bio_resid = bp->bio_bcount; 4508 bp->bio_flags |= BIO_ERROR; 4509 } 4510 } else if (bp != NULL) { 4511 if (state == DA_CCB_DELETE) 4512 bp->bio_resid = 0; 4513 else 4514 bp->bio_resid = csio->resid; 4515 bp->bio_error = 0; 4516 if (bp->bio_resid != 0) 4517 bp->bio_flags |= BIO_ERROR; 4518 } 4519 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 4520 cam_release_devq(done_ccb->ccb_h.path, 4521 /*relsim_flags*/0, 4522 /*reduction*/0, 4523 /*timeout*/0, 4524 /*getcount_only*/0); 4525 } else if (bp != NULL) { 4526 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 4527 panic("REQ_CMP with QFRZN"); 4528 if (bp->bio_cmd == BIO_ZONE) 4529 dazonedone(periph, done_ccb); 4530 else if (state == DA_CCB_DELETE) 4531 bp->bio_resid = 0; 4532 else 4533 bp->bio_resid = csio->resid; 4534 if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE)) 4535 bp->bio_flags |= BIO_ERROR; 4536 if (softc->error_inject != 0) { 4537 bp->bio_error = softc->error_inject; 4538 bp->bio_resid = bp->bio_bcount; 4539 bp->bio_flags |= BIO_ERROR; 4540 softc->error_inject = 0; 4541 } 4542 } 4543 4544 if (bp != NULL) 4545 biotrack(bp, __func__); 4546 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); 4547 if (LIST_EMPTY(&softc->pending_ccbs)) 4548 softc->flags |= DA_FLAG_WAS_OTAG; 4549 4550 /* 4551 * We need to call cam_iosched before we call biodone so that we don't 4552 * measure any activity that happens in the completion routine, which in 4553 * the case of sendfile can be quite extensive. Release the periph 4554 * refcount taken in dastart() for each CCB. 
4555 */ 4556 cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); 4557 xpt_release_ccb(done_ccb); 4558 KASSERT(softc->refcount >= 1, ("dadone softc %p refcount %d", softc, softc->refcount)); 4559 softc->refcount--; 4560 if (state == DA_CCB_DELETE) { 4561 TAILQ_HEAD(, bio) queue; 4562 4563 TAILQ_INIT(&queue); 4564 TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); 4565 softc->delete_run_queue.insert_point = NULL; 4566 /* 4567 * Normally, the xpt_release_ccb() above would make sure 4568 * that when we have more work to do, that work would 4569 * get kicked off. However, we specifically keep 4570 * delete_running set to 0 before the call above to 4571 * allow other I/O to progress when many BIO_DELETE 4572 * requests are pushed down. We set delete_running to 0 4573 * and call daschedule again so that we don't stall if 4574 * there are no other I/Os pending apart from BIO_DELETEs. 4575 */ 4576 cam_iosched_trim_done(softc->cam_iosched); 4577 daschedule(periph); 4578 cam_periph_unlock(periph); 4579 while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { 4580 TAILQ_REMOVE(&queue, bp1, bio_queue); 4581 bp1->bio_error = bp->bio_error; 4582 if (bp->bio_flags & BIO_ERROR) { 4583 bp1->bio_flags |= BIO_ERROR; 4584 bp1->bio_resid = bp1->bio_bcount; 4585 } else 4586 bp1->bio_resid = 0; 4587 biodone(bp1); 4588 } 4589 } else { 4590 daschedule(periph); 4591 cam_periph_unlock(periph); 4592 } 4593 if (bp != NULL) 4594 biodone(bp); 4595 return; 4596 } 4597 4598 static void 4599 dadone_probewp(struct cam_periph *periph, union ccb *done_ccb) 4600 { 4601 struct scsi_mode_header_6 *mode_hdr6; 4602 struct scsi_mode_header_10 *mode_hdr10; 4603 struct da_softc *softc; 4604 struct ccb_scsiio *csio; 4605 u_int32_t priority; 4606 uint8_t dev_spec; 4607 4608 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probewp\n")); 4609 4610 softc = (struct da_softc *)periph->softc; 4611 priority = done_ccb->ccb_h.pinfo.priority; 4612 csio = &done_ccb->csio; 4613 4614 cam_periph_assert(periph, 
MA_OWNED); 4615 4616 KASSERT(softc->state == DA_STATE_PROBE_WP, 4617 ("State (%d) not PROBE_WP in dadone_probewp, periph %p ccb %p", 4618 softc->state, periph, done_ccb)); 4619 KASSERT((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) == DA_CCB_PROBE_WP, 4620 ("CCB State (%lu) not PROBE_WP in dadone_probewp, periph %p ccb %p", 4621 (unsigned long)csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK, periph, 4622 done_ccb)); 4623 4624 if (softc->minimum_cmd_size > 6) { 4625 mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr; 4626 dev_spec = mode_hdr10->dev_spec; 4627 } else { 4628 mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr; 4629 dev_spec = mode_hdr6->dev_spec; 4630 } 4631 if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) { 4632 if ((dev_spec & 0x80) != 0) 4633 softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT; 4634 else 4635 softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT; 4636 } else { 4637 int error; 4638 4639 error = daerror(done_ccb, CAM_RETRY_SELTO, 4640 SF_RETRY_UA|SF_NO_PRINT); 4641 if (error == ERESTART) 4642 return; 4643 else if (error != 0) { 4644 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 4645 /* Don't wedge this device's queue */ 4646 cam_release_devq(done_ccb->ccb_h.path, 4647 /*relsim_flags*/0, 4648 /*reduction*/0, 4649 /*timeout*/0, 4650 /*getcount_only*/0); 4651 } 4652 } 4653 } 4654 4655 free(csio->data_ptr, M_SCSIDA); 4656 if ((softc->flags & DA_FLAG_CAN_RC16) != 0) 4657 softc->state = DA_STATE_PROBE_RC16; 4658 else 4659 softc->state = DA_STATE_PROBE_RC; 4660 xpt_release_ccb(done_ccb); 4661 xpt_schedule(periph, priority); 4662 return; 4663 } 4664 4665 static void 4666 dadone_proberc(struct cam_periph *periph, union ccb *done_ccb) 4667 { 4668 struct scsi_read_capacity_data *rdcap; 4669 struct scsi_read_capacity_data_long *rcaplong; 4670 struct da_softc *softc; 4671 struct ccb_scsiio *csio; 4672 da_ccb_state state; 4673 char *announce_buf; 4674 u_int32_t priority; 4675 int lbp, n; 4676 4677 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 
("dadone_proberc\n")); 4678 4679 softc = (struct da_softc *)periph->softc; 4680 priority = done_ccb->ccb_h.pinfo.priority; 4681 csio = &done_ccb->csio; 4682 state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; 4683 4684 KASSERT(softc->state == DA_STATE_PROBE_RC || softc->state == DA_STATE_PROBE_RC16, 4685 ("State (%d) not PROBE_RC* in dadone_proberc, periph %p ccb %p", 4686 softc->state, periph, done_ccb)); 4687 KASSERT(state == DA_CCB_PROBE_RC || state == DA_CCB_PROBE_RC16, 4688 ("CCB State (%lu) not PROBE_RC* in dadone_probewp, periph %p ccb %p", 4689 (unsigned long)state, periph, done_ccb)); 4690 4691 lbp = 0; 4692 rdcap = NULL; 4693 rcaplong = NULL; 4694 /* XXX TODO: can this be a malloc? */ 4695 announce_buf = softc->announce_temp; 4696 bzero(announce_buf, DA_ANNOUNCETMP_SZ); 4697 4698 if (state == DA_CCB_PROBE_RC) 4699 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr; 4700 else 4701 rcaplong = (struct scsi_read_capacity_data_long *) 4702 csio->data_ptr; 4703 4704 cam_periph_assert(periph, MA_OWNED); 4705 4706 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4707 struct disk_params *dp; 4708 uint32_t block_size; 4709 uint64_t maxsector; 4710 u_int lalba; /* Lowest aligned LBA. */ 4711 4712 if (state == DA_CCB_PROBE_RC) { 4713 block_size = scsi_4btoul(rdcap->length); 4714 maxsector = scsi_4btoul(rdcap->addr); 4715 lalba = 0; 4716 4717 /* 4718 * According to SBC-2, if the standard 10 4719 * byte READ CAPACITY command returns 2^32, 4720 * we should issue the 16 byte version of 4721 * the command, since the device in question 4722 * has more sectors than can be represented 4723 * with the short version of the command. 
4724 */ 4725 if (maxsector == 0xffffffff) { 4726 free(rdcap, M_SCSIDA); 4727 softc->state = DA_STATE_PROBE_RC16; 4728 xpt_release_ccb(done_ccb); 4729 xpt_schedule(periph, priority); 4730 return; 4731 } 4732 } else { 4733 block_size = scsi_4btoul(rcaplong->length); 4734 maxsector = scsi_8btou64(rcaplong->addr); 4735 lalba = scsi_2btoul(rcaplong->lalba_lbp); 4736 } 4737 4738 /* 4739 * Because GEOM code just will panic us if we 4740 * give them an 'illegal' value we'll avoid that 4741 * here. 4742 */ 4743 if (block_size == 0) { 4744 block_size = 512; 4745 if (maxsector == 0) 4746 maxsector = -1; 4747 } 4748 if (block_size >= MAXPHYS) { 4749 xpt_print(periph->path, 4750 "unsupportable block size %ju\n", 4751 (uintmax_t) block_size); 4752 announce_buf = NULL; 4753 cam_periph_invalidate(periph); 4754 } else { 4755 /* 4756 * We pass rcaplong into dasetgeom(), 4757 * because it will only use it if it is 4758 * non-NULL. 4759 */ 4760 dasetgeom(periph, block_size, maxsector, 4761 rcaplong, sizeof(*rcaplong)); 4762 lbp = (lalba & SRC16_LBPME_A); 4763 dp = &softc->params; 4764 n = snprintf(announce_buf, DA_ANNOUNCETMP_SZ, 4765 "%juMB (%ju %u byte sectors", 4766 ((uintmax_t)dp->secsize * dp->sectors) / 4767 (1024 * 1024), 4768 (uintmax_t)dp->sectors, dp->secsize); 4769 if (softc->p_type != 0) { 4770 n += snprintf(announce_buf + n, 4771 DA_ANNOUNCETMP_SZ - n, 4772 ", DIF type %d", softc->p_type); 4773 } 4774 snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ")"); 4775 } 4776 } else { 4777 int error; 4778 4779 /* 4780 * Retry any UNIT ATTENTION type errors. They 4781 * are expected at boot. 4782 */ 4783 error = daerror(done_ccb, CAM_RETRY_SELTO, 4784 SF_RETRY_UA|SF_NO_PRINT); 4785 if (error == ERESTART) { 4786 /* 4787 * A retry was scheuled, so 4788 * just return. 
4789 */ 4790 return; 4791 } else if (error != 0) { 4792 int asc, ascq; 4793 int sense_key, error_code; 4794 int have_sense; 4795 cam_status status; 4796 struct ccb_getdev cgd; 4797 4798 /* Don't wedge this device's queue */ 4799 status = done_ccb->ccb_h.status; 4800 if ((status & CAM_DEV_QFRZN) != 0) 4801 cam_release_devq(done_ccb->ccb_h.path, 4802 /*relsim_flags*/0, 4803 /*reduction*/0, 4804 /*timeout*/0, 4805 /*getcount_only*/0); 4806 4807 4808 xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, 4809 CAM_PRIORITY_NORMAL); 4810 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 4811 xpt_action((union ccb *)&cgd); 4812 4813 if (scsi_extract_sense_ccb(done_ccb, 4814 &error_code, &sense_key, &asc, &ascq)) 4815 have_sense = TRUE; 4816 else 4817 have_sense = FALSE; 4818 4819 /* 4820 * If we tried READ CAPACITY(16) and failed, 4821 * fallback to READ CAPACITY(10). 4822 */ 4823 if ((state == DA_CCB_PROBE_RC16) && 4824 (softc->flags & DA_FLAG_CAN_RC16) && 4825 (((csio->ccb_h.status & CAM_STATUS_MASK) == 4826 CAM_REQ_INVALID) || 4827 ((have_sense) && 4828 (error_code == SSD_CURRENT_ERROR || 4829 error_code == SSD_DESC_CURRENT_ERROR) && 4830 (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) { 4831 cam_periph_assert(periph, MA_OWNED); 4832 softc->flags &= ~DA_FLAG_CAN_RC16; 4833 free(rdcap, M_SCSIDA); 4834 softc->state = DA_STATE_PROBE_RC; 4835 xpt_release_ccb(done_ccb); 4836 xpt_schedule(periph, priority); 4837 return; 4838 } 4839 4840 /* 4841 * Attach to anything that claims to be a 4842 * direct access or optical disk device, 4843 * as long as it doesn't return a "Logical 4844 * unit not supported" (0x25) error. 4845 * "Internal Target Failure" (0x44) is also 4846 * special and typically means that the 4847 * device is a SATA drive behind a SATL 4848 * translation that's fallen into a 4849 * terminally fatal state. 
4850 */ 4851 if ((have_sense) 4852 && (asc != 0x25) && (asc != 0x44) 4853 && (error_code == SSD_CURRENT_ERROR 4854 || error_code == SSD_DESC_CURRENT_ERROR)) { 4855 const char *sense_key_desc; 4856 const char *asc_desc; 4857 4858 dasetgeom(periph, 512, -1, NULL, 0); 4859 scsi_sense_desc(sense_key, asc, ascq, 4860 &cgd.inq_data, &sense_key_desc, 4861 &asc_desc); 4862 snprintf(announce_buf, DA_ANNOUNCETMP_SZ, 4863 "Attempt to query device " 4864 "size failed: %s, %s", 4865 sense_key_desc, asc_desc); 4866 } else { 4867 if (have_sense) 4868 scsi_sense_print(&done_ccb->csio); 4869 else { 4870 xpt_print(periph->path, 4871 "got CAM status %#x\n", 4872 done_ccb->ccb_h.status); 4873 } 4874 4875 xpt_print(periph->path, "fatal error, " 4876 "failed to attach to device\n"); 4877 4878 announce_buf = NULL; 4879 4880 /* 4881 * Free up resources. 4882 */ 4883 cam_periph_invalidate(periph); 4884 } 4885 } 4886 } 4887 free(csio->data_ptr, M_SCSIDA); 4888 if (announce_buf != NULL && 4889 ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) { 4890 struct sbuf sb; 4891 4892 sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ, 4893 SBUF_FIXEDLEN); 4894 xpt_announce_periph_sbuf(periph, &sb, announce_buf); 4895 xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, 4896 DA_Q_BIT_STRING); 4897 sbuf_finish(&sb); 4898 sbuf_putbuf(&sb); 4899 4900 /* 4901 * Create our sysctl variables, now that we know 4902 * we have successfully attached. 4903 */ 4904 /* increase the refcount */ 4905 if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) { 4906 taskqueue_enqueue(taskqueue_thread, 4907 &softc->sysctl_task); 4908 } else { 4909 /* XXX This message is useless! */ 4910 xpt_print(periph->path, "fatal error, " 4911 "could not acquire reference count\n"); 4912 } 4913 } 4914 4915 /* We already probed the device. */ 4916 if (softc->flags & DA_FLAG_PROBED) { 4917 daprobedone(periph, done_ccb); 4918 return; 4919 } 4920 4921 /* Ensure re-probe doesn't see old delete. 
*/ 4922 softc->delete_available = 0; 4923 dadeleteflag(softc, DA_DELETE_ZERO, 1); 4924 if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) { 4925 /* 4926 * Based on older SBC-3 spec revisions 4927 * any of the UNMAP methods "may" be 4928 * available via LBP given this flag so 4929 * we flag all of them as available and 4930 * then remove those which further 4931 * probes confirm aren't available 4932 * later. 4933 * 4934 * We could also check readcap(16) p_type 4935 * flag to exclude one or more invalid 4936 * write same (X) types here 4937 */ 4938 dadeleteflag(softc, DA_DELETE_WS16, 1); 4939 dadeleteflag(softc, DA_DELETE_WS10, 1); 4940 dadeleteflag(softc, DA_DELETE_UNMAP, 1); 4941 4942 softc->state = DA_STATE_PROBE_LBP; 4943 xpt_release_ccb(done_ccb); 4944 xpt_schedule(periph, priority); 4945 return; 4946 } 4947 4948 softc->state = DA_STATE_PROBE_BDC; 4949 xpt_release_ccb(done_ccb); 4950 xpt_schedule(periph, priority); 4951 return; 4952 } 4953 4954 static void 4955 dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb) 4956 { 4957 struct scsi_vpd_logical_block_prov *lbp; 4958 struct da_softc *softc; 4959 struct ccb_scsiio *csio; 4960 u_int32_t priority; 4961 4962 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probelbp\n")); 4963 4964 softc = (struct da_softc *)periph->softc; 4965 priority = done_ccb->ccb_h.pinfo.priority; 4966 csio = &done_ccb->csio; 4967 lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; 4968 4969 cam_periph_assert(periph, MA_OWNED); 4970 4971 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4972 /* 4973 * T10/1799-D Revision 31 states at least one of these 4974 * must be supported but we don't currently enforce this. 
4975 */ 4976 dadeleteflag(softc, DA_DELETE_WS16, 4977 (lbp->flags & SVPD_LBP_WS16)); 4978 dadeleteflag(softc, DA_DELETE_WS10, 4979 (lbp->flags & SVPD_LBP_WS10)); 4980 dadeleteflag(softc, DA_DELETE_UNMAP, 4981 (lbp->flags & SVPD_LBP_UNMAP)); 4982 } else { 4983 int error; 4984 error = daerror(done_ccb, CAM_RETRY_SELTO, 4985 SF_RETRY_UA|SF_NO_PRINT); 4986 if (error == ERESTART) 4987 return; 4988 else if (error != 0) { 4989 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 4990 /* Don't wedge this device's queue */ 4991 cam_release_devq(done_ccb->ccb_h.path, 4992 /*relsim_flags*/0, 4993 /*reduction*/0, 4994 /*timeout*/0, 4995 /*getcount_only*/0); 4996 } 4997 4998 /* 4999 * Failure indicates we don't support any SBC-3 5000 * delete methods with UNMAP 5001 */ 5002 } 5003 } 5004 5005 free(lbp, M_SCSIDA); 5006 softc->state = DA_STATE_PROBE_BLK_LIMITS; 5007 xpt_release_ccb(done_ccb); 5008 xpt_schedule(periph, priority); 5009 return; 5010 } 5011 5012 static void 5013 dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb) 5014 { 5015 struct scsi_vpd_block_limits *block_limits; 5016 struct da_softc *softc; 5017 struct ccb_scsiio *csio; 5018 u_int32_t priority; 5019 5020 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeblklimits\n")); 5021 5022 softc = (struct da_softc *)periph->softc; 5023 priority = done_ccb->ccb_h.pinfo.priority; 5024 csio = &done_ccb->csio; 5025 block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; 5026 5027 cam_periph_assert(periph, MA_OWNED); 5028 5029 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5030 uint32_t max_txfer_len = scsi_4btoul( 5031 block_limits->max_txfer_len); 5032 uint32_t max_unmap_lba_cnt = scsi_4btoul( 5033 block_limits->max_unmap_lba_cnt); 5034 uint32_t max_unmap_blk_cnt = scsi_4btoul( 5035 block_limits->max_unmap_blk_cnt); 5036 uint32_t unmap_gran = scsi_4btoul( 5037 block_limits->opt_unmap_grain); 5038 uint32_t unmap_gran_align = scsi_4btoul( 5039 block_limits->unmap_grain_align); 5040 
uint64_t ws_max_blks = scsi_8btou64( 5041 block_limits->max_write_same_length); 5042 5043 if (max_txfer_len != 0) { 5044 softc->disk->d_maxsize = MIN(softc->maxio, 5045 (off_t)max_txfer_len * softc->params.secsize); 5046 } 5047 5048 /* 5049 * We should already support UNMAP but we check lba 5050 * and block count to be sure 5051 */ 5052 if (max_unmap_lba_cnt != 0x00L && 5053 max_unmap_blk_cnt != 0x00L) { 5054 softc->unmap_max_lba = max_unmap_lba_cnt; 5055 softc->unmap_max_ranges = min(max_unmap_blk_cnt, 5056 UNMAP_MAX_RANGES); 5057 if (unmap_gran > 1) { 5058 softc->unmap_gran = unmap_gran; 5059 if (unmap_gran_align & 0x80000000) { 5060 softc->unmap_gran_align = 5061 unmap_gran_align & 0x7fffffff; 5062 } 5063 } 5064 } else { 5065 /* 5066 * Unexpected UNMAP limits which means the 5067 * device doesn't actually support UNMAP 5068 */ 5069 dadeleteflag(softc, DA_DELETE_UNMAP, 0); 5070 } 5071 5072 if (ws_max_blks != 0x00L) 5073 softc->ws_max_blks = ws_max_blks; 5074 } else { 5075 int error; 5076 error = daerror(done_ccb, CAM_RETRY_SELTO, 5077 SF_RETRY_UA|SF_NO_PRINT); 5078 if (error == ERESTART) 5079 return; 5080 else if (error != 0) { 5081 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5082 /* Don't wedge this device's queue */ 5083 cam_release_devq(done_ccb->ccb_h.path, 5084 /*relsim_flags*/0, 5085 /*reduction*/0, 5086 /*timeout*/0, 5087 /*getcount_only*/0); 5088 } 5089 5090 /* 5091 * Failure here doesn't mean UNMAP is not 5092 * supported as this is an optional page. 
5093 */ 5094 softc->unmap_max_lba = 1; 5095 softc->unmap_max_ranges = 1; 5096 } 5097 } 5098 5099 free(block_limits, M_SCSIDA); 5100 softc->state = DA_STATE_PROBE_BDC; 5101 xpt_release_ccb(done_ccb); 5102 xpt_schedule(periph, priority); 5103 return; 5104 } 5105 5106 static void 5107 dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb) 5108 { 5109 struct scsi_vpd_block_device_characteristics *bdc; 5110 struct da_softc *softc; 5111 struct ccb_scsiio *csio; 5112 u_int32_t priority; 5113 5114 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probebdc\n")); 5115 5116 softc = (struct da_softc *)periph->softc; 5117 priority = done_ccb->ccb_h.pinfo.priority; 5118 csio = &done_ccb->csio; 5119 bdc = (struct scsi_vpd_block_device_characteristics *)csio->data_ptr; 5120 5121 cam_periph_assert(periph, MA_OWNED); 5122 5123 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5124 uint32_t valid_len; 5125 5126 /* 5127 * Disable queue sorting for non-rotational media 5128 * by default. 5129 */ 5130 u_int16_t old_rate = softc->disk->d_rotation_rate; 5131 5132 valid_len = csio->dxfer_len - csio->resid; 5133 if (SBDC_IS_PRESENT(bdc, valid_len, 5134 medium_rotation_rate)) { 5135 softc->disk->d_rotation_rate = 5136 scsi_2btoul(bdc->medium_rotation_rate); 5137 if (softc->disk->d_rotation_rate == 5138 SVPD_BDC_RATE_NON_ROTATING) { 5139 cam_iosched_set_sort_queue( 5140 softc->cam_iosched, 0); 5141 softc->rotating = 0; 5142 } 5143 if (softc->disk->d_rotation_rate != old_rate) { 5144 disk_attr_changed(softc->disk, 5145 "GEOM::rotation_rate", M_NOWAIT); 5146 } 5147 } 5148 if ((SBDC_IS_PRESENT(bdc, valid_len, flags)) 5149 && (softc->zone_mode == DA_ZONE_NONE)) { 5150 int ata_proto; 5151 5152 if (scsi_vpd_supported_page(periph, 5153 SVPD_ATA_INFORMATION)) 5154 ata_proto = 1; 5155 else 5156 ata_proto = 0; 5157 5158 /* 5159 * The Zoned field will only be set for 5160 * Drive Managed and Host Aware drives. 
If 5161 * they are Host Managed, the device type 5162 * in the standard INQUIRY data should be 5163 * set to T_ZBC_HM (0x14). 5164 */ 5165 if ((bdc->flags & SVPD_ZBC_MASK) == 5166 SVPD_HAW_ZBC) { 5167 softc->zone_mode = DA_ZONE_HOST_AWARE; 5168 softc->zone_interface = (ata_proto) ? 5169 DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; 5170 } else if ((bdc->flags & SVPD_ZBC_MASK) == 5171 SVPD_DM_ZBC) { 5172 softc->zone_mode =DA_ZONE_DRIVE_MANAGED; 5173 softc->zone_interface = (ata_proto) ? 5174 DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; 5175 } else if ((bdc->flags & SVPD_ZBC_MASK) != 5176 SVPD_ZBC_NR) { 5177 xpt_print(periph->path, "Unknown zoned " 5178 "type %#x", 5179 bdc->flags & SVPD_ZBC_MASK); 5180 } 5181 } 5182 } else { 5183 int error; 5184 error = daerror(done_ccb, CAM_RETRY_SELTO, 5185 SF_RETRY_UA|SF_NO_PRINT); 5186 if (error == ERESTART) 5187 return; 5188 else if (error != 0) { 5189 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5190 /* Don't wedge this device's queue */ 5191 cam_release_devq(done_ccb->ccb_h.path, 5192 /*relsim_flags*/0, 5193 /*reduction*/0, 5194 /*timeout*/0, 5195 /*getcount_only*/0); 5196 } 5197 } 5198 } 5199 5200 free(bdc, M_SCSIDA); 5201 softc->state = DA_STATE_PROBE_ATA; 5202 xpt_release_ccb(done_ccb); 5203 xpt_schedule(periph, priority); 5204 return; 5205 } 5206 5207 static void 5208 dadone_probeata(struct cam_periph *periph, union ccb *done_ccb) 5209 { 5210 struct ata_params *ata_params; 5211 struct ccb_scsiio *csio; 5212 struct da_softc *softc; 5213 u_int32_t priority; 5214 int continue_probe; 5215 int error; 5216 int16_t *ptr; 5217 5218 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeata\n")); 5219 5220 softc = (struct da_softc *)periph->softc; 5221 priority = done_ccb->ccb_h.pinfo.priority; 5222 csio = &done_ccb->csio; 5223 ata_params = (struct ata_params *)csio->data_ptr; 5224 ptr = (uint16_t *)ata_params; 5225 continue_probe = 0; 5226 error = 0; 5227 5228 cam_periph_assert(periph, MA_OWNED); 5229 5230 if ((csio->ccb_h.status & 
CAM_STATUS_MASK) == CAM_REQ_CMP) { 5231 uint16_t old_rate; 5232 5233 ata_param_fixup(ata_params); 5234 if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM && 5235 (softc->quirks & DA_Q_NO_UNMAP) == 0) { 5236 dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1); 5237 if (ata_params->max_dsm_blocks != 0) 5238 softc->trim_max_ranges = min( 5239 softc->trim_max_ranges, 5240 ata_params->max_dsm_blocks * 5241 ATA_DSM_BLK_RANGES); 5242 } 5243 /* 5244 * Disable queue sorting for non-rotational media 5245 * by default. 5246 */ 5247 old_rate = softc->disk->d_rotation_rate; 5248 softc->disk->d_rotation_rate = ata_params->media_rotation_rate; 5249 if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) { 5250 cam_iosched_set_sort_queue(softc->cam_iosched, 0); 5251 softc->rotating = 0; 5252 } 5253 if (softc->disk->d_rotation_rate != old_rate) { 5254 disk_attr_changed(softc->disk, 5255 "GEOM::rotation_rate", M_NOWAIT); 5256 } 5257 5258 cam_periph_assert(periph, MA_OWNED); 5259 if (ata_params->capabilities1 & ATA_SUPPORT_DMA) 5260 softc->flags |= DA_FLAG_CAN_ATA_DMA; 5261 5262 if (ata_params->support.extension & ATA_SUPPORT_GENLOG) 5263 softc->flags |= DA_FLAG_CAN_ATA_LOG; 5264 5265 /* 5266 * At this point, if we have a SATA host aware drive, 5267 * we communicate via ATA passthrough unless the 5268 * SAT layer supports ZBC -> ZAC translation. In 5269 * that case, 5270 * 5271 * XXX KDM figure out how to detect a host managed 5272 * SATA drive. 5273 */ 5274 if (softc->zone_mode == DA_ZONE_NONE) { 5275 /* 5276 * Note that we don't override the zone 5277 * mode or interface if it has already been 5278 * set. This is because it has either been 5279 * set as a quirk, or when we probed the 5280 * SCSI Block Device Characteristics page, 5281 * the zoned field was set. The latter 5282 * means that the SAT layer supports ZBC to 5283 * ZAC translation, and we would prefer to 5284 * use that if it is available. 
5285 */ 5286 if ((ata_params->support3 & 5287 ATA_SUPPORT_ZONE_MASK) == 5288 ATA_SUPPORT_ZONE_HOST_AWARE) { 5289 softc->zone_mode = DA_ZONE_HOST_AWARE; 5290 softc->zone_interface = 5291 DA_ZONE_IF_ATA_PASS; 5292 } else if ((ata_params->support3 & 5293 ATA_SUPPORT_ZONE_MASK) == 5294 ATA_SUPPORT_ZONE_DEV_MANAGED) { 5295 softc->zone_mode =DA_ZONE_DRIVE_MANAGED; 5296 softc->zone_interface = DA_ZONE_IF_ATA_PASS; 5297 } 5298 } 5299 5300 } else { 5301 error = daerror(done_ccb, CAM_RETRY_SELTO, 5302 SF_RETRY_UA|SF_NO_PRINT); 5303 if (error == ERESTART) 5304 return; 5305 else if (error != 0) { 5306 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5307 /* Don't wedge this device's queue */ 5308 cam_release_devq(done_ccb->ccb_h.path, 5309 /*relsim_flags*/0, 5310 /*reduction*/0, 5311 /*timeout*/0, 5312 /*getcount_only*/0); 5313 } 5314 } 5315 } 5316 5317 if ((softc->zone_mode == DA_ZONE_HOST_AWARE) 5318 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { 5319 /* 5320 * If the ATA IDENTIFY failed, we could be talking 5321 * to a SCSI drive, although that seems unlikely, 5322 * since the drive did report that it supported the 5323 * ATA Information VPD page. If the ATA IDENTIFY 5324 * succeeded, and the SAT layer doesn't support 5325 * ZBC -> ZAC translation, continue on to get the 5326 * directory of ATA logs, and complete the rest of 5327 * the ZAC probe. If the SAT layer does support 5328 * ZBC -> ZAC translation, we want to use that, 5329 * and we'll probe the SCSI Zoned Block Device 5330 * Characteristics VPD page next. 
5331 */ 5332 if ((error == 0) 5333 && (softc->flags & DA_FLAG_CAN_ATA_LOG) 5334 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS)) 5335 softc->state = DA_STATE_PROBE_ATA_LOGDIR; 5336 else 5337 softc->state = DA_STATE_PROBE_ZONE; 5338 continue_probe = 1; 5339 } 5340 if (continue_probe != 0) { 5341 xpt_schedule(periph, priority); 5342 xpt_release_ccb(done_ccb); 5343 return; 5344 } else 5345 daprobedone(periph, done_ccb); 5346 return; 5347 } 5348 5349 static void 5350 dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb) 5351 { 5352 struct da_softc *softc; 5353 struct ccb_scsiio *csio; 5354 u_int32_t priority; 5355 int error; 5356 5357 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatalogdir\n")); 5358 5359 softc = (struct da_softc *)periph->softc; 5360 priority = done_ccb->ccb_h.pinfo.priority; 5361 csio = &done_ccb->csio; 5362 5363 cam_periph_assert(periph, MA_OWNED); 5364 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5365 error = 0; 5366 softc->valid_logdir_len = 0; 5367 bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); 5368 softc->valid_logdir_len = csio->dxfer_len - csio->resid; 5369 if (softc->valid_logdir_len > 0) 5370 bcopy(csio->data_ptr, &softc->ata_logdir, 5371 min(softc->valid_logdir_len, 5372 sizeof(softc->ata_logdir))); 5373 /* 5374 * Figure out whether the Identify Device log is 5375 * supported. The General Purpose log directory 5376 * has a header, and lists the number of pages 5377 * available for each GP log identified by the 5378 * offset into the list. 
5379 */ 5380 if ((softc->valid_logdir_len >= 5381 ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) 5382 && (le16dec(softc->ata_logdir.header) == 5383 ATA_GP_LOG_DIR_VERSION) 5384 && (le16dec(&softc->ata_logdir.num_pages[ 5385 (ATA_IDENTIFY_DATA_LOG * 5386 sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){ 5387 softc->flags |= DA_FLAG_CAN_ATA_IDLOG; 5388 } else { 5389 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; 5390 } 5391 } else { 5392 error = daerror(done_ccb, CAM_RETRY_SELTO, 5393 SF_RETRY_UA|SF_NO_PRINT); 5394 if (error == ERESTART) 5395 return; 5396 else if (error != 0) { 5397 /* 5398 * If we can't get the ATA log directory, 5399 * then ATA logs are effectively not 5400 * supported even if the bit is set in the 5401 * identify data. 5402 */ 5403 softc->flags &= ~(DA_FLAG_CAN_ATA_LOG | 5404 DA_FLAG_CAN_ATA_IDLOG); 5405 if ((done_ccb->ccb_h.status & 5406 CAM_DEV_QFRZN) != 0) { 5407 /* Don't wedge this device's queue */ 5408 cam_release_devq(done_ccb->ccb_h.path, 5409 /*relsim_flags*/0, 5410 /*reduction*/0, 5411 /*timeout*/0, 5412 /*getcount_only*/0); 5413 } 5414 } 5415 } 5416 5417 free(csio->data_ptr, M_SCSIDA); 5418 5419 if ((error == 0) 5420 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) { 5421 softc->state = DA_STATE_PROBE_ATA_IDDIR; 5422 xpt_release_ccb(done_ccb); 5423 xpt_schedule(periph, priority); 5424 return; 5425 } 5426 daprobedone(periph, done_ccb); 5427 return; 5428 } 5429 5430 static void 5431 dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb) 5432 { 5433 struct da_softc *softc; 5434 struct ccb_scsiio *csio; 5435 u_int32_t priority; 5436 int error; 5437 5438 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeataiddir\n")); 5439 5440 softc = (struct da_softc *)periph->softc; 5441 priority = done_ccb->ccb_h.pinfo.priority; 5442 csio = &done_ccb->csio; 5443 5444 cam_periph_assert(periph, MA_OWNED); 5445 5446 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5447 off_t entries_offset, max_entries; 5448 error = 0; 5449 5450 
		softc->valid_iddir_len = 0;
		bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
		/*
		 * Assume neither the Supported Capabilities page nor the
		 * Zoned Device Information page is available until we see
		 * it listed in the log directory below.
		 */
		softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
		    DA_FLAG_CAN_ATA_ZONE);
		softc->valid_iddir_len = csio->dxfer_len - csio->resid;
		if (softc->valid_iddir_len > 0)
			bcopy(csio->data_ptr, &softc->ata_iddir,
			    min(softc->valid_iddir_len,
				sizeof(softc->ata_iddir)));

		entries_offset =
		    __offsetof(struct ata_identify_log_pages,entries);
		max_entries = softc->valid_iddir_len - entries_offset;
		if ((softc->valid_iddir_len > (entries_offset + 1))
		 && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION)
		 && (softc->ata_iddir.entry_count > 0)) {
			int num_entries, i;

			num_entries = softc->ata_iddir.entry_count;
			num_entries = min(num_entries,
			    softc->valid_iddir_len - entries_offset);
			/*
			 * Scan the log directory for the two pages we care
			 * about; stop early once both have been found.
			 */
			for (i = 0; i < num_entries && i < max_entries; i++) {
				if (softc->ata_iddir.entries[i] ==
				    ATA_IDL_SUP_CAP)
					softc->flags |= DA_FLAG_CAN_ATA_SUPCAP;
				else if (softc->ata_iddir.entries[i] ==
					 ATA_IDL_ZDI)
					softc->flags |= DA_FLAG_CAN_ATA_ZONE;

				if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP)
				 && (softc->flags & DA_FLAG_CAN_ATA_ZONE))
					break;
			}
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			/*
			 * If we can't get the ATA Identify Data log
			 * directory, then it effectively isn't
			 * supported even if the ATA Log directory
			 * has a non-zero number of pages present for
			 * this log.
			 */
			softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	/* Chain to the Supported Capabilities probe if the page exists. */
	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
		softc->state = DA_STATE_PROBE_ATA_SUP;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	daprobedone(periph, done_ccb);
	return;
}

/*
 * Completion handler for the ATA Identify Data Supported Capabilities
 * page probe.  On success, records the drive's zone mode (host aware /
 * drive managed) and which ZAC management commands it supports in
 * softc->zone_flags.  On failure, clears DA_FLAG_CAN_ATA_SUPCAP and the
 * zone support flags.  Chains to the ATA zone probe when applicable.
 */
static void
dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	u_int32_t priority;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatasup\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t valid_len;
		size_t needed_size;
		struct ata_identify_log_sup_cap *sup_cap;
		error = 0;

		sup_cap = (struct ata_identify_log_sup_cap *)csio->data_ptr;
		valid_len = csio->dxfer_len - csio->resid;
		needed_size = __offsetof(struct ata_identify_log_sup_cap,
		    sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
		if (valid_len >= needed_size) {
			uint64_t zoned, zac_cap;

			zoned = le64dec(sup_cap->zoned_cap);
			if (zoned & ATA_ZONED_VALID) {
				/*
				 * This should have already been
				 * set, because this is also in the
				 * ATA identify data.
				 */
				if ((zoned & ATA_ZONED_MASK) ==
				    ATA_SUPPORT_ZONE_HOST_AWARE)
					softc->zone_mode = DA_ZONE_HOST_AWARE;
				else if ((zoned & ATA_ZONED_MASK) ==
				    ATA_SUPPORT_ZONE_DEV_MANAGED)
					softc->zone_mode =
					    DA_ZONE_DRIVE_MANAGED;
			}

			zac_cap = le64dec(sup_cap->sup_zac_cap);
			if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
				if (zac_cap & ATA_REPORT_ZONES_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_RZ_SUP;
				if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_OPEN_SUP;
				if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_CLOSE_SUP;
				if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_FINISH_SUP;
				if (zac_cap & ATA_ND_RWP_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_RWP_SUP;
			} else {
				/*
				 * This field was introduced in
				 * ACS-4, r08 on April 28th, 2015.
				 * If the drive firmware was written
				 * to an earlier spec, it won't have
				 * the field.  So, assume all
				 * commands are supported.
				 */
				softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
			}
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			/*
			 * If we can't get the ATA Identify Data
			 * Supported Capabilities page, clear the
			 * flag...
			 */
			softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
			/*
			 * And clear zone capabilities.
			 */
			softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	/* Chain to the Zoned Device Information probe when supported. */
	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
		softc->state = DA_STATE_PROBE_ATA_ZONE;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	daprobedone(periph, done_ccb);
	return;
}

/*
 * Completion handler for the ATA Zoned Device Information log probe.
 * On success, records unrestricted-read support and the optimal /
 * maximum zone counts advertised by the drive.
 */
static void
dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatazone\n"));

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		struct ata_zoned_info_log *zi_log;
		uint32_t valid_len;
		size_t needed_size;

		zi_log = (struct ata_zoned_info_log *)csio->data_ptr;

		valid_len = csio->dxfer_len - csio->resid;
		needed_size = __offsetof(struct ata_zoned_info_log,
		    version_info) + 1 + sizeof(zi_log->version_info);
		if (valid_len >= needed_size) {
			uint64_t tmpvar;

			tmpvar = le64dec(zi_log->zoned_cap);
			if (tmpvar & ATA_ZDI_CAP_VALID) {
				if (tmpvar & ATA_ZDI_CAP_URSWRZ)
					softc->zone_flags |=
					    DA_ZONE_FLAG_URSWRZ;
				else
					softc->zone_flags &=
					    ~DA_ZONE_FLAG_URSWRZ;
			}
			tmpvar = le64dec(zi_log->optimal_seq_zones);
			if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
				softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
				softc->optimal_seq_zones = (tmpvar &
				    ATA_ZDI_OPT_SEQ_MASK);
			} else {
				softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET;
softc->optimal_seq_zones = 0; 5678 } 5679 5680 tmpvar =le64dec(zi_log->optimal_nonseq_zones); 5681 if (tmpvar & ATA_ZDI_OPT_NS_VALID) { 5682 softc->zone_flags |= 5683 DA_ZONE_FLAG_OPT_NONSEQ_SET; 5684 softc->optimal_nonseq_zones = 5685 (tmpvar & ATA_ZDI_OPT_NS_MASK); 5686 } else { 5687 softc->zone_flags &= 5688 ~DA_ZONE_FLAG_OPT_NONSEQ_SET; 5689 softc->optimal_nonseq_zones = 0; 5690 } 5691 5692 tmpvar = le64dec(zi_log->max_seq_req_zones); 5693 if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { 5694 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; 5695 softc->max_seq_zones = 5696 (tmpvar & ATA_ZDI_MAX_SEQ_MASK); 5697 } else { 5698 softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET; 5699 softc->max_seq_zones = 0; 5700 } 5701 } 5702 } else { 5703 error = daerror(done_ccb, CAM_RETRY_SELTO, 5704 SF_RETRY_UA|SF_NO_PRINT); 5705 if (error == ERESTART) 5706 return; 5707 else if (error != 0) { 5708 softc->flags &= ~DA_FLAG_CAN_ATA_ZONE; 5709 softc->flags &= ~DA_ZONE_FLAG_SET_MASK; 5710 5711 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5712 /* Don't wedge this device's queue */ 5713 cam_release_devq(done_ccb->ccb_h.path, 5714 /*relsim_flags*/0, 5715 /*reduction*/0, 5716 /*timeout*/0, 5717 /*getcount_only*/0); 5718 } 5719 } 5720 5721 } 5722 5723 free(csio->data_ptr, M_SCSIDA); 5724 5725 daprobedone(periph, done_ccb); 5726 return; 5727 } 5728 5729 static void 5730 dadone_probezone(struct cam_periph *periph, union ccb *done_ccb) 5731 { 5732 struct da_softc *softc; 5733 struct ccb_scsiio *csio; 5734 int error; 5735 5736 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probezone\n")); 5737 5738 softc = (struct da_softc *)periph->softc; 5739 csio = &done_ccb->csio; 5740 5741 cam_periph_assert(periph, MA_OWNED); 5742 5743 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5744 uint32_t valid_len; 5745 size_t needed_len; 5746 struct scsi_vpd_zoned_bdc *zoned_bdc; 5747 5748 error = 0; 5749 zoned_bdc = (struct scsi_vpd_zoned_bdc *)csio->data_ptr; 5750 valid_len = csio->dxfer_len - 
csio->resid; 5751 needed_len = __offsetof(struct scsi_vpd_zoned_bdc, 5752 max_seq_req_zones) + 1 + 5753 sizeof(zoned_bdc->max_seq_req_zones); 5754 if ((valid_len >= needed_len) 5755 && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) { 5756 if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ) 5757 softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; 5758 else 5759 softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; 5760 softc->optimal_seq_zones = 5761 scsi_4btoul(zoned_bdc->optimal_seq_zones); 5762 softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; 5763 softc->optimal_nonseq_zones = scsi_4btoul( 5764 zoned_bdc->optimal_nonseq_zones); 5765 softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; 5766 softc->max_seq_zones = 5767 scsi_4btoul(zoned_bdc->max_seq_req_zones); 5768 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; 5769 } 5770 /* 5771 * All of the zone commands are mandatory for SCSI 5772 * devices. 5773 * 5774 * XXX KDM this is valid as of September 2015. 5775 * Re-check this assumption once the SAT spec is 5776 * updated to support SCSI ZBC to ATA ZAC mapping. 5777 * Since ATA allows zone commands to be reported 5778 * as supported or not, this may not necessarily 5779 * be true for an ATA device behind a SAT (SCSI to 5780 * ATA Translation) layer. 
5781 */ 5782 softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; 5783 } else { 5784 error = daerror(done_ccb, CAM_RETRY_SELTO, 5785 SF_RETRY_UA|SF_NO_PRINT); 5786 if (error == ERESTART) 5787 return; 5788 else if (error != 0) { 5789 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5790 /* Don't wedge this device's queue */ 5791 cam_release_devq(done_ccb->ccb_h.path, 5792 /*relsim_flags*/0, 5793 /*reduction*/0, 5794 /*timeout*/0, 5795 /*getcount_only*/0); 5796 } 5797 } 5798 } 5799 5800 free(csio->data_ptr, M_SCSIDA); 5801 5802 daprobedone(periph, done_ccb); 5803 return; 5804 } 5805 5806 static void 5807 dadone_tur(struct cam_periph *periph, union ccb *done_ccb) 5808 { 5809 struct da_softc *softc; 5810 struct ccb_scsiio *csio; 5811 5812 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_tur\n")); 5813 5814 softc = (struct da_softc *)periph->softc; 5815 csio = &done_ccb->csio; 5816 5817 cam_periph_assert(periph, MA_OWNED); 5818 5819 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 5820 5821 if (daerror(done_ccb, CAM_RETRY_SELTO, 5822 SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) 5823 return; /* Will complete again, keep reference */ 5824 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 5825 cam_release_devq(done_ccb->ccb_h.path, 5826 /*relsim_flags*/0, 5827 /*reduction*/0, 5828 /*timeout*/0, 5829 /*getcount_only*/0); 5830 } 5831 softc->flags &= ~DA_FLAG_TUR_PENDING; 5832 xpt_release_ccb(done_ccb); 5833 da_periph_release_locked(periph, DA_REF_TUR); 5834 return; 5835 } 5836 5837 static void 5838 dareprobe(struct cam_periph *periph) 5839 { 5840 struct da_softc *softc; 5841 int status; 5842 5843 softc = (struct da_softc *)periph->softc; 5844 5845 cam_periph_assert(periph, MA_OWNED); 5846 5847 /* Probe in progress; don't interfere. 
 */
	if (softc->state != DA_STATE_NORMAL)
		return;

	status = da_periph_acquire(periph, DA_REF_REPROBE);
	KASSERT(status == 0, ("dareprobe: cam_periph_acquire failed"));

	softc->state = DA_STATE_PROBE_WP;
	xpt_schedule(periph, CAM_PRIORITY_DEV);
}

/*
 * Error recovery callback for da(4) CCBs.  Handles driver-specific sense
 * conditions (READ(6)/WRITE(6) fallback, capacity / INQUIRY change unit
 * attentions, media arrival/departure), updates I/O statistics, and then
 * defers the rest of the recovery policy to cam_periph_error().
 * Returns ERESTART when the CCB will be retried.
 */
static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc *softc;
	struct cam_periph *periph;
	int error, error_code, sense_key, asc, ascq;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	/*
	 * Automatically detect devices that do not support
	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
	 */
	error = 0;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
		error = cmd6workaround(ccb);
	} else if (scsi_extract_sense_ccb(ccb,
	    &error_code, &sense_key, &asc, &ascq)) {
		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
			error = cmd6workaround(ccb);
		/*
		 * If the target replied with CAPACITY DATA HAS CHANGED UA,
		 * query the capacity and notify upper layers.
		 */
		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x2A && ascq == 0x09) {
			xpt_print(periph->path, "Capacity data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x28 && ascq == 0x00) {
			/* NOT READY TO READY CHANGE: new media arrived. */
			softc->flags &= ~DA_FLAG_PROBED;
			disk_media_changed(softc->disk, M_NOWAIT);
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x3F && ascq == 0x03) {
			xpt_print(periph->path, "INQUIRY data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_NOT_READY &&
		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			/* MEDIUM NOT PRESENT: media went away. */
			softc->flags |= DA_FLAG_PACK_INVALID;
			disk_media_gone(softc->disk, M_NOWAIT);
		}
	}
	if (error == ERESTART)
		return (ERESTART);

#ifdef CAM_IO_STATS
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		softc->timeouts++;
		break;
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
		softc->errors++;
		break;
	default:
		break;
	}
#endif

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;

	if (softc->quirks & DA_Q_RETRY_BUSY)
		sense_flags |= SF_RETRY_BUSY;
	return(cam_periph_error(ccb, cam_flags, sense_flags));
}

/*
 * Periodic media-poll callout: schedule a TEST UNIT READY when the device
 * is idle (no pending work or CCBs), then re-arm itself.
 */
static void
damediapoll(void *arg)
{
	struct cam_periph *periph = arg;
	struct da_softc *softc = periph->softc;

	if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
	    (softc->flags & DA_FLAG_TUR_PENDING) == 0 &&
	    softc->state == DA_STATE_NORMAL &&
	    LIST_EMPTY(&softc->pending_ccbs)) {
		if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
			cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
			daschedule(periph);
		}
	}
	/* Queue us up again */
	if (da_poll_period != 0)
		callout_schedule(&softc->mediapoll_c, da_poll_period * hz);
}

/*
 * Issue a synchronous PREVENT ALLOW MEDIUM REMOVAL and track the lock
 * state in DA_FLAG_PACK_LOCKED.  A no-op if the requested state already
 * matches the tracked state.
 */
static void
daprevent(struct cam_periph *periph, int action)
{
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	cam_periph_assert(periph, MA_OWNED);
	softc = (struct da_softc *)periph->softc;

	if (((action == PR_ALLOW)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
	 || ((action == PR_PREVENT)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
		return;
	}

	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);

	scsi_prevent(&ccb->csio,
		     /*retries*/1,
		     /*cbcfp*/NULL,
		     MSG_SIMPLE_Q_TAG,
		     action,
		     SSD_FULL_SIZE,
		     5000);

	error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
	    SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);

	if (error == 0) {
		if (action == PR_ALLOW)
			softc->flags &= ~DA_FLAG_PACK_LOCKED;
		else
			softc->flags |= DA_FLAG_PACK_LOCKED;
	}

	xpt_release_ccb(ccb);
}

/*
 * Record the device's block size, capacity, stripe geometry and
 * protection type in the softc / disk structure, ask the controller for
 * a boot geometry, and push updated read-capacity data to the EDT.
 */
static void
dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
	  struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
{
	struct
ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;
	u_int lbppbe, lalba;
	int error;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = block_len;
	dp->sectors = maxsector + 1;
	if (rcaplong != NULL) {
		lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
		lalba = scsi_2btoul(rcaplong->lalba_lbp);
		lalba &= SRC16_LALBA_A;
		/* Protection type is 1-based when protection is enabled. */
		if (rcaplong->prot & SRC16_PROT_EN)
			softc->p_type = ((rcaplong->prot & SRC16_P_TYPE) >>
			    SRC16_P_TYPE_SHIFT) + 1;
		else
			softc->p_type = 0;
	} else {
		lbppbe = 0;
		lalba = 0;
		softc->p_type = 0;
	}

	/*
	 * Derive stripe size/offset from (in priority order): the logical
	 * blocks per physical exponent, the 4K quirk, or the unmap
	 * granularity.
	 */
	if (lbppbe > 0) {
		dp->stripesize = block_len << lbppbe;
		dp->stripeoffset = (dp->stripesize - block_len * lalba) %
		    dp->stripesize;
	} else if (softc->quirks & DA_Q_4K) {
		dp->stripesize = 4096;
		dp->stripeoffset = 0;
	} else if (softc->unmap_gran != 0) {
		dp->stripesize = block_len * softc->unmap_gran;
		dp->stripeoffset = (dp->stripesize - block_len *
		    softc->unmap_gran_align) % dp->stripesize;
	} else {
		dp->stripesize = 0;
		dp->stripeoffset = 0;
	}
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb*)&ccg);
	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/*
		 * We don't know what went wrong here- but just pick
		 * a geometry so we don't have nasty things like divide
		 * by zero.
		 */
		dp->heads = 255;
		dp->secs_per_track = 255;
		dp->cylinders = dp->sectors / (255 * 255);
		if (dp->cylinders == 0) {
			dp->cylinders = 1;
		}
	} else {
		dp->heads = ccg.heads;
		dp->secs_per_track = ccg.secs_per_track;
		dp->cylinders = ccg.cylinders;
	}

	/*
	 * If the user supplied a read capacity buffer, and if it is
	 * different than the previous buffer, update the data in the EDT.
	 * If it's the same, we don't bother.  This avoids sending an
	 * update every time someone opens this device.
	 */
	if ((rcaplong != NULL)
	 && (bcmp(rcaplong, &softc->rcaplong,
	     min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
		struct ccb_dev_advinfo cdai;

		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_STORE;
		cdai.bufsiz = rcap_len;
		cdai.buf = (uint8_t *)rcaplong;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
		if (cdai.ccb_h.status != CAM_REQ_CMP) {
			xpt_print(periph->path, "%s: failed to set read "
			    "capacity advinfo\n", __func__);
			/* Use cam_error_print() to decode the status */
			cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
					CAM_EPF_ALL);
		} else {
			/* Cache the data we stored so future compares work. */
			bcopy(rcaplong, &softc->rcaplong,
			      min(sizeof(softc->rcaplong), rcap_len));
		}
	}

	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
	softc->disk->d_stripesize = softc->params.stripesize;
	softc->disk->d_stripeoffset = softc->params.stripeoffset;
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
	softc->disk->d_devstat->block_size = softc->params.secsize;
	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;

	error = disk_resize(softc->disk, M_NOWAIT);
	if (error != 0)
		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
}

/*
 * Periodic callout: request an ordered tag once per interval while I/O is
 * outstanding, to prevent indefinite reordering by the device.
 */
static void
dasendorderedtag(void *arg)
{
	struct cam_periph *periph = arg;
	struct da_softc *softc = periph->softc;

	cam_periph_assert(periph, MA_OWNED);
	if (da_send_ordered) {
		if (!LIST_EMPTY(&softc->pending_ccbs)) {
			if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
				softc->flags |= DA_FLAG_NEED_OTAG;
			softc->flags &= ~DA_FLAG_WAS_OTAG;
		}
	}

	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, periph);
}

/*
 * Step through all DA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
dashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	CAM_PERIPH_FOREACH(periph, &dadriver) {
		softc = (struct da_softc *)periph->softc;
		if (SCHEDULER_STOPPED()) {
			/* If we panicked with the lock held, do not recurse. */
			if (!cam_periph_owned(periph) &&
			    (softc->flags & DA_FLAG_OPEN)) {
				dadump(softc->disk, NULL, 0, 0, 0);
			}
			continue;
		}
		cam_periph_lock(periph);

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0)
		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
			cam_periph_unlock(periph);
			continue;
		}

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		scsi_synchronize_cache(&ccb->csio,
				       /*retries*/0,
				       /*cbfcnp*/NULL,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       60 * 60 * 1000);

		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
		    softc->disk->d_devstat);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
		xpt_release_ccb(ccb);
		cam_periph_unlock(periph);
	}
}

#else /* !_KERNEL */

/*
 * XXX These are only left out of the kernel build to silence warnings.  If,
 * for some reason these functions are used in the kernel, the ifdefs should
 * be moved so they are included both in the kernel and userland.
 */

/*
 * Build a FORMAT UNIT CDB in the supplied CCB.
 */
void
scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		 u_int32_t timeout)
{
	struct scsi_format_unit *scsi_cmd;

	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = FORMAT_UNIT;
	scsi_cmd->byte2 = byte2;
	scsi_ulto2b(ileave, scsi_cmd->interleave);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ?
CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

/*
 * Build a READ DEFECT DATA(10) or READ DEFECT DATA(12) CDB, choosing the
 * 10 byte form whenever the caller's constraints allow it.
 */
void
scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  uint8_t tag_action, uint8_t list_format,
		  uint32_t addr_desc_index, uint8_t *data_ptr,
		  uint32_t dxfer_len, int minimum_cmd_size,
		  uint8_t sense_len, uint32_t timeout)
{
	uint8_t cdb_len;

	/*
	 * These conditions allow using the 10 byte command.  Otherwise we
	 * need to use the 12 byte command.
	 */
	if ((minimum_cmd_size <= 10)
	 && (addr_desc_index == 0)
	 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
		struct scsi_read_defect_data_10 *cdb10;

		cdb10 = (struct scsi_read_defect_data_10 *)
			&csio->cdb_io.cdb_bytes;

		cdb_len = sizeof(*cdb10);
		bzero(cdb10, cdb_len);
		cdb10->opcode = READ_DEFECT_DATA_10;
		cdb10->format = list_format;
		scsi_ulto2b(dxfer_len, cdb10->alloc_length);
	} else {
		struct scsi_read_defect_data_12 *cdb12;

		cdb12 = (struct scsi_read_defect_data_12 *)
			&csio->cdb_io.cdb_bytes;

		cdb_len = sizeof(*cdb12);
		bzero(cdb12, cdb_len);
		cdb12->opcode = READ_DEFECT_DATA_12;
		cdb12->format = list_format;
		scsi_ulto4b(dxfer_len, cdb12->alloc_length);
		scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
	}

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ CAM_DIR_IN,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      cdb_len,
		      timeout);
}

/*
 * Build a SANITIZE CDB in the supplied CCB.
 */
void
scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
	      u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
	      u_int32_t timeout)
{
	struct scsi_sanitize *scsi_cmd;

	scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = SANITIZE;
	scsi_cmd->byte2 = byte2;
	scsi_cmd->control = control;
	scsi_ulto2b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

#endif /* _KERNEL */

/*
 * Build a ZBC OUT CDB (zone management out: open/close/finish/reset).
 */
void
scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
	     void (*cbfcnp)(struct cam_periph *, union ccb *),
	     uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
	     uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
	     uint8_t sense_len, uint32_t timeout)
{
	struct scsi_zbc_out *scsi_cmd;

	scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = ZBC_OUT;
	scsi_cmd->service_action = service_action;
	scsi_u64to8b(zone_id, scsi_cmd->zone_id);
	scsi_cmd->zone_flags = zone_flags;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

/*
 * Build a ZBC IN CDB (zone management in: e.g. REPORT ZONES).
 */
void
scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
	    void (*cbfcnp)(struct cam_periph *, union ccb *),
	    uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
	    uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
	    uint8_t sense_len, uint32_t timeout)
{
	struct scsi_zbc_in *scsi_cmd;

	scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = ZBC_IN;
	scsi_cmd->service_action = service_action;
	scsi_ulto4b(dxfer_len, scsi_cmd->length);
	scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
	scsi_cmd->zone_options = zone_options;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);

}

/*
 * Build an ATA ZAC MANAGEMENT OUT (or its NCQ equivalent) via ATA
 * PASS-THROUGH.  Returns 0 on success, 1 if the transfer is too large to
 * encode.
 */
int
scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
		      void (*cbfcnp)(struct cam_periph *, union ccb *),
		      uint8_t tag_action, int use_ncq,
		      uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
		      uint8_t *data_ptr, uint32_t dxfer_len,
		      uint8_t *cdb_storage, size_t cdb_storage_len,
		      uint8_t sense_len, uint32_t timeout)
{
	uint8_t command_out, protocol, ata_flags;
	uint16_t features_out;
	uint32_t sectors_out, auxiliary;
	int retval;

	retval = 0;

	if (use_ncq == 0) {
		command_out = ATA_ZAC_MANAGEMENT_OUT;
		features_out = (zm_action & 0xf) | (zone_flags << 8);
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			protocol = AP_PROTO_NON_DATA;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
			sectors_out = 0;
		} else {
			protocol = AP_PROTO_DMA;
			ata_flags |= AP_FLAG_TLEN_SECT_CNT |
			    AP_FLAG_TDIR_TO_DEV;
			sectors_out = ((dxfer_len
>> 9) & 0xffff); 6405 } 6406 auxiliary = 0; 6407 } else { 6408 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; 6409 if (dxfer_len == 0) { 6410 command_out = ATA_NCQ_NON_DATA; 6411 features_out = ATA_NCQ_ZAC_MGMT_OUT; 6412 /* 6413 * We're assuming the SCSI to ATA translation layer 6414 * will set the NCQ tag number in the tag field. 6415 * That isn't clear from the SAT-4 spec (as of rev 05). 6416 */ 6417 sectors_out = 0; 6418 ata_flags |= AP_FLAG_TLEN_NO_DATA; 6419 } else { 6420 command_out = ATA_SEND_FPDMA_QUEUED; 6421 /* 6422 * Note that we're defaulting to normal priority, 6423 * and assuming that the SCSI to ATA translation 6424 * layer will insert the NCQ tag number in the tag 6425 * field. That isn't clear in the SAT-4 spec (as 6426 * of rev 05). 6427 */ 6428 sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8; 6429 6430 ata_flags |= AP_FLAG_TLEN_FEAT | 6431 AP_FLAG_TDIR_TO_DEV; 6432 6433 /* 6434 * For SEND FPDMA QUEUED, the transfer length is 6435 * encoded in the FEATURE register, and 0 means 6436 * that 65536 512 byte blocks are to be tranferred. 6437 * In practice, it seems unlikely that we'll see 6438 * a transfer that large, and it may confuse the 6439 * the SAT layer, because generally that means that 6440 * 0 bytes should be transferred. 6441 */ 6442 if (dxfer_len == (65536 * 512)) { 6443 features_out = 0; 6444 } else if (dxfer_len <= (65535 * 512)) { 6445 features_out = ((dxfer_len >> 9) & 0xffff); 6446 } else { 6447 /* The transfer is too big. */ 6448 retval = 1; 6449 goto bailout; 6450 } 6451 6452 } 6453 6454 auxiliary = (zm_action & 0xf) | (zone_flags << 8); 6455 protocol = AP_PROTO_FPDMA; 6456 } 6457 6458 protocol |= AP_EXTEND; 6459 6460 retval = scsi_ata_pass(csio, 6461 retries, 6462 cbfcnp, 6463 /*flags*/ (dxfer_len > 0) ? 
CAM_DIR_OUT : CAM_DIR_NONE, 6464 tag_action, 6465 /*protocol*/ protocol, 6466 /*ata_flags*/ ata_flags, 6467 /*features*/ features_out, 6468 /*sector_count*/ sectors_out, 6469 /*lba*/ zone_id, 6470 /*command*/ command_out, 6471 /*device*/ 0, 6472 /*icc*/ 0, 6473 /*auxiliary*/ auxiliary, 6474 /*control*/ 0, 6475 /*data_ptr*/ data_ptr, 6476 /*dxfer_len*/ dxfer_len, 6477 /*cdb_storage*/ cdb_storage, 6478 /*cdb_storage_len*/ cdb_storage_len, 6479 /*minimum_cmd_size*/ 0, 6480 /*sense_len*/ SSD_FULL_SIZE, 6481 /*timeout*/ timeout); 6482 6483 bailout: 6484 6485 return (retval); 6486 } 6487 6488 int 6489 scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, 6490 void (*cbfcnp)(struct cam_periph *, union ccb *), 6491 uint8_t tag_action, int use_ncq, 6492 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, 6493 uint8_t *data_ptr, uint32_t dxfer_len, 6494 uint8_t *cdb_storage, size_t cdb_storage_len, 6495 uint8_t sense_len, uint32_t timeout) 6496 { 6497 uint8_t command_out, protocol; 6498 uint16_t features_out, sectors_out; 6499 uint32_t auxiliary; 6500 int ata_flags; 6501 int retval; 6502 6503 retval = 0; 6504 ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS; 6505 6506 if (use_ncq == 0) { 6507 command_out = ATA_ZAC_MANAGEMENT_IN; 6508 /* XXX KDM put a macro here */ 6509 features_out = (zm_action & 0xf) | (zone_flags << 8); 6510 sectors_out = dxfer_len >> 9; /* XXX KDM macro */ 6511 protocol = AP_PROTO_DMA; 6512 ata_flags |= AP_FLAG_TLEN_SECT_CNT; 6513 auxiliary = 0; 6514 } else { 6515 ata_flags |= AP_FLAG_TLEN_FEAT; 6516 6517 command_out = ATA_RECV_FPDMA_QUEUED; 6518 sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; 6519 6520 /* 6521 * For RECEIVE FPDMA QUEUED, the transfer length is 6522 * encoded in the FEATURE register, and 0 means 6523 * that 65536 512 byte blocks are to be tranferred. 
6524 * In practice, it seems unlikely that we'll see 6525 * a transfer that large, and it may confuse the 6526 * the SAT layer, because generally that means that 6527 * 0 bytes should be transferred. 6528 */ 6529 if (dxfer_len == (65536 * 512)) { 6530 features_out = 0; 6531 } else if (dxfer_len <= (65535 * 512)) { 6532 features_out = ((dxfer_len >> 9) & 0xffff); 6533 } else { 6534 /* The transfer is too big. */ 6535 retval = 1; 6536 goto bailout; 6537 } 6538 auxiliary = (zm_action & 0xf) | (zone_flags << 8), 6539 protocol = AP_PROTO_FPDMA; 6540 } 6541 6542 protocol |= AP_EXTEND; 6543 6544 retval = scsi_ata_pass(csio, 6545 retries, 6546 cbfcnp, 6547 /*flags*/ CAM_DIR_IN, 6548 tag_action, 6549 /*protocol*/ protocol, 6550 /*ata_flags*/ ata_flags, 6551 /*features*/ features_out, 6552 /*sector_count*/ sectors_out, 6553 /*lba*/ zone_id, 6554 /*command*/ command_out, 6555 /*device*/ 0, 6556 /*icc*/ 0, 6557 /*auxiliary*/ auxiliary, 6558 /*control*/ 0, 6559 /*data_ptr*/ data_ptr, 6560 /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */ 6561 /*cdb_storage*/ cdb_storage, 6562 /*cdb_storage_len*/ cdb_storage_len, 6563 /*minimum_cmd_size*/ 0, 6564 /*sense_len*/ SSD_FULL_SIZE, 6565 /*timeout*/ timeout); 6566 6567 bailout: 6568 return (retval); 6569 } 6570