1 /*- 2 * Implementation of SCSI Direct Access Peripheral driver for CAM. 3 * 4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 5 * 6 * Copyright (c) 1997 Justin T. Gibbs. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification, immediately at the beginning of the file. 15 * 2. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 36 #ifdef _KERNEL 37 #include "opt_da.h" 38 #include <sys/systm.h> 39 #include <sys/kernel.h> 40 #include <sys/bio.h> 41 #include <sys/sysctl.h> 42 #include <sys/taskqueue.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/conf.h> 46 #include <sys/devicestat.h> 47 #include <sys/eventhandler.h> 48 #include <sys/malloc.h> 49 #include <sys/cons.h> 50 #include <sys/endian.h> 51 #include <sys/proc.h> 52 #include <sys/sbuf.h> 53 #include <geom/geom.h> 54 #include <geom/geom_disk.h> 55 #include <machine/atomic.h> 56 #endif /* _KERNEL */ 57 58 #ifndef _KERNEL 59 #include <stdio.h> 60 #include <string.h> 61 #endif /* _KERNEL */ 62 63 #include <cam/cam.h> 64 #include <cam/cam_ccb.h> 65 #include <cam/cam_periph.h> 66 #include <cam/cam_xpt_periph.h> 67 #ifdef _KERNEL 68 #include <cam/cam_xpt_internal.h> 69 #endif /* _KERNEL */ 70 #include <cam/cam_sim.h> 71 #include <cam/cam_iosched.h> 72 73 #include <cam/scsi/scsi_message.h> 74 #include <cam/scsi/scsi_da.h> 75 76 #ifdef _KERNEL 77 /* 78 * Note that there are probe ordering dependencies here. The order isn't 79 * controlled by this enumeration, but by explicit state transitions in 80 * dastart() and dadone(). Here are some of the dependencies: 81 * 82 * 1. RC should come first, before RC16, unless there is evidence that RC16 83 * is supported. 84 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe. 85 * 3. 
The ATA probes should go in this order:
 *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
 */
/*
 * Probe/operating states for a da device instance.  The probe sequence
 * walks (a subset of) the DA_STATE_PROBE_* states — ordered per the
 * dependencies described above, with the actual transitions made in
 * dastart()/dadone() — and ends in DA_STATE_NORMAL.
 */
typedef enum {
	DA_STATE_PROBE_WP,
	DA_STATE_PROBE_RC,		/* Must run before RC16 unless RC16 known OK */
	DA_STATE_PROBE_RC16,
	DA_STATE_PROBE_LBP,
	DA_STATE_PROBE_BLK_LIMITS,
	DA_STATE_PROBE_BDC,		/* Must precede the ATA and ZONE probes */
	DA_STATE_PROBE_ATA,
	DA_STATE_PROBE_ATA_LOGDIR,
	DA_STATE_PROBE_ATA_IDDIR,
	DA_STATE_PROBE_ATA_SUP,
	DA_STATE_PROBE_ATA_ZONE,
	DA_STATE_PROBE_ZONE,
	DA_STATE_NORMAL			/* Probing complete; normal I/O */
} da_state;

/*
 * Per-instance status flags (kept in the softc).
 * Note: bit 0x000010 is unassigned — presumably a historical gap; do not
 * reuse without checking the full driver (only part of it is visible here).
 */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x000001,
	DA_FLAG_NEW_PACK	= 0x000002,
	DA_FLAG_PACK_LOCKED	= 0x000004,
	DA_FLAG_PACK_REMOVABLE	= 0x000008,
	DA_FLAG_NEED_OTAG	= 0x000020,
	DA_FLAG_WAS_OTAG	= 0x000040,
	DA_FLAG_RETRY_UA	= 0x000080,
	DA_FLAG_OPEN		= 0x000100,
	DA_FLAG_SCTX_INIT	= 0x000200,
	DA_FLAG_CAN_RC16	= 0x000400,
	DA_FLAG_PROBED		= 0x000800,
	DA_FLAG_DIRTY		= 0x001000,
	DA_FLAG_ANNOUNCED	= 0x002000,
	DA_FLAG_CAN_ATA_DMA	= 0x004000,
	DA_FLAG_CAN_ATA_LOG	= 0x008000,
	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,
	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,
	DA_FLAG_CAN_ATA_ZONE	= 0x040000,
	DA_FLAG_TUR_PENDING	= 0x080000
} da_flags;

/*
 * Per-device quirks, set from the inquiry-pattern matches in
 * da_quirk_table below.
 */
typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02,
	DA_Q_NO_PREVENT		= 0x04,
	DA_Q_4K			= 0x08,
	DA_Q_NO_RC16		= 0x10,
	DA_Q_NO_UNMAP		= 0x20,
	DA_Q_RETRY_BUSY		= 0x40,
	DA_Q_SMR_DM		= 0x80,
	DA_Q_STRICT_UNMAP	= 0x100,
	DA_Q_128KB		= 0x200
} da_quirks;

/*
 * Bit-name decoding string for da_quirks, in the kernel's %b format
 * ("\020" selects octal bit numbering; "\001".."\012" name bits 1..10,
 * matching the quirk values above in order).
 */
#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"	\
	"\012128KB"

/*
 * CCB state tags stored in the CCB's private area (see the ccb_state
 * define below).  The low bits (DA_CCB_TYPE_MASK) encode the request
 * type; DA_CCB_RETRY_UA is a flag bit ORed on top.  Values 0x08/0x09
 * are unassigned — presumably historical; verify before reuse.
 */
typedef enum {
	DA_CCB_PROBE_RC		= 0x01,
	DA_CCB_PROBE_RC16	= 0x02,
	DA_CCB_PROBE_LBP	= 0x03,
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
	DA_CCB_PROBE_BDC	= 0x05,
	DA_CCB_PROBE_ATA	= 0x06,
	DA_CCB_BUFFER_IO	= 0x07,
	DA_CCB_DUMP		= 0x0A,
	DA_CCB_DELETE		= 0x0B,
	DA_CCB_TUR		= 0x0C,
	DA_CCB_PROBE_ZONE	= 0x0D,
	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
	DA_CCB_PROBE_ATA_SUP	= 0x10,
	DA_CCB_PROBE_ATA_ZONE	= 0x11,
	DA_CCB_PROBE_WP		= 0x12,
	DA_CCB_TYPE_MASK	= 0x1F,
	DA_CCB_RETRY_UA		= 0x20
} da_ccb_state;

/*
 * Order here is important for method choice
 *
 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker
 * deletes using ATA_TRIM than the corresponding UNMAP results for a real
 * world mysql import taking 5mins.
 *
 * NOTE: the arrays da_delete_functions / da_delete_method_names /
 * da_delete_method_desc below are indexed by these values, so keep them
 * in sync.
 */
typedef enum {
	DA_DELETE_NONE,
	DA_DELETE_DISABLE,
	DA_DELETE_ATA_TRIM,
	DA_DELETE_UNMAP,
	DA_DELETE_WS16,
	DA_DELETE_WS10,
	DA_DELETE_ZERO,
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,	/* First real delete method */
	DA_DELETE_MAX = DA_DELETE_ZERO		/* Last real delete method */
} da_delete_methods;

/*
 * For SCSI, host managed drives show up as a separate device type.  For
 * ATA, host managed drives also have a different device signature.
 * XXX KDM figure out the ATA host managed signature.
 */
typedef enum {
	DA_ZONE_NONE		= 0x00,
	DA_ZONE_DRIVE_MANAGED	= 0x01,
	DA_ZONE_HOST_AWARE	= 0x02,
	DA_ZONE_HOST_MANAGED	= 0x03
} da_zone_mode;

/*
 * We distinguish between these interface cases in addition to the drive type:
 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
 * o ATA drive behind a SCSI translation layer that does not know about
 *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
 *   case, we would need to share the ATA code with the ada(4) driver.
 * o SCSI drive.
 */
typedef enum {
	DA_ZONE_IF_SCSI,
	DA_ZONE_IF_ATA_PASS,
	DA_ZONE_IF_ATA_SAT,
} da_zone_interface;

/*
 * Zoned-device capability and state flags.  The *_SUP bits record which
 * zone operations the drive reports as supported (summarized by
 * DA_ZONE_FLAG_SUP_MASK); the *_SET bits record which of the optional
 * zone-count values below have been filled in (summarized by
 * DA_ZONE_FLAG_SET_MASK).
 */
typedef enum {
	DA_ZONE_FLAG_RZ_SUP		= 0x0001,
	DA_ZONE_FLAG_OPEN_SUP		= 0x0002,
	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004,
	DA_ZONE_FLAG_FINISH_SUP		= 0x0008,
	DA_ZONE_FLAG_RWP_SUP		= 0x0010,
	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
					   DA_ZONE_FLAG_OPEN_SUP |
					   DA_ZONE_FLAG_CLOSE_SUP |
					   DA_ZONE_FLAG_FINISH_SUP |
					   DA_ZONE_FLAG_RWP_SUP),
	DA_ZONE_FLAG_URSWRZ		= 0x0020,
	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040,
	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080,
	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100,
	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
					   DA_ZONE_FLAG_MAX_SEQ_SET)
} da_zone_flags;

/* Human-readable descriptions for the zone *_SUP capability flags above. */
static struct da_zone_desc {
	da_zone_flags value;
	const char *desc;
} da_zone_desc_table[] = {
	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
};

/* Signature of a per-method delete (TRIM/UNMAP/WRITE SAME) issuer. */
typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
			      struct bio *bp);

static da_delete_func_t da_delete_trim;
static da_delete_func_t da_delete_unmap;
static da_delete_func_t da_delete_ws;

/*
 * Delete-method dispatch table, indexed by da_delete_methods.
 * DA_DELETE_NONE and DA_DELETE_DISABLE have no function; WS16, WS10 and
 * ZERO all go through da_delete_ws.
 */
static const void * da_delete_functions[] = {
	NULL,
	NULL,
	da_delete_trim,
	da_delete_unmap,
	da_delete_ws,
	da_delete_ws,
	da_delete_ws
};

/* Short and long names for each delete method, indexed by da_delete_methods. */
static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

/* Disk geometry / capacity parameters reported for the device. */
struct disk_params {
	u_int8_t  heads;
	u_int32_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number sectors */
	u_int     stripesize;
	u_int     stripeoffset;
};

/*
 * UNMAP parameter-list layout constants (sizes in bytes; see SBC UNMAP).
 * The buffer holds an 8-byte header plus up to UNMAP_MAX_RANGES 16-byte
 * block descriptors.
 */
#define UNMAP_RANGE_MAX		0xffffffff
#define UNMAP_HEAD_SIZE		8
#define UNMAP_RANGE_SIZE	16
#define UNMAP_MAX_RANGES	2048 /* Protocol Max is 4095 */
#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
				UNMAP_HEAD_SIZE)

/* Maximum block counts for the WRITE SAME(10)/(16) delete methods. */
#define WS10_MAX_BLKS		0xffff
#define WS16_MAX_BLKS		0xffffffff
/* ATA DSM TRIM ranges that fit in the same buffer, rounded to whole blocks. */
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)

/*
 * NOTE(review): flag bit above the DA_CCB_* value/flag space; its consumer
 * is not visible in this portion of the file — confirm usage before change.
 */
#define DA_WORK_TUR		(1 << 16)

/*
 * Reference-count token types; each is an index into the softc's
 * ref_flags[] array (hence DA_REF_MAX must stay last).
 */
typedef enum {
	DA_REF_OPEN = 1,
	DA_REF_OPEN_HOLD,
	DA_REF_CLOSE_HOLD,
	DA_REF_PROBE_HOLD,
	DA_REF_TUR,
	DA_REF_GEOM,
	DA_REF_SYSCTL,
	DA_REF_REPROBE,
	DA_REF_MAX		/* KEEP LAST */
} da_ref_token;

/* Per-device instance state for the da driver. */
struct da_softc {
	struct cam_iosched_softc *cam_iosched;
	struct	 bio_queue_head delete_run_queue;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	int	 refcount;		/* Active xpt_action() calls */
	da_state state;			/* Current probe/normal state */
	da_flags flags;
	da_quirks quirks;
	int	 minimum_cmd_size;
	int	 error_inject;
	int	 trim_max_ranges;
	int	 delete_available;	/* Delete methods possibly available */
	da_zone_mode			zone_mode;
	da_zone_interface		zone_interface;
	da_zone_flags			zone_flags;
	struct ata_gp_log_dir		ata_logdir;
	int				valid_logdir_len;
	struct ata_identify_log_pages	ata_iddir;
	int				valid_iddir_len;
	uint64_t			optimal_seq_zones;
	uint64_t			optimal_nonseq_zones;
	uint64_t			max_seq_zones;
	u_int			maxio;
	uint32_t		unmap_max_ranges;
	uint32_t		unmap_max_lba; /* Max LBAs in UNMAP req */
	uint32_t		unmap_gran;
	uint32_t		unmap_gran_align;
	uint64_t		ws_max_blks;
	uint64_t		trim_count;
	uint64_t		trim_ranges;
	uint64_t		trim_lbas;
	da_delete_methods	delete_method_pref;	/* User/quirk preference */
	da_delete_methods	delete_method;		/* Method in use */
	da_delete_func_t	*delete_func;
	int			unmappedio;
	int			rotating;
	int			p_type;
	struct	 disk_params params;
	struct	 disk *disk;
	union	 ccb saved_ccb;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;
	uint64_t wwpn;
	uint8_t	 unmap_buf[UNMAP_BUF_SIZE];
	struct scsi_read_capacity_data_long rcaplong;
	struct callout		mediapoll_c;
	int			ref_flags[DA_REF_MAX];	/* Indexed by da_ref_token */
#ifdef CAM_IO_STATS
	struct sysctl_ctx_list	sysctl_stats_ctx;
	struct sysctl_oid	*sysctl_stats_tree;
	u_int	errors;
	u_int	timeouts;
	u_int	invalidations;
#endif
#define DA_ANNOUNCETMP_SZ 160
	char			announce_temp[DA_ANNOUNCETMP_SZ];
#define DA_ANNOUNCE_SZ 400
	char			announcebuf[DA_ANNOUNCE_SZ];
};

/*
 * Set or clear a delete method's bit in softc->delete_available.
 * NOTE(review): bare if/else macro — invoking it directly before an `else`
 * (without braces around the invocation) would mis-parse; a do { } while (0)
 * wrapper would be safer (CERT PRE10-C).  Left unchanged here because not
 * all call sites are visible in this portion of the file.
 */
#define dadeleteflag(softc, delete_method, enable)			\
	if (enable) {							\
		softc->delete_available |= (1 << delete_method);	\
	} else {							\
		softc->delete_available &= ~(1 << delete_method);	\
	}

/* An inquiry match pattern and the quirks to apply to matching devices. */
struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	da_quirks quirks;
};

/* Vendor strings shared by multiple quirk entries below. */
static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";

static struct da_quirk_entry da_quirk_table[] =
{
	/* SPI, FC devices */
	{
		/*
		 * Fujitsu M2513A MO drives.
		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
		 * Reported by: W.Scholten <whs@xs4all.nl>
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/* See above. */
		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This particular Fujitsu drive doesn't like the
		 * synchronize cache command.
413 * Reported by: Tom Jackson <toj@gorilla.net> 414 */ 415 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, 416 /*quirks*/ DA_Q_NO_SYNC_CACHE 417 }, 418 { 419 /* 420 * This drive doesn't like the synchronize cache command 421 * either. Reported by: Matthew Jacob <mjacob@feral.com> 422 * in NetBSD PR kern/6027, August 24, 1998. 423 */ 424 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, 425 /*quirks*/ DA_Q_NO_SYNC_CACHE 426 }, 427 { 428 /* 429 * This drive doesn't like the synchronize cache command 430 * either. Reported by: Hellmuth Michaelis (hm@kts.org) 431 * (PR 8882). 432 */ 433 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, 434 /*quirks*/ DA_Q_NO_SYNC_CACHE 435 }, 436 { 437 /* 438 * Doesn't like the synchronize cache command. 439 * Reported by: Blaz Zupan <blaz@gold.amis.net> 440 */ 441 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, 442 /*quirks*/ DA_Q_NO_SYNC_CACHE 443 }, 444 { 445 /* 446 * Doesn't like the synchronize cache command. 447 * Reported by: Blaz Zupan <blaz@gold.amis.net> 448 */ 449 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, 450 /*quirks*/ DA_Q_NO_SYNC_CACHE 451 }, 452 { 453 /* 454 * Doesn't like the synchronize cache command. 455 */ 456 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, 457 /*quirks*/ DA_Q_NO_SYNC_CACHE 458 }, 459 { 460 /* 461 * Doesn't like the synchronize cache command. 462 * Reported by: walter@pelissero.de 463 */ 464 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, 465 /*quirks*/ DA_Q_NO_SYNC_CACHE 466 }, 467 { 468 /* 469 * Doesn't work correctly with 6 byte reads/writes. 470 * Returns illegal request, and points to byte 9 of the 471 * 6-byte CDB. 472 * Reported by: Adam McDougall <bsdx@spawnet.com> 473 */ 474 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, 475 /*quirks*/ DA_Q_NO_6_BYTE 476 }, 477 { 478 /* See above. */ 479 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, 480 /*quirks*/ DA_Q_NO_6_BYTE 481 }, 482 { 483 /* 484 * Doesn't like the synchronize cache command. 
485 * Reported by: walter@pelissero.de 486 */ 487 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, 488 /*quirks*/ DA_Q_NO_SYNC_CACHE 489 }, 490 { 491 /* 492 * The CISS RAID controllers do not support SYNC_CACHE 493 */ 494 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, 495 /*quirks*/ DA_Q_NO_SYNC_CACHE 496 }, 497 { 498 /* 499 * The STEC SSDs sometimes hang on UNMAP. 500 */ 501 {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, 502 /*quirks*/ DA_Q_NO_UNMAP 503 }, 504 { 505 /* 506 * VMware returns BUSY status when storage has transient 507 * connectivity problems, so better wait. 508 * Also VMware returns odd errors on misaligned UNMAPs. 509 */ 510 {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, 511 /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP 512 }, 513 /* USB mass storage devices supported by umass(4) */ 514 { 515 /* 516 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player 517 * PR: kern/51675 518 */ 519 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, 520 /*quirks*/ DA_Q_NO_SYNC_CACHE 521 }, 522 { 523 /* 524 * Power Quotient Int. 
(PQI) USB flash key 525 * PR: kern/53067 526 */ 527 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", 528 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 529 }, 530 { 531 /* 532 * Creative Nomad MUVO mp3 player (USB) 533 * PR: kern/53094 534 */ 535 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, 536 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 537 }, 538 { 539 /* 540 * Jungsoft NEXDISK USB flash key 541 * PR: kern/54737 542 */ 543 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, 544 /*quirks*/ DA_Q_NO_SYNC_CACHE 545 }, 546 { 547 /* 548 * FreeDik USB Mini Data Drive 549 * PR: kern/54786 550 */ 551 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", 552 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 553 }, 554 { 555 /* 556 * Sigmatel USB Flash MP3 Player 557 * PR: kern/57046 558 */ 559 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, 560 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 561 }, 562 { 563 /* 564 * Neuros USB Digital Audio Computer 565 * PR: kern/63645 566 */ 567 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. 
audio comp.", 568 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 569 }, 570 { 571 /* 572 * SEAGRAND NP-900 MP3 Player 573 * PR: kern/64563 574 */ 575 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, 576 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 577 }, 578 { 579 /* 580 * iRiver iFP MP3 player (with UMS Firmware) 581 * PR: kern/54881, i386/63941, kern/66124 582 */ 583 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, 584 /*quirks*/ DA_Q_NO_SYNC_CACHE 585 }, 586 { 587 /* 588 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 589 * PR: kern/70158 590 */ 591 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, 592 /*quirks*/ DA_Q_NO_SYNC_CACHE 593 }, 594 { 595 /* 596 * ZICPlay USB MP3 Player with FM 597 * PR: kern/75057 598 */ 599 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, 600 /*quirks*/ DA_Q_NO_SYNC_CACHE 601 }, 602 { 603 /* 604 * TEAC USB floppy mechanisms 605 */ 606 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, 607 /*quirks*/ DA_Q_NO_SYNC_CACHE 608 }, 609 { 610 /* 611 * Kingston DataTraveler II+ USB Pen-Drive. 612 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org> 613 */ 614 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", 615 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 616 }, 617 { 618 /* 619 * USB DISK Pro PMAP 620 * Reported by: jhs 621 * PR: usb/96381 622 */ 623 {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, 624 /*quirks*/ DA_Q_NO_SYNC_CACHE 625 }, 626 { 627 /* 628 * Motorola E398 Mobile Phone (TransFlash memory card). 629 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl> 630 * PR: usb/89889 631 */ 632 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", 633 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 634 }, 635 { 636 /* 637 * Qware BeatZkey! 
Pro 638 * PR: usb/79164 639 */ 640 {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", 641 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 642 }, 643 { 644 /* 645 * Time DPA20B 1GB MP3 Player 646 * PR: usb/81846 647 */ 648 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", 649 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 650 }, 651 { 652 /* 653 * Samsung USB key 128Mb 654 * PR: usb/90081 655 */ 656 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", 657 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 658 }, 659 { 660 /* 661 * Kingston DataTraveler 2.0 USB Flash memory. 662 * PR: usb/89196 663 */ 664 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", 665 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 666 }, 667 { 668 /* 669 * Creative MUVO Slim mp3 player (USB) 670 * PR: usb/86131 671 */ 672 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", 673 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT 674 }, 675 { 676 /* 677 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) 678 * PR: usb/80487 679 */ 680 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", 681 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 682 }, 683 { 684 /* 685 * SanDisk Micro Cruzer 128MB 686 * PR: usb/75970 687 */ 688 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", 689 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 690 }, 691 { 692 /* 693 * TOSHIBA TransMemory USB sticks 694 * PR: kern/94660 695 */ 696 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", 697 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 698 }, 699 { 700 /* 701 * PNY USB 3.0 Flash Drives 702 */ 703 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*", 704 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16 705 }, 706 { 707 /* 708 * PNY USB Flash keys 709 * PR: usb/75578, usb/72344, usb/65436 710 */ 711 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", 712 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 713 }, 714 { 715 /* 716 * Genesys GL3224 717 */ 718 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", 719 "120?"}, /*quirks*/ 
DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16 720 }, 721 { 722 /* 723 * Genesys 6-in-1 Card Reader 724 * PR: usb/94647 725 */ 726 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", 727 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 728 }, 729 { 730 /* 731 * Rekam Digital CAMERA 732 * PR: usb/98713 733 */ 734 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", 735 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 736 }, 737 { 738 /* 739 * iRiver H10 MP3 player 740 * PR: usb/102547 741 */ 742 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", 743 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 744 }, 745 { 746 /* 747 * iRiver U10 MP3 player 748 * PR: usb/92306 749 */ 750 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", 751 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 752 }, 753 { 754 /* 755 * X-Micro Flash Disk 756 * PR: usb/96901 757 */ 758 {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", 759 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 760 }, 761 { 762 /* 763 * EasyMP3 EM732X USB 2.0 Flash MP3 Player 764 * PR: usb/96546 765 */ 766 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", 767 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 768 }, 769 { 770 /* 771 * Denver MP3 player 772 * PR: usb/107101 773 */ 774 {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", 775 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 776 }, 777 { 778 /* 779 * Philips USB Key Audio KEY013 780 * PR: usb/68412 781 */ 782 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, 783 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 784 }, 785 { 786 /* 787 * JNC MP3 Player 788 * PR: usb/94439 789 */ 790 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", 791 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 792 }, 793 { 794 /* 795 * SAMSUNG MP0402H 796 * PR: usb/108427 797 */ 798 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, 799 /*quirks*/ DA_Q_NO_SYNC_CACHE 800 }, 801 { 802 /* 803 * I/O Magic USB flash - Giga Bank 804 * PR: usb/108810 805 */ 806 {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, 807 /*quirks*/ DA_Q_NO_SYNC_CACHE 808 }, 
809 { 810 /* 811 * JoyFly 128mb USB Flash Drive 812 * PR: 96133 813 */ 814 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", 815 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 816 }, 817 { 818 /* 819 * ChipsBnk usb stick 820 * PR: 103702 821 */ 822 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", 823 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 824 }, 825 { 826 /* 827 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A 828 * PR: 129858 829 */ 830 {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", 831 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 832 }, 833 { 834 /* 835 * Samsung YP-U3 mp3-player 836 * PR: 125398 837 */ 838 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", 839 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 840 }, 841 { 842 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", 843 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 844 }, 845 { 846 /* 847 * Sony Cyber-Shot DSC cameras 848 * PR: usb/137035 849 */ 850 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, 851 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT 852 }, 853 { 854 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", 855 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT 856 }, 857 { 858 /* At least several Transcent USB sticks lie on RC16. */ 859 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", 860 "*"}, /*quirks*/ DA_Q_NO_RC16 861 }, 862 { 863 /* 864 * I-O Data USB Flash Disk 865 * PR: usb/211716 866 */ 867 {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*", 868 "*"}, /*quirks*/ DA_Q_NO_RC16 869 }, 870 { 871 /* 872 * SLC CHIPFANCIER USB drives 873 * PR: usb/234503 (RC10 right, RC16 wrong) 874 * 16GB, 32GB and 128GB confirmed to have same issue 875 */ 876 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER", 877 "*"}, /*quirks*/ DA_Q_NO_RC16 878 }, 879 /* ATA/SATA devices over SAS/USB/... 
*/ 880 { 881 /* Sandisk X400 */ 882 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" }, 883 /*quirks*/DA_Q_128KB 884 }, 885 { 886 /* Hitachi Advanced Format (4k) drives */ 887 { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, 888 /*quirks*/DA_Q_4K 889 }, 890 { 891 /* Micron Advanced Format (4k) drives */ 892 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" }, 893 /*quirks*/DA_Q_4K 894 }, 895 { 896 /* Samsung Advanced Format (4k) drives */ 897 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, 898 /*quirks*/DA_Q_4K 899 }, 900 { 901 /* Samsung Advanced Format (4k) drives */ 902 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, 903 /*quirks*/DA_Q_4K 904 }, 905 { 906 /* Samsung Advanced Format (4k) drives */ 907 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, 908 /*quirks*/DA_Q_4K 909 }, 910 { 911 /* Samsung Advanced Format (4k) drives */ 912 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, 913 /*quirks*/DA_Q_4K 914 }, 915 { 916 /* Seagate Barracuda Green Advanced Format (4k) drives */ 917 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, 918 /*quirks*/DA_Q_4K 919 }, 920 { 921 /* Seagate Barracuda Green Advanced Format (4k) drives */ 922 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, 923 /*quirks*/DA_Q_4K 924 }, 925 { 926 /* Seagate Barracuda Green Advanced Format (4k) drives */ 927 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, 928 /*quirks*/DA_Q_4K 929 }, 930 { 931 /* Seagate Barracuda Green Advanced Format (4k) drives */ 932 { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, 933 /*quirks*/DA_Q_4K 934 }, 935 { 936 /* Seagate Barracuda Green Advanced Format (4k) drives */ 937 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, 938 /*quirks*/DA_Q_4K 939 }, 940 { 941 /* Seagate Barracuda Green Advanced Format (4k) drives */ 942 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, 943 /*quirks*/DA_Q_4K 944 }, 945 { 946 /* Seagate Momentus Advanced Format (4k) 
drives */ 947 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, 948 /*quirks*/DA_Q_4K 949 }, 950 { 951 /* Seagate Momentus Advanced Format (4k) drives */ 952 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, 953 /*quirks*/DA_Q_4K 954 }, 955 { 956 /* Seagate Momentus Advanced Format (4k) drives */ 957 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, 958 /*quirks*/DA_Q_4K 959 }, 960 { 961 /* Seagate Momentus Advanced Format (4k) drives */ 962 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, 963 /*quirks*/DA_Q_4K 964 }, 965 { 966 /* Seagate Momentus Advanced Format (4k) drives */ 967 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, 968 /*quirks*/DA_Q_4K 969 }, 970 { 971 /* Seagate Momentus Advanced Format (4k) drives */ 972 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, 973 /*quirks*/DA_Q_4K 974 }, 975 { 976 /* Seagate Momentus Advanced Format (4k) drives */ 977 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, 978 /*quirks*/DA_Q_4K 979 }, 980 { 981 /* Seagate Momentus Advanced Format (4k) drives */ 982 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, 983 /*quirks*/DA_Q_4K 984 }, 985 { 986 /* Seagate Momentus Advanced Format (4k) drives */ 987 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, 988 /*quirks*/DA_Q_4K 989 }, 990 { 991 /* Seagate Momentus Advanced Format (4k) drives */ 992 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, 993 /*quirks*/DA_Q_4K 994 }, 995 { 996 /* Seagate Momentus Advanced Format (4k) drives */ 997 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, 998 /*quirks*/DA_Q_4K 999 }, 1000 { 1001 /* Seagate Momentus Advanced Format (4k) drives */ 1002 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, 1003 /*quirks*/DA_Q_4K 1004 }, 1005 { 1006 /* Seagate Momentus Advanced Format (4k) drives */ 1007 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, 1008 /*quirks*/DA_Q_4K 1009 }, 1010 { 1011 /* Seagate Momentus Advanced Format (4k) drives */ 1012 { 
T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, 1013 /*quirks*/DA_Q_4K 1014 }, 1015 { 1016 /* Seagate Momentus Thin Advanced Format (4k) drives */ 1017 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, 1018 /*quirks*/DA_Q_4K 1019 }, 1020 { 1021 /* Seagate Momentus Thin Advanced Format (4k) drives */ 1022 { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, 1023 /*quirks*/DA_Q_4K 1024 }, 1025 { 1026 /* WDC Caviar Green Advanced Format (4k) drives */ 1027 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, 1028 /*quirks*/DA_Q_4K 1029 }, 1030 { 1031 /* WDC Caviar Green Advanced Format (4k) drives */ 1032 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, 1033 /*quirks*/DA_Q_4K 1034 }, 1035 { 1036 /* WDC Caviar Green Advanced Format (4k) drives */ 1037 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, 1038 /*quirks*/DA_Q_4K 1039 }, 1040 { 1041 /* WDC Caviar Green Advanced Format (4k) drives */ 1042 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, 1043 /*quirks*/DA_Q_4K 1044 }, 1045 { 1046 /* WDC Caviar Green Advanced Format (4k) drives */ 1047 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, 1048 /*quirks*/DA_Q_4K 1049 }, 1050 { 1051 /* WDC Caviar Green Advanced Format (4k) drives */ 1052 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, 1053 /*quirks*/DA_Q_4K 1054 }, 1055 { 1056 /* WDC Caviar Green Advanced Format (4k) drives */ 1057 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, 1058 /*quirks*/DA_Q_4K 1059 }, 1060 { 1061 /* WDC Caviar Green Advanced Format (4k) drives */ 1062 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, 1063 /*quirks*/DA_Q_4K 1064 }, 1065 { 1066 /* WDC Scorpio Black Advanced Format (4k) drives */ 1067 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, 1068 /*quirks*/DA_Q_4K 1069 }, 1070 { 1071 /* WDC Scorpio Black Advanced Format (4k) drives */ 1072 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, 1073 /*quirks*/DA_Q_4K 1074 }, 1075 { 1076 
/* WDC Scorpio Black Advanced Format (4k) drives */ 1077 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, 1078 /*quirks*/DA_Q_4K 1079 }, 1080 { 1081 /* WDC Scorpio Black Advanced Format (4k) drives */ 1082 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, 1083 /*quirks*/DA_Q_4K 1084 }, 1085 { 1086 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1087 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, 1088 /*quirks*/DA_Q_4K 1089 }, 1090 { 1091 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1092 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, 1093 /*quirks*/DA_Q_4K 1094 }, 1095 { 1096 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1097 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, 1098 /*quirks*/DA_Q_4K 1099 }, 1100 { 1101 /* WDC Scorpio Blue Advanced Format (4k) drives */ 1102 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, 1103 /*quirks*/DA_Q_4K 1104 }, 1105 { 1106 /* 1107 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1) 1108 * PR: usb/97472 1109 */ 1110 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"}, 1111 /*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE 1112 }, 1113 { 1114 /* 1115 * Olympus digital cameras (D-370) 1116 * PR: usb/97472 1117 */ 1118 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"}, 1119 /*quirks*/ DA_Q_NO_6_BYTE 1120 }, 1121 { 1122 /* 1123 * Olympus digital cameras (E-100RS, E-10). 
1124 * PR: usb/97472 1125 */ 1126 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"}, 1127 /*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE 1128 }, 1129 { 1130 /* 1131 * Olympus FE-210 camera 1132 */ 1133 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", 1134 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1135 }, 1136 { 1137 /* 1138 * Pentax Digital Camera 1139 * PR: usb/93389 1140 */ 1141 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PENTAX", "DIGITAL CAMERA", 1142 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1143 }, 1144 { 1145 /* 1146 * LG UP3S MP3 player 1147 */ 1148 {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", 1149 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1150 }, 1151 { 1152 /* 1153 * Laser MP3-2GA13 MP3 player 1154 */ 1155 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", 1156 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE 1157 }, 1158 { 1159 /* 1160 * LaCie external 250GB Hard drive des by Porsche 1161 * Submitted by: Ben Stuyts <ben@altesco.nl> 1162 * PR: 121474 1163 */ 1164 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, 1165 /*quirks*/ DA_Q_NO_SYNC_CACHE 1166 }, 1167 /* SATA SSDs */ 1168 { 1169 /* 1170 * Corsair Force 2 SSDs 1171 * 4k optimised & trim only works in 4k requests + 4k aligned 1172 */ 1173 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, 1174 /*quirks*/DA_Q_4K 1175 }, 1176 { 1177 /* 1178 * Corsair Force 3 SSDs 1179 * 4k optimised & trim only works in 4k requests + 4k aligned 1180 */ 1181 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, 1182 /*quirks*/DA_Q_4K 1183 }, 1184 { 1185 /* 1186 * Corsair Neutron GTX SSDs 1187 * 4k optimised & trim only works in 4k requests + 4k aligned 1188 */ 1189 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, 1190 /*quirks*/DA_Q_4K 1191 }, 1192 { 1193 /* 1194 * Corsair Force GT & GS SSDs 1195 * 4k optimised & trim only works in 4k requests + 4k aligned 1196 */ 1197 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, 1198 /*quirks*/DA_Q_4K 1199 }, 1200 { 1201 /* 1202 * Crucial M4 SSDs 
1203 * 4k optimised & trim only works in 4k requests + 4k aligned 1204 */ 1205 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, 1206 /*quirks*/DA_Q_4K 1207 }, 1208 { 1209 /* 1210 * Crucial RealSSD C300 SSDs 1211 * 4k optimised 1212 */ 1213 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", 1214 "*" }, /*quirks*/DA_Q_4K 1215 }, 1216 { 1217 /* 1218 * Intel 320 Series SSDs 1219 * 4k optimised & trim only works in 4k requests + 4k aligned 1220 */ 1221 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, 1222 /*quirks*/DA_Q_4K 1223 }, 1224 { 1225 /* 1226 * Intel 330 Series SSDs 1227 * 4k optimised & trim only works in 4k requests + 4k aligned 1228 */ 1229 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, 1230 /*quirks*/DA_Q_4K 1231 }, 1232 { 1233 /* 1234 * Intel 510 Series SSDs 1235 * 4k optimised & trim only works in 4k requests + 4k aligned 1236 */ 1237 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, 1238 /*quirks*/DA_Q_4K 1239 }, 1240 { 1241 /* 1242 * Intel 520 Series SSDs 1243 * 4k optimised & trim only works in 4k requests + 4k aligned 1244 */ 1245 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, 1246 /*quirks*/DA_Q_4K 1247 }, 1248 { 1249 /* 1250 * Intel S3610 Series SSDs 1251 * 4k optimised & trim only works in 4k requests + 4k aligned 1252 */ 1253 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" }, 1254 /*quirks*/DA_Q_4K 1255 }, 1256 { 1257 /* 1258 * Intel X25-M Series SSDs 1259 * 4k optimised & trim only works in 4k requests + 4k aligned 1260 */ 1261 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, 1262 /*quirks*/DA_Q_4K 1263 }, 1264 { 1265 /* 1266 * Kingston E100 Series SSDs 1267 * 4k optimised & trim only works in 4k requests + 4k aligned 1268 */ 1269 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, 1270 /*quirks*/DA_Q_4K 1271 }, 1272 { 1273 /* 1274 * Kingston HyperX 3k SSDs 1275 * 4k optimised & trim only works in 4k requests + 4k aligned 1276 */ 1277 { 
T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, 1278 /*quirks*/DA_Q_4K 1279 }, 1280 { 1281 /* 1282 * Marvell SSDs (entry taken from OpenSolaris) 1283 * 4k optimised & trim only works in 4k requests + 4k aligned 1284 */ 1285 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, 1286 /*quirks*/DA_Q_4K 1287 }, 1288 { 1289 /* 1290 * OCZ Agility 2 SSDs 1291 * 4k optimised & trim only works in 4k requests + 4k aligned 1292 */ 1293 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, 1294 /*quirks*/DA_Q_4K 1295 }, 1296 { 1297 /* 1298 * OCZ Agility 3 SSDs 1299 * 4k optimised & trim only works in 4k requests + 4k aligned 1300 */ 1301 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, 1302 /*quirks*/DA_Q_4K 1303 }, 1304 { 1305 /* 1306 * OCZ Deneva R Series SSDs 1307 * 4k optimised & trim only works in 4k requests + 4k aligned 1308 */ 1309 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, 1310 /*quirks*/DA_Q_4K 1311 }, 1312 { 1313 /* 1314 * OCZ Vertex 2 SSDs (inc pro series) 1315 * 4k optimised & trim only works in 4k requests + 4k aligned 1316 */ 1317 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, 1318 /*quirks*/DA_Q_4K 1319 }, 1320 { 1321 /* 1322 * OCZ Vertex 3 SSDs 1323 * 4k optimised & trim only works in 4k requests + 4k aligned 1324 */ 1325 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, 1326 /*quirks*/DA_Q_4K 1327 }, 1328 { 1329 /* 1330 * OCZ Vertex 4 SSDs 1331 * 4k optimised & trim only works in 4k requests + 4k aligned 1332 */ 1333 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, 1334 /*quirks*/DA_Q_4K 1335 }, 1336 { 1337 /* 1338 * Samsung 750 Series SSDs 1339 * 4k optimised & trim only works in 4k requests + 4k aligned 1340 */ 1341 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" }, 1342 /*quirks*/DA_Q_4K 1343 }, 1344 { 1345 /* 1346 * Samsung 830 Series SSDs 1347 * 4k optimised & trim only works in 4k requests + 4k aligned 1348 */ 1349 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", 
"SAMSUNG SSD 830 Series*", "*" }, 1350 /*quirks*/DA_Q_4K 1351 }, 1352 { 1353 /* 1354 * Samsung 840 SSDs 1355 * 4k optimised & trim only works in 4k requests + 4k aligned 1356 */ 1357 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, 1358 /*quirks*/DA_Q_4K 1359 }, 1360 { 1361 /* 1362 * Samsung 845 SSDs 1363 * 4k optimised & trim only works in 4k requests + 4k aligned 1364 */ 1365 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" }, 1366 /*quirks*/DA_Q_4K 1367 }, 1368 { 1369 /* 1370 * Samsung 850 SSDs 1371 * 4k optimised & trim only works in 4k requests + 4k aligned 1372 */ 1373 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, 1374 /*quirks*/DA_Q_4K 1375 }, 1376 { 1377 /* 1378 * Samsung 843T Series SSDs (MZ7WD*) 1379 * Samsung PM851 Series SSDs (MZ7TE*) 1380 * Samsung PM853T Series SSDs (MZ7GE*) 1381 * Samsung SM863 Series SSDs (MZ7KM*) 1382 * 4k optimised 1383 */ 1384 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, 1385 /*quirks*/DA_Q_4K 1386 }, 1387 { 1388 /* 1389 * Same as for SAMSUNG MZ7* but enable the quirks for SSD 1390 * starting with MZ7* too 1391 */ 1392 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" }, 1393 /*quirks*/DA_Q_4K 1394 }, 1395 { 1396 /* 1397 * SuperTalent TeraDrive CT SSDs 1398 * 4k optimised & trim only works in 4k requests + 4k aligned 1399 */ 1400 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, 1401 /*quirks*/DA_Q_4K 1402 }, 1403 { 1404 /* 1405 * XceedIOPS SATA SSDs 1406 * 4k optimised 1407 */ 1408 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, 1409 /*quirks*/DA_Q_4K 1410 }, 1411 { 1412 /* 1413 * Hama Innostor USB-Stick 1414 */ 1415 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, 1416 /*quirks*/DA_Q_NO_RC16 1417 }, 1418 { 1419 /* 1420 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) 1421 * Drive Managed SATA hard drive. This drive doesn't report 1422 * in firmware that it is a drive managed SMR drive. 
	 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" },
		/*quirks*/DA_Q_SMR_DM
	},
	{
		/*
		 * MX-ES USB Drive by Mach Xtreme
		 */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
		/*quirks*/DA_Q_NO_RC16
	},
};

/*
 * Forward declarations for the periph entry points, sysctl handlers and
 * probe-state completion routines (one dadone_* per probe step; see the
 * probe ordering notes at the top of the file).
 */
static	disk_strategy_t	dastrategy;
static	dumper_t	dadump;
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		dasysctlinit(void *context, int pending);
static	int		dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
static	int		dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
static	int		dazonemodesysctl(SYSCTL_HANDLER_ARGS);
static	int		dazonesupsysctl(SYSCTL_HANDLER_ARGS);
static	int		dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
static	void		dadeletemethodset(struct da_softc *softc,
					  da_delete_methods delete_method);
static	off_t		dadeletemaxsize(struct da_softc *softc,
					da_delete_methods delete_method);
static	void		dadeletemethodchoose(struct da_softc *softc,
					     da_delete_methods default_method);
static	void		daprobedone(struct cam_periph *periph, union ccb *ccb);

static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dazonedone(struct cam_periph *periph, union ccb *ccb);
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static	void		dadone_probewp(struct cam_periph *periph,
				       union ccb *done_ccb);
static	void		dadone_proberc(struct cam_periph *periph,
				       union ccb *done_ccb);
static	void		dadone_probelbp(struct cam_periph *periph,
					union ccb *done_ccb);
static	void		dadone_probeblklimits(struct cam_periph *periph,
					      union ccb *done_ccb);
static	void		dadone_probebdc(struct cam_periph *periph,
					union ccb *done_ccb);
static	void		dadone_probeata(struct cam_periph *periph,
					union ccb *done_ccb);
static	void		dadone_probeatalogdir(struct cam_periph *periph,
					      union ccb *done_ccb);
static	void		dadone_probeataiddir(struct cam_periph *periph,
					     union ccb *done_ccb);
static	void		dadone_probeatasup(struct cam_periph *periph,
					   union ccb *done_ccb);
static	void		dadone_probeatazone(struct cam_periph *periph,
					    union ccb *done_ccb);
static	void		dadone_probezone(struct cam_periph *periph,
					 union ccb *done_ccb);
static	void		dadone_tur(struct cam_periph *periph,
				   union ccb *done_ccb);
static	int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		daprevent(struct cam_periph *periph, int action);
static	void		dareprobe(struct cam_periph *periph);
static	void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
				  uint64_t maxsector,
				  struct scsi_read_capacity_data_long *rcaplong,
				  size_t rcap_size);
static	timeout_t	dasendorderedtag;
static	void		dashutdown(void *arg, int howto);
static	timeout_t	damediapoll;

/*
 * Compile-time defaults for the tunables below; each may be overridden
 * in the kernel config (opt_da.h) by defining it before this point.
 */
#ifndef	DA_DEFAULT_POLL_PERIOD
#define	DA_DEFAULT_POLL_PERIOD	3
#endif

#ifndef	DA_DEFAULT_TIMEOUT
#define	DA_DEFAULT_TIMEOUT	60	/* Timeout in seconds */
#endif

#ifndef	DA_DEFAULT_SOFTTIMEOUT
#define	DA_DEFAULT_SOFTTIMEOUT	0
#endif

#ifndef	DA_DEFAULT_RETRY
#define	DA_DEFAULT_RETRY	4
#endif

#ifndef	DA_DEFAULT_SEND_ORDERED
#define	DA_DEFAULT_SEND_ORDERED	1
#endif

/* Run-time knobs, exported via the kern.cam.da sysctl tree below. */
static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
static int da_retry_count = DA_DEFAULT_RETRY;
static int da_default_timeout = DA_DEFAULT_TIMEOUT;
static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
static int da_disable_wp_detection = 0;

static SYSCTL_NODE(_kern_cam, OID_AUTO, da,
    CTLFLAG_RD, 0,
    "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
	   &da_poll_period, 0, "Media polling period in seconds");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
	   &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
	   &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
	   &da_send_ordered, 0, "Send Ordered Tags");
SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN,
	   &da_disable_wp_detection, 0,
	   "Disable detection of write-protected disks");

/* Soft timeout is an sbintime_t, so it gets a PROC handler and an
 * int64 tunable rather than a plain SYSCTL_INT. */
SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I",
    "Soft I/O timeout (ms)");
TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef	DA_ORDEREDTAG_INTERVAL
#define	DA_ORDEREDTAG_INTERVAL	4
#endif

/* Registration record tying the "da" periph name to its init routine. */
static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");

/*
 * This driver takes out references / holds in well defined pairs, never
 * recursively. These macros / inline functions enforce those rules. They
 * are only enabled with DA_TRACK_REFS or INVARIANTS. If DA_TRACK_REFS is
 * defined to be 2 or larger, the tracking also includes debug printfs.
 */
#if defined(DA_TRACK_REFS) || defined(INVARIANTS)

#ifndef	DA_TRACK_REFS
#define	DA_TRACK_REFS	1
#endif

#if DA_TRACK_REFS > 1
/* Human-readable names for each da_ref_token, indexed by token value. */
static const char *da_ref_text[] = {
	"bogus",
	"open",
	"open hold",
	"close hold",
	"reprobe hold",
	"Test Unit Ready",
	"Geom",
	"sysctl",
	"reprobe",
	"max -- also bogus"
};

#define	DA_PERIPH_PRINT(periph, msg, args...)		\
	CAM_PERIPH_PRINT(periph, msg, ##args)
#else
/* Tracking without debug output: the print macro expands to nothing. */
#define	DA_PERIPH_PRINT(periph, msg, args...)
1601 #endif 1602 1603 static inline void 1604 token_sanity(da_ref_token token) 1605 { 1606 if ((unsigned)token >= DA_REF_MAX) 1607 panic("Bad token value passed in %d\n", token); 1608 } 1609 1610 static inline int 1611 da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token) 1612 { 1613 int err = cam_periph_hold(periph, priority); 1614 1615 token_sanity(token); 1616 DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n", 1617 da_ref_text[token], token, err); 1618 if (err == 0) { 1619 int cnt; 1620 struct da_softc *softc = periph->softc; 1621 1622 cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1); 1623 if (cnt != 0) 1624 panic("Re-holding for reason %d, cnt = %d", token, cnt); 1625 } 1626 return (err); 1627 } 1628 1629 static inline void 1630 da_periph_unhold(struct cam_periph *periph, da_ref_token token) 1631 { 1632 int cnt; 1633 struct da_softc *softc = periph->softc; 1634 1635 token_sanity(token); 1636 DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n", 1637 da_ref_text[token], token); 1638 cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); 1639 if (cnt != 1) 1640 panic("Unholding %d with cnt = %d", token, cnt); 1641 cam_periph_unhold(periph); 1642 } 1643 1644 static inline int 1645 da_periph_acquire(struct cam_periph *periph, da_ref_token token) 1646 { 1647 int err = cam_periph_acquire(periph); 1648 1649 token_sanity(token); 1650 DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n", 1651 da_ref_text[token], token, err); 1652 if (err == 0) { 1653 int cnt; 1654 struct da_softc *softc = periph->softc; 1655 1656 cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1); 1657 if (cnt != 0) 1658 panic("Re-refing for reason %d, cnt = %d", token, cnt); 1659 } 1660 return (err); 1661 } 1662 1663 static inline void 1664 da_periph_release(struct cam_periph *periph, da_ref_token token) 1665 { 1666 int cnt; 1667 struct da_softc *softc = periph->softc; 1668 1669 token_sanity(token); 1670 DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n", 
1671 da_ref_text[token], token); 1672 cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); 1673 if (cnt != 1) 1674 panic("Releasing %d with cnt = %d", token, cnt); 1675 cam_periph_release(periph); 1676 } 1677 1678 static inline void 1679 da_periph_release_locked(struct cam_periph *periph, da_ref_token token) 1680 { 1681 int cnt; 1682 struct da_softc *softc = periph->softc; 1683 1684 token_sanity(token); 1685 DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n", 1686 da_ref_text[token], token); 1687 cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1); 1688 if (cnt != 1) 1689 panic("Unholding %d with cnt = %d", token, cnt); 1690 cam_periph_release_locked(periph); 1691 } 1692 1693 #define cam_periph_hold POISON 1694 #define cam_periph_unhold POISON 1695 #define cam_periph_acquire POISON 1696 #define cam_periph_release POISON 1697 #define cam_periph_release_locked POISON 1698 1699 #else 1700 #define da_periph_hold(periph, prio, token) cam_periph_hold((periph), (prio)) 1701 #define da_periph_unhold(periph, token) cam_periph_unhold((periph)) 1702 #define da_periph_acquire(periph, token) cam_periph_acquire((periph)) 1703 #define da_periph_release(periph, token) cam_periph_release((periph)) 1704 #define da_periph_release_locked(periph, token) cam_periph_release_locked((periph)) 1705 #endif 1706 1707 static int 1708 daopen(struct disk *dp) 1709 { 1710 struct cam_periph *periph; 1711 struct da_softc *softc; 1712 int error; 1713 1714 periph = (struct cam_periph *)dp->d_drv1; 1715 if (da_periph_acquire(periph, DA_REF_OPEN) != 0) { 1716 return (ENXIO); 1717 } 1718 1719 cam_periph_lock(periph); 1720 if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) { 1721 cam_periph_unlock(periph); 1722 da_periph_release(periph, DA_REF_OPEN); 1723 return (error); 1724 } 1725 1726 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, 1727 ("daopen\n")); 1728 1729 softc = (struct da_softc *)periph->softc; 1730 dareprobe(periph); 1731 1732 /* Wait 
	   for the disk size update. */
	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
	    "dareprobe", 0);
	if (error != 0)
		xpt_print(periph->path, "unable to retrieve capacity data\n");

	if (periph->flags & CAM_PERIPH_INVALID)
		error = ENXIO;

	/* Lock the media in place on removable packs, unless quirked off. */
	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
		daprevent(periph, PR_PREVENT);

	if (error == 0) {
		softc->flags &= ~DA_FLAG_PACK_INVALID;
		softc->flags |= DA_FLAG_OPEN;
	}

	da_periph_unhold(periph, DA_REF_OPEN_HOLD);
	cam_periph_unlock(periph);

	/* On failure, drop the OPEN ref too; on success daclose() drops it. */
	if (error != 0)
		da_periph_release(periph, DA_REF_OPEN);

	return (error);
}

/*
 * GEOM disk close entry point: flush a dirty write cache, re-allow medium
 * removal, then drop the OPEN reference taken in daopen().
 */
static int
daclose(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daclose\n"));

	if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) {

		/* Flush disk cache. */
		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
			    /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG,
			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
			    5 * 60 * 1000);
			cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
			    softc->disk->d_devstat);
			softc->flags &= ~DA_FLAG_DIRTY;
			xpt_release_ccb(ccb);
		}

		/* Allow medium removal. */
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);

		da_periph_unhold(periph, DA_REF_CLOSE_HOLD);
	}

	/*
	 * If we've got removable media, mark the blocksize as
	 * unavailable, since it could change when new media is
	 * inserted.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;

	softc->flags &= ~DA_FLAG_OPEN;
	/* Drain any remaining softc references before finishing the close. */
	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
	cam_periph_unlock(periph);
	da_periph_release(periph, DA_REF_OPEN);
	return (0);
}

/*
 * Ask the I/O scheduler to start work for this periph, but only once the
 * probe state machine has reached its steady state.
 */
static void
daschedule(struct cam_periph *periph)
{
	struct da_softc *softc = (struct da_softc *)periph->softc;

	if (softc->state != DA_STATE_NORMAL)
		return;

	cam_iosched_schedule(softc->cam_iosched, periph);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
dastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	softc = (struct da_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));

	/*
	 * Zone commands must be ordered, because they can depend on the
	 * effects of previously issued commands, and they may affect
	 * commands after them.
1856 */ 1857 if (bp->bio_cmd == BIO_ZONE) 1858 bp->bio_flags |= BIO_ORDERED; 1859 1860 /* 1861 * Place it in the queue of disk activities for this disk 1862 */ 1863 cam_iosched_queue_work(softc->cam_iosched, bp); 1864 1865 /* 1866 * Schedule ourselves for performing the work. 1867 */ 1868 daschedule(periph); 1869 cam_periph_unlock(periph); 1870 1871 return; 1872 } 1873 1874 static int 1875 dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) 1876 { 1877 struct cam_periph *periph; 1878 struct da_softc *softc; 1879 u_int secsize; 1880 struct ccb_scsiio csio; 1881 struct disk *dp; 1882 int error = 0; 1883 1884 dp = arg; 1885 periph = dp->d_drv1; 1886 softc = (struct da_softc *)periph->softc; 1887 secsize = softc->params.secsize; 1888 1889 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) 1890 return (ENXIO); 1891 1892 memset(&csio, 0, sizeof(csio)); 1893 if (length > 0) { 1894 xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 1895 csio.ccb_h.ccb_state = DA_CCB_DUMP; 1896 scsi_read_write(&csio, 1897 /*retries*/0, 1898 /*cbfcnp*/NULL, 1899 MSG_ORDERED_Q_TAG, 1900 /*read*/SCSI_RW_WRITE, 1901 /*byte2*/0, 1902 /*minimum_cmd_size*/ softc->minimum_cmd_size, 1903 offset / secsize, 1904 length / secsize, 1905 /*data_ptr*/(u_int8_t *) virtual, 1906 /*dxfer_len*/length, 1907 /*sense_len*/SSD_FULL_SIZE, 1908 da_default_timeout * 1000); 1909 error = cam_periph_runccb((union ccb *)&csio, cam_periph_error, 1910 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); 1911 if (error != 0) 1912 printf("Aborting dump due to I/O error.\n"); 1913 return (error); 1914 } 1915 1916 /* 1917 * Sync the disk cache contents to the physical media. 
1918 */ 1919 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { 1920 1921 xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 1922 csio.ccb_h.ccb_state = DA_CCB_DUMP; 1923 scsi_synchronize_cache(&csio, 1924 /*retries*/0, 1925 /*cbfcnp*/NULL, 1926 MSG_SIMPLE_Q_TAG, 1927 /*begin_lba*/0,/* Cover the whole disk */ 1928 /*lb_count*/0, 1929 SSD_FULL_SIZE, 1930 5 * 1000); 1931 error = cam_periph_runccb((union ccb *)&csio, cam_periph_error, 1932 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); 1933 if (error != 0) 1934 xpt_print(periph->path, "Synchronize cache failed\n"); 1935 } 1936 return (error); 1937 } 1938 1939 static int 1940 dagetattr(struct bio *bp) 1941 { 1942 int ret; 1943 struct cam_periph *periph; 1944 1945 periph = (struct cam_periph *)bp->bio_disk->d_drv1; 1946 cam_periph_lock(periph); 1947 ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, 1948 periph->path); 1949 cam_periph_unlock(periph); 1950 if (ret == 0) 1951 bp->bio_completed = bp->bio_length; 1952 return ret; 1953 } 1954 1955 static void 1956 dainit(void) 1957 { 1958 cam_status status; 1959 1960 /* 1961 * Install a global async callback. This callback will 1962 * receive async callbacks like "new device found". 1963 */ 1964 status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL); 1965 1966 if (status != CAM_REQ_CMP) { 1967 printf("da: Failed to attach master async callback " 1968 "due to status 0x%x!\n", status); 1969 } else if (da_send_ordered) { 1970 1971 /* Register our shutdown event handler */ 1972 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown, 1973 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) 1974 printf("dainit: shutdown event registration failed!\n"); 1975 } 1976 } 1977 1978 /* 1979 * Callback from GEOM, called when it has finished cleaning up its 1980 * resources. 
 */
static void
dadiskgonecb(struct disk *dp)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)dp->d_drv1;
	/* Drop the GEOM reference taken when the disk was created. */
	da_periph_release(periph, DA_REF_GEOM);
}

/*
 * Invalidation hook: stop accepting work, fail everything queued, and
 * start tearing down the GEOM side.  Called with the periph lock held.
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	struct da_softc *softc;

	cam_periph_assert(periph, MA_OWNED);
	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, daasync, periph, periph->path);

	softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
	softc->invalidations++;
#endif

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);

	/*
	 * Tell GEOM that we've gone away, we'll get a callback when it is
	 * done cleaning up its resources.
	 */
	disk_gone(softc->disk);
}

/*
 * Final destructor: free the iosched, sysctl trees, callouts, disk and
 * softc.  Entered with the periph lock held; it is dropped around the
 * teardown and re-taken before returning, as the caller expects.
 */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_unlock(periph);

	cam_iosched_fini(softc->cam_iosched);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
#ifdef CAM_IO_STATS
		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl stats context\n");
#endif
		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl context\n");
	}

	callout_drain(&softc->mediapoll_c);
	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

/*
 * Async event dispatcher for the da driver.  Locking varies per event
 * code; see the per-case comments below.
 */
static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:	/* callback to create periph, no locking yet */
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/* Only attach to connected SCSI direct-access-like LUNs. */
		if (cgd->protocol != PROTO_SCSI)
			break;
		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
			break;
		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		    && SID_TYPE(&cgd->inq_data) != T_RBC
		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL
		    && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		return;
	}
	case AC_ADVINFO_CHANGED:	/* Doesn't touch periph */
	{
		uintptr_t buftype;

		buftype = (uintptr_t)arg;
		if (buftype == CDAI_TYPE_PHYS_PATH) {
			struct da_softc *softc;

			softc = periph->softc;
			disk_attr_changed(softc->disk, "GEOM::physpath",
					  M_NOWAIT);
		}
		break;
	}
	case AC_UNIT_ATTENTION:
	{
		union ccb *ccb;
		int error_code, sense_key, asc, ascq;

		softc = (struct da_softc *)periph->softc;
		ccb = (union ccb *)arg;

		/*
		 * Handle all UNIT ATTENTIONs except our own, as they will be
		 * handled by daerror().  Since this comes from a different
		 * periph, that periph's lock is held, not ours, so we must
		 * take our own lock before touching softc flags.
		 */
		if (xpt_path_periph(ccb->ccb_h.path) != periph &&
		    scsi_extract_sense_ccb(ccb,
		     &error_code, &sense_key, &asc, &ascq)) {
			/* ASC/ASCQ 2A/09: capacity data has changed. */
			if (asc == 0x2A && ascq == 0x09) {
				xpt_print(ccb->ccb_h.path,
				    "Capacity data has changed\n");
				cam_periph_lock(periph);
				softc->flags &= ~DA_FLAG_PROBED;
				cam_periph_unlock(periph);
				dareprobe(periph);
			/* ASC/ASCQ 28/00: not-ready-to-ready transition. */
			} else if (asc == 0x28 && ascq == 0x00) {
				cam_periph_lock(periph);
				softc->flags &= ~DA_FLAG_PROBED;
				cam_periph_unlock(periph);
				disk_media_changed(softc->disk, M_NOWAIT);
			/* ASC/ASCQ 3F/03: INQUIRY data has changed. */
			} else if (asc == 0x3F && ascq == 0x03) {
				xpt_print(ccb->ccb_h.path,
				    "INQUIRY data has changed\n");
				cam_periph_lock(periph);
				softc->flags &= ~DA_FLAG_PROBED;
				cam_periph_unlock(periph);
				dareprobe(periph);
			}
		}
		break;
	}
	case AC_SCSI_AEN:		/* Called for this path: periph locked */
		/*
		 * Appears to be currently unused for SCSI devices, only ata SIMs
		 * generate this.
		 */
		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
		    (softc->flags & DA_FLAG_TUR_PENDING) == 0) {
			if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
				cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
				daschedule(periph);
			}
		}
		/* FALLTHROUGH */
	case AC_SENT_BDR:		/* Called for this path: periph locked */
	case AC_BUS_RESET:		/* Called for this path: periph locked */
	{
		struct ccb_hdr *ccbh;

		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		break;
	}
	case AC_INQ_CHANGED:		/* Called for this path: periph locked */
		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		softc->flags &= ~DA_FLAG_PROBED;
		dareprobe(periph);
		break;
	default:
		break;
	}
	/* Let the generic periph layer see the event as well. */
	cam_periph_async(periph, code, path, arg);
}

/*
 * Deferred-task body that builds this unit's sysctl tree
 * (kern.cam.da.<unit>).  Runs from a taskqueue; the SYSCTL reference on
 * the periph was taken by whoever enqueued us and is released on every
 * exit path.
 */
static void
dasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	char tmpstr[32], tmpstr2[16];
	struct ccb_trans_settings cts;

	periph = (struct cam_periph *)context;
	/*
	 * periph was held for us when this task was enqueued
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}

	softc = (struct da_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	cam_periph_lock(periph);
	softc->flags |= DA_FLAG_SCTX_INIT;
	cam_periph_unlock(periph);
	softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
	    CTLFLAG_RD, 0, tmpstr, "device_index");
	if (softc->sysctl_tree == NULL) {
		printf("dasysctlinit: unable to allocate sysctl tree\n");
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}

	/*
	 * Now register the sysctl handler, so the user can change the value on
	 * the fly.
	 */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN,
		softc, 0, dadeletemethodsysctl, "A",
		"BIO_DELETE execution method");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
		softc, 0, dadeletemaxsysctl, "Q",
		"Maximum BIO_DELETE size");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
		"Minimum CDB size");
	/* TRIM/UNMAP statistics. */
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_count", CTLFLAG_RD, &softc->trim_count,
		"Total number of unmap/dsm commands sent");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_ranges", CTLFLAG_RD, &softc->trim_ranges,
		"Total number of ranges in unmap/dsm commands");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_lbas", CTLFLAG_RD, &softc->trim_lbas,
		"Total lbas in the unmap/dsm commands sent");

	/* Zoned-device (SMR) reporting. */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonemodesysctl, "A",
		"Zone Mode");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonesupsysctl, "A",
		"Zone Support");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
		"Optimal Number of Open Sequential Write Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_nonseq_zones", CTLFLAG_RD,
		&softc->optimal_nonseq_zones,
		"Optimal Number of Non-Sequentially Written Sequential Write "
		"Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
		"Maximum Number of Open Sequential Write Required Zones");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "error_inject",
		       CTLFLAG_RW,
		       &softc->error_inject,
		       0,
		       "error_inject leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "unmapped_io",
		       CTLFLAG_RD,
		       &softc->unmappedio,
		       0,
		       "Unmapped I/O support");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "rotating",
		       CTLFLAG_RD,
		       &softc->rotating,
		       0,
		       "Rotating media");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "p_type",
		       CTLFLAG_RD,
		       &softc->p_type,
		       0,
		       "DIF protection type");

#ifdef CAM_TEST_FAILURE
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
		periph, 0, cam_periph_invalidate_sysctl, "I",
		"Write 1 to invalidate the drive immediately");
#endif

	/*
	 * Add some addressing info.
	 */
	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cam_periph_lock(periph);
	xpt_action((union ccb *)&cts);
	cam_periph_unlock(periph);
	if (cts.ccb_h.status != CAM_REQ_CMP) {
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}
	/* On Fibre Channel transports, export the WWPN when valid. */
	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWPN) {
			softc->wwpn = fc->wwpn;
			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
			    SYSCTL_CHILDREN(softc->sysctl_tree),
			    OID_AUTO, "wwpn", CTLFLAG_RD,
			    &softc->wwpn, "World Wide Port Name");
		}
	}

#ifdef CAM_IO_STATS
	/*
	 * Now add some useful stats.
	 * XXX These should live in cam_periph and be common to all periphs
	 */
	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
	    CTLFLAG_RD, 0, "Statistics");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "errors",
		       CTLFLAG_RD,
		       &softc->errors,
		       0,
		       "Transport errors reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "timeouts",
		       CTLFLAG_RD,
		       &softc->timeouts,
		       0,
		       "Device timeouts reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "pack_invalidations",
		       CTLFLAG_RD,
		       &softc->invalidations,
		       0,
		       "Device pack invalidations");
#endif

	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
	    softc->sysctl_tree);

	da_periph_release(periph, DA_REF_SYSCTL);
}

static int
dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) 2393 { 2394 int error; 2395 uint64_t value; 2396 struct da_softc *softc; 2397 2398 softc = (struct da_softc *)arg1; 2399 2400 value = softc->disk->d_delmaxsize; 2401 error = sysctl_handle_64(oidp, &value, 0, req); 2402 if ((error != 0) || (req->newptr == NULL)) 2403 return (error); 2404 2405 /* only accept values smaller than the calculated value */ 2406 if (value > dadeletemaxsize(softc, softc->delete_method)) { 2407 return (EINVAL); 2408 } 2409 softc->disk->d_delmaxsize = value; 2410 2411 return (0); 2412 } 2413 2414 static int 2415 dacmdsizesysctl(SYSCTL_HANDLER_ARGS) 2416 { 2417 int error, value; 2418 2419 value = *(int *)arg1; 2420 2421 error = sysctl_handle_int(oidp, &value, 0, req); 2422 2423 if ((error != 0) 2424 || (req->newptr == NULL)) 2425 return (error); 2426 2427 /* 2428 * Acceptable values here are 6, 10, 12 or 16. 2429 */ 2430 if (value < 6) 2431 value = 6; 2432 else if ((value > 6) 2433 && (value <= 10)) 2434 value = 10; 2435 else if ((value > 10) 2436 && (value <= 12)) 2437 value = 12; 2438 else if (value > 12) 2439 value = 16; 2440 2441 *(int *)arg1 = value; 2442 2443 return (0); 2444 } 2445 2446 static int 2447 dasysctlsofttimeout(SYSCTL_HANDLER_ARGS) 2448 { 2449 sbintime_t value; 2450 int error; 2451 2452 value = da_default_softtimeout / SBT_1MS; 2453 2454 error = sysctl_handle_int(oidp, (int *)&value, 0, req); 2455 if ((error != 0) || (req->newptr == NULL)) 2456 return (error); 2457 2458 /* XXX Should clip this to a reasonable level */ 2459 if (value > da_default_timeout * 1000) 2460 return (EINVAL); 2461 2462 da_default_softtimeout = value * SBT_1MS; 2463 return (0); 2464 } 2465 2466 static void 2467 dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) 2468 { 2469 2470 softc->delete_method = delete_method; 2471 softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); 2472 softc->delete_func = da_delete_functions[delete_method]; 2473 2474 if (softc->delete_method > 
	    DA_DELETE_DISABLE)
		softc->disk->d_flags |= DISKFLAG_CANDELETE;
	else
		softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
}

/*
 * Return the largest BIO_DELETE request (in bytes) that a single
 * command can cover for the given delete method, derived from the
 * limits the device reported and clipped to the device capacity.
 * Returns 0 for methods with no usable limit (e.g. DISABLE/NONE).
 */
static off_t
dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
{
	off_t sectors;

	switch(delete_method) {
	case DA_DELETE_UNMAP:
		sectors = (off_t)softc->unmap_max_lba;
		break;
	case DA_DELETE_ATA_TRIM:
		sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
		break;
	case DA_DELETE_WS16:
		sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
		break;
	case DA_DELETE_ZERO:
	case DA_DELETE_WS10:
		sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
		break;
	default:
		return 0;
	}

	/* Never advertise more than the device actually has. */
	return (off_t)softc->params.secsize *
	    omin(sectors, softc->params.sectors);
}

/*
 * Called when the probe state machine completes: choose the delete
 * method, print the one-time announcement, release the probe CCB and
 * transition the softc to normal operation.  Must be called with the
 * periph lock held.
 */
static void
daprobedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	dadeletemethodchoose(softc, DA_DELETE_NONE);

	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		char buf[80];
		int i, sep;

		snprintf(buf, sizeof(buf), "Delete methods: <");
		sep = 0;
		/* List every available method; mark the active one "(*)". */
		for (i = 0; i <= DA_DELETE_MAX; i++) {
			if ((softc->delete_available & (1 << i)) == 0 &&
			    i != softc->delete_method)
				continue;
			if (sep)
				strlcat(buf, ",", sizeof(buf));
			strlcat(buf, da_delete_method_names[i],
			    sizeof(buf));
			if (i == softc->delete_method)
				strlcat(buf, "(*)", sizeof(buf));
			sep = 1;
		}
		strlcat(buf, ">", sizeof(buf));
		printf("%s%d: %s\n", periph->periph_name,
		    periph->unit_number, buf);
	}
	if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 &&
	    (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		printf("%s%d: Write Protected\n", periph->periph_name,
		    periph->unit_number);
	}

	/*
	 * Since our peripheral may be invalidated by an error
	 * above or an external event, we must release our CCB
	 * before releasing the probe lock on the peripheral.
	 * The peripheral will only go away once the last lock
	 * is removed, and we need it around for the CCB release
	 * operation.
	 */
	xpt_release_ccb(ccb);
	softc->state = DA_STATE_NORMAL;
	softc->flags |= DA_FLAG_PROBED;
	daschedule(periph);
	/* Wake any thread sleeping on the media size channel. */
	wakeup(&softc->disk->d_mediasize);
	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		/* First-time probe: drop the hold taken in daregister(). */
		softc->flags |= DA_FLAG_ANNOUNCED;
		da_periph_unhold(periph, DA_REF_PROBE_HOLD);
	} else
		/* Reprobe: drop the reference taken by dareprobe(). */
		da_periph_release_locked(periph, DA_REF_REPROBE);
}

/*
 * Select the BIO_DELETE backend: honor the user's preference
 * (delete_method_pref) when the device supports it, otherwise walk the
 * pre-defined order (skipping ZERO) and fall back to default_method
 * when nothing better is available.
 */
static void
dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
{
	int i, methods;

	/* If available, prefer the method requested by user. */
	i = softc->delete_method_pref;
	/* DISABLE is always selectable even if the device reports nothing. */
	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
	if (methods & (1 << i)) {
		dadeletemethodset(softc, i);
		return;
	}

	/* Use the pre-defined order to choose the best performing delete. */
	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
		if (i == DA_DELETE_ZERO)
			continue;
		if (softc->delete_available & (1 << i)) {
			dadeletemethodset(softc, i);
			return;
		}
	}

	/* Fallback to default.
*/ 2590 dadeletemethodset(softc, default_method); 2591 } 2592 2593 static int 2594 dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) 2595 { 2596 char buf[16]; 2597 const char *p; 2598 struct da_softc *softc; 2599 int i, error, value; 2600 2601 softc = (struct da_softc *)arg1; 2602 2603 value = softc->delete_method; 2604 if (value < 0 || value > DA_DELETE_MAX) 2605 p = "UNKNOWN"; 2606 else 2607 p = da_delete_method_names[value]; 2608 strncpy(buf, p, sizeof(buf)); 2609 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 2610 if (error != 0 || req->newptr == NULL) 2611 return (error); 2612 for (i = 0; i <= DA_DELETE_MAX; i++) { 2613 if (strcmp(buf, da_delete_method_names[i]) == 0) 2614 break; 2615 } 2616 if (i > DA_DELETE_MAX) 2617 return (EINVAL); 2618 softc->delete_method_pref = i; 2619 dadeletemethodchoose(softc, DA_DELETE_NONE); 2620 return (0); 2621 } 2622 2623 static int 2624 dazonemodesysctl(SYSCTL_HANDLER_ARGS) 2625 { 2626 char tmpbuf[40]; 2627 struct da_softc *softc; 2628 int error; 2629 2630 softc = (struct da_softc *)arg1; 2631 2632 switch (softc->zone_mode) { 2633 case DA_ZONE_DRIVE_MANAGED: 2634 snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); 2635 break; 2636 case DA_ZONE_HOST_AWARE: 2637 snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); 2638 break; 2639 case DA_ZONE_HOST_MANAGED: 2640 snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); 2641 break; 2642 case DA_ZONE_NONE: 2643 default: 2644 snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); 2645 break; 2646 } 2647 2648 error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); 2649 2650 return (error); 2651 } 2652 2653 static int 2654 dazonesupsysctl(SYSCTL_HANDLER_ARGS) 2655 { 2656 char tmpbuf[180]; 2657 struct da_softc *softc; 2658 struct sbuf sb; 2659 int error, first; 2660 unsigned int i; 2661 2662 softc = (struct da_softc *)arg1; 2663 2664 error = 0; 2665 first = 1; 2666 sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); 2667 2668 for (i = 0; i < sizeof(da_zone_desc_table) / 2669 
	    sizeof(da_zone_desc_table[0]); i++) {
		if (softc->zone_flags & da_zone_desc_table[i].value) {
			/* Separate entries with ", " after the first one. */
			if (first == 0)
				sbuf_printf(&sb, ", ");
			else
				first = 0;
			sbuf_cat(&sb, da_zone_desc_table[i].desc);
		}
	}

	if (first == 1)
		sbuf_printf(&sb, "None");

	sbuf_finish(&sb);

	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

	return (error);
}

/*
 * CAM periph registration callback for da(4).  Allocates and
 * initializes the softc, applies quirks, creates the GEOM disk, hooks
 * up async callbacks and callouts, and kicks off the probe state
 * machine.  arg is the ccb_getdev from the periph allocation.  Returns
 * CAM_REQ_CMP on success or CAM_REQ_CMP_ERR on any failure.
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	struct da_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;
	int quirks;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		    "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
		printf("daregister: Unable to probe new device. "
		    "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}

	LIST_INIT(&softc->pending_ccbs);
	/* Probe ordering starts with the write-protect check. */
	softc->state = DA_STATE_PROBE_WP;
	bioq_init(&softc->delete_run_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	/* Conservative defaults until the probe refines them. */
	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
	softc->unmap_max_lba = UNMAP_RANGE_MAX;
	softc->unmap_gran = 0;
	softc->unmap_gran_align = 0;
	softc->ws_max_blks = WS16_MAX_BLKS;
	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
	softc->rotating = 1;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       nitems(da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	xpt_path_inq(&cpi, periph->path);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	/* Override quirks if tunable is set */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.quirks",
	    periph->unit_number);
	quirks = softc->quirks;
	TUNABLE_INT_FETCH(tmpstr, &quirks);
	softc->quirks = quirks;

	/* Determine the device's zone model from inquiry type or quirks. */
	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
		softc->zone_mode = DA_ZONE_HOST_MANAGED;
	else if (softc->quirks & DA_Q_SMR_DM)
		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
	else
		softc->zone_mode = DA_ZONE_NONE;

	if (softc->zone_mode != DA_ZONE_NONE) {
		/*
		 * An ATA Information VPD page indicates a SAT layer; the
		 * Zoned Block Device Characteristics page decides whether
		 * the SAT layer translates zone commands or we must use
		 * ATA passthrough.
		 */
		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
			else
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
		} else
			softc->zone_interface = DA_ZONE_IF_SCSI;
	}

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);

	/*
	 * Take an exclusive section lock on the periph while dastart is called
	 * to finish the probe.  The lock will be dropped in dadone at the end
	 * of probe. This locks out daopen and daclose from racing with the
	 * probe.
	 *
	 * XXX if cam_periph_hold returns an error, we don't hold a refcount.
	 */
	(void)da_periph_hold(periph, PRIBIO, DA_REF_PROBE_HOLD);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, periph);

	cam_periph_unlock(periph);
	/*
	 * RBC devices don't have to support READ(6), only READ(10).
	 */
	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Load the user's default, if any.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
	    periph->unit_number);
	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);

	/*
	 * 6, 10, 12 and 16 are the currently permissible values.
	 */
	if (softc->minimum_cmd_size > 12)
		softc->minimum_cmd_size = 16;
	else if (softc->minimum_cmd_size > 10)
		softc->minimum_cmd_size = 12;
	else if (softc->minimum_cmd_size > 6)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/* Predict whether device may support READ CAPACITY(16). */
	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
	    (softc->quirks & DA_Q_NO_RC16) == 0) {
		softc->flags |= DA_FLAG_CAN_RC16;
	}

	/*
	 * Register this media as a disk.
	 */
	softc->disk = disk_alloc();
	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) |
			  XPORT_DEVSTAT_TYPE(cpi.transport),
			  DEVSTAT_PRIORITY_DISK);
	softc->disk->d_open = daopen;
	softc->disk->d_close = daclose;
	softc->disk->d_strategy = dastrategy;
	softc->disk->d_dump = dadump;
	softc->disk->d_getattr = dagetattr;
	softc->disk->d_gone = dadiskgonecb;
	softc->disk->d_name = "da";
	softc->disk->d_drv1 = periph;
	/* Clamp the maximum I/O size to what the SIM reported. */
	if (cpi.maxio == 0)
		softc->maxio = DFLTPHYS;	/* traditional default */
	else if (cpi.maxio > MAXPHYS)
		softc->maxio = MAXPHYS;		/* for safety */
	else
		softc->maxio = cpi.maxio;
	if (softc->quirks & DA_Q_128KB)
		softc->maxio = min(softc->maxio, 128 * 1024);
	softc->disk->d_maxsize = softc->maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
		softc->unmappedio = 1;
		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
	}
	/* Build "<vendor> <product>" description from the inquiry data. */
	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
	softc->disk->d_hba_vendor = cpi.hba_vendor;
	softc->disk->d_hba_device = cpi.hba_device;
	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
	snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment),
	    "%s%d", cpi.dev_name, cpi.unit_number);

	/*
	 * Acquire a reference to the periph before we register with GEOM.
	 * We'll release this reference once GEOM calls us back (via
	 * dadiskgonecb()) telling us that our provider has been freed.
	 */
	if (da_periph_acquire(periph, DA_REF_GEOM) != 0) {
		xpt_print(periph->path, "%s: lost periph during "
		    "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}

	disk_create(softc->disk, DISK_VERSION);
	cam_periph_lock(periph);

	/*
	 * Add async callbacks for events of interest.
	 * I don't bother checking if this fails as,
	 * in most cases, the system will function just
	 * fine without them and the only alternative
	 * would be to not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
	    AC_INQ_CHANGED, daasync, periph, periph->path);

	/*
	 * Emit an attribute changed notification just in case
	 * physical path information arrived before our async
	 * event handler was registered, but after anyone attaching
	 * to our disk device polled it.
	 */
	disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);

	/*
	 * Schedule periodic media polling events.
	 */
	callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
	/*
	 * Only poll removable media on devices that do not report
	 * asynchronous event notification (SID_AEN), and only when
	 * polling is enabled via da_poll_period.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
	    (cgd->inq_flags & SID_AEN) == 0 &&
	    da_poll_period != 0)
		callout_reset(&softc->mediapoll_c, da_poll_period * hz,
		    damediapoll, periph);

	/* Kick off the probe state machine via dastart(). */
	xpt_schedule(periph, CAM_PRIORITY_DEV);

	return(CAM_REQ_CMP);
}

/*
 * Translate a struct bio zone command into the corresponding ZBC OUT
 * service action, or -1 when there is no translation.
 */
static int
da_zone_bio_to_scsi(int disk_zone_cmd)
{
	switch (disk_zone_cmd) {
	case DISK_ZONE_OPEN:
		return ZBC_OUT_SA_OPEN;
	case DISK_ZONE_CLOSE:
		return ZBC_OUT_SA_CLOSE;
	case DISK_ZONE_FINISH:
		return ZBC_OUT_SA_FINISH;
	case DISK_ZONE_RWP:
		return ZBC_OUT_SA_RWP;
	}

	return -1;
}

/*
 * Fill in the CCB for a BIO_ZONE request.  On success *queue_ccb is set
 * to 1 when the CCB must actually be queued to the device (some zone
 * commands, e.g. GET_PARAMS, are answered from the softc without any
 * I/O).  Returns 0 or an errno; on error the bio should be failed and
 * the CCB released by the caller.
 */
static int
da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
    int *queue_ccb)
{
	struct da_softc *softc;
	int error;

	error = 0;

	if (bp->bio_cmd != BIO_ZONE) {
		error = EINVAL;
		goto bailout;
	}

	softc = periph->softc;

	switch (bp->bio_zone.zone_cmd) {
	case DISK_ZONE_OPEN:
	case DISK_ZONE_CLOSE:
	case DISK_ZONE_FINISH:
	case DISK_ZONE_RWP: {
		int zone_flags;
		int zone_sa;
		uint64_t lba;

		zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
		if (zone_sa == -1) {
			xpt_print(periph->path, "Cannot translate zone "
			    "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
			error = EINVAL;
			goto bailout;
		}

		zone_flags = 0;
		lba = bp->bio_zone.zone_params.rwp.id;

		if (bp->bio_zone.zone_params.rwp.flags &
		    DISK_ZONE_RWP_FLAG_ALL)
			zone_flags |= ZBC_OUT_ALL;

		/* Native ZBC (or SAT-translated) vs. raw ATA passthrough. */
		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
			scsi_zbc_out(&ccb->csio,
			    /*retries*/ da_retry_count,
			    /*cbfcnp*/ dadone,
			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
			    /*service_action*/ zone_sa,
			    /*zone_id*/ lba,
			    /*zone_flags*/ zone_flags,
			    /*data_ptr*/ NULL,
			    /*dxfer_len*/ 0,
			    /*sense_len*/ SSD_FULL_SIZE,
			    /*timeout*/ da_default_timeout * 1000);
		} else {
			/*
			 * Note that in this case, even though we can
			 * technically use NCQ, we don't bother for several
			 * reasons:
			 * 1. It hasn't been tested on a SAT layer that
			 *    supports it.  This is new as of SAT-4.
			 * 2. Even when there is a SAT layer that supports
			 *    it, that SAT layer will also probably support
			 *    ZBC -> ZAC translation, since they are both
			 *    in the SAT-4 spec.
			 * 3. Translation will likely be preferable to ATA
			 *    passthrough.  LSI / Avago at least single
			 *    steps ATA passthrough commands in the HBA,
			 *    regardless of protocol, so unless that
			 *    changes, there is a performance penalty for
			 *    doing ATA passthrough no matter whether
			 *    you're using NCQ/FPDMA, DMA or PIO.
			 * 4. It requires a 32-byte CDB, which at least at
			 *    this point in CAM requires a CDB pointer, which
			 *    would require us to allocate an additional bit
			 *    of storage separate from the CCB.
			 */
			error = scsi_ata_zac_mgmt_out(&ccb->csio,
			    /*retries*/ da_retry_count,
			    /*cbfcnp*/ dadone,
			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
			    /*use_ncq*/ 0,
			    /*zm_action*/ zone_sa,
			    /*zone_id*/ lba,
			    /*zone_flags*/ zone_flags,
			    /*data_ptr*/ NULL,
			    /*dxfer_len*/ 0,
			    /*cdb_storage*/ NULL,
			    /*cdb_storage_len*/ 0,
			    /*sense_len*/ SSD_FULL_SIZE,
			    /*timeout*/ da_default_timeout * 1000);
			if (error != 0) {
				error = EINVAL;
				xpt_print(periph->path,
				    "scsi_ata_zac_mgmt_out() returned an "
				    "error!");
				goto bailout;
			}
		}
		/* The CCB is fully built; tell the caller to queue it. */
		*queue_ccb = 1;

		break;
	}
	case DISK_ZONE_REPORT_ZONES: {
		uint8_t *rz_ptr;
		uint32_t num_entries, alloc_size;
		struct disk_zone_report *rep;

		rep = &bp->bio_zone.zone_params.report;

		num_entries = rep->entries_allocated;
		if (num_entries == 0) {
			xpt_print(periph->path, "No entries allocated for "
			    "Report Zones request\n");
			error = EINVAL;
			goto bailout;
		}
		/*
		 * Size the response buffer for the requested number of zone
		 * descriptors, capped at the maximum transfer size.
		 */
		alloc_size = sizeof(struct scsi_report_zones_hdr) +
		    (sizeof(struct scsi_report_zones_desc) * num_entries);
		alloc_size = min(alloc_size, softc->disk->d_maxsize);
		rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
		if (rz_ptr == NULL) {
			xpt_print(periph->path, "Unable to allocate memory "
			   "for Report Zones request\n");
			error = ENOMEM;
			goto bailout;
		}

		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
			scsi_zbc_in(&ccb->csio,
			    /*retries*/ da_retry_count,
			    /*cbcfnp*/ dadone,
			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
			    /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
			    /*zone_start_lba*/ rep->starting_id,
			    /*zone_options*/ rep->rep_options,
			    /*data_ptr*/ rz_ptr,
			    /*dxfer_len*/ alloc_size,
			    /*sense_len*/ SSD_FULL_SIZE,
			    /*timeout*/ da_default_timeout * 1000);
		} else {
			/*
			 * Note that in this case, even though we can
			 * technically use NCQ, we
			 * don't bother for several
			 * reasons:
			 * 1. It hasn't been tested on a SAT layer that
			 *    supports it.  This is new as of SAT-4.
			 * 2. Even when there is a SAT layer that supports
			 *    it, that SAT layer will also probably support
			 *    ZBC -> ZAC translation, since they are both
			 *    in the SAT-4 spec.
			 * 3. Translation will likely be preferable to ATA
			 *    passthrough.  LSI / Avago at least single
			 *    steps ATA passthrough commands in the HBA,
			 *    regardless of protocol, so unless that
			 *    changes, there is a performance penalty for
			 *    doing ATA passthrough no matter whether
			 *    you're using NCQ/FPDMA, DMA or PIO.
			 * 4. It requires a 32-byte CDB, which at least at
			 *    this point in CAM requires a CDB pointer, which
			 *    would require us to allocate an additional bit
			 *    of storage separate from the CCB.
			 */
			error = scsi_ata_zac_mgmt_in(&ccb->csio,
			    /*retries*/ da_retry_count,
			    /*cbcfnp*/ dadone,
			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
			    /*use_ncq*/ 0,
			    /*zm_action*/ ATA_ZM_REPORT_ZONES,
			    /*zone_id*/ rep->starting_id,
			    /*zone_flags*/ rep->rep_options,
			    /*data_ptr*/ rz_ptr,
			    /*dxfer_len*/ alloc_size,
			    /*cdb_storage*/ NULL,
			    /*cdb_storage_len*/ 0,
			    /*sense_len*/ SSD_FULL_SIZE,
			    /*timeout*/ da_default_timeout * 1000);
			if (error != 0) {
				error = EINVAL;
				xpt_print(periph->path,
				    "scsi_ata_zac_mgmt_in() returned an "
				    "error!");
				goto bailout;
			}
		}

		/*
		 * For BIO_ZONE, this isn't normally needed. However, it
		 * is used by devstat_end_transaction_bio() to determine
		 * how much data was transferred.
		 */
		/*
		 * XXX KDM we have a problem.  But I'm not sure how to fix
		 * it.  devstat uses bio_bcount - bio_resid to calculate
		 * the amount of data transferred.   The GEOM disk code
		 * uses bio_length - bio_resid to calculate the amount of
		 * data in bio_completed.
		 * We have different structure
		 * sizes above and below the ada(4) driver.  So, if we
		 * use the sizes above, the amount transferred won't be
		 * quite accurate for devstat.  If we use different sizes
		 * for bio_bcount and bio_length (above and below
		 * respectively), then the residual needs to match one or
		 * the other.  Everything is calculated after the bio
		 * leaves the driver, so changing the values around isn't
		 * really an option.  For now, just set the count to the
		 * passed in length.  This means that the calculations
		 * above (e.g. bio_completed) will be correct, but the
		 * amount of data reported to devstat will be slightly
		 * under or overstated.
		 */
		bp->bio_bcount = bp->bio_length;

		*queue_ccb = 1;

		break;
	}
	case DISK_ZONE_GET_PARAMS: {
		struct disk_zone_disk_params *params;

		/*
		 * Answered entirely from cached softc state gathered at
		 * probe time; no command is sent to the device.
		 */
		params = &bp->bio_zone.zone_params.disk_params;
		bzero(params, sizeof(*params));

		switch (softc->zone_mode) {
		case DA_ZONE_DRIVE_MANAGED:
			params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
			break;
		case DA_ZONE_HOST_AWARE:
			params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
			break;
		case DA_ZONE_HOST_MANAGED:
			params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
			break;
		default:
		case DA_ZONE_NONE:
			params->zone_mode = DISK_ZONE_MODE_NONE;
			break;
		}

		if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
			params->flags |= DISK_ZONE_DISK_URSWRZ;

		if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
			params->optimal_seq_zones = softc->optimal_seq_zones;
			params->flags |= DISK_ZONE_OPT_SEQ_SET;
		}

		if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
			params->optimal_nonseq_zones =
			    softc->optimal_nonseq_zones;
			params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
		}

		if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
			params->max_seq_zones = softc->max_seq_zones;
			params->flags |= DISK_ZONE_MAX_SEQ_SET;
		}
		/* Report which zone sub-commands the device supports. */
		if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
			params->flags |= DISK_ZONE_RZ_SUP;

		if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
			params->flags |= DISK_ZONE_OPEN_SUP;

		if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
			params->flags |= DISK_ZONE_CLOSE_SUP;

		if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
			params->flags |= DISK_ZONE_FINISH_SUP;

		if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
			params->flags |= DISK_ZONE_RWP_SUP;
		break;
	}
	default:
		break;
	}
bailout:
	return (error);
}

static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct da_softc *softc;

	cam_periph_assert(periph, MA_OWNED);
	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));

skipstate:
	switch (softc->state) {
	case DA_STATE_NORMAL:
	{
		struct bio *bp;
		uint8_t tag_code;

more:
		bp = cam_iosched_next_bio(softc->cam_iosched);
		if (bp == NULL) {
			/*
			 * No bio pending; if a TEST UNIT READY was
			 * requested, use the CCB for that instead of
			 * releasing it.
			 */
			if (cam_iosched_has_work_flags(softc->cam_iosched,
			    DA_WORK_TUR)) {
				softc->flags |= DA_FLAG_TUR_PENDING;
				cam_iosched_clr_work_flags(softc->cam_iosched,
				    DA_WORK_TUR);
				scsi_test_unit_ready(&start_ccb->csio,
				     /*retries*/ da_retry_count,
				     dadone_tur,
				     MSG_SIMPLE_Q_TAG,
				     SSD_FULL_SIZE,
				     da_default_timeout * 1000);
				start_ccb->ccb_h.ccb_bp = NULL;
				start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
				xpt_action(start_ccb);
			} else
				xpt_release_ccb(start_ccb);
			break;
		}

		if (bp->bio_cmd == BIO_DELETE) {
			if (softc->delete_func != NULL) {
				softc->delete_func(periph, start_ccb, bp);
				goto out;
			} else {
				/*
				 * Not sure this is possible, but failsafe by
				 * lying and saying "sure, done."
3271 */ 3272 biofinish(bp, NULL, 0); 3273 goto more; 3274 } 3275 } 3276 3277 if (cam_iosched_has_work_flags(softc->cam_iosched, 3278 DA_WORK_TUR)) { 3279 cam_iosched_clr_work_flags(softc->cam_iosched, 3280 DA_WORK_TUR); 3281 da_periph_release_locked(periph, DA_REF_TUR); 3282 } 3283 3284 if ((bp->bio_flags & BIO_ORDERED) != 0 || 3285 (softc->flags & DA_FLAG_NEED_OTAG) != 0) { 3286 softc->flags &= ~DA_FLAG_NEED_OTAG; 3287 softc->flags |= DA_FLAG_WAS_OTAG; 3288 tag_code = MSG_ORDERED_Q_TAG; 3289 } else { 3290 tag_code = MSG_SIMPLE_Q_TAG; 3291 } 3292 3293 switch (bp->bio_cmd) { 3294 case BIO_WRITE: 3295 case BIO_READ: 3296 { 3297 void *data_ptr; 3298 int rw_op; 3299 3300 biotrack(bp, __func__); 3301 3302 if (bp->bio_cmd == BIO_WRITE) { 3303 softc->flags |= DA_FLAG_DIRTY; 3304 rw_op = SCSI_RW_WRITE; 3305 } else { 3306 rw_op = SCSI_RW_READ; 3307 } 3308 3309 data_ptr = bp->bio_data; 3310 if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { 3311 rw_op |= SCSI_RW_BIO; 3312 data_ptr = bp; 3313 } 3314 3315 scsi_read_write(&start_ccb->csio, 3316 /*retries*/da_retry_count, 3317 /*cbfcnp*/dadone, 3318 /*tag_action*/tag_code, 3319 rw_op, 3320 /*byte2*/0, 3321 softc->minimum_cmd_size, 3322 /*lba*/bp->bio_pblkno, 3323 /*block_count*/bp->bio_bcount / 3324 softc->params.secsize, 3325 data_ptr, 3326 /*dxfer_len*/ bp->bio_bcount, 3327 /*sense_len*/SSD_FULL_SIZE, 3328 da_default_timeout * 1000); 3329 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 3330 start_ccb->csio.bio = bp; 3331 #endif 3332 break; 3333 } 3334 case BIO_FLUSH: 3335 /* 3336 * If we don't support sync cache, or the disk 3337 * isn't dirty, FLUSH is a no-op. Use the 3338 * allocated CCB for the next bio if one is 3339 * available. 
3340 */ 3341 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 || 3342 (softc->flags & DA_FLAG_DIRTY) == 0) { 3343 biodone(bp); 3344 goto skipstate; 3345 } 3346 3347 /* 3348 * BIO_FLUSH doesn't currently communicate 3349 * range data, so we synchronize the cache 3350 * over the whole disk. 3351 */ 3352 scsi_synchronize_cache(&start_ccb->csio, 3353 /*retries*/1, 3354 /*cbfcnp*/dadone, 3355 /*tag_action*/tag_code, 3356 /*begin_lba*/0, 3357 /*lb_count*/0, 3358 SSD_FULL_SIZE, 3359 da_default_timeout*1000); 3360 /* 3361 * Clear the dirty flag before sending the command. 3362 * Either this sync cache will be successful, or it 3363 * will fail after a retry. If it fails, it is 3364 * unlikely to be successful if retried later, so 3365 * we'll save ourselves time by just marking the 3366 * device clean. 3367 */ 3368 softc->flags &= ~DA_FLAG_DIRTY; 3369 break; 3370 case BIO_ZONE: { 3371 int error, queue_ccb; 3372 3373 queue_ccb = 0; 3374 3375 error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb); 3376 if ((error != 0) 3377 || (queue_ccb == 0)) { 3378 biofinish(bp, NULL, error); 3379 xpt_release_ccb(start_ccb); 3380 return; 3381 } 3382 break; 3383 } 3384 } 3385 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; 3386 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 3387 start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); 3388 3389 out: 3390 LIST_INSERT_HEAD(&softc->pending_ccbs, 3391 &start_ccb->ccb_h, periph_links.le); 3392 3393 /* We expect a unit attention from this device */ 3394 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { 3395 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; 3396 softc->flags &= ~DA_FLAG_RETRY_UA; 3397 } 3398 3399 start_ccb->ccb_h.ccb_bp = bp; 3400 softc->refcount++; 3401 cam_periph_unlock(periph); 3402 xpt_action(start_ccb); 3403 cam_periph_lock(periph); 3404 3405 /* May have more work to do, so ensure we stay scheduled */ 3406 daschedule(periph); 3407 break; 3408 } 3409 case DA_STATE_PROBE_WP: 3410 { 3411 void *mode_buf; 3412 int mode_buf_len; 3413 3414 if 
(da_disable_wp_detection) { 3415 if ((softc->flags & DA_FLAG_CAN_RC16) != 0) 3416 softc->state = DA_STATE_PROBE_RC16; 3417 else 3418 softc->state = DA_STATE_PROBE_RC; 3419 goto skipstate; 3420 } 3421 mode_buf_len = 192; 3422 mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT); 3423 if (mode_buf == NULL) { 3424 xpt_print(periph->path, "Unable to send mode sense - " 3425 "malloc failure\n"); 3426 if ((softc->flags & DA_FLAG_CAN_RC16) != 0) 3427 softc->state = DA_STATE_PROBE_RC16; 3428 else 3429 softc->state = DA_STATE_PROBE_RC; 3430 goto skipstate; 3431 } 3432 scsi_mode_sense_len(&start_ccb->csio, 3433 /*retries*/ da_retry_count, 3434 /*cbfcnp*/ dadone_probewp, 3435 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3436 /*dbd*/ FALSE, 3437 /*pc*/ SMS_PAGE_CTRL_CURRENT, 3438 /*page*/ SMS_ALL_PAGES_PAGE, 3439 /*param_buf*/ mode_buf, 3440 /*param_len*/ mode_buf_len, 3441 /*minimum_cmd_size*/ softc->minimum_cmd_size, 3442 /*sense_len*/ SSD_FULL_SIZE, 3443 /*timeout*/ da_default_timeout * 1000); 3444 start_ccb->ccb_h.ccb_bp = NULL; 3445 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP; 3446 xpt_action(start_ccb); 3447 break; 3448 } 3449 case DA_STATE_PROBE_RC: 3450 { 3451 struct scsi_read_capacity_data *rcap; 3452 3453 rcap = (struct scsi_read_capacity_data *) 3454 malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); 3455 if (rcap == NULL) { 3456 printf("dastart: Couldn't malloc read_capacity data\n"); 3457 /* da_free_periph??? 
*/ 3458 break; 3459 } 3460 scsi_read_capacity(&start_ccb->csio, 3461 /*retries*/da_retry_count, 3462 dadone_proberc, 3463 MSG_SIMPLE_Q_TAG, 3464 rcap, 3465 SSD_FULL_SIZE, 3466 /*timeout*/5000); 3467 start_ccb->ccb_h.ccb_bp = NULL; 3468 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; 3469 xpt_action(start_ccb); 3470 break; 3471 } 3472 case DA_STATE_PROBE_RC16: 3473 { 3474 struct scsi_read_capacity_data_long *rcaplong; 3475 3476 rcaplong = (struct scsi_read_capacity_data_long *) 3477 malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); 3478 if (rcaplong == NULL) { 3479 printf("dastart: Couldn't malloc read_capacity data\n"); 3480 /* da_free_periph??? */ 3481 break; 3482 } 3483 scsi_read_capacity_16(&start_ccb->csio, 3484 /*retries*/ da_retry_count, 3485 /*cbfcnp*/ dadone_proberc, 3486 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3487 /*lba*/ 0, 3488 /*reladr*/ 0, 3489 /*pmi*/ 0, 3490 /*rcap_buf*/ (uint8_t *)rcaplong, 3491 /*rcap_buf_len*/ sizeof(*rcaplong), 3492 /*sense_len*/ SSD_FULL_SIZE, 3493 /*timeout*/ da_default_timeout * 1000); 3494 start_ccb->ccb_h.ccb_bp = NULL; 3495 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; 3496 xpt_action(start_ccb); 3497 break; 3498 } 3499 case DA_STATE_PROBE_LBP: 3500 { 3501 struct scsi_vpd_logical_block_prov *lbp; 3502 3503 if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { 3504 /* 3505 * If we get here we don't support any SBC-3 delete 3506 * methods with UNMAP as the Logical Block Provisioning 3507 * VPD page support is required for devices which 3508 * support it according to T10/1799-D Revision 31 3509 * however older revisions of the spec don't mandate 3510 * this so we currently don't remove these methods 3511 * from the available set. 3512 */ 3513 softc->state = DA_STATE_PROBE_BLK_LIMITS; 3514 goto skipstate; 3515 } 3516 3517 lbp = (struct scsi_vpd_logical_block_prov *) 3518 malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); 3519 3520 if (lbp == NULL) { 3521 printf("dastart: Couldn't malloc lbp data\n"); 3522 /* da_free_periph??? 
*/ 3523 break; 3524 } 3525 3526 scsi_inquiry(&start_ccb->csio, 3527 /*retries*/da_retry_count, 3528 /*cbfcnp*/dadone_probelbp, 3529 /*tag_action*/MSG_SIMPLE_Q_TAG, 3530 /*inq_buf*/(u_int8_t *)lbp, 3531 /*inq_len*/sizeof(*lbp), 3532 /*evpd*/TRUE, 3533 /*page_code*/SVPD_LBP, 3534 /*sense_len*/SSD_MIN_SIZE, 3535 /*timeout*/da_default_timeout * 1000); 3536 start_ccb->ccb_h.ccb_bp = NULL; 3537 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; 3538 xpt_action(start_ccb); 3539 break; 3540 } 3541 case DA_STATE_PROBE_BLK_LIMITS: 3542 { 3543 struct scsi_vpd_block_limits *block_limits; 3544 3545 if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { 3546 /* Not supported skip to next probe */ 3547 softc->state = DA_STATE_PROBE_BDC; 3548 goto skipstate; 3549 } 3550 3551 block_limits = (struct scsi_vpd_block_limits *) 3552 malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); 3553 3554 if (block_limits == NULL) { 3555 printf("dastart: Couldn't malloc block_limits data\n"); 3556 /* da_free_periph??? */ 3557 break; 3558 } 3559 3560 scsi_inquiry(&start_ccb->csio, 3561 /*retries*/da_retry_count, 3562 /*cbfcnp*/dadone_probeblklimits, 3563 /*tag_action*/MSG_SIMPLE_Q_TAG, 3564 /*inq_buf*/(u_int8_t *)block_limits, 3565 /*inq_len*/sizeof(*block_limits), 3566 /*evpd*/TRUE, 3567 /*page_code*/SVPD_BLOCK_LIMITS, 3568 /*sense_len*/SSD_MIN_SIZE, 3569 /*timeout*/da_default_timeout * 1000); 3570 start_ccb->ccb_h.ccb_bp = NULL; 3571 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; 3572 xpt_action(start_ccb); 3573 break; 3574 } 3575 case DA_STATE_PROBE_BDC: 3576 { 3577 struct scsi_vpd_block_characteristics *bdc; 3578 3579 if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { 3580 softc->state = DA_STATE_PROBE_ATA; 3581 goto skipstate; 3582 } 3583 3584 bdc = (struct scsi_vpd_block_characteristics *) 3585 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); 3586 3587 if (bdc == NULL) { 3588 printf("dastart: Couldn't malloc bdc data\n"); 3589 /* da_free_periph??? 
*/ 3590 break; 3591 } 3592 3593 scsi_inquiry(&start_ccb->csio, 3594 /*retries*/da_retry_count, 3595 /*cbfcnp*/dadone_probebdc, 3596 /*tag_action*/MSG_SIMPLE_Q_TAG, 3597 /*inq_buf*/(u_int8_t *)bdc, 3598 /*inq_len*/sizeof(*bdc), 3599 /*evpd*/TRUE, 3600 /*page_code*/SVPD_BDC, 3601 /*sense_len*/SSD_MIN_SIZE, 3602 /*timeout*/da_default_timeout * 1000); 3603 start_ccb->ccb_h.ccb_bp = NULL; 3604 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; 3605 xpt_action(start_ccb); 3606 break; 3607 } 3608 case DA_STATE_PROBE_ATA: 3609 { 3610 struct ata_params *ata_params; 3611 3612 if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { 3613 if ((softc->zone_mode == DA_ZONE_HOST_AWARE) 3614 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { 3615 /* 3616 * Note that if the ATA VPD page isn't 3617 * supported, we aren't talking to an ATA 3618 * device anyway. Support for that VPD 3619 * page is mandatory for SCSI to ATA (SAT) 3620 * translation layers. 3621 */ 3622 softc->state = DA_STATE_PROBE_ZONE; 3623 goto skipstate; 3624 } 3625 daprobedone(periph, start_ccb); 3626 break; 3627 } 3628 3629 ata_params = &periph->path->device->ident_data; 3630 3631 scsi_ata_identify(&start_ccb->csio, 3632 /*retries*/da_retry_count, 3633 /*cbfcnp*/dadone_probeata, 3634 /*tag_action*/MSG_SIMPLE_Q_TAG, 3635 /*data_ptr*/(u_int8_t *)ata_params, 3636 /*dxfer_len*/sizeof(*ata_params), 3637 /*sense_len*/SSD_FULL_SIZE, 3638 /*timeout*/da_default_timeout * 1000); 3639 start_ccb->ccb_h.ccb_bp = NULL; 3640 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; 3641 xpt_action(start_ccb); 3642 break; 3643 } 3644 case DA_STATE_PROBE_ATA_LOGDIR: 3645 { 3646 struct ata_gp_log_dir *log_dir; 3647 int retval; 3648 3649 retval = 0; 3650 3651 if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) { 3652 /* 3653 * If we don't have log support, not much point in 3654 * trying to probe zone support. 
3655 */ 3656 daprobedone(periph, start_ccb); 3657 break; 3658 } 3659 3660 /* 3661 * If we have an ATA device (the SCSI ATA Information VPD 3662 * page should be present and the ATA identify should have 3663 * succeeded) and it supports logs, ask for the log directory. 3664 */ 3665 3666 log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO); 3667 if (log_dir == NULL) { 3668 xpt_print(periph->path, "Couldn't malloc log_dir " 3669 "data\n"); 3670 daprobedone(periph, start_ccb); 3671 break; 3672 } 3673 3674 retval = scsi_ata_read_log(&start_ccb->csio, 3675 /*retries*/ da_retry_count, 3676 /*cbfcnp*/ dadone_probeatalogdir, 3677 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3678 /*log_address*/ ATA_LOG_DIRECTORY, 3679 /*page_number*/ 0, 3680 /*block_count*/ 1, 3681 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3682 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3683 /*data_ptr*/ (uint8_t *)log_dir, 3684 /*dxfer_len*/ sizeof(*log_dir), 3685 /*sense_len*/ SSD_FULL_SIZE, 3686 /*timeout*/ da_default_timeout * 1000); 3687 3688 if (retval != 0) { 3689 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3690 free(log_dir, M_SCSIDA); 3691 daprobedone(periph, start_ccb); 3692 break; 3693 } 3694 start_ccb->ccb_h.ccb_bp = NULL; 3695 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR; 3696 xpt_action(start_ccb); 3697 break; 3698 } 3699 case DA_STATE_PROBE_ATA_IDDIR: 3700 { 3701 struct ata_identify_log_pages *id_dir; 3702 int retval; 3703 3704 retval = 0; 3705 3706 /* 3707 * Check here to see whether the Identify Device log is 3708 * supported in the directory of logs. If so, continue 3709 * with requesting the log of identify device pages. 
3710 */ 3711 if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) { 3712 daprobedone(periph, start_ccb); 3713 break; 3714 } 3715 3716 id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO); 3717 if (id_dir == NULL) { 3718 xpt_print(periph->path, "Couldn't malloc id_dir " 3719 "data\n"); 3720 daprobedone(periph, start_ccb); 3721 break; 3722 } 3723 3724 retval = scsi_ata_read_log(&start_ccb->csio, 3725 /*retries*/ da_retry_count, 3726 /*cbfcnp*/ dadone_probeataiddir, 3727 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3728 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3729 /*page_number*/ ATA_IDL_PAGE_LIST, 3730 /*block_count*/ 1, 3731 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3732 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3733 /*data_ptr*/ (uint8_t *)id_dir, 3734 /*dxfer_len*/ sizeof(*id_dir), 3735 /*sense_len*/ SSD_FULL_SIZE, 3736 /*timeout*/ da_default_timeout * 1000); 3737 3738 if (retval != 0) { 3739 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3740 free(id_dir, M_SCSIDA); 3741 daprobedone(periph, start_ccb); 3742 break; 3743 } 3744 start_ccb->ccb_h.ccb_bp = NULL; 3745 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR; 3746 xpt_action(start_ccb); 3747 break; 3748 } 3749 case DA_STATE_PROBE_ATA_SUP: 3750 { 3751 struct ata_identify_log_sup_cap *sup_cap; 3752 int retval; 3753 3754 retval = 0; 3755 3756 /* 3757 * Check here to see whether the Supported Capabilities log 3758 * is in the list of Identify Device logs. 
3759 */ 3760 if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) { 3761 daprobedone(periph, start_ccb); 3762 break; 3763 } 3764 3765 sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO); 3766 if (sup_cap == NULL) { 3767 xpt_print(periph->path, "Couldn't malloc sup_cap " 3768 "data\n"); 3769 daprobedone(periph, start_ccb); 3770 break; 3771 } 3772 3773 retval = scsi_ata_read_log(&start_ccb->csio, 3774 /*retries*/ da_retry_count, 3775 /*cbfcnp*/ dadone_probeatasup, 3776 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3777 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3778 /*page_number*/ ATA_IDL_SUP_CAP, 3779 /*block_count*/ 1, 3780 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3781 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3782 /*data_ptr*/ (uint8_t *)sup_cap, 3783 /*dxfer_len*/ sizeof(*sup_cap), 3784 /*sense_len*/ SSD_FULL_SIZE, 3785 /*timeout*/ da_default_timeout * 1000); 3786 3787 if (retval != 0) { 3788 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3789 free(sup_cap, M_SCSIDA); 3790 daprobedone(periph, start_ccb); 3791 break; 3792 3793 } 3794 3795 start_ccb->ccb_h.ccb_bp = NULL; 3796 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP; 3797 xpt_action(start_ccb); 3798 break; 3799 } 3800 case DA_STATE_PROBE_ATA_ZONE: 3801 { 3802 struct ata_zoned_info_log *ata_zone; 3803 int retval; 3804 3805 retval = 0; 3806 3807 /* 3808 * Check here to see whether the zoned device information 3809 * page is supported. If so, continue on to request it. 3810 * If not, skip to DA_STATE_PROBE_LOG or done. 
3811 */ 3812 if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) { 3813 daprobedone(periph, start_ccb); 3814 break; 3815 } 3816 ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA, 3817 M_NOWAIT|M_ZERO); 3818 if (ata_zone == NULL) { 3819 xpt_print(periph->path, "Couldn't malloc ata_zone " 3820 "data\n"); 3821 daprobedone(periph, start_ccb); 3822 break; 3823 } 3824 3825 retval = scsi_ata_read_log(&start_ccb->csio, 3826 /*retries*/ da_retry_count, 3827 /*cbfcnp*/ dadone_probeatazone, 3828 /*tag_action*/ MSG_SIMPLE_Q_TAG, 3829 /*log_address*/ ATA_IDENTIFY_DATA_LOG, 3830 /*page_number*/ ATA_IDL_ZDI, 3831 /*block_count*/ 1, 3832 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 3833 AP_PROTO_DMA : AP_PROTO_PIO_IN, 3834 /*data_ptr*/ (uint8_t *)ata_zone, 3835 /*dxfer_len*/ sizeof(*ata_zone), 3836 /*sense_len*/ SSD_FULL_SIZE, 3837 /*timeout*/ da_default_timeout * 1000); 3838 3839 if (retval != 0) { 3840 xpt_print(periph->path, "scsi_ata_read_log() failed!"); 3841 free(ata_zone, M_SCSIDA); 3842 daprobedone(periph, start_ccb); 3843 break; 3844 } 3845 start_ccb->ccb_h.ccb_bp = NULL; 3846 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE; 3847 xpt_action(start_ccb); 3848 3849 break; 3850 } 3851 case DA_STATE_PROBE_ZONE: 3852 { 3853 struct scsi_vpd_zoned_bdc *bdc; 3854 3855 /* 3856 * Note that this page will be supported for SCSI protocol 3857 * devices that support ZBC (SMR devices), as well as ATA 3858 * protocol devices that are behind a SAT (SCSI to ATA 3859 * Translation) layer that supports converting ZBC commands 3860 * to their ZAC equivalents. 
3861 */ 3862 if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) { 3863 daprobedone(periph, start_ccb); 3864 break; 3865 } 3866 bdc = (struct scsi_vpd_zoned_bdc *) 3867 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); 3868 3869 if (bdc == NULL) { 3870 xpt_release_ccb(start_ccb); 3871 xpt_print(periph->path, "Couldn't malloc zone VPD " 3872 "data\n"); 3873 break; 3874 } 3875 scsi_inquiry(&start_ccb->csio, 3876 /*retries*/da_retry_count, 3877 /*cbfcnp*/dadone_probezone, 3878 /*tag_action*/MSG_SIMPLE_Q_TAG, 3879 /*inq_buf*/(u_int8_t *)bdc, 3880 /*inq_len*/sizeof(*bdc), 3881 /*evpd*/TRUE, 3882 /*page_code*/SVPD_ZONED_BDC, 3883 /*sense_len*/SSD_FULL_SIZE, 3884 /*timeout*/da_default_timeout * 1000); 3885 start_ccb->ccb_h.ccb_bp = NULL; 3886 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE; 3887 xpt_action(start_ccb); 3888 break; 3889 } 3890 } 3891 } 3892 3893 /* 3894 * In each of the methods below, while its the caller's 3895 * responsibility to ensure the request will fit into a 3896 * single device request, we might have changed the delete 3897 * method due to the device incorrectly advertising either 3898 * its supported methods or limits. 3899 * 3900 * To prevent this causing further issues we validate the 3901 * against the methods limits, and warn which would 3902 * otherwise be unnecessary. 3903 */ 3904 static void 3905 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 3906 { 3907 struct da_softc *softc = (struct da_softc *)periph->softc;; 3908 struct bio *bp1; 3909 uint8_t *buf = softc->unmap_buf; 3910 struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE]; 3911 uint64_t lba, lastlba = (uint64_t)-1; 3912 uint64_t totalcount = 0; 3913 uint64_t count; 3914 uint32_t c, lastcount = 0, ranges = 0; 3915 3916 /* 3917 * Currently this doesn't take the UNMAP 3918 * Granularity and Granularity Alignment 3919 * fields into account. 
3920 * 3921 * This could result in both unoptimal unmap 3922 * requests as as well as UNMAP calls unmapping 3923 * fewer LBA's than requested. 3924 */ 3925 3926 bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); 3927 bp1 = bp; 3928 do { 3929 /* 3930 * Note: ada and da are different in how they store the 3931 * pending bp's in a trim. ada stores all of them in the 3932 * trim_req.bps. da stores all but the first one in the 3933 * delete_run_queue. ada then completes all the bps in 3934 * its adadone() loop. da completes all the bps in the 3935 * delete_run_queue in dadone, and relies on the biodone 3936 * after to complete. This should be reconciled since there's 3937 * no real reason to do it differently. XXX 3938 */ 3939 if (bp1 != bp) 3940 bioq_insert_tail(&softc->delete_run_queue, bp1); 3941 lba = bp1->bio_pblkno; 3942 count = bp1->bio_bcount / softc->params.secsize; 3943 3944 /* Try to extend the previous range. */ 3945 if (lba == lastlba) { 3946 c = omin(count, UNMAP_RANGE_MAX - lastcount); 3947 lastlba += c; 3948 lastcount += c; 3949 scsi_ulto4b(lastcount, d[ranges - 1].length); 3950 count -= c; 3951 lba += c; 3952 totalcount += c; 3953 } else if ((softc->quirks & DA_Q_STRICT_UNMAP) && 3954 softc->unmap_gran != 0) { 3955 /* Align length of the previous range. */ 3956 if ((c = lastcount % softc->unmap_gran) != 0) { 3957 if (lastcount <= c) { 3958 totalcount -= lastcount; 3959 lastlba = (uint64_t)-1; 3960 lastcount = 0; 3961 ranges--; 3962 } else { 3963 totalcount -= c; 3964 lastlba -= c; 3965 lastcount -= c; 3966 scsi_ulto4b(lastcount, 3967 d[ranges - 1].length); 3968 } 3969 } 3970 /* Align beginning of the new range. 
*/ 3971 c = (lba - softc->unmap_gran_align) % softc->unmap_gran; 3972 if (c != 0) { 3973 c = softc->unmap_gran - c; 3974 if (count <= c) { 3975 count = 0; 3976 } else { 3977 lba += c; 3978 count -= c; 3979 } 3980 } 3981 } 3982 3983 while (count > 0) { 3984 c = omin(count, UNMAP_RANGE_MAX); 3985 if (totalcount + c > softc->unmap_max_lba || 3986 ranges >= softc->unmap_max_ranges) { 3987 xpt_print(periph->path, 3988 "%s issuing short delete %ld > %ld" 3989 "|| %d >= %d", 3990 da_delete_method_desc[softc->delete_method], 3991 totalcount + c, softc->unmap_max_lba, 3992 ranges, softc->unmap_max_ranges); 3993 break; 3994 } 3995 scsi_u64to8b(lba, d[ranges].lba); 3996 scsi_ulto4b(c, d[ranges].length); 3997 lba += c; 3998 totalcount += c; 3999 ranges++; 4000 count -= c; 4001 lastlba = lba; 4002 lastcount = c; 4003 } 4004 bp1 = cam_iosched_next_trim(softc->cam_iosched); 4005 if (bp1 == NULL) 4006 break; 4007 if (ranges >= softc->unmap_max_ranges || 4008 totalcount + bp1->bio_bcount / 4009 softc->params.secsize > softc->unmap_max_lba) { 4010 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 4011 break; 4012 } 4013 } while (1); 4014 4015 /* Align length of the last range. 
*/ 4016 if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 && 4017 (c = lastcount % softc->unmap_gran) != 0) { 4018 if (lastcount <= c) 4019 ranges--; 4020 else 4021 scsi_ulto4b(lastcount - c, d[ranges - 1].length); 4022 } 4023 4024 scsi_ulto2b(ranges * 16 + 6, &buf[0]); 4025 scsi_ulto2b(ranges * 16, &buf[2]); 4026 4027 scsi_unmap(&ccb->csio, 4028 /*retries*/da_retry_count, 4029 /*cbfcnp*/dadone, 4030 /*tag_action*/MSG_SIMPLE_Q_TAG, 4031 /*byte2*/0, 4032 /*data_ptr*/ buf, 4033 /*dxfer_len*/ ranges * 16 + 8, 4034 /*sense_len*/SSD_FULL_SIZE, 4035 da_default_timeout * 1000); 4036 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 4037 ccb->ccb_h.flags |= CAM_UNLOCKED; 4038 softc->trim_count++; 4039 softc->trim_ranges += ranges; 4040 softc->trim_lbas += totalcount; 4041 cam_iosched_submit_trim(softc->cam_iosched); 4042 } 4043 4044 static void 4045 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 4046 { 4047 struct da_softc *softc = (struct da_softc *)periph->softc; 4048 struct bio *bp1; 4049 uint8_t *buf = softc->unmap_buf; 4050 uint64_t lastlba = (uint64_t)-1; 4051 uint64_t count; 4052 uint64_t lba; 4053 uint32_t lastcount = 0, c, requestcount; 4054 int ranges = 0, off, block_count; 4055 4056 bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); 4057 bp1 = bp; 4058 do { 4059 if (bp1 != bp)//XXX imp XXX 4060 bioq_insert_tail(&softc->delete_run_queue, bp1); 4061 lba = bp1->bio_pblkno; 4062 count = bp1->bio_bcount / softc->params.secsize; 4063 requestcount = count; 4064 4065 /* Try to extend the previous range. 
*/ 4066 if (lba == lastlba) { 4067 c = omin(count, ATA_DSM_RANGE_MAX - lastcount); 4068 lastcount += c; 4069 off = (ranges - 1) * 8; 4070 buf[off + 6] = lastcount & 0xff; 4071 buf[off + 7] = (lastcount >> 8) & 0xff; 4072 count -= c; 4073 lba += c; 4074 } 4075 4076 while (count > 0) { 4077 c = omin(count, ATA_DSM_RANGE_MAX); 4078 off = ranges * 8; 4079 4080 buf[off + 0] = lba & 0xff; 4081 buf[off + 1] = (lba >> 8) & 0xff; 4082 buf[off + 2] = (lba >> 16) & 0xff; 4083 buf[off + 3] = (lba >> 24) & 0xff; 4084 buf[off + 4] = (lba >> 32) & 0xff; 4085 buf[off + 5] = (lba >> 40) & 0xff; 4086 buf[off + 6] = c & 0xff; 4087 buf[off + 7] = (c >> 8) & 0xff; 4088 lba += c; 4089 ranges++; 4090 count -= c; 4091 lastcount = c; 4092 if (count != 0 && ranges == softc->trim_max_ranges) { 4093 xpt_print(periph->path, 4094 "%s issuing short delete %ld > %ld\n", 4095 da_delete_method_desc[softc->delete_method], 4096 requestcount, 4097 (softc->trim_max_ranges - ranges) * 4098 ATA_DSM_RANGE_MAX); 4099 break; 4100 } 4101 } 4102 lastlba = lba; 4103 bp1 = cam_iosched_next_trim(softc->cam_iosched); 4104 if (bp1 == NULL) 4105 break; 4106 if (bp1->bio_bcount / softc->params.secsize > 4107 (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { 4108 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 4109 break; 4110 } 4111 } while (1); 4112 4113 block_count = howmany(ranges, ATA_DSM_BLK_RANGES); 4114 scsi_ata_trim(&ccb->csio, 4115 /*retries*/da_retry_count, 4116 /*cbfcnp*/dadone, 4117 /*tag_action*/MSG_SIMPLE_Q_TAG, 4118 block_count, 4119 /*data_ptr*/buf, 4120 /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, 4121 /*sense_len*/SSD_FULL_SIZE, 4122 da_default_timeout * 1000); 4123 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 4124 ccb->ccb_h.flags |= CAM_UNLOCKED; 4125 cam_iosched_submit_trim(softc->cam_iosched); 4126 } 4127 4128 /* 4129 * We calculate ws_max_blks here based off d_delmaxsize instead 4130 * of using softc->ws_max_blks as it is absolute max for the 4131 * device not the protocol max which may 
well be lower. 4132 */ 4133 static void 4134 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) 4135 { 4136 struct da_softc *softc; 4137 struct bio *bp1; 4138 uint64_t ws_max_blks; 4139 uint64_t lba; 4140 uint64_t count; /* forward compat with WS32 */ 4141 4142 softc = (struct da_softc *)periph->softc; 4143 ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; 4144 lba = bp->bio_pblkno; 4145 count = 0; 4146 bp1 = bp; 4147 do { 4148 if (bp1 != bp)//XXX imp XXX 4149 bioq_insert_tail(&softc->delete_run_queue, bp1); 4150 count += bp1->bio_bcount / softc->params.secsize; 4151 if (count > ws_max_blks) { 4152 xpt_print(periph->path, 4153 "%s issuing short delete %ld > %ld\n", 4154 da_delete_method_desc[softc->delete_method], 4155 count, ws_max_blks); 4156 count = omin(count, ws_max_blks); 4157 break; 4158 } 4159 bp1 = cam_iosched_next_trim(softc->cam_iosched); 4160 if (bp1 == NULL) 4161 break; 4162 if (lba + count != bp1->bio_pblkno || 4163 count + bp1->bio_bcount / 4164 softc->params.secsize > ws_max_blks) { 4165 cam_iosched_put_back_trim(softc->cam_iosched, bp1); 4166 break; 4167 } 4168 } while (1); 4169 4170 scsi_write_same(&ccb->csio, 4171 /*retries*/da_retry_count, 4172 /*cbfcnp*/dadone, 4173 /*tag_action*/MSG_SIMPLE_Q_TAG, 4174 /*byte2*/softc->delete_method == 4175 DA_DELETE_ZERO ? 0 : SWS_UNMAP, 4176 softc->delete_method == DA_DELETE_WS16 ? 
16 : 10, 4177 /*lba*/lba, 4178 /*block_count*/count, 4179 /*data_ptr*/ __DECONST(void *, zero_region), 4180 /*dxfer_len*/ softc->params.secsize, 4181 /*sense_len*/SSD_FULL_SIZE, 4182 da_default_timeout * 1000); 4183 ccb->ccb_h.ccb_state = DA_CCB_DELETE; 4184 ccb->ccb_h.flags |= CAM_UNLOCKED; 4185 cam_iosched_submit_trim(softc->cam_iosched); 4186 } 4187 4188 static int 4189 cmd6workaround(union ccb *ccb) 4190 { 4191 struct scsi_rw_6 cmd6; 4192 struct scsi_rw_10 *cmd10; 4193 struct da_softc *softc; 4194 u_int8_t *cdb; 4195 struct bio *bp; 4196 int frozen; 4197 4198 cdb = ccb->csio.cdb_io.cdb_bytes; 4199 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; 4200 4201 if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { 4202 da_delete_methods old_method = softc->delete_method; 4203 4204 /* 4205 * Typically there are two reasons for failure here 4206 * 1. Delete method was detected as supported but isn't 4207 * 2. Delete failed due to invalid params e.g. too big 4208 * 4209 * While we will attempt to choose an alternative delete method 4210 * this may result in short deletes if the existing delete 4211 * requests from geom are big for the new method chosen. 
4212 * 4213 * This method assumes that the error which triggered this 4214 * will not retry the io otherwise a panic will occur 4215 */ 4216 dadeleteflag(softc, old_method, 0); 4217 dadeletemethodchoose(softc, DA_DELETE_DISABLE); 4218 if (softc->delete_method == DA_DELETE_DISABLE) 4219 xpt_print(ccb->ccb_h.path, 4220 "%s failed, disabling BIO_DELETE\n", 4221 da_delete_method_desc[old_method]); 4222 else 4223 xpt_print(ccb->ccb_h.path, 4224 "%s failed, switching to %s BIO_DELETE\n", 4225 da_delete_method_desc[old_method], 4226 da_delete_method_desc[softc->delete_method]); 4227 4228 while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL) 4229 cam_iosched_queue_work(softc->cam_iosched, bp); 4230 cam_iosched_queue_work(softc->cam_iosched, 4231 (struct bio *)ccb->ccb_h.ccb_bp); 4232 ccb->ccb_h.ccb_bp = NULL; 4233 return (0); 4234 } 4235 4236 /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ 4237 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && 4238 (*cdb == PREVENT_ALLOW) && 4239 (softc->quirks & DA_Q_NO_PREVENT) == 0) { 4240 if (bootverbose) 4241 xpt_print(ccb->ccb_h.path, 4242 "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); 4243 softc->quirks |= DA_Q_NO_PREVENT; 4244 return (0); 4245 } 4246 4247 /* Detect unsupported SYNCHRONIZE CACHE(10). 
*/ 4248 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && 4249 (*cdb == SYNCHRONIZE_CACHE) && 4250 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { 4251 if (bootverbose) 4252 xpt_print(ccb->ccb_h.path, 4253 "SYNCHRONIZE CACHE(10) not supported.\n"); 4254 softc->quirks |= DA_Q_NO_SYNC_CACHE; 4255 softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; 4256 return (0); 4257 } 4258 4259 /* Translation only possible if CDB is an array and cmd is R/W6 */ 4260 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || 4261 (*cdb != READ_6 && *cdb != WRITE_6)) 4262 return 0; 4263 4264 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " 4265 "increasing minimum_cmd_size to 10.\n"); 4266 softc->minimum_cmd_size = 10; 4267 4268 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); 4269 cmd10 = (struct scsi_rw_10 *)cdb; 4270 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10; 4271 cmd10->byte2 = 0; 4272 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); 4273 cmd10->reserved = 0; 4274 scsi_ulto2b(cmd6.length, cmd10->length); 4275 cmd10->control = cmd6.control; 4276 ccb->csio.cdb_len = sizeof(*cmd10); 4277 4278 /* Requeue request, unfreezing queue if necessary */ 4279 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 4280 ccb->ccb_h.status = CAM_REQUEUE_REQ; 4281 xpt_action(ccb); 4282 if (frozen) { 4283 cam_release_devq(ccb->ccb_h.path, 4284 /*relsim_flags*/0, 4285 /*reduction*/0, 4286 /*timeout*/0, 4287 /*getcount_only*/0); 4288 } 4289 return (ERESTART); 4290 } 4291 4292 static void 4293 dazonedone(struct cam_periph *periph, union ccb *ccb) 4294 { 4295 struct da_softc *softc; 4296 struct bio *bp; 4297 4298 softc = periph->softc; 4299 bp = (struct bio *)ccb->ccb_h.ccb_bp; 4300 4301 switch (bp->bio_zone.zone_cmd) { 4302 case DISK_ZONE_OPEN: 4303 case DISK_ZONE_CLOSE: 4304 case DISK_ZONE_FINISH: 4305 case DISK_ZONE_RWP: 4306 break; 4307 case DISK_ZONE_REPORT_ZONES: { 4308 uint32_t avail_len; 4309 struct disk_zone_report *rep; 4310 struct scsi_report_zones_hdr *hdr; 4311 struct 
scsi_report_zones_desc *desc;
		struct disk_zone_rep_entry *entry;
		uint32_t hdr_len, num_avail;
		uint32_t num_to_fill, i;
		int ata;

		rep = &bp->bio_zone.zone_params.report;
		/* Bytes the drive actually returned for this command. */
		avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
		/*
		 * Note that bio_resid isn't normally used for zone
		 * commands, but it is used by devstat_end_transaction_bio()
		 * to determine how much data was transferred.  Because
		 * the size of the SCSI/ATA data structures is different
		 * than the size of the BIO interface structures, the
		 * amount of data actually transferred from the drive will
		 * be different than the amount of data transferred to
		 * the user.
		 */
		bp->bio_resid = ccb->csio.resid;
		hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
		if (avail_len < sizeof(*hdr)) {
			/*
			 * Is there a better error than EIO here?  We asked
			 * for at least the header, and we got less than
			 * that.
			 */
			bp->bio_error = EIO;
			bp->bio_flags |= BIO_ERROR;
			bp->bio_resid = bp->bio_bcount;
			break;
		}

		/* ATA pass-through responses are little-endian. */
		if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
			ata = 1;
		else
			ata = 0;

		hdr_len = ata ? le32dec(hdr->length) :
		    scsi_4btoul(hdr->length);
		if (hdr_len > 0)
			rep->entries_available = hdr_len / sizeof(*desc);
		else
			rep->entries_available = 0;
		/*
		 * NOTE: using the same values for the BIO version of the
		 * same field as the SCSI/ATA values.  This means we could
		 * get some additional values that aren't defined in bio.h
		 * if more values of the same field are defined later.
		 */
		rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
		rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) :
		    scsi_8btou64(hdr->maximum_lba);
		/*
		 * If the drive reports no entries that match the query,
		 * we're done.
		 */
		if (hdr_len == 0) {
			rep->entries_filled = 0;
			break;
		}

		/*
		 * Clamp to both what the drive says it has and what was
		 * actually transferred into our buffer.
		 */
		num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
		    hdr_len / sizeof(*desc));
		/*
		 * If the drive didn't return any data, then we're done.
		 */
		if (num_avail == 0) {
			rep->entries_filled = 0;
			break;
		}

		num_to_fill = min(num_avail, rep->entries_allocated);
		/*
		 * If the user didn't allocate any entries for us to fill,
		 * we're done.
		 */
		if (num_to_fill == 0) {
			rep->entries_filled = 0;
			break;
		}

		for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
		     i < num_to_fill; i++, desc++, entry++) {
			/*
			 * NOTE: we're mapping the values here directly
			 * from the SCSI/ATA bit definitions to the bio.h
			 * definitons.  There is also a warning in
			 * disk_zone.h, but the impact is that if
			 * additional values are added in the SCSI/ATA
			 * specs these will be visible to consumers of
			 * this interface.
			 */
			entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
			entry->zone_condition =
			    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
			    SRZ_ZONE_COND_SHIFT;
			/*
			 * NOTE(review): |= accumulates into whatever value
			 * entry->zone_flags already holds — presumably the
			 * caller zeroes rep->entries; verify.
			 */
			entry->zone_flags |= desc->zone_flags &
			    (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
			entry->zone_length =
			    ata ? le64dec(desc->zone_length) :
			    scsi_8btou64(desc->zone_length);
			entry->zone_start_lba =
			    ata ? le64dec(desc->zone_start_lba) :
			    scsi_8btou64(desc->zone_start_lba);
			entry->write_pointer_lba =
			    ata ? le64dec(desc->write_pointer_lba) :
			    scsi_8btou64(desc->write_pointer_lba);
		}
		rep->entries_filled = num_to_fill;
		break;
	}
	case DISK_ZONE_GET_PARAMS:
	default:
		/*
		 * In theory we should not get a GET_PARAMS bio, since it
		 * should be handled without queueing the command to the
		 * drive.
		 */
		panic("%s: Invalid zone command %d", __func__,
		    bp->bio_zone.zone_cmd);
		break;
	}

	/* The REPORT ZONES buffer was allocated when the command was built. */
	if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
		free(ccb->csio.data_ptr, M_SCSIDA);
}

/*
 * Completion handler for normal buffer I/O (DA_CCB_BUFFER_IO) and
 * delete (DA_CCB_DELETE) CCBs issued from dastart().
 */
static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct bio *bp, *bp1;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	u_int32_t priority;
	da_ccb_state state;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;

	cam_periph_lock(periph);
	bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		int error;
		int sf;

		/* Retry on unit attention if this CCB was flagged for it. */
		if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
			sf = SF_RETRY_UA;
		else
			sf = 0;

		error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
		if (error == ERESTART) {
			/* A retry was scheduled, so just return. */
			cam_periph_unlock(periph);
			return;
		}
		/* daerror() may have changed ccb_bp (e.g. on requeue). */
		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if (error != 0) {
			int queued_error;

			/*
			 * return all queued I/O with EIO, so that
			 * the client can retry these I/Os in the
			 * proper order should it attempt to recover.
			 */
			queued_error = EIO;

			if (error == ENXIO
			 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
				/*
				 * Catastrophic error.  Mark our pack as
				 * invalid.
				 *
				 * XXX See if this is really a media
				 * XXX change first?
4495 */ 4496 xpt_print(periph->path, "Invalidating pack\n"); 4497 softc->flags |= DA_FLAG_PACK_INVALID; 4498 #ifdef CAM_IO_STATS 4499 softc->invalidations++; 4500 #endif 4501 queued_error = ENXIO; 4502 } 4503 cam_iosched_flush(softc->cam_iosched, NULL, 4504 queued_error); 4505 if (bp != NULL) { 4506 bp->bio_error = error; 4507 bp->bio_resid = bp->bio_bcount; 4508 bp->bio_flags |= BIO_ERROR; 4509 } 4510 } else if (bp != NULL) { 4511 if (state == DA_CCB_DELETE) 4512 bp->bio_resid = 0; 4513 else 4514 bp->bio_resid = csio->resid; 4515 bp->bio_error = 0; 4516 if (bp->bio_resid != 0) 4517 bp->bio_flags |= BIO_ERROR; 4518 } 4519 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 4520 cam_release_devq(done_ccb->ccb_h.path, 4521 /*relsim_flags*/0, 4522 /*reduction*/0, 4523 /*timeout*/0, 4524 /*getcount_only*/0); 4525 } else if (bp != NULL) { 4526 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 4527 panic("REQ_CMP with QFRZN"); 4528 if (bp->bio_cmd == BIO_ZONE) 4529 dazonedone(periph, done_ccb); 4530 else if (state == DA_CCB_DELETE) 4531 bp->bio_resid = 0; 4532 else 4533 bp->bio_resid = csio->resid; 4534 if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE)) 4535 bp->bio_flags |= BIO_ERROR; 4536 if (softc->error_inject != 0) { 4537 bp->bio_error = softc->error_inject; 4538 bp->bio_resid = bp->bio_bcount; 4539 bp->bio_flags |= BIO_ERROR; 4540 softc->error_inject = 0; 4541 } 4542 } 4543 4544 if (bp != NULL) 4545 biotrack(bp, __func__); 4546 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); 4547 if (LIST_EMPTY(&softc->pending_ccbs)) 4548 softc->flags |= DA_FLAG_WAS_OTAG; 4549 4550 /* 4551 * We need to call cam_iosched before we call biodone so that we don't 4552 * measure any activity that happens in the completion routine, which in 4553 * the case of sendfile can be quite extensive. Release the periph 4554 * refcount taken in dastart() for each CCB. 
4555 */ 4556 cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); 4557 xpt_release_ccb(done_ccb); 4558 KASSERT(softc->refcount >= 1, ("dadone softc %p refcount %d", softc, softc->refcount)); 4559 softc->refcount--; 4560 if (state == DA_CCB_DELETE) { 4561 TAILQ_HEAD(, bio) queue; 4562 4563 TAILQ_INIT(&queue); 4564 TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); 4565 softc->delete_run_queue.insert_point = NULL; 4566 /* 4567 * Normally, the xpt_release_ccb() above would make sure 4568 * that when we have more work to do, that work would 4569 * get kicked off. However, we specifically keep 4570 * delete_running set to 0 before the call above to 4571 * allow other I/O to progress when many BIO_DELETE 4572 * requests are pushed down. We set delete_running to 0 4573 * and call daschedule again so that we don't stall if 4574 * there are no other I/Os pending apart from BIO_DELETEs. 4575 */ 4576 cam_iosched_trim_done(softc->cam_iosched); 4577 daschedule(periph); 4578 cam_periph_unlock(periph); 4579 while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { 4580 TAILQ_REMOVE(&queue, bp1, bio_queue); 4581 bp1->bio_error = bp->bio_error; 4582 if (bp->bio_flags & BIO_ERROR) { 4583 bp1->bio_flags |= BIO_ERROR; 4584 bp1->bio_resid = bp1->bio_bcount; 4585 } else 4586 bp1->bio_resid = 0; 4587 biodone(bp1); 4588 } 4589 } else { 4590 daschedule(periph); 4591 cam_periph_unlock(periph); 4592 } 4593 if (bp != NULL) 4594 biodone(bp); 4595 return; 4596 } 4597 4598 static void 4599 dadone_probewp(struct cam_periph *periph, union ccb *done_ccb) 4600 { 4601 struct scsi_mode_header_6 *mode_hdr6; 4602 struct scsi_mode_header_10 *mode_hdr10; 4603 struct da_softc *softc; 4604 struct ccb_scsiio *csio; 4605 u_int32_t priority; 4606 uint8_t dev_spec; 4607 4608 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probewp\n")); 4609 4610 softc = (struct da_softc *)periph->softc; 4611 priority = done_ccb->ccb_h.pinfo.priority; 4612 csio = &done_ccb->csio; 4613 4614 cam_periph_assert(periph, 
MA_OWNED);

    /* The mode header layout depends on which MODE SENSE CDB was used. */
    if (softc->minimum_cmd_size > 6) {
        mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr;
        dev_spec = mode_hdr10->dev_spec;
    } else {
        mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr;
        dev_spec = mode_hdr6->dev_spec;
    }
    if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
        /* Bit 7 of the device-specific parameter is the WP bit. */
        if ((dev_spec & 0x80) != 0)
            softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT;
        else
            softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT;
    } else {
        int error;

        error = daerror(done_ccb, CAM_RETRY_SELTO,
            SF_RETRY_UA|SF_NO_PRINT);
        if (error == ERESTART)
            return;
        else if (error != 0) {
            if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
                /* Don't wedge this device's queue */
                cam_release_devq(done_ccb->ccb_h.path,
                    /*relsim_flags*/0,
                    /*reduction*/0,
                    /*timeout*/0,
                    /*getcount_only*/0);
            }
        }
    }

    free(csio->data_ptr, M_SCSIDA);
    xpt_release_ccb(done_ccb);
    if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
        softc->state = DA_STATE_PROBE_RC16;
    else
        softc->state = DA_STATE_PROBE_RC;
    xpt_schedule(periph, priority);
    return;
}

/*
 * Completion handler for READ CAPACITY(10)/(16) (DA_CCB_PROBE_RC /
 * DA_CCB_PROBE_RC16): sets disk geometry via dasetgeom(), builds the
 * announcement string, falls back from RC16 to RC10 on ILLEGAL REQUEST,
 * escalates from RC10 to RC16 when the capacity saturates at 2^32-1,
 * and on success advances the probe to LBP or BDC probing.  On fatal
 * errors the periph is invalidated.
 */
static void
dadone_proberc(struct cam_periph *periph, union ccb *done_ccb)
{
    struct scsi_read_capacity_data *rdcap;
    struct scsi_read_capacity_data_long *rcaplong;
    struct da_softc *softc;
    struct ccb_scsiio *csio;
    da_ccb_state state;
    char *announce_buf;
    u_int32_t priority;
    int lbp, n;

    CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_proberc\n"));

    softc = (struct da_softc *)periph->softc;
    priority = done_ccb->ccb_h.pinfo.priority;
    csio = &done_ccb->csio;
    state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;

    lbp = 0;
    rdcap = NULL;
    rcaplong = NULL;
    /* XXX TODO: can this be a malloc? */
    announce_buf = softc->announce_temp;
    bzero(announce_buf, DA_ANNOUNCETMP_SZ);

    if (state == DA_CCB_PROBE_RC)
        rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
    else
        rcaplong = (struct scsi_read_capacity_data_long *)
            csio->data_ptr;

    cam_periph_assert(periph, MA_OWNED);

    if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
        struct disk_params *dp;
        uint32_t block_size;
        uint64_t maxsector;
        u_int lalba;	/* Lowest aligned LBA. */

        if (state == DA_CCB_PROBE_RC) {
            block_size = scsi_4btoul(rdcap->length);
            maxsector = scsi_4btoul(rdcap->addr);
            lalba = 0;

            /*
             * According to SBC-2, if the standard 10
             * byte READ CAPACITY command returns 2^32,
             * we should issue the 16 byte version of
             * the command, since the device in question
             * has more sectors than can be represented
             * with the short version of the command.
             */
            if (maxsector == 0xffffffff) {
                free(rdcap, M_SCSIDA);
                xpt_release_ccb(done_ccb);
                softc->state = DA_STATE_PROBE_RC16;
                xpt_schedule(periph, priority);
                return;
            }
        } else {
            block_size = scsi_4btoul(rcaplong->length);
            maxsector = scsi_8btou64(rcaplong->addr);
            lalba = scsi_2btoul(rcaplong->lalba_lbp);
        }

        /*
         * Because GEOM code just will panic us if we
         * give them an 'illegal' value we'll avoid that
         * here.
         */
        if (block_size == 0) {
            block_size = 512;
            if (maxsector == 0)
                maxsector = -1;
        }
        if (block_size >= MAXPHYS) {
            xpt_print(periph->path,
                "unsupportable block size %ju\n",
                (uintmax_t) block_size);
            announce_buf = NULL;
            cam_periph_invalidate(periph);
        } else {
            /*
             * We pass rcaplong into dasetgeom(),
             * because it will only use it if it is
             * non-NULL.
             */
            dasetgeom(periph, block_size, maxsector,
                rcaplong, sizeof(*rcaplong));
            lbp = (lalba & SRC16_LBPME_A);
            dp = &softc->params;
            n = snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
                "%juMB (%ju %u byte sectors",
                ((uintmax_t)dp->secsize * dp->sectors) /
                 (1024 * 1024),
                (uintmax_t)dp->sectors, dp->secsize);
            if (softc->p_type != 0) {
                n += snprintf(announce_buf + n,
                    DA_ANNOUNCETMP_SZ - n,
                    ", DIF type %d", softc->p_type);
            }
            snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ")");
        }
    } else {
        int error;

        /*
         * Retry any UNIT ATTENTION type errors. They
         * are expected at boot.
         */
        error = daerror(done_ccb, CAM_RETRY_SELTO,
            SF_RETRY_UA|SF_NO_PRINT);
        if (error == ERESTART) {
            /*
             * A retry was scheuled, so
             * just return.
             */
            return;
        } else if (error != 0) {
            int asc, ascq;
            int sense_key, error_code;
            int have_sense;
            cam_status status;
            struct ccb_getdev cgd;

            /* Don't wedge this device's queue */
            status = done_ccb->ccb_h.status;
            if ((status & CAM_DEV_QFRZN) != 0)
                cam_release_devq(done_ccb->ccb_h.path,
                    /*relsim_flags*/0,
                    /*reduction*/0,
                    /*timeout*/0,
                    /*getcount_only*/0);


            xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
                CAM_PRIORITY_NORMAL);
            cgd.ccb_h.func_code = XPT_GDEV_TYPE;
            xpt_action((union ccb *)&cgd);

            if (scsi_extract_sense_ccb(done_ccb,
                &error_code, &sense_key, &asc, &ascq))
                have_sense = TRUE;
            else
                have_sense = FALSE;

            /*
             * If we tried READ CAPACITY(16) and failed,
             * fallback to READ CAPACITY(10).
             */
            if ((state == DA_CCB_PROBE_RC16) &&
                (softc->flags & DA_FLAG_CAN_RC16) &&
                (((csio->ccb_h.status & CAM_STATUS_MASK) ==
                CAM_REQ_INVALID) ||
                 ((have_sense) &&
                  (error_code == SSD_CURRENT_ERROR ||
                   error_code == SSD_DESC_CURRENT_ERROR) &&
                  (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
                cam_periph_assert(periph, MA_OWNED);
                softc->flags &= ~DA_FLAG_CAN_RC16;
                free(rdcap, M_SCSIDA);
                xpt_release_ccb(done_ccb);
                softc->state = DA_STATE_PROBE_RC;
                xpt_schedule(periph, priority);
                return;
            }

            /*
             * Attach to anything that claims to be a
             * direct access or optical disk device,
             * as long as it doesn't return a "Logical
             * unit not supported" (0x25) error.
             * "Internal Target Failure" (0x44) is also
             * special and typically means that the
             * device is a SATA drive behind a SATL
             * translation that's fallen into a
             * terminally fatal state.
             */
            if ((have_sense)
             && (asc != 0x25) && (asc != 0x44)
             && (error_code == SSD_CURRENT_ERROR
              || error_code == SSD_DESC_CURRENT_ERROR)) {
                const char *sense_key_desc;
                const char *asc_desc;

                /* Attach anyway, with placeholder geometry. */
                dasetgeom(periph, 512, -1, NULL, 0);
                scsi_sense_desc(sense_key, asc, ascq,
                    &cgd.inq_data, &sense_key_desc,
                    &asc_desc);
                snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
                    "Attempt to query device "
                    "size failed: %s, %s",
                    sense_key_desc, asc_desc);
            } else {
                if (have_sense)
                    scsi_sense_print(&done_ccb->csio);
                else {
                    xpt_print(periph->path,
                        "got CAM status %#x\n",
                        done_ccb->ccb_h.status);
                }

                xpt_print(periph->path, "fatal error, "
                    "failed to attach to device\n");

                announce_buf = NULL;

                /*
                 * Free up resources.
                 */
                cam_periph_invalidate(periph);
            }
        }
    }
    free(csio->data_ptr, M_SCSIDA);
    if (announce_buf != NULL &&
        ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
        struct sbuf sb;

        sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ,
            SBUF_FIXEDLEN);
        xpt_announce_periph_sbuf(periph, &sb, announce_buf);
        xpt_announce_quirks_sbuf(periph, &sb, softc->quirks,
            DA_Q_BIT_STRING);
        sbuf_finish(&sb);
        sbuf_putbuf(&sb);

        /*
         * Create our sysctl variables, now that we know
         * we have successfully attached.
         */
        /* increase the refcount */
        if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) {
            taskqueue_enqueue(taskqueue_thread,
                &softc->sysctl_task);
        } else {
            /* XXX This message is useless! */
            xpt_print(periph->path, "fatal error, "
                "could not acquire reference count\n");
        }
    }

    /* We already probed the device. */
    if (softc->flags & DA_FLAG_PROBED) {
        daprobedone(periph, done_ccb);
        return;
    }

    /* Ensure re-probe doesn't see old delete. */
    softc->delete_available = 0;
    dadeleteflag(softc, DA_DELETE_ZERO, 1);
    if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
        /*
         * Based on older SBC-3 spec revisions
         * any of the UNMAP methods "may" be
         * available via LBP given this flag so
         * we flag all of them as available and
         * then remove those which further
         * probes confirm aren't available
         * later.
4918 * 4919 * We could also check readcap(16) p_type 4920 * flag to exclude one or more invalid 4921 * write same (X) types here 4922 */ 4923 dadeleteflag(softc, DA_DELETE_WS16, 1); 4924 dadeleteflag(softc, DA_DELETE_WS10, 1); 4925 dadeleteflag(softc, DA_DELETE_UNMAP, 1); 4926 4927 xpt_release_ccb(done_ccb); 4928 softc->state = DA_STATE_PROBE_LBP; 4929 xpt_schedule(periph, priority); 4930 return; 4931 } 4932 4933 xpt_release_ccb(done_ccb); 4934 softc->state = DA_STATE_PROBE_BDC; 4935 xpt_schedule(periph, priority); 4936 return; 4937 } 4938 4939 static void 4940 dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb) 4941 { 4942 struct scsi_vpd_logical_block_prov *lbp; 4943 struct da_softc *softc; 4944 struct ccb_scsiio *csio; 4945 u_int32_t priority; 4946 4947 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probelbp\n")); 4948 4949 softc = (struct da_softc *)periph->softc; 4950 priority = done_ccb->ccb_h.pinfo.priority; 4951 csio = &done_ccb->csio; 4952 lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; 4953 4954 cam_periph_assert(periph, MA_OWNED); 4955 4956 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 4957 /* 4958 * T10/1799-D Revision 31 states at least one of these 4959 * must be supported but we don't currently enforce this. 
4960 */ 4961 dadeleteflag(softc, DA_DELETE_WS16, 4962 (lbp->flags & SVPD_LBP_WS16)); 4963 dadeleteflag(softc, DA_DELETE_WS10, 4964 (lbp->flags & SVPD_LBP_WS10)); 4965 dadeleteflag(softc, DA_DELETE_UNMAP, 4966 (lbp->flags & SVPD_LBP_UNMAP)); 4967 } else { 4968 int error; 4969 error = daerror(done_ccb, CAM_RETRY_SELTO, 4970 SF_RETRY_UA|SF_NO_PRINT); 4971 if (error == ERESTART) 4972 return; 4973 else if (error != 0) { 4974 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 4975 /* Don't wedge this device's queue */ 4976 cam_release_devq(done_ccb->ccb_h.path, 4977 /*relsim_flags*/0, 4978 /*reduction*/0, 4979 /*timeout*/0, 4980 /*getcount_only*/0); 4981 } 4982 4983 /* 4984 * Failure indicates we don't support any SBC-3 4985 * delete methods with UNMAP 4986 */ 4987 } 4988 } 4989 4990 free(lbp, M_SCSIDA); 4991 xpt_release_ccb(done_ccb); 4992 softc->state = DA_STATE_PROBE_BLK_LIMITS; 4993 xpt_schedule(periph, priority); 4994 return; 4995 } 4996 4997 static void 4998 dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb) 4999 { 5000 struct scsi_vpd_block_limits *block_limits; 5001 struct da_softc *softc; 5002 struct ccb_scsiio *csio; 5003 u_int32_t priority; 5004 5005 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeblklimits\n")); 5006 5007 softc = (struct da_softc *)periph->softc; 5008 priority = done_ccb->ccb_h.pinfo.priority; 5009 csio = &done_ccb->csio; 5010 block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; 5011 5012 cam_periph_assert(periph, MA_OWNED); 5013 5014 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5015 uint32_t max_txfer_len = scsi_4btoul( 5016 block_limits->max_txfer_len); 5017 uint32_t max_unmap_lba_cnt = scsi_4btoul( 5018 block_limits->max_unmap_lba_cnt); 5019 uint32_t max_unmap_blk_cnt = scsi_4btoul( 5020 block_limits->max_unmap_blk_cnt); 5021 uint32_t unmap_gran = scsi_4btoul( 5022 block_limits->opt_unmap_grain); 5023 uint32_t unmap_gran_align = scsi_4btoul( 5024 block_limits->unmap_grain_align); 5025 
        uint64_t ws_max_blks = scsi_8btou64(
            block_limits->max_write_same_length);

        if (max_txfer_len != 0) {
            softc->disk->d_maxsize = MIN(softc->maxio,
                (off_t)max_txfer_len * softc->params.secsize);
        }

        /*
         * We should already support UNMAP but we check lba
         * and block count to be sure
         */
        if (max_unmap_lba_cnt != 0x00L &&
            max_unmap_blk_cnt != 0x00L) {
            softc->unmap_max_lba = max_unmap_lba_cnt;
            softc->unmap_max_ranges = min(max_unmap_blk_cnt,
                UNMAP_MAX_RANGES);
            if (unmap_gran > 1) {
                softc->unmap_gran = unmap_gran;
                /* Top bit marks the alignment field as valid. */
                if (unmap_gran_align & 0x80000000) {
                    softc->unmap_gran_align =
                        unmap_gran_align & 0x7fffffff;
                }
            }
        } else {
            /*
             * Unexpected UNMAP limits which means the
             * device doesn't actually support UNMAP
             */
            dadeleteflag(softc, DA_DELETE_UNMAP, 0);
        }

        if (ws_max_blks != 0x00L)
            softc->ws_max_blks = ws_max_blks;
    } else {
        int error;
        error = daerror(done_ccb, CAM_RETRY_SELTO,
            SF_RETRY_UA|SF_NO_PRINT);
        if (error == ERESTART)
            return;
        else if (error != 0) {
            if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
                /* Don't wedge this device's queue */
                cam_release_devq(done_ccb->ccb_h.path,
                    /*relsim_flags*/0,
                    /*reduction*/0,
                    /*timeout*/0,
                    /*getcount_only*/0);
            }

            /*
             * Failure here doesn't mean UNMAP is not
             * supported as this is an optional page.
             */
            softc->unmap_max_lba = 1;
            softc->unmap_max_ranges = 1;
        }
    }

    free(block_limits, M_SCSIDA);
    xpt_release_ccb(done_ccb);
    softc->state = DA_STATE_PROBE_BDC;
    xpt_schedule(periph, priority);
    return;
}

/*
 * Completion handler for the Block Device Characteristics VPD page
 * probe: records the medium rotation rate (disabling queue sorting for
 * non-rotational media and notifying GEOM when the rate changes) and,
 * if the page's Zoned field is set, establishes the zone mode and the
 * zone interface (SAT vs. native SCSI, based on whether the ATA
 * Information VPD page is supported).  Always advances to the ATA
 * IDENTIFY probe afterwards.
 */
static void
dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb)
{
    struct scsi_vpd_block_device_characteristics *bdc;
    struct da_softc *softc;
    struct ccb_scsiio *csio;
    u_int32_t priority;

    CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probebdc\n"));

    softc = (struct da_softc *)periph->softc;
    priority = done_ccb->ccb_h.pinfo.priority;
    csio = &done_ccb->csio;
    bdc = (struct scsi_vpd_block_device_characteristics *)csio->data_ptr;

    cam_periph_assert(periph, MA_OWNED);

    if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
        uint32_t valid_len;

        /*
         * Disable queue sorting for non-rotational media
         * by default.
         */
        u_int16_t old_rate = softc->disk->d_rotation_rate;

        valid_len = csio->dxfer_len - csio->resid;
        if (SBDC_IS_PRESENT(bdc, valid_len,
            medium_rotation_rate)) {
            softc->disk->d_rotation_rate =
                scsi_2btoul(bdc->medium_rotation_rate);
            if (softc->disk->d_rotation_rate ==
                SVPD_BDC_RATE_NON_ROTATING) {
                cam_iosched_set_sort_queue(
                    softc->cam_iosched, 0);
                softc->rotating = 0;
            }
            if (softc->disk->d_rotation_rate != old_rate) {
                disk_attr_changed(softc->disk,
                    "GEOM::rotation_rate", M_NOWAIT);
            }
        }
        if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
         && (softc->zone_mode == DA_ZONE_NONE)) {
            int ata_proto;

            if (scsi_vpd_supported_page(periph,
                SVPD_ATA_INFORMATION))
                ata_proto = 1;
            else
                ata_proto = 0;

            /*
             * The Zoned field will only be set for
             * Drive Managed and Host Aware drives.
If
             * they are Host Managed, the device type
             * in the standard INQUIRY data should be
             * set to T_ZBC_HM (0x14).
             */
            if ((bdc->flags & SVPD_ZBC_MASK) ==
                 SVPD_HAW_ZBC) {
                softc->zone_mode = DA_ZONE_HOST_AWARE;
                softc->zone_interface = (ata_proto) ?
                   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
            } else if ((bdc->flags & SVPD_ZBC_MASK) ==
                 SVPD_DM_ZBC) {
                softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
                softc->zone_interface = (ata_proto) ?
                   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
            } else if ((bdc->flags & SVPD_ZBC_MASK) !=
                  SVPD_ZBC_NR) {
                xpt_print(periph->path, "Unknown zoned "
                    "type %#x",
                    bdc->flags & SVPD_ZBC_MASK);
            }
        }
    } else {
        int error;
        error = daerror(done_ccb, CAM_RETRY_SELTO,
            SF_RETRY_UA|SF_NO_PRINT);
        if (error == ERESTART)
            return;
        else if (error != 0) {
            if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
                /* Don't wedge this device's queue */
                cam_release_devq(done_ccb->ccb_h.path,
                    /*relsim_flags*/0,
                    /*reduction*/0,
                    /*timeout*/0,
                    /*getcount_only*/0);
            }
        }
    }

    free(bdc, M_SCSIDA);
    xpt_release_ccb(done_ccb);
    softc->state = DA_STATE_PROBE_ATA;
    xpt_schedule(periph, priority);
    return;
}

/*
 * Completion handler for the ATA IDENTIFY probe (issued via SAT for
 * SATA devices): records TRIM support and limits, rotation rate, DMA
 * and general-purpose-log capability flags, and — if no zone mode has
 * been established yet — derives the zone mode from the IDENTIFY data,
 * using ATA passthrough as the zone interface.  For host-aware or
 * host-managed devices, the probe continues with either the ATA log
 * directory (passthrough) or the SCSI Zoned BDC VPD page (SAT).
 */
static void
dadone_probeata(struct cam_periph *periph, union ccb *done_ccb)
{
    struct ata_params *ata_params;
    struct ccb_scsiio *csio;
    struct da_softc *softc;
    u_int32_t priority;
    int continue_probe;
    int error;
    int16_t *ptr;

    CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeata\n"));

    softc = (struct da_softc *)periph->softc;
    priority = done_ccb->ccb_h.pinfo.priority;
    csio = &done_ccb->csio;
    ata_params = (struct ata_params *)csio->data_ptr;
    /* NOTE(review): ptr is assigned but not used in this function. */
    ptr = (uint16_t *)ata_params;
    continue_probe = 0;
    error = 0;

    cam_periph_assert(periph, MA_OWNED);

    if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
        uint16_t old_rate;

        ata_param_fixup(ata_params);
        if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
            (softc->quirks & DA_Q_NO_UNMAP) == 0) {
            dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
            if (ata_params->max_dsm_blocks != 0)
                softc->trim_max_ranges = min(
                    softc->trim_max_ranges,
                    ata_params->max_dsm_blocks *
                    ATA_DSM_BLK_RANGES);
        }
        /*
         * Disable queue sorting for non-rotational media
         * by default.
         */
        old_rate = softc->disk->d_rotation_rate;
        softc->disk->d_rotation_rate = ata_params->media_rotation_rate;
        if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) {
            cam_iosched_set_sort_queue(softc->cam_iosched, 0);
            softc->rotating = 0;
        }
        if (softc->disk->d_rotation_rate != old_rate) {
            disk_attr_changed(softc->disk,
                "GEOM::rotation_rate", M_NOWAIT);
        }

        cam_periph_assert(periph, MA_OWNED);
        if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
            softc->flags |= DA_FLAG_CAN_ATA_DMA;

        if (ata_params->support.extension & ATA_SUPPORT_GENLOG)
            softc->flags |= DA_FLAG_CAN_ATA_LOG;

        /*
         * At this point, if we have a SATA host aware drive,
         * we communicate via ATA passthrough unless the
         * SAT layer supports ZBC -> ZAC translation. In
         * that case,
         *
         * XXX KDM figure out how to detect a host managed
         * SATA drive.
         */
        if (softc->zone_mode == DA_ZONE_NONE) {
            /*
             * Note that we don't override the zone
             * mode or interface if it has already been
             * set. This is because it has either been
             * set as a quirk, or when we probed the
             * SCSI Block Device Characteristics page,
             * the zoned field was set. The latter
             * means that the SAT layer supports ZBC to
             * ZAC translation, and we would prefer to
             * use that if it is available.
             */
            if ((ata_params->support3 &
                ATA_SUPPORT_ZONE_MASK) ==
                ATA_SUPPORT_ZONE_HOST_AWARE) {
                softc->zone_mode = DA_ZONE_HOST_AWARE;
                softc->zone_interface =
                    DA_ZONE_IF_ATA_PASS;
            } else if ((ata_params->support3 &
                ATA_SUPPORT_ZONE_MASK) ==
                ATA_SUPPORT_ZONE_DEV_MANAGED) {
                softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
                softc->zone_interface = DA_ZONE_IF_ATA_PASS;
            }
        }

    } else {
        error = daerror(done_ccb, CAM_RETRY_SELTO,
            SF_RETRY_UA|SF_NO_PRINT);
        if (error == ERESTART)
            return;
        else if (error != 0) {
            if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
                /* Don't wedge this device's queue */
                cam_release_devq(done_ccb->ccb_h.path,
                    /*relsim_flags*/0,
                    /*reduction*/0,
                    /*timeout*/0,
                    /*getcount_only*/0);
            }
        }
    }

    if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
     || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
        /*
         * If the ATA IDENTIFY failed, we could be talking
         * to a SCSI drive, although that seems unlikely,
         * since the drive did report that it supported the
         * ATA Information VPD page. If the ATA IDENTIFY
         * succeeded, and the SAT layer doesn't support
         * ZBC -> ZAC translation, continue on to get the
         * directory of ATA logs, and complete the rest of
         * the ZAC probe. If the SAT layer does support
         * ZBC -> ZAC translation, we want to use that,
         * and we'll probe the SCSI Zoned Block Device
         * Characteristics VPD page next.
         */
        if ((error == 0)
         && (softc->flags & DA_FLAG_CAN_ATA_LOG)
         && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
            softc->state = DA_STATE_PROBE_ATA_LOGDIR;
        else
            softc->state = DA_STATE_PROBE_ZONE;
        continue_probe = 1;
    }
    if (continue_probe != 0) {
        xpt_release_ccb(done_ccb);
        xpt_schedule(periph, priority);
        return;
    } else
        daprobedone(periph, done_ccb);
    return;
}

/*
 * Completion handler for the ATA General Purpose log directory probe:
 * caches the directory in the softc and checks whether the Identify
 * Device Data log is present (directory version matches and its page
 * count is non-zero), setting or clearing DA_FLAG_CAN_ATA_IDLOG.  On
 * success continues with the Identify Data log directory; otherwise the
 * probe sequence finishes.
 */
static void
dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb)
{
    struct da_softc *softc;
    struct ccb_scsiio *csio;
    u_int32_t priority;
    int error;

    CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatalogdir\n"));

    softc = (struct da_softc *)periph->softc;
    priority = done_ccb->ccb_h.pinfo.priority;
    csio = &done_ccb->csio;

    cam_periph_assert(periph, MA_OWNED);
    if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
        error = 0;
        softc->valid_logdir_len = 0;
        bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
        softc->valid_logdir_len = csio->dxfer_len - csio->resid;
        if (softc->valid_logdir_len > 0)
            bcopy(csio->data_ptr, &softc->ata_logdir,
                min(softc->valid_logdir_len,
                sizeof(softc->ata_logdir)));
        /*
         * Figure out whether the Identify Device log is
         * supported. The General Purpose log directory
         * has a header, and lists the number of pages
         * available for each GP log identified by the
         * offset into the list.
         */
        if ((softc->valid_logdir_len >=
            ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
         && (le16dec(softc->ata_logdir.header) ==
             ATA_GP_LOG_DIR_VERSION)
         && (le16dec(&softc->ata_logdir.num_pages[
             (ATA_IDENTIFY_DATA_LOG *
             sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
            softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
        } else {
            softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
        }
    } else {
        error = daerror(done_ccb, CAM_RETRY_SELTO,
            SF_RETRY_UA|SF_NO_PRINT);
        if (error == ERESTART)
            return;
        else if (error != 0) {
            /*
             * If we can't get the ATA log directory,
             * then ATA logs are effectively not
             * supported even if the bit is set in the
             * identify data.
             */
            softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
                DA_FLAG_CAN_ATA_IDLOG);
            if ((done_ccb->ccb_h.status &
                CAM_DEV_QFRZN) != 0) {
                /* Don't wedge this device's queue */
                cam_release_devq(done_ccb->ccb_h.path,
                    /*relsim_flags*/0,
                    /*reduction*/0,
                    /*timeout*/0,
                    /*getcount_only*/0);
            }
        }
    }

    free(csio->data_ptr, M_SCSIDA);

    if ((error == 0)
     && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
        softc->state = DA_STATE_PROBE_ATA_IDDIR;
        xpt_release_ccb(done_ccb);
        xpt_schedule(periph, priority);
        return;
    }
    daprobedone(periph, done_ccb);
    return;
}

/*
 * Completion handler for the ATA Identify Device Data log directory
 * probe: caches the directory and scans its entries for the Supported
 * Capabilities (ATA_IDL_SUP_CAP) and Zoned Device Information
 * (ATA_IDL_ZDI) pages, setting DA_FLAG_CAN_ATA_SUPCAP and
 * DA_FLAG_CAN_ATA_ZONE accordingly.
 */
static void
dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb)
{
    struct da_softc *softc;
    struct ccb_scsiio *csio;
    u_int32_t priority;
    int error;

    CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeataiddir\n"));

    softc = (struct da_softc *)periph->softc;
    priority = done_ccb->ccb_h.pinfo.priority;
    csio = &done_ccb->csio;

    cam_periph_assert(periph, MA_OWNED);

    if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
        off_t entries_offset, max_entries;
        error = 0;

        softc->valid_iddir_len = 0;
        bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
        softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
            DA_FLAG_CAN_ATA_ZONE);
        softc->valid_iddir_len = csio->dxfer_len - csio->resid;
        if (softc->valid_iddir_len > 0)
            bcopy(csio->data_ptr, &softc->ata_iddir,
                min(softc->valid_iddir_len,
                sizeof(softc->ata_iddir)));

        entries_offset =
            __offsetof(struct ata_identify_log_pages,entries);
        max_entries = softc->valid_iddir_len - entries_offset;
        if ((softc->valid_iddir_len > (entries_offset + 1))
         && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION)
         && (softc->ata_iddir.entry_count > 0)) {
            int num_entries, i;

            num_entries = softc->ata_iddir.entry_count;
            num_entries = min(num_entries,
                softc->valid_iddir_len - entries_offset);
            /* Look for the SUP_CAP and ZDI pages in the directory. */
            for (i = 0; i < num_entries && i < max_entries; i++) {
                if (softc->ata_iddir.entries[i] ==
                    ATA_IDL_SUP_CAP)
                    softc->flags |= DA_FLAG_CAN_ATA_SUPCAP;
                else if (softc->ata_iddir.entries[i] ==
                    ATA_IDL_ZDI)
                    softc->flags |= DA_FLAG_CAN_ATA_ZONE;

                /* Stop early once both pages have been found. */
                if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP)
                 && (softc->flags & DA_FLAG_CAN_ATA_ZONE))
                    break;
            }
        }
    } else {
        error = daerror(done_ccb, CAM_RETRY_SELTO,
            SF_RETRY_UA|SF_NO_PRINT);
        if (error == ERESTART)
            return;
        else if (error != 0) {
            /*
             * If we can't get the ATA Identify Data log
             * directory, then it effectively isn't
             * supported even if the ATA Log directory
             * a non-zero number of pages present for
             * this log.
             */
            softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
            if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
                /* Don't wedge this device's queue */
                cam_release_devq(done_ccb->ccb_h.path,
                    /*relsim_flags*/0,
                    /*reduction*/0,
                    /*timeout*/0,
                    /*getcount_only*/0);
            }
        }
    }

    free(csio->data_ptr, M_SCSIDA);

    if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
        softc->state = DA_STATE_PROBE_ATA_SUP;
        xpt_release_ccb(done_ccb);
        xpt_schedule(periph, priority);
        return;
    }
    daprobedone(periph, done_ccb);
    return;
}

/*
 * Completion handler for the ATA Supported Capabilities log page probe:
 * confirms/sets the zone mode from the Zoned Capabilities field and
 * translates the ZAC command-support bits into DA_ZONE_FLAG_* bits in
 * softc->zone_flags.  Firmware predating ACS-4 r08 lacks the ZAC
 * capability field, in which case all zone commands are assumed
 * supported.  On success with ZDI available, continues to the Zoned
 * Device Information probe.
 */
static void
dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb)
{
    struct da_softc *softc;
    struct ccb_scsiio *csio;
    u_int32_t priority;
    int error;

    CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatasup\n"));

    softc = (struct da_softc *)periph->softc;
    priority = done_ccb->ccb_h.pinfo.priority;
    csio = &done_ccb->csio;

    cam_periph_assert(periph, MA_OWNED);

    if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
        uint32_t valid_len;
        size_t needed_size;
        struct ata_identify_log_sup_cap *sup_cap;
        error = 0;

        sup_cap = (struct ata_identify_log_sup_cap *)csio->data_ptr;
        valid_len = csio->dxfer_len - csio->resid;
        needed_size = __offsetof(struct ata_identify_log_sup_cap,
            sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
        if (valid_len >= needed_size) {
            uint64_t zoned, zac_cap;

            zoned = le64dec(sup_cap->zoned_cap);
            if (zoned & ATA_ZONED_VALID) {
                /*
                 * This should have already been
                 * set, because this is also in the
                 * ATA identify data.
                 */
                if ((zoned & ATA_ZONED_MASK) ==
                    ATA_SUPPORT_ZONE_HOST_AWARE)
                    softc->zone_mode = DA_ZONE_HOST_AWARE;
                else if ((zoned & ATA_ZONED_MASK) ==
                    ATA_SUPPORT_ZONE_DEV_MANAGED)
                    softc->zone_mode =
                        DA_ZONE_DRIVE_MANAGED;
            }

            zac_cap = le64dec(sup_cap->sup_zac_cap);
            if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
                if (zac_cap & ATA_REPORT_ZONES_SUP)
                    softc->zone_flags |=
                        DA_ZONE_FLAG_RZ_SUP;
                if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
                    softc->zone_flags |=
                        DA_ZONE_FLAG_OPEN_SUP;
                if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
                    softc->zone_flags |=
                        DA_ZONE_FLAG_CLOSE_SUP;
                if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
                    softc->zone_flags |=
                        DA_ZONE_FLAG_FINISH_SUP;
                if (zac_cap & ATA_ND_RWP_SUP)
                    softc->zone_flags |=
                        DA_ZONE_FLAG_RWP_SUP;
            } else {
                /*
                 * This field was introduced in
                 * ACS-4, r08 on April 28th, 2015.
                 * If the drive firmware was written
                 * to an earlier spec, it won't have
                 * the field. So, assume all
                 * commands are supported.
                 */
                softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
            }
        }
    } else {
        error = daerror(done_ccb, CAM_RETRY_SELTO,
            SF_RETRY_UA|SF_NO_PRINT);
        if (error == ERESTART)
            return;
        else if (error != 0) {
            /*
             * If we can't get the ATA Identify Data
             * Supported Capabilities page, clear the
             * flag...
             */
            softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
            /*
             * And clear zone capabilities.
5594 */ 5595 softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK; 5596 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5597 /* Don't wedge this device's queue */ 5598 cam_release_devq(done_ccb->ccb_h.path, 5599 /*relsim_flags*/0, 5600 /*reduction*/0, 5601 /*timeout*/0, 5602 /*getcount_only*/0); 5603 } 5604 } 5605 } 5606 5607 free(csio->data_ptr, M_SCSIDA); 5608 5609 if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) { 5610 softc->state = DA_STATE_PROBE_ATA_ZONE; 5611 xpt_release_ccb(done_ccb); 5612 xpt_schedule(periph, priority); 5613 return; 5614 } 5615 daprobedone(periph, done_ccb); 5616 return; 5617 } 5618 5619 static void 5620 dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb) 5621 { 5622 struct da_softc *softc; 5623 struct ccb_scsiio *csio; 5624 int error; 5625 5626 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatazone\n")); 5627 5628 softc = (struct da_softc *)periph->softc; 5629 csio = &done_ccb->csio; 5630 5631 cam_periph_assert(periph, MA_OWNED); 5632 5633 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5634 struct ata_zoned_info_log *zi_log; 5635 uint32_t valid_len; 5636 size_t needed_size; 5637 5638 zi_log = (struct ata_zoned_info_log *)csio->data_ptr; 5639 5640 valid_len = csio->dxfer_len - csio->resid; 5641 needed_size = __offsetof(struct ata_zoned_info_log, 5642 version_info) + 1 + sizeof(zi_log->version_info); 5643 if (valid_len >= needed_size) { 5644 uint64_t tmpvar; 5645 5646 tmpvar = le64dec(zi_log->zoned_cap); 5647 if (tmpvar & ATA_ZDI_CAP_VALID) { 5648 if (tmpvar & ATA_ZDI_CAP_URSWRZ) 5649 softc->zone_flags |= 5650 DA_ZONE_FLAG_URSWRZ; 5651 else 5652 softc->zone_flags &= 5653 ~DA_ZONE_FLAG_URSWRZ; 5654 } 5655 tmpvar = le64dec(zi_log->optimal_seq_zones); 5656 if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { 5657 softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; 5658 softc->optimal_seq_zones = (tmpvar & 5659 ATA_ZDI_OPT_SEQ_MASK); 5660 } else { 5661 softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET; 5662 
softc->optimal_seq_zones = 0; 5663 } 5664 5665 tmpvar =le64dec(zi_log->optimal_nonseq_zones); 5666 if (tmpvar & ATA_ZDI_OPT_NS_VALID) { 5667 softc->zone_flags |= 5668 DA_ZONE_FLAG_OPT_NONSEQ_SET; 5669 softc->optimal_nonseq_zones = 5670 (tmpvar & ATA_ZDI_OPT_NS_MASK); 5671 } else { 5672 softc->zone_flags &= 5673 ~DA_ZONE_FLAG_OPT_NONSEQ_SET; 5674 softc->optimal_nonseq_zones = 0; 5675 } 5676 5677 tmpvar = le64dec(zi_log->max_seq_req_zones); 5678 if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { 5679 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; 5680 softc->max_seq_zones = 5681 (tmpvar & ATA_ZDI_MAX_SEQ_MASK); 5682 } else { 5683 softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET; 5684 softc->max_seq_zones = 0; 5685 } 5686 } 5687 } else { 5688 error = daerror(done_ccb, CAM_RETRY_SELTO, 5689 SF_RETRY_UA|SF_NO_PRINT); 5690 if (error == ERESTART) 5691 return; 5692 else if (error != 0) { 5693 softc->flags &= ~DA_FLAG_CAN_ATA_ZONE; 5694 softc->flags &= ~DA_ZONE_FLAG_SET_MASK; 5695 5696 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5697 /* Don't wedge this device's queue */ 5698 cam_release_devq(done_ccb->ccb_h.path, 5699 /*relsim_flags*/0, 5700 /*reduction*/0, 5701 /*timeout*/0, 5702 /*getcount_only*/0); 5703 } 5704 } 5705 5706 } 5707 5708 free(csio->data_ptr, M_SCSIDA); 5709 5710 daprobedone(periph, done_ccb); 5711 return; 5712 } 5713 5714 static void 5715 dadone_probezone(struct cam_periph *periph, union ccb *done_ccb) 5716 { 5717 struct da_softc *softc; 5718 struct ccb_scsiio *csio; 5719 int error; 5720 5721 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probezone\n")); 5722 5723 softc = (struct da_softc *)periph->softc; 5724 csio = &done_ccb->csio; 5725 5726 cam_periph_assert(periph, MA_OWNED); 5727 5728 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5729 uint32_t valid_len; 5730 size_t needed_len; 5731 struct scsi_vpd_zoned_bdc *zoned_bdc; 5732 5733 error = 0; 5734 zoned_bdc = (struct scsi_vpd_zoned_bdc *)csio->data_ptr; 5735 valid_len = csio->dxfer_len - 
csio->resid; 5736 needed_len = __offsetof(struct scsi_vpd_zoned_bdc, 5737 max_seq_req_zones) + 1 + 5738 sizeof(zoned_bdc->max_seq_req_zones); 5739 if ((valid_len >= needed_len) 5740 && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) { 5741 if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ) 5742 softc->zone_flags |= DA_ZONE_FLAG_URSWRZ; 5743 else 5744 softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ; 5745 softc->optimal_seq_zones = 5746 scsi_4btoul(zoned_bdc->optimal_seq_zones); 5747 softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; 5748 softc->optimal_nonseq_zones = scsi_4btoul( 5749 zoned_bdc->optimal_nonseq_zones); 5750 softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET; 5751 softc->max_seq_zones = 5752 scsi_4btoul(zoned_bdc->max_seq_req_zones); 5753 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; 5754 } 5755 /* 5756 * All of the zone commands are mandatory for SCSI 5757 * devices. 5758 * 5759 * XXX KDM this is valid as of September 2015. 5760 * Re-check this assumption once the SAT spec is 5761 * updated to support SCSI ZBC to ATA ZAC mapping. 5762 * Since ATA allows zone commands to be reported 5763 * as supported or not, this may not necessarily 5764 * be true for an ATA device behind a SAT (SCSI to 5765 * ATA Translation) layer. 
5766 */ 5767 softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; 5768 } else { 5769 error = daerror(done_ccb, CAM_RETRY_SELTO, 5770 SF_RETRY_UA|SF_NO_PRINT); 5771 if (error == ERESTART) 5772 return; 5773 else if (error != 0) { 5774 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5775 /* Don't wedge this device's queue */ 5776 cam_release_devq(done_ccb->ccb_h.path, 5777 /*relsim_flags*/0, 5778 /*reduction*/0, 5779 /*timeout*/0, 5780 /*getcount_only*/0); 5781 } 5782 } 5783 } 5784 5785 free(csio->data_ptr, M_SCSIDA); 5786 5787 daprobedone(periph, done_ccb); 5788 return; 5789 } 5790 5791 static void 5792 dadone_tur(struct cam_periph *periph, union ccb *done_ccb) 5793 { 5794 struct da_softc *softc; 5795 struct ccb_scsiio *csio; 5796 5797 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_tur\n")); 5798 5799 softc = (struct da_softc *)periph->softc; 5800 csio = &done_ccb->csio; 5801 5802 cam_periph_assert(periph, MA_OWNED); 5803 5804 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 5805 5806 if (daerror(done_ccb, CAM_RETRY_SELTO, 5807 SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) 5808 return; /* Will complete again, keep reference */ 5809 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 5810 cam_release_devq(done_ccb->ccb_h.path, 5811 /*relsim_flags*/0, 5812 /*reduction*/0, 5813 /*timeout*/0, 5814 /*getcount_only*/0); 5815 } 5816 xpt_release_ccb(done_ccb); 5817 softc->flags &= ~DA_FLAG_TUR_PENDING; 5818 da_periph_release_locked(periph, DA_REF_TUR); 5819 return; 5820 } 5821 5822 static void 5823 dareprobe(struct cam_periph *periph) 5824 { 5825 struct da_softc *softc; 5826 int status; 5827 5828 softc = (struct da_softc *)periph->softc; 5829 5830 /* Probe in progress; don't interfere. 
*/ 5831 if (softc->state != DA_STATE_NORMAL) 5832 return; 5833 5834 status = da_periph_acquire(periph, DA_REF_REPROBE); 5835 KASSERT(status == 0, ("dareprobe: cam_periph_acquire failed")); 5836 5837 softc->state = DA_STATE_PROBE_WP; 5838 xpt_schedule(periph, CAM_PRIORITY_DEV); 5839 } 5840 5841 static int 5842 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 5843 { 5844 struct da_softc *softc; 5845 struct cam_periph *periph; 5846 int error, error_code, sense_key, asc, ascq; 5847 5848 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 5849 if (ccb->csio.bio != NULL) 5850 biotrack(ccb->csio.bio, __func__); 5851 #endif 5852 5853 periph = xpt_path_periph(ccb->ccb_h.path); 5854 softc = (struct da_softc *)periph->softc; 5855 5856 cam_periph_assert(periph, MA_OWNED); 5857 5858 /* 5859 * Automatically detect devices that do not support 5860 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs. 5861 */ 5862 error = 0; 5863 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { 5864 error = cmd6workaround(ccb); 5865 } else if (scsi_extract_sense_ccb(ccb, 5866 &error_code, &sense_key, &asc, &ascq)) { 5867 if (sense_key == SSD_KEY_ILLEGAL_REQUEST) 5868 error = cmd6workaround(ccb); 5869 /* 5870 * If the target replied with CAPACITY DATA HAS CHANGED UA, 5871 * query the capacity and notify upper layers. 
5872 */ 5873 else if (sense_key == SSD_KEY_UNIT_ATTENTION && 5874 asc == 0x2A && ascq == 0x09) { 5875 xpt_print(periph->path, "Capacity data has changed\n"); 5876 softc->flags &= ~DA_FLAG_PROBED; 5877 dareprobe(periph); 5878 sense_flags |= SF_NO_PRINT; 5879 } else if (sense_key == SSD_KEY_UNIT_ATTENTION && 5880 asc == 0x28 && ascq == 0x00) { 5881 softc->flags &= ~DA_FLAG_PROBED; 5882 disk_media_changed(softc->disk, M_NOWAIT); 5883 } else if (sense_key == SSD_KEY_UNIT_ATTENTION && 5884 asc == 0x3F && ascq == 0x03) { 5885 xpt_print(periph->path, "INQUIRY data has changed\n"); 5886 softc->flags &= ~DA_FLAG_PROBED; 5887 dareprobe(periph); 5888 sense_flags |= SF_NO_PRINT; 5889 } else if (sense_key == SSD_KEY_NOT_READY && 5890 asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { 5891 softc->flags |= DA_FLAG_PACK_INVALID; 5892 disk_media_gone(softc->disk, M_NOWAIT); 5893 } 5894 } 5895 if (error == ERESTART) 5896 return (ERESTART); 5897 5898 #ifdef CAM_IO_STATS 5899 switch (ccb->ccb_h.status & CAM_STATUS_MASK) { 5900 case CAM_CMD_TIMEOUT: 5901 softc->timeouts++; 5902 break; 5903 case CAM_REQ_ABORTED: 5904 case CAM_REQ_CMP_ERR: 5905 case CAM_REQ_TERMIO: 5906 case CAM_UNREC_HBA_ERROR: 5907 case CAM_DATA_RUN_ERR: 5908 softc->errors++; 5909 break; 5910 default: 5911 break; 5912 } 5913 #endif 5914 5915 /* 5916 * XXX 5917 * Until we have a better way of doing pack validation, 5918 * don't treat UAs as errors. 
5919 */ 5920 sense_flags |= SF_RETRY_UA; 5921 5922 if (softc->quirks & DA_Q_RETRY_BUSY) 5923 sense_flags |= SF_RETRY_BUSY; 5924 return(cam_periph_error(ccb, cam_flags, sense_flags)); 5925 } 5926 5927 static void 5928 damediapoll(void *arg) 5929 { 5930 struct cam_periph *periph = arg; 5931 struct da_softc *softc = periph->softc; 5932 5933 if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && 5934 (softc->flags & DA_FLAG_TUR_PENDING) == 0 && 5935 LIST_EMPTY(&softc->pending_ccbs)) { 5936 if (da_periph_acquire(periph, DA_REF_TUR) == 0) { 5937 cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); 5938 daschedule(periph); 5939 } 5940 } 5941 /* Queue us up again */ 5942 if (da_poll_period != 0) 5943 callout_schedule(&softc->mediapoll_c, da_poll_period * hz); 5944 } 5945 5946 static void 5947 daprevent(struct cam_periph *periph, int action) 5948 { 5949 struct da_softc *softc; 5950 union ccb *ccb; 5951 int error; 5952 5953 cam_periph_assert(periph, MA_OWNED); 5954 softc = (struct da_softc *)periph->softc; 5955 5956 if (((action == PR_ALLOW) 5957 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) 5958 || ((action == PR_PREVENT) 5959 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { 5960 return; 5961 } 5962 5963 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 5964 5965 scsi_prevent(&ccb->csio, 5966 /*retries*/1, 5967 /*cbcfp*/NULL, 5968 MSG_SIMPLE_Q_TAG, 5969 action, 5970 SSD_FULL_SIZE, 5971 5000); 5972 5973 error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO, 5974 SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat); 5975 5976 if (error == 0) { 5977 if (action == PR_ALLOW) 5978 softc->flags &= ~DA_FLAG_PACK_LOCKED; 5979 else 5980 softc->flags |= DA_FLAG_PACK_LOCKED; 5981 } 5982 5983 xpt_release_ccb(ccb); 5984 } 5985 5986 static void 5987 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, 5988 struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len) 5989 { 5990 struct ccb_calc_geometry ccg; 5991 struct da_softc *softc; 
5992 struct disk_params *dp; 5993 u_int lbppbe, lalba; 5994 int error; 5995 5996 softc = (struct da_softc *)periph->softc; 5997 5998 dp = &softc->params; 5999 dp->secsize = block_len; 6000 dp->sectors = maxsector + 1; 6001 if (rcaplong != NULL) { 6002 lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; 6003 lalba = scsi_2btoul(rcaplong->lalba_lbp); 6004 lalba &= SRC16_LALBA_A; 6005 if (rcaplong->prot & SRC16_PROT_EN) 6006 softc->p_type = ((rcaplong->prot & SRC16_P_TYPE) >> 6007 SRC16_P_TYPE_SHIFT) + 1; 6008 else 6009 softc->p_type = 0; 6010 } else { 6011 lbppbe = 0; 6012 lalba = 0; 6013 softc->p_type = 0; 6014 } 6015 6016 if (lbppbe > 0) { 6017 dp->stripesize = block_len << lbppbe; 6018 dp->stripeoffset = (dp->stripesize - block_len * lalba) % 6019 dp->stripesize; 6020 } else if (softc->quirks & DA_Q_4K) { 6021 dp->stripesize = 4096; 6022 dp->stripeoffset = 0; 6023 } else if (softc->unmap_gran != 0) { 6024 dp->stripesize = block_len * softc->unmap_gran; 6025 dp->stripeoffset = (dp->stripesize - block_len * 6026 softc->unmap_gran_align) % dp->stripesize; 6027 } else { 6028 dp->stripesize = 0; 6029 dp->stripeoffset = 0; 6030 } 6031 /* 6032 * Have the controller provide us with a geometry 6033 * for this disk. The only time the geometry 6034 * matters is when we boot and the controller 6035 * is the only one knowledgeable enough to come 6036 * up with something that will make this a bootable 6037 * device. 6038 */ 6039 xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 6040 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; 6041 ccg.block_size = dp->secsize; 6042 ccg.volume_size = dp->sectors; 6043 ccg.heads = 0; 6044 ccg.secs_per_track = 0; 6045 ccg.cylinders = 0; 6046 xpt_action((union ccb*)&ccg); 6047 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 6048 /* 6049 * We don't know what went wrong here- but just pick 6050 * a geometry so we don't have nasty things like divide 6051 * by zero. 
6052 */ 6053 dp->heads = 255; 6054 dp->secs_per_track = 255; 6055 dp->cylinders = dp->sectors / (255 * 255); 6056 if (dp->cylinders == 0) { 6057 dp->cylinders = 1; 6058 } 6059 } else { 6060 dp->heads = ccg.heads; 6061 dp->secs_per_track = ccg.secs_per_track; 6062 dp->cylinders = ccg.cylinders; 6063 } 6064 6065 /* 6066 * If the user supplied a read capacity buffer, and if it is 6067 * different than the previous buffer, update the data in the EDT. 6068 * If it's the same, we don't bother. This avoids sending an 6069 * update every time someone opens this device. 6070 */ 6071 if ((rcaplong != NULL) 6072 && (bcmp(rcaplong, &softc->rcaplong, 6073 min(sizeof(softc->rcaplong), rcap_len)) != 0)) { 6074 struct ccb_dev_advinfo cdai; 6075 6076 xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 6077 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 6078 cdai.buftype = CDAI_TYPE_RCAPLONG; 6079 cdai.flags = CDAI_FLAG_STORE; 6080 cdai.bufsiz = rcap_len; 6081 cdai.buf = (uint8_t *)rcaplong; 6082 xpt_action((union ccb *)&cdai); 6083 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 6084 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 6085 if (cdai.ccb_h.status != CAM_REQ_CMP) { 6086 xpt_print(periph->path, "%s: failed to set read " 6087 "capacity advinfo\n", __func__); 6088 /* Use cam_error_print() to decode the status */ 6089 cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS, 6090 CAM_EPF_ALL); 6091 } else { 6092 bcopy(rcaplong, &softc->rcaplong, 6093 min(sizeof(softc->rcaplong), rcap_len)); 6094 } 6095 } 6096 6097 softc->disk->d_sectorsize = softc->params.secsize; 6098 softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors; 6099 softc->disk->d_stripesize = softc->params.stripesize; 6100 softc->disk->d_stripeoffset = softc->params.stripeoffset; 6101 /* XXX: these are not actually "firmware" values, so they may be wrong */ 6102 softc->disk->d_fwsectors = softc->params.secs_per_track; 6103 softc->disk->d_fwheads = softc->params.heads; 6104 
softc->disk->d_devstat->block_size = softc->params.secsize; 6105 softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; 6106 6107 error = disk_resize(softc->disk, M_NOWAIT); 6108 if (error != 0) 6109 xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error); 6110 } 6111 6112 static void 6113 dasendorderedtag(void *arg) 6114 { 6115 struct cam_periph *periph = arg; 6116 struct da_softc *softc = periph->softc; 6117 6118 cam_periph_assert(periph, MA_OWNED); 6119 if (da_send_ordered) { 6120 if (!LIST_EMPTY(&softc->pending_ccbs)) { 6121 if ((softc->flags & DA_FLAG_WAS_OTAG) == 0) 6122 softc->flags |= DA_FLAG_NEED_OTAG; 6123 softc->flags &= ~DA_FLAG_WAS_OTAG; 6124 } 6125 } 6126 6127 /* Queue us up again */ 6128 callout_reset(&softc->sendordered_c, 6129 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, 6130 dasendorderedtag, periph); 6131 } 6132 6133 /* 6134 * Step through all DA peripheral drivers, and if the device is still open, 6135 * sync the disk cache to physical media. 6136 */ 6137 static void 6138 dashutdown(void * arg, int howto) 6139 { 6140 struct cam_periph *periph; 6141 struct da_softc *softc; 6142 union ccb *ccb; 6143 int error; 6144 6145 CAM_PERIPH_FOREACH(periph, &dadriver) { 6146 softc = (struct da_softc *)periph->softc; 6147 if (SCHEDULER_STOPPED()) { 6148 /* If we paniced with the lock held, do not recurse. */ 6149 if (!cam_periph_owned(periph) && 6150 (softc->flags & DA_FLAG_OPEN)) { 6151 dadump(softc->disk, NULL, 0, 0, 0); 6152 } 6153 continue; 6154 } 6155 cam_periph_lock(periph); 6156 6157 /* 6158 * We only sync the cache if the drive is still open, and 6159 * if the drive is capable of it.. 
6160 */ 6161 if (((softc->flags & DA_FLAG_OPEN) == 0) 6162 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) { 6163 cam_periph_unlock(periph); 6164 continue; 6165 } 6166 6167 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 6168 scsi_synchronize_cache(&ccb->csio, 6169 /*retries*/0, 6170 /*cbfcnp*/NULL, 6171 MSG_SIMPLE_Q_TAG, 6172 /*begin_lba*/0, /* whole disk */ 6173 /*lb_count*/0, 6174 SSD_FULL_SIZE, 6175 60 * 60 * 1000); 6176 6177 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, 6178 /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, 6179 softc->disk->d_devstat); 6180 if (error != 0) 6181 xpt_print(periph->path, "Synchronize cache failed\n"); 6182 xpt_release_ccb(ccb); 6183 cam_periph_unlock(periph); 6184 } 6185 } 6186 6187 #else /* !_KERNEL */ 6188 6189 /* 6190 * XXX These are only left out of the kernel build to silence warnings. If, 6191 * for some reason these functions are used in the kernel, the ifdefs should 6192 * be moved so they are included both in the kernel and userland. 6193 */ 6194 void 6195 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries, 6196 void (*cbfcnp)(struct cam_periph *, union ccb *), 6197 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave, 6198 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, 6199 u_int32_t timeout) 6200 { 6201 struct scsi_format_unit *scsi_cmd; 6202 6203 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes; 6204 scsi_cmd->opcode = FORMAT_UNIT; 6205 scsi_cmd->byte2 = byte2; 6206 scsi_ulto2b(ileave, scsi_cmd->interleave); 6207 6208 cam_fill_csio(csio, 6209 retries, 6210 cbfcnp, 6211 /*flags*/ (dxfer_len > 0) ? 
CAM_DIR_OUT : CAM_DIR_NONE, 6212 tag_action, 6213 data_ptr, 6214 dxfer_len, 6215 sense_len, 6216 sizeof(*scsi_cmd), 6217 timeout); 6218 } 6219 6220 void 6221 scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries, 6222 void (*cbfcnp)(struct cam_periph *, union ccb *), 6223 uint8_t tag_action, uint8_t list_format, 6224 uint32_t addr_desc_index, uint8_t *data_ptr, 6225 uint32_t dxfer_len, int minimum_cmd_size, 6226 uint8_t sense_len, uint32_t timeout) 6227 { 6228 uint8_t cdb_len; 6229 6230 /* 6231 * These conditions allow using the 10 byte command. Otherwise we 6232 * need to use the 12 byte command. 6233 */ 6234 if ((minimum_cmd_size <= 10) 6235 && (addr_desc_index == 0) 6236 && (dxfer_len <= SRDD10_MAX_LENGTH)) { 6237 struct scsi_read_defect_data_10 *cdb10; 6238 6239 cdb10 = (struct scsi_read_defect_data_10 *) 6240 &csio->cdb_io.cdb_bytes; 6241 6242 cdb_len = sizeof(*cdb10); 6243 bzero(cdb10, cdb_len); 6244 cdb10->opcode = READ_DEFECT_DATA_10; 6245 cdb10->format = list_format; 6246 scsi_ulto2b(dxfer_len, cdb10->alloc_length); 6247 } else { 6248 struct scsi_read_defect_data_12 *cdb12; 6249 6250 cdb12 = (struct scsi_read_defect_data_12 *) 6251 &csio->cdb_io.cdb_bytes; 6252 6253 cdb_len = sizeof(*cdb12); 6254 bzero(cdb12, cdb_len); 6255 cdb12->opcode = READ_DEFECT_DATA_12; 6256 cdb12->format = list_format; 6257 scsi_ulto4b(dxfer_len, cdb12->alloc_length); 6258 scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index); 6259 } 6260 6261 cam_fill_csio(csio, 6262 retries, 6263 cbfcnp, 6264 /*flags*/ CAM_DIR_IN, 6265 tag_action, 6266 data_ptr, 6267 dxfer_len, 6268 sense_len, 6269 cdb_len, 6270 timeout); 6271 } 6272 6273 void 6274 scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries, 6275 void (*cbfcnp)(struct cam_periph *, union ccb *), 6276 u_int8_t tag_action, u_int8_t byte2, u_int16_t control, 6277 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, 6278 u_int32_t timeout) 6279 { 6280 struct scsi_sanitize *scsi_cmd; 6281 6282 scsi_cmd = (struct 
scsi_sanitize *)&csio->cdb_io.cdb_bytes; 6283 scsi_cmd->opcode = SANITIZE; 6284 scsi_cmd->byte2 = byte2; 6285 scsi_cmd->control = control; 6286 scsi_ulto2b(dxfer_len, scsi_cmd->length); 6287 6288 cam_fill_csio(csio, 6289 retries, 6290 cbfcnp, 6291 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, 6292 tag_action, 6293 data_ptr, 6294 dxfer_len, 6295 sense_len, 6296 sizeof(*scsi_cmd), 6297 timeout); 6298 } 6299 6300 #endif /* _KERNEL */ 6301 6302 void 6303 scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries, 6304 void (*cbfcnp)(struct cam_periph *, union ccb *), 6305 uint8_t tag_action, uint8_t service_action, uint64_t zone_id, 6306 uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, 6307 uint8_t sense_len, uint32_t timeout) 6308 { 6309 struct scsi_zbc_out *scsi_cmd; 6310 6311 scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes; 6312 scsi_cmd->opcode = ZBC_OUT; 6313 scsi_cmd->service_action = service_action; 6314 scsi_u64to8b(zone_id, scsi_cmd->zone_id); 6315 scsi_cmd->zone_flags = zone_flags; 6316 6317 cam_fill_csio(csio, 6318 retries, 6319 cbfcnp, 6320 /*flags*/ (dxfer_len > 0) ? 
CAM_DIR_OUT : CAM_DIR_NONE, 6321 tag_action, 6322 data_ptr, 6323 dxfer_len, 6324 sense_len, 6325 sizeof(*scsi_cmd), 6326 timeout); 6327 } 6328 6329 void 6330 scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries, 6331 void (*cbfcnp)(struct cam_periph *, union ccb *), 6332 uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba, 6333 uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len, 6334 uint8_t sense_len, uint32_t timeout) 6335 { 6336 struct scsi_zbc_in *scsi_cmd; 6337 6338 scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes; 6339 scsi_cmd->opcode = ZBC_IN; 6340 scsi_cmd->service_action = service_action; 6341 scsi_ulto4b(dxfer_len, scsi_cmd->length); 6342 scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba); 6343 scsi_cmd->zone_options = zone_options; 6344 6345 cam_fill_csio(csio, 6346 retries, 6347 cbfcnp, 6348 /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE, 6349 tag_action, 6350 data_ptr, 6351 dxfer_len, 6352 sense_len, 6353 sizeof(*scsi_cmd), 6354 timeout); 6355 6356 } 6357 6358 int 6359 scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries, 6360 void (*cbfcnp)(struct cam_periph *, union ccb *), 6361 uint8_t tag_action, int use_ncq, 6362 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, 6363 uint8_t *data_ptr, uint32_t dxfer_len, 6364 uint8_t *cdb_storage, size_t cdb_storage_len, 6365 uint8_t sense_len, uint32_t timeout) 6366 { 6367 uint8_t command_out, protocol, ata_flags; 6368 uint16_t features_out; 6369 uint32_t sectors_out, auxiliary; 6370 int retval; 6371 6372 retval = 0; 6373 6374 if (use_ncq == 0) { 6375 command_out = ATA_ZAC_MANAGEMENT_OUT; 6376 features_out = (zm_action & 0xf) | (zone_flags << 8); 6377 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; 6378 if (dxfer_len == 0) { 6379 protocol = AP_PROTO_NON_DATA; 6380 ata_flags |= AP_FLAG_TLEN_NO_DATA; 6381 sectors_out = 0; 6382 } else { 6383 protocol = AP_PROTO_DMA; 6384 ata_flags |= AP_FLAG_TLEN_SECT_CNT | 6385 AP_FLAG_TDIR_TO_DEV; 6386 sectors_out = ((dxfer_len 
>> 9) & 0xffff); 6387 } 6388 auxiliary = 0; 6389 } else { 6390 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; 6391 if (dxfer_len == 0) { 6392 command_out = ATA_NCQ_NON_DATA; 6393 features_out = ATA_NCQ_ZAC_MGMT_OUT; 6394 /* 6395 * We're assuming the SCSI to ATA translation layer 6396 * will set the NCQ tag number in the tag field. 6397 * That isn't clear from the SAT-4 spec (as of rev 05). 6398 */ 6399 sectors_out = 0; 6400 ata_flags |= AP_FLAG_TLEN_NO_DATA; 6401 } else { 6402 command_out = ATA_SEND_FPDMA_QUEUED; 6403 /* 6404 * Note that we're defaulting to normal priority, 6405 * and assuming that the SCSI to ATA translation 6406 * layer will insert the NCQ tag number in the tag 6407 * field. That isn't clear in the SAT-4 spec (as 6408 * of rev 05). 6409 */ 6410 sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8; 6411 6412 ata_flags |= AP_FLAG_TLEN_FEAT | 6413 AP_FLAG_TDIR_TO_DEV; 6414 6415 /* 6416 * For SEND FPDMA QUEUED, the transfer length is 6417 * encoded in the FEATURE register, and 0 means 6418 * that 65536 512 byte blocks are to be tranferred. 6419 * In practice, it seems unlikely that we'll see 6420 * a transfer that large, and it may confuse the 6421 * the SAT layer, because generally that means that 6422 * 0 bytes should be transferred. 6423 */ 6424 if (dxfer_len == (65536 * 512)) { 6425 features_out = 0; 6426 } else if (dxfer_len <= (65535 * 512)) { 6427 features_out = ((dxfer_len >> 9) & 0xffff); 6428 } else { 6429 /* The transfer is too big. */ 6430 retval = 1; 6431 goto bailout; 6432 } 6433 6434 } 6435 6436 auxiliary = (zm_action & 0xf) | (zone_flags << 8); 6437 protocol = AP_PROTO_FPDMA; 6438 } 6439 6440 protocol |= AP_EXTEND; 6441 6442 retval = scsi_ata_pass(csio, 6443 retries, 6444 cbfcnp, 6445 /*flags*/ (dxfer_len > 0) ? 
CAM_DIR_OUT : CAM_DIR_NONE, 6446 tag_action, 6447 /*protocol*/ protocol, 6448 /*ata_flags*/ ata_flags, 6449 /*features*/ features_out, 6450 /*sector_count*/ sectors_out, 6451 /*lba*/ zone_id, 6452 /*command*/ command_out, 6453 /*device*/ 0, 6454 /*icc*/ 0, 6455 /*auxiliary*/ auxiliary, 6456 /*control*/ 0, 6457 /*data_ptr*/ data_ptr, 6458 /*dxfer_len*/ dxfer_len, 6459 /*cdb_storage*/ cdb_storage, 6460 /*cdb_storage_len*/ cdb_storage_len, 6461 /*minimum_cmd_size*/ 0, 6462 /*sense_len*/ SSD_FULL_SIZE, 6463 /*timeout*/ timeout); 6464 6465 bailout: 6466 6467 return (retval); 6468 } 6469 6470 int 6471 scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, 6472 void (*cbfcnp)(struct cam_periph *, union ccb *), 6473 uint8_t tag_action, int use_ncq, 6474 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, 6475 uint8_t *data_ptr, uint32_t dxfer_len, 6476 uint8_t *cdb_storage, size_t cdb_storage_len, 6477 uint8_t sense_len, uint32_t timeout) 6478 { 6479 uint8_t command_out, protocol; 6480 uint16_t features_out, sectors_out; 6481 uint32_t auxiliary; 6482 int ata_flags; 6483 int retval; 6484 6485 retval = 0; 6486 ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS; 6487 6488 if (use_ncq == 0) { 6489 command_out = ATA_ZAC_MANAGEMENT_IN; 6490 /* XXX KDM put a macro here */ 6491 features_out = (zm_action & 0xf) | (zone_flags << 8); 6492 sectors_out = dxfer_len >> 9; /* XXX KDM macro */ 6493 protocol = AP_PROTO_DMA; 6494 ata_flags |= AP_FLAG_TLEN_SECT_CNT; 6495 auxiliary = 0; 6496 } else { 6497 ata_flags |= AP_FLAG_TLEN_FEAT; 6498 6499 command_out = ATA_RECV_FPDMA_QUEUED; 6500 sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; 6501 6502 /* 6503 * For RECEIVE FPDMA QUEUED, the transfer length is 6504 * encoded in the FEATURE register, and 0 means 6505 * that 65536 512 byte blocks are to be tranferred. 
6506 * In practice, it seems unlikely that we'll see 6507 * a transfer that large, and it may confuse the 6508 * the SAT layer, because generally that means that 6509 * 0 bytes should be transferred. 6510 */ 6511 if (dxfer_len == (65536 * 512)) { 6512 features_out = 0; 6513 } else if (dxfer_len <= (65535 * 512)) { 6514 features_out = ((dxfer_len >> 9) & 0xffff); 6515 } else { 6516 /* The transfer is too big. */ 6517 retval = 1; 6518 goto bailout; 6519 } 6520 auxiliary = (zm_action & 0xf) | (zone_flags << 8), 6521 protocol = AP_PROTO_FPDMA; 6522 } 6523 6524 protocol |= AP_EXTEND; 6525 6526 retval = scsi_ata_pass(csio, 6527 retries, 6528 cbfcnp, 6529 /*flags*/ CAM_DIR_IN, 6530 tag_action, 6531 /*protocol*/ protocol, 6532 /*ata_flags*/ ata_flags, 6533 /*features*/ features_out, 6534 /*sector_count*/ sectors_out, 6535 /*lba*/ zone_id, 6536 /*command*/ command_out, 6537 /*device*/ 0, 6538 /*icc*/ 0, 6539 /*auxiliary*/ auxiliary, 6540 /*control*/ 0, 6541 /*data_ptr*/ data_ptr, 6542 /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */ 6543 /*cdb_storage*/ cdb_storage, 6544 /*cdb_storage_len*/ cdb_storage_len, 6545 /*minimum_cmd_size*/ 0, 6546 /*sense_len*/ SSD_FULL_SIZE, 6547 /*timeout*/ timeout); 6548 6549 bailout: 6550 return (retval); 6551 } 6552