/*-
 * Data structures and definitions for CAM Control Blocks (CCBs).
 *
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _CAM_CAM_CCB_H
#define _CAM_CAM_CCB_H 1

#include <sys/queue.h>
#include <sys/cdefs.h>
#include <sys/time.h>
#include <sys/limits.h>
#ifndef _KERNEL
#include <sys/callout.h>
#endif
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/ata/ata_all.h>
#include <cam/nvme/nvme_all.h>
#include <cam/mmc/mmc_all.h>

/* General allocation length definitions for CCB structures */
#define	IOCDBLEN	CAM_MAX_CDBLEN	/* Space for CDB bytes/pointer */
#define	VUHBALEN	14		/* Vendor Unique HBA length */
#define	SIM_IDLEN	16		/* ASCII string len for SIM ID */
#define	HBA_IDLEN	16		/* ASCII string len for HBA ID */
#define	DEV_IDLEN	16		/* ASCII string len for device names */
#define	CCB_PERIPH_PRIV_SIZE	2	/* size of peripheral private area */
#define	CCB_SIM_PRIV_SIZE	2	/* size of sim private area */

/* Struct definitions for CAM control blocks */

/* Common CCB header */
/* CAM CCB flags */
typedef enum {
	CAM_CDB_POINTER		= 0x00000001,/* The CDB field is a pointer */
	CAM_QUEUE_ENABLE	= 0x00000002,/* SIM queue actions are enabled */
	CAM_CDB_LINKED		= 0x00000004,/* CCB contains a linked CDB */
	CAM_NEGOTIATE		= 0x00000008,/*
					      * Perform transport negotiation
					      * with this command.
					      */
	CAM_DATA_ISPHYS		= 0x00000010,/* Data type with physical addrs */
	CAM_DIS_AUTOSENSE	= 0x00000020,/* Disable autosense feature */
	CAM_DIR_BOTH		= 0x00000000,/* Data direction (00:IN/OUT) */
	CAM_DIR_IN		= 0x00000040,/* Data direction (01:DATA IN) */
	CAM_DIR_OUT		= 0x00000080,/* Data direction (10:DATA OUT) */
	CAM_DIR_NONE		= 0x000000C0,/* Data direction (11:no data) */
	CAM_DIR_MASK		= 0x000000C0,/* Data direction Mask */
	CAM_DATA_VADDR		= 0x00000000,/* Data type (000:Virtual) */
	CAM_DATA_PADDR		= 0x00000010,/* Data type (001:Physical) */
	CAM_DATA_SG		= 0x00040000,/* Data type (010:sglist) */
	CAM_DATA_SG_PADDR	= 0x00040010,/* Data type (011:sglist phys) */
	CAM_DATA_BIO		= 0x00200000,/* Data type (100:bio) */
	CAM_DATA_MASK		= 0x00240010,/* Data type mask */
	CAM_SOFT_RST_OP		= 0x00000100,/* Use Soft reset alternative */
	CAM_ENG_SYNC		= 0x00000200,/* Flush resid bytes on complete */
	CAM_DEV_QFRZDIS		= 0x00000400,/* Disable DEV Q freezing */
	CAM_DEV_QFREEZE		= 0x00000800,/* Freeze DEV Q on execution */
	CAM_HIGH_POWER		= 0x00001000,/* Command takes a lot of power */
	CAM_SENSE_PTR		= 0x00002000,/* Sense data is a pointer */
	CAM_SENSE_PHYS		= 0x00004000,/* Sense pointer is physical addr*/
	CAM_TAG_ACTION_VALID	= 0x00008000,/* Use the tag action in this ccb*/
	CAM_PASS_ERR_RECOVER	= 0x00010000,/* Pass driver does err. recovery*/
	CAM_DIS_DISCONNECT	= 0x00020000,/* Disable disconnect */
	CAM_MSG_BUF_PHYS	= 0x00080000,/* Message buffer ptr is physical*/
	CAM_SNS_BUF_PHYS	= 0x00100000,/* Autosense data ptr is physical*/
	CAM_CDB_PHYS		= 0x00400000,/* CDB pointer is physical */
	CAM_ENG_SGLIST		= 0x00800000,/* SG list is for the HBA engine */

/* Phase cognizant mode flags */
	CAM_DIS_AUTOSRP		= 0x01000000,/* Disable autosave/restore ptrs */
	CAM_DIS_AUTODISC	= 0x02000000,/* Disable auto disconnect */
	CAM_TGT_CCB_AVAIL	= 0x04000000,/* Target CCB available */
	CAM_TGT_PHASE_MODE	= 0x08000000,/* The SIM runs in phase mode */
	CAM_MSGB_VALID		= 0x10000000,/* Message buffer valid */
	CAM_STATUS_VALID	= 0x20000000,/* Status buffer valid */
	CAM_DATAB_VALID		= 0x40000000,/* Data buffer valid */

/* Host target Mode flags */
	CAM_SEND_SENSE		= 0x08000000,/* Send sense data with status */
	CAM_TERM_IO		= 0x10000000,/* Terminate I/O Message sup. */
	CAM_DISCONNECT		= 0x20000000,/* Disconnects are mandatory */
	CAM_SEND_STATUS		= 0x40000000,/* Send status after data phase */

	CAM_UNLOCKED		= 0x80000000 /* Call callback without lock. */
} ccb_flags;
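
/*
 * Illustrative sketch (editor's addition, not part of the original API
 * description): the data-direction and data-type bits above are meant to be
 * OR'd together into ccb_h.flags before the CCB is handed to xpt_action().
 * The "ccb" and "buf" names below are hypothetical.
 *
 *	ccb->ccb_h.flags = CAM_DIR_IN | CAM_DATA_VADDR;	// read into a VA buffer
 *	ccb->csio.data_ptr = buf;
 *	// A write from a physical-address S/G list instead:
 *	//	ccb->ccb_h.flags = CAM_DIR_OUT | CAM_DATA_SG_PADDR;
 *	// SIMs recover the two fields with the masks:
 *	//	(ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN
 *	//	(ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR
 */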

typedef enum {
	CAM_USER_DATA_ADDR	= 0x00000002,/* Userspace data pointers */
	CAM_SG_FORMAT_IOVEC	= 0x00000004,/* iovec instead of busdma S/G*/
	CAM_UNMAPPED_BUF	= 0x00000008 /* use unmapped I/O */
} ccb_xflags;

/* XPT Opcodes for xpt_action */
typedef enum {
/* Function code flags are bits greater than 0xff */
	XPT_FC_QUEUED		= 0x100,
				/* Non-immediate function code */
	XPT_FC_USER_CCB		= 0x200,
	XPT_FC_XPT_ONLY		= 0x400,
				/* Only for the transport layer device */
	XPT_FC_DEV_QUEUED	= 0x800 | XPT_FC_QUEUED,
				/* Passes through the device queues */
/* Common function commands: 0x00->0x0F */
	XPT_NOOP		= 0x00,
				/* Execute Nothing */
	XPT_SCSI_IO		= 0x01 | XPT_FC_DEV_QUEUED,
				/* Execute the requested I/O operation */
	XPT_GDEV_TYPE		= 0x02,
				/* Get type information for specified device */
	XPT_GDEVLIST		= 0x03,
				/* Get a list of peripheral devices */
	XPT_PATH_INQ		= 0x04,
				/* Path routing inquiry */
	XPT_REL_SIMQ		= 0x05,
				/* Release a frozen device queue */
	XPT_SASYNC_CB		= 0x06,
				/* Set Asynchronous Callback Parameters */
	XPT_SDEV_TYPE		= 0x07,
				/* Set device type information */
	XPT_SCAN_BUS		= 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB
				       | XPT_FC_XPT_ONLY,
				/* (Re)Scan the SCSI Bus */
	XPT_DEV_MATCH		= 0x09 | XPT_FC_XPT_ONLY,
				/* Get EDT entries matching the given pattern */
	XPT_DEBUG		= 0x0a,
				/* Turn on debugging for a bus, target or lun */
	XPT_PATH_STATS		= 0x0b,
				/* Path statistics (error counts, etc.) */
	XPT_GDEV_STATS		= 0x0c,
				/* Device statistics (error counts, etc.) */
	XPT_DEV_ADVINFO		= 0x0e,
				/* Get/Set Device advanced information */
	XPT_ASYNC		= 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB
				       | XPT_FC_XPT_ONLY,
				/* Asynchronous event */
/* SCSI Control Functions: 0x10->0x1F */
	XPT_ABORT		= 0x10,
				/* Abort the specified CCB */
	XPT_RESET_BUS		= 0x11 | XPT_FC_XPT_ONLY,
				/* Reset the specified SCSI bus */
	XPT_RESET_DEV		= 0x12 | XPT_FC_DEV_QUEUED,
				/* Bus Device Reset the specified SCSI device */
	XPT_TERM_IO		= 0x13,
				/* Terminate the I/O process */
	XPT_SCAN_LUN		= 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB
				       | XPT_FC_XPT_ONLY,
				/* Scan Logical Unit */
	XPT_GET_TRAN_SETTINGS	= 0x15,
				/*
				 * Get default/user transfer settings
				 * for the target
				 */
	XPT_SET_TRAN_SETTINGS	= 0x16,
				/*
				 * Set transfer rate/width
				 * negotiation settings
				 */
	XPT_CALC_GEOMETRY	= 0x17,
				/*
				 * Calculate the geometry parameters for
				 * a device given the sector size and
				 * volume size.
				 */
	XPT_ATA_IO		= 0x18 | XPT_FC_DEV_QUEUED,
				/* Execute the requested ATA I/O operation */

	XPT_GET_SIM_KNOB_OLD	= 0x18, /* Compat only */

	XPT_SET_SIM_KNOB	= 0x19,
				/*
				 * Set SIM specific knob values.
				 */

	XPT_GET_SIM_KNOB	= 0x1a,
				/*
				 * Get SIM specific knob values.
				 */

	XPT_SMP_IO		= 0x1b | XPT_FC_DEV_QUEUED,
				/* Serial Management Protocol */

	XPT_NVME_IO		= 0x1c | XPT_FC_DEV_QUEUED,
				/* Execute the requested NVMe I/O operation */

	XPT_MMC_IO		= 0x1d | XPT_FC_DEV_QUEUED,
				/* Placeholder for MMC / SD / SDIO I/O stuff */

	XPT_SCAN_TGT		= 0x1e | XPT_FC_QUEUED | XPT_FC_USER_CCB
				       | XPT_FC_XPT_ONLY,
				/* Scan Target */

	XPT_NVME_ADMIN		= 0x1f | XPT_FC_DEV_QUEUED,
				/* Execute the requested NVMe Admin operation */

/* HBA engine commands 0x20->0x2F */
	XPT_ENG_INQ		= 0x20 | XPT_FC_XPT_ONLY,
				/* HBA engine feature inquiry */
	XPT_ENG_EXEC		= 0x21 | XPT_FC_DEV_QUEUED,
				/* HBA execute engine request */

/* Target mode commands: 0x30->0x3F */
	XPT_EN_LUN		= 0x30,
				/* Enable LUN as a target */
	XPT_TARGET_IO		= 0x31 | XPT_FC_DEV_QUEUED,
				/* Execute target I/O request */
	XPT_ACCEPT_TARGET_IO	= 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
				/* Accept Host Target Mode CDB */
	XPT_CONT_TARGET_IO	= 0x33 | XPT_FC_DEV_QUEUED,
				/* Continue Host Target I/O Connection */
	XPT_IMMED_NOTIFY	= 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
				/* Notify Host Target driver of event (obsolete) */
	XPT_NOTIFY_ACK		= 0x35,
				/* Acknowledgement of event (obsolete) */
	XPT_IMMEDIATE_NOTIFY	= 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
				/* Notify Host Target driver of event */
	XPT_NOTIFY_ACKNOWLEDGE	= 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
				/* Acknowledgement of event */
	XPT_REPROBE_LUN		= 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
				/* Query device capacity and notify GEOM */

/* Vendor Unique codes: 0x80->0x8F */
	XPT_VUNIQUE		= 0x80
} xpt_opcode;

#define	XPT_FC_GROUP_MASK		0xF0
#define	XPT_FC_GROUP(op)		((op) & XPT_FC_GROUP_MASK)
#define	XPT_FC_GROUP_COMMON		0x00
#define	XPT_FC_GROUP_SCSI_CONTROL	0x10
#define	XPT_FC_GROUP_HBA_ENGINE		0x20
#define	XPT_FC_GROUP_TMODE		0x30
#define	XPT_FC_GROUP_VENDOR_UNIQUE	0x80

#define	XPT_FC_IS_DEV_QUEUED(ccb)	\
    (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED)
#define	XPT_FC_IS_QUEUED(ccb)		\
    (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0)

typedef enum {
	PROTO_UNKNOWN,
	PROTO_UNSPECIFIED,
	PROTO_SCSI,	/* Small Computer System Interface */
	PROTO_ATA,	/* AT Attachment */
	PROTO_ATAPI,	/* AT Attachment Packetized Interface */
	PROTO_SATAPM,	/* SATA Port Multiplier */
	PROTO_SEMB,	/* SATA Enclosure Management Bridge */
	PROTO_NVME,	/* NVME */
	PROTO_MMCSD,	/* MMC, SD, SDIO */
} cam_proto;

typedef enum {
	XPORT_UNKNOWN,
	XPORT_UNSPECIFIED,
	XPORT_SPI,	/* SCSI Parallel Interface */
	XPORT_FC,	/* Fibre Channel */
	XPORT_SSA,	/* Serial Storage Architecture */
	XPORT_USB,	/* Universal Serial Bus */
	XPORT_PPB,	/* Parallel Port Bus */
	XPORT_ATA,	/* AT Attachment */
	XPORT_SAS,	/* Serial Attached SCSI */
	XPORT_SATA,	/* Serial AT Attachment */
	XPORT_ISCSI,	/* iSCSI */
	XPORT_SRP,	/* SCSI RDMA Protocol */
	XPORT_NVME,	/* NVMe over PCIe */
	XPORT_MMCSD,	/* MMC, SD, SDIO card */
} cam_xport;

#define	XPORT_IS_NVME(t)	((t) == XPORT_NVME)
#define	XPORT_IS_ATA(t)		((t) == XPORT_ATA || (t) == XPORT_SATA)
#define	XPORT_IS_SCSI(t)	((t) != XPORT_UNKNOWN && \
				 (t) != XPORT_UNSPECIFIED && \
				 !XPORT_IS_ATA(t) && !XPORT_IS_NVME(t))
#define	XPORT_DEVSTAT_TYPE(t)	(XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \
				 XPORT_IS_SCSI(t) ? DEVSTAT_TYPE_IF_SCSI : \
				 DEVSTAT_TYPE_IF_OTHER)

#define	PROTO_VERSION_UNKNOWN		(UINT_MAX - 1)
#define	PROTO_VERSION_UNSPECIFIED	UINT_MAX
#define	XPORT_VERSION_UNKNOWN		(UINT_MAX - 1)
#define	XPORT_VERSION_UNSPECIFIED	UINT_MAX
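
/*
 * Illustrative sketch (editor's addition): how a transport or SIM might use
 * the function-code flag and group macros above to classify a CCB.  The
 * surrounding control flow is hypothetical.
 *
 *	// Immediate CCBs complete inside xpt_action(); queued ones do not.
 *	if (XPT_FC_IS_QUEUED(ccb)) { ... }
 *	// Device-queued CCBs additionally pass through the device queues.
 *	if (XPT_FC_IS_DEV_QUEUED(ccb)) { ... }
 *	// The 0xF0 group bits identify the command family.
 *	if (XPT_FC_GROUP(ccb->ccb_h.func_code) == XPT_FC_GROUP_TMODE) { ... }
 */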

typedef union {
	LIST_ENTRY(ccb_hdr) le;
	SLIST_ENTRY(ccb_hdr) sle;
	TAILQ_ENTRY(ccb_hdr) tqe;
	STAILQ_ENTRY(ccb_hdr) stqe;
} camq_entry;

typedef union {
	void		*ptr;
	u_long		field;
	u_int8_t	bytes[sizeof(uintptr_t)];
} ccb_priv_entry;

typedef union {
	ccb_priv_entry	entries[CCB_PERIPH_PRIV_SIZE];
	u_int8_t	bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)];
} ccb_ppriv_area;

typedef union {
	ccb_priv_entry	entries[CCB_SIM_PRIV_SIZE];
	u_int8_t	bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)];
} ccb_spriv_area;

typedef struct {
	struct timeval	*etime;
	uintptr_t	sim_data;
	uintptr_t	periph_data;
} ccb_qos_area;

struct ccb_hdr {
	cam_pinfo	pinfo;		/* Info for priority scheduling */
	camq_entry	xpt_links;	/* For chaining in the XPT layer */
	camq_entry	sim_links;	/* For chaining in the SIM layer */
	camq_entry	periph_links;	/* For chaining in the type driver */
	u_int32_t	retry_count;
	void		(*cbfcnp)(struct cam_periph *, union ccb *);
					/* Callback on completion function */
	xpt_opcode	func_code;	/* XPT function code */
	u_int32_t	status;		/* Status returned by CAM subsystem */
	struct cam_path	*path;		/* Compiled path for this ccb */
	path_id_t	path_id;	/* Path ID for the request */
	target_id_t	target_id;	/* Target device ID */
	lun_id_t	target_lun;	/* Target LUN number */
	u_int32_t	flags;		/* ccb_flags */
	u_int32_t	xflags;		/* Extended flags */
	ccb_ppriv_area	periph_priv;
	ccb_spriv_area	sim_priv;
	ccb_qos_area	qos;
	u_int32_t	timeout;	/* Hard timeout value in milliseconds */
	struct timeval	softtimeout;	/* Soft timeout value in sec + usec */
};

/* Get Device Information CCB */
struct ccb_getdev {
	struct ccb_hdr	ccb_h;
	cam_proto	protocol;
	struct scsi_inquiry_data inq_data;
	struct ata_params ident_data;
	u_int8_t	serial_num[252];
	u_int8_t	inq_flags;
	u_int8_t	serial_num_len;
	void		*padding[2];
};
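
/*
 * Illustrative sketch (editor's addition): a typical in-kernel use of the
 * ccb_getdev CCB.  xpt_setup_ccb() and xpt_action() are declared in
 * <cam/cam_xpt.h> and CAM_PRIORITY_NORMAL in <cam/cam.h>; the periph
 * variable is assumed.
 *
 *	struct ccb_getdev cgd;
 *
 *	memset(&cgd, 0, sizeof(cgd));
 *	xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 *	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 *	xpt_action((union ccb *)&cgd);
 *	// On success, cgd.protocol, cgd.inq_data/ident_data and the cached
 *	// serial number describe the device behind periph->path.
 */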

/* Device Statistics CCB */
struct ccb_getdevstats {
	struct ccb_hdr	ccb_h;
	int	dev_openings;	/* Space left for more work on device*/
	int	dev_active;	/* Transactions running on the device */
	int	allocated;	/* CCBs allocated for the device */
	int	queued;		/* CCBs queued to be sent to the device */
	int	held;		/*
				 * CCBs held by peripheral drivers
				 * for this device
				 */
	int	maxtags;	/*
				 * Boundary conditions for number of
				 * tagged operations
				 */
	int	mintags;
	struct timeval last_reset;	/* Time of last bus reset/loop init */
};

typedef enum {
	CAM_GDEVLIST_LAST_DEVICE,
	CAM_GDEVLIST_LIST_CHANGED,
	CAM_GDEVLIST_MORE_DEVS,
	CAM_GDEVLIST_ERROR
} ccb_getdevlist_status_e;

struct ccb_getdevlist {
	struct ccb_hdr		ccb_h;
	char			periph_name[DEV_IDLEN];
	u_int32_t		unit_number;
	unsigned int		generation;
	u_int32_t		index;
	ccb_getdevlist_status_e	status;
};

typedef enum {
	PERIPH_MATCH_NONE	= 0x000,
	PERIPH_MATCH_PATH	= 0x001,
	PERIPH_MATCH_TARGET	= 0x002,
	PERIPH_MATCH_LUN	= 0x004,
	PERIPH_MATCH_NAME	= 0x008,
	PERIPH_MATCH_UNIT	= 0x010,
	PERIPH_MATCH_ANY	= 0x01f
} periph_pattern_flags;

struct periph_match_pattern {
	char			periph_name[DEV_IDLEN];
	u_int32_t		unit_number;
	path_id_t		path_id;
	target_id_t		target_id;
	lun_id_t		target_lun;
	periph_pattern_flags	flags;
};

typedef enum {
	DEV_MATCH_NONE		= 0x000,
	DEV_MATCH_PATH		= 0x001,
	DEV_MATCH_TARGET	= 0x002,
	DEV_MATCH_LUN		= 0x004,
	DEV_MATCH_INQUIRY	= 0x008,
	DEV_MATCH_DEVID		= 0x010,
	DEV_MATCH_ANY		= 0x00f
} dev_pattern_flags;

struct device_id_match_pattern {
	uint8_t id_len;
	uint8_t id[256];
};

struct device_match_pattern {
	path_id_t		path_id;
	target_id_t		target_id;
	lun_id_t		target_lun;
	dev_pattern_flags	flags;
	union {
		struct scsi_static_inquiry_pattern	inq_pat;
		struct device_id_match_pattern		devid_pat;
	} data;
};

typedef enum {
	BUS_MATCH_NONE		= 0x000,
	BUS_MATCH_PATH		= 0x001,
	BUS_MATCH_NAME		= 0x002,
	BUS_MATCH_UNIT		= 0x004,
	BUS_MATCH_BUS_ID	= 0x008,
	BUS_MATCH_ANY		= 0x00f
} bus_pattern_flags;

struct bus_match_pattern {
	path_id_t		path_id;
	char			dev_name[DEV_IDLEN];
	u_int32_t		unit_number;
	u_int32_t		bus_id;
	bus_pattern_flags	flags;
};

union match_pattern {
	struct periph_match_pattern	periph_pattern;
	struct device_match_pattern	device_pattern;
	struct bus_match_pattern	bus_pattern;
};

typedef enum {
	DEV_MATCH_PERIPH,
	DEV_MATCH_DEVICE,
	DEV_MATCH_BUS
} dev_match_type;

struct dev_match_pattern {
	dev_match_type		type;
	union match_pattern	pattern;
};

struct periph_match_result {
	char			periph_name[DEV_IDLEN];
	u_int32_t		unit_number;
	path_id_t		path_id;
	target_id_t		target_id;
	lun_id_t		target_lun;
};

typedef enum {
	DEV_RESULT_NOFLAG	= 0x00,
	DEV_RESULT_UNCONFIGURED	= 0x01
} dev_result_flags;

struct device_match_result {
	path_id_t			path_id;
	target_id_t			target_id;
	lun_id_t			target_lun;
	cam_proto			protocol;
	struct scsi_inquiry_data	inq_data;
	struct ata_params		ident_data;
	dev_result_flags		flags;
};

struct bus_match_result {
	path_id_t	path_id;
	char		dev_name[DEV_IDLEN];
	u_int32_t	unit_number;
	u_int32_t	bus_id;
};

union match_result {
	struct periph_match_result	periph_result;
	struct device_match_result	device_result;
	struct bus_match_result		bus_result;
};

struct dev_match_result {
	dev_match_type		type;
	union match_result	result;
};

typedef enum {
	CAM_DEV_MATCH_LAST,
	CAM_DEV_MATCH_MORE,
	CAM_DEV_MATCH_LIST_CHANGED,
	CAM_DEV_MATCH_SIZE_ERROR,
	CAM_DEV_MATCH_ERROR
} ccb_dev_match_status;

typedef enum {
	CAM_DEV_POS_NONE	= 0x000,
	CAM_DEV_POS_BUS		= 0x001,
	CAM_DEV_POS_TARGET	= 0x002,
	CAM_DEV_POS_DEVICE	= 0x004,
	CAM_DEV_POS_PERIPH	= 0x008,
	CAM_DEV_POS_PDPTR	= 0x010,
	CAM_DEV_POS_TYPEMASK	= 0xf00,
	CAM_DEV_POS_EDT		= 0x100,
	CAM_DEV_POS_PDRV	= 0x200
} dev_pos_type;

struct ccb_dm_cookie {
	void	*bus;
	void	*target;
	void	*device;
	void	*periph;
	void	*pdrv;
};

struct ccb_dev_position {
	u_int			generations[4];
#define	CAM_BUS_GENERATION	0x00
#define	CAM_TARGET_GENERATION	0x01
#define	CAM_DEV_GENERATION	0x02
#define	CAM_PERIPH_GENERATION	0x03
	dev_pos_type		position_type;
	struct ccb_dm_cookie	cookie;
};

struct ccb_dev_match {
	struct ccb_hdr			ccb_h;
	ccb_dev_match_status		status;
	u_int32_t			num_patterns;
	u_int32_t			pattern_buf_len;
	struct dev_match_pattern	*patterns;
	u_int32_t			num_matches;
	u_int32_t			match_buf_len;
	struct dev_match_result		*matches;
	struct ccb_dev_position		pos;
};
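
/*
 * Illustrative sketch (editor's addition): the XPT_DEV_MATCH CCB is consumed
 * in chunks.  The caller supplies pattern and result buffers and repeats the
 * call while the transport reports CAM_DEV_MATCH_MORE, using "pos" to resume.
 * Buffer sizes and the way the CCB is submitted (xpt_action() in the kernel,
 * the CAMIOCOMMAND ioctl from userland) are assumptions of the example.
 *
 *	struct ccb_dev_match *cdm = &ccb->cdm;
 *
 *	cdm->ccb_h.func_code = XPT_DEV_MATCH;
 *	cdm->num_patterns = 0;		// no patterns: match everything
 *	cdm->pattern_buf_len = 0;
 *	cdm->patterns = NULL;
 *	cdm->num_matches = 0;
 *	cdm->match_buf_len = nentries * sizeof(struct dev_match_result);
 *	cdm->matches = result_buf;
 *	do {
 *		xpt_action((union ccb *)cdm);
 *		// ... consume cdm->num_matches entries from cdm->matches ...
 *	} while (cdm->status == CAM_DEV_MATCH_MORE);
 */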

/*
 * Definitions for the path inquiry CCB fields.
 */
#define	CAM_VERSION	0x19	/* Hex value for current version */

typedef enum {
	PI_MDP_ABLE	= 0x80,	/* Supports MDP message */
	PI_WIDE_32	= 0x40,	/* Supports 32 bit wide SCSI */
	PI_WIDE_16	= 0x20,	/* Supports 16 bit wide SCSI */
	PI_SDTR_ABLE	= 0x10,	/* Supports SDTR message */
	PI_LINKED_CDB	= 0x08,	/* Supports linked CDBs */
	PI_SATAPM	= 0x04,	/* Supports SATA PM */
	PI_TAG_ABLE	= 0x02,	/* Supports tag queue messages */
	PI_SOFT_RST	= 0x01	/* Supports soft reset alternative */
} pi_inqflag;

typedef enum {
	PIT_PROCESSOR	= 0x80,	/* Target mode processor mode */
	PIT_PHASE	= 0x40,	/* Target mode phase cog. mode */
	PIT_DISCONNECT	= 0x20,	/* Disconnects supported in target mode */
	PIT_TERM_IO	= 0x10,	/* Terminate I/O message supported in TM */
	PIT_GRP_6	= 0x08,	/* Group 6 commands supported */
	PIT_GRP_7	= 0x04	/* Group 7 commands supported */
} pi_tmflag;

typedef enum {
	PIM_ATA_EXT	= 0x200,/* ATA requests can understand ata_ext requests */
	PIM_EXTLUNS	= 0x100,/* 64bit extended LUNs supported */
	PIM_SCANHILO	= 0x80,	/* Bus scans from high ID to low ID */
	PIM_NOREMOVE	= 0x40,	/* Removable devices not included in scan */
	PIM_NOINITIATOR	= 0x20,	/* Initiator role not supported. */
	PIM_NOBUSRESET	= 0x10,	/* User has disabled initial BUS RESET */
	PIM_NO_6_BYTE	= 0x08,	/* Do not send 6-byte commands */
	PIM_SEQSCAN	= 0x04,	/* Do bus scans sequentially, not in parallel */
	PIM_UNMAPPED	= 0x02,	/* Can accept unmapped I/O buffers */
	PIM_NOSCAN	= 0x01	/* SIM does its own scanning */
} pi_miscflag;

/* Path Inquiry CCB */
struct ccb_pathinq_settings_spi {
	u_int8_t ppr_options;
};

struct ccb_pathinq_settings_fc {
	u_int64_t wwnn;		/* world wide node name */
	u_int64_t wwpn;		/* world wide port name */
	u_int32_t port;		/* 24 bit port id, if known */
	u_int32_t bitrate;	/* Mbps */
};

struct ccb_pathinq_settings_sas {
	u_int32_t bitrate;	/* Mbps */
};

struct ccb_pathinq_settings_nvme {
	uint32_t nsid;		/* Namespace ID for this path */
	uint32_t domain;
	uint8_t  bus;
	uint8_t  slot;
	uint8_t  function;
	uint8_t  extra;
};

#define	PATHINQ_SETTINGS_SIZE	128

struct ccb_pathinq {
	struct ccb_hdr ccb_h;
	u_int8_t    version_num;	/* Version number for the SIM/HBA */
	u_int8_t    hba_inquiry;	/* Mimic of INQ byte 7 for the HBA */
	u_int16_t   target_sprt;	/* Flags for target mode support */
	u_int32_t   hba_misc;		/* Misc HBA features */
	u_int16_t   hba_eng_cnt;	/* HBA engine count */
					/* Vendor Unique capabilities */
	u_int8_t    vuhba_flags[VUHBALEN];
	u_int32_t   max_target;		/* Maximum supported Target */
	u_int32_t   max_lun;		/* Maximum supported Lun */
	u_int32_t   async_flags;	/* Installed Async handlers */
	path_id_t   hpath_id;		/* Highest Path ID in the subsystem */
	target_id_t initiator_id;	/* ID of the HBA on the SCSI bus */
	char	    sim_vid[SIM_IDLEN];	/* Vendor ID of the SIM */
	char	    hba_vid[HBA_IDLEN];	/* Vendor ID of the HBA */
	char	    dev_name[DEV_IDLEN];/* Device name for SIM */
	u_int32_t   unit_number;	/* Unit number for SIM */
	u_int32_t   bus_id;		/* Bus ID for SIM */
	u_int32_t   base_transfer_speed;/* Base bus speed in KB/sec */
	cam_proto   protocol;
	u_int	    protocol_version;
	cam_xport   transport;
	u_int	    transport_version;
	union {
		struct ccb_pathinq_settings_spi spi;
		struct ccb_pathinq_settings_fc fc;
		struct ccb_pathinq_settings_sas sas;
		struct ccb_pathinq_settings_nvme nvme;
		char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE];
	} xport_specific;
	u_int	    maxio;		/* Max supported I/O size, in bytes. */
	u_int16_t   hba_vendor;		/* HBA vendor ID */
	u_int16_t   hba_device;		/* HBA device ID */
	u_int16_t   hba_subvendor;	/* HBA subvendor ID */
	u_int16_t   hba_subdevice;	/* HBA subdevice ID */
};
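
/*
 * Illustrative sketch (editor's addition): XPT_PATH_INQ is an immediate CCB;
 * peripheral drivers commonly issue it to learn SIM limits such as maxio.
 * xpt_setup_ccb()/xpt_action() come from <cam/cam_xpt.h>, CAM_REQ_CMP and
 * CAM_PRIORITY_NORMAL from <cam/cam.h>; the "path" variable is assumed.
 *
 *	struct ccb_pathinq cpi;
 *
 *	memset(&cpi, 0, sizeof(cpi));
 *	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *	if ((cpi.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
 *		// cpi.maxio bounds the largest data_ptr/dxfer_len the SIM
 *		// accepts; cpi.hba_misc carries the PIM_* flags above.
 *	}
 */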

/* Path Statistics CCB */
struct ccb_pathstats {
	struct ccb_hdr	ccb_h;
	struct timeval	last_reset;	/* Time of last bus reset/loop init */
};

typedef enum {
	SMP_FLAG_NONE	= 0x00,
	SMP_FLAG_REQ_SG	= 0x01,
	SMP_FLAG_RSP_SG	= 0x02
} ccb_smp_pass_flags;

/*
 * Serial Management Protocol CCB
 * XXX Currently the semantics for this CCB are that it is executed either
 * by the addressed device, or that device's parent (i.e. an expander for
 * any device on an expander) if the addressed device doesn't support SMP.
 * Later, once we have the ability to probe SMP-only devices and put them
 * in CAM's topology, the CCB will only be executed by the addressed device
 * if possible.
 */
struct ccb_smpio {
	struct ccb_hdr		ccb_h;
	uint8_t			*smp_request;
	int			smp_request_len;
	uint16_t		smp_request_sglist_cnt;
	uint8_t			*smp_response;
	int			smp_response_len;
	uint16_t		smp_response_sglist_cnt;
	ccb_smp_pass_flags	flags;
};

typedef union {
	u_int8_t *sense_ptr;		/*
					 * Pointer to storage
					 * for sense information
					 */
					/* Storage Area for sense information */
	struct scsi_sense_data	sense_buf;
} sense_t;

typedef union {
	u_int8_t *cdb_ptr;		/* Pointer to the CDB bytes to send */
					/* Area for the CDB to send */
	u_int8_t cdb_bytes[IOCDBLEN];
} cdb_t;

/*
 * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO
 * function codes.
 */
struct ccb_scsiio {
	struct ccb_hdr	ccb_h;
	union ccb	*next_ccb;	/* Ptr for next CCB for action */
	u_int8_t	*req_map;	/* Ptr to mapping info */
	u_int8_t	*data_ptr;	/* Ptr to the data buf/SG list */
	u_int32_t	dxfer_len;	/* Data transfer length */
					/* Autosense storage */
	struct scsi_sense_data sense_data;
	u_int8_t	sense_len;	/* Number of bytes to autosense */
	u_int8_t	cdb_len;	/* Number of bytes for the CDB */
	u_int16_t	sglist_cnt;	/* Number of SG list entries */
	u_int8_t	scsi_status;	/* Returned SCSI status */
	u_int8_t	sense_resid;	/* Autosense resid length: 2's comp */
	u_int32_t	resid;		/* Transfer residual length: 2's comp */
	cdb_t		cdb_io;		/* Union for CDB bytes/pointer */
	u_int8_t	*msg_ptr;	/* Pointer to the message buffer */
	u_int16_t	msg_len;	/* Number of bytes for the Message */
	u_int8_t	tag_action;	/* What to do for tag queueing */
	/*
	 * The tag action should be either the define below (to send a
	 * non-tagged transaction) or one of the defined scsi tag messages
	 * from scsi_message.h.
	 */
#define	CAM_TAG_ACTION_NONE	0x00
	u_int		tag_id;		/* tag id from initiator (target mode) */
	u_int		init_id;	/* initiator id of who selected */
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	struct bio	*bio;		/* Associated bio */
#endif
};

static __inline uint8_t *
scsiio_cdb_ptr(struct ccb_scsiio *ccb)
{
	return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
	    ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
}
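
/*
 * Illustrative sketch (editor's addition): consumers of a ccb_scsiio should
 * not read cdb_io.cdb_bytes directly, because the submitter may have set
 * CAM_CDB_POINTER and stored a pointer instead.  scsiio_cdb_ptr() above
 * handles both layouts; the variable names are hypothetical.
 *
 *	struct ccb_scsiio *csio = &ccb->csio;
 *	uint8_t *cdb = scsiio_cdb_ptr(csio);
 *
 *	switch (cdb[0]) {	// SCSI operation code, valid in either layout
 *	case 0x00:		// TEST UNIT READY
 *		break;
 *	}
 *	// csio->cdb_len gives the number of valid CDB bytes in either case.
 */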

/*
 * ATA I/O Request CCB used for the XPT_ATA_IO function code.
 */
struct ccb_ataio {
	struct ccb_hdr	ccb_h;
	union ccb	*next_ccb;	/* Ptr for next CCB for action */
	struct ata_cmd	cmd;		/* ATA command register set */
	struct ata_res	res;		/* ATA result register set */
	u_int8_t	*data_ptr;	/* Ptr to the data buf/SG list */
	u_int32_t	dxfer_len;	/* Data transfer length */
	u_int32_t	resid;		/* Transfer residual length: 2's comp */
	u_int8_t	ata_flags;	/* Flags for the rest of the buffer */
#define	ATA_FLAG_AUX	0x1
	uint32_t	aux;
	uint32_t	unused;
};
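
/*
 * Illustrative sketch (editor's addition): building a 28-bit ATA read with
 * this CCB.  cam_fill_ataio() is declared later in this header;
 * ata_28bit_cmd() and ATA_READ_DMA come from <cam/ata/ata_all.h> and
 * <sys/ata.h>, and their use here (like the buffer, callback, and lba/nblocks
 * names) is an assumption of the example.
 *
 *	cam_fill_ataio(&ccb->ataio,
 *		       3,		// retries
 *		       my_done,		// completion callback
 *		       CAM_DIR_IN,	// data comes from the device
 *		       0,		// tag_action (unused for ATA)
 *		       data_buf,
 *		       nblocks * 512,
 *		       30 * 1000);	// timeout in milliseconds
 *	ata_28bit_cmd(&ccb->ataio, ATA_READ_DMA, 0, lba, nblocks);
 */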

/*
 * MMC I/O Request CCB used for the XPT_MMC_IO function code.
 */
struct ccb_mmcio {
	struct ccb_hdr	ccb_h;
	union ccb	*next_ccb;	/* Ptr for next CCB for action */
	struct mmc_command cmd;
	struct mmc_command stop;
};

struct ccb_accept_tio {
	struct ccb_hdr	ccb_h;
	cdb_t		cdb_io;		/* Union for CDB bytes/pointer */
	u_int8_t	cdb_len;	/* Number of bytes for the CDB */
	u_int8_t	tag_action;	/* What to do for tag queueing */
	u_int8_t	sense_len;	/* Number of bytes of Sense Data */
	u_int		tag_id;		/* tag id from initiator (target mode) */
	u_int		init_id;	/* initiator id of who selected */
	struct scsi_sense_data sense_data;
};

static __inline uint8_t *
atio_cdb_ptr(struct ccb_accept_tio *ccb)
{
	return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
	    ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes);
}

/* Release SIM Queue */
struct ccb_relsim {
	struct ccb_hdr	ccb_h;
	u_int32_t	release_flags;
#define	RELSIM_ADJUST_OPENINGS		0x01
#define	RELSIM_RELEASE_AFTER_TIMEOUT	0x02
#define	RELSIM_RELEASE_AFTER_CMDCMPLT	0x04
#define	RELSIM_RELEASE_AFTER_QEMPTY	0x08
	u_int32_t	openings;
	u_int32_t	release_timeout;	/* Abstract argument. */
	u_int32_t	qfrozen_cnt;
};

/*
 * NVMe I/O Request CCB used for the XPT_NVME_IO and XPT_NVME_ADMIN function codes.
 */
struct ccb_nvmeio {
	struct ccb_hdr	ccb_h;
	union ccb	*next_ccb;	/* Ptr for next CCB for action */
	struct nvme_command cmd;	/* NVME command, per NVME standard */
	struct nvme_completion cpl;	/* NVME completion, per NVME standard */
	uint8_t		*data_ptr;	/* Ptr to the data buf/SG list */
	uint32_t	dxfer_len;	/* Data transfer length */
	uint16_t	sglist_cnt;	/* Number of SG list entries */
	uint16_t	unused;		/* padding for removed uint32_t */
};

/*
 * Definitions for the asynchronous callback CCB fields.
 */
typedef enum {
	AC_UNIT_ATTENTION	= 0x4000,/* Device reported UNIT ATTENTION */
	AC_ADVINFO_CHANGED	= 0x2000,/* Advanced info might have changed */
	AC_CONTRACT		= 0x1000,/* A contractual callback */
	AC_GETDEV_CHANGED	= 0x800,/* Getdev info might have changed */
	AC_INQ_CHANGED		= 0x400,/* Inquiry info might have changed */
	AC_TRANSFER_NEG		= 0x200,/* New transfer settings in effect */
	AC_LOST_DEVICE		= 0x100,/* A device went away */
	AC_FOUND_DEVICE		= 0x080,/* A new device was found */
	AC_PATH_DEREGISTERED	= 0x040,/* A path has de-registered */
	AC_PATH_REGISTERED	= 0x020,/* A new path has been registered */
	AC_SENT_BDR		= 0x010,/* A BDR message was sent to target */
	AC_SCSI_AEN		= 0x008,/* A SCSI AEN has been received */
	AC_UNSOL_RESEL		= 0x002,/* Unsolicited reselection occurred */
	AC_BUS_RESET		= 0x001 /* A SCSI bus reset occurred */
} ac_code;

typedef void ac_callback_t (void *softc, u_int32_t code,
			    struct cam_path *path, void *args);

/*
 * Generic Asynchronous callbacks.
 *
 * Generic arguments passed back, which are then interpreted according to a
 * per-system contract number.
 */
#define	AC_CONTRACT_DATA_MAX	(128 - sizeof (u_int64_t))
struct ac_contract {
	u_int64_t	contract_number;
	u_int8_t	contract_data[AC_CONTRACT_DATA_MAX];
};

#define	AC_CONTRACT_DEV_CHG	1
struct ac_device_changed {
	u_int64_t	wwpn;
	u_int32_t	port;
	target_id_t	target;
	u_int8_t	arrived;
};

/* Set Asynchronous Callback CCB */
struct ccb_setasync {
	struct ccb_hdr	ccb_h;
	u_int32_t	event_enable;	/* Async Event enables */
	ac_callback_t	*callback;
	void		*callback_arg;
};
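
/*
 * Illustrative sketch (editor's addition): registering for the AC_* events
 * above by filling a ccb_setasync CCB directly.  In-tree drivers normally go
 * through the xpt_register_async() wrapper instead; the callback and softc
 * names are hypothetical.
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = my_async_callback;	// an ac_callback_t
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 */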

/* Set Device Type CCB */
struct ccb_setdev {
	struct ccb_hdr	ccb_h;
	u_int8_t	dev_type;	/* Value for dev type field in EDT */
};

/* SCSI Control Functions */

/* Abort XPT request CCB */
struct ccb_abort {
	struct ccb_hdr	ccb_h;
	union ccb	*abort_ccb;	/* Pointer to CCB to abort */
};

/* Reset SCSI Bus CCB */
struct ccb_resetbus {
	struct ccb_hdr	ccb_h;
};

/* Reset SCSI Device CCB */
struct ccb_resetdev {
	struct ccb_hdr	ccb_h;
};

/* Terminate I/O Process Request CCB */
struct ccb_termio {
	struct ccb_hdr	ccb_h;
	union ccb	*termio_ccb;	/* Pointer to CCB to terminate */
};

typedef enum {
	CTS_TYPE_CURRENT_SETTINGS,
	CTS_TYPE_USER_SETTINGS
} cts_type;

struct ccb_trans_settings_scsi
{
	u_int	valid;	/* Which fields to honor */
#define	CTS_SCSI_VALID_TQ		0x01
	u_int	flags;
#define	CTS_SCSI_FLAGS_TAG_ENB		0x01
};

struct ccb_trans_settings_ata
{
	u_int	valid;	/* Which fields to honor */
#define	CTS_ATA_VALID_TQ		0x01
	u_int	flags;
#define	CTS_ATA_FLAGS_TAG_ENB		0x01
};

struct ccb_trans_settings_spi
{
	u_int	valid;	/* Which fields to honor */
#define	CTS_SPI_VALID_SYNC_RATE		0x01
#define	CTS_SPI_VALID_SYNC_OFFSET	0x02
#define	CTS_SPI_VALID_BUS_WIDTH		0x04
#define	CTS_SPI_VALID_DISC		0x08
#define	CTS_SPI_VALID_PPR_OPTIONS	0x10
	u_int	flags;
#define	CTS_SPI_FLAGS_DISC_ENB		0x01
	u_int	sync_period;
	u_int	sync_offset;
	u_int	bus_width;
	u_int	ppr_options;
};

struct ccb_trans_settings_fc {
	u_int		valid;		/* Which fields to honor */
#define	CTS_FC_VALID_WWNN		0x8000
#define	CTS_FC_VALID_WWPN		0x4000
#define	CTS_FC_VALID_PORT		0x2000
#define	CTS_FC_VALID_SPEED		0x1000
	u_int64_t	wwnn;		/* world wide node name */
	u_int64_t	wwpn;		/* world wide port name */
	u_int32_t	port;		/* 24 bit port id, if known */
	u_int32_t	bitrate;	/* Mbps */
};

struct ccb_trans_settings_sas {
	u_int		valid;		/* Which fields to honor */
#define	CTS_SAS_VALID_SPEED		0x1000
	u_int32_t	bitrate;	/* Mbps */
};

struct ccb_trans_settings_pata {
	u_int	valid;		/* Which fields to honor */
#define	CTS_ATA_VALID_MODE		0x01
#define	CTS_ATA_VALID_BYTECOUNT		0x02
#define	CTS_ATA_VALID_ATAPI		0x20
#define	CTS_ATA_VALID_CAPS		0x40
	int	mode;		/* Mode */
	u_int	bytecount;	/* Length of PIO transaction */
	u_int	atapi;		/* Length of ATAPI CDB */
	u_int	caps;		/* Device and host SATA caps. */
#define	CTS_ATA_CAPS_H			0x0000ffff
#define	CTS_ATA_CAPS_H_DMA48		0x00000001 /* 48-bit DMA */
#define	CTS_ATA_CAPS_D			0xffff0000
};

struct ccb_trans_settings_sata {
	u_int	valid;		/* Which fields to honor */
#define	CTS_SATA_VALID_MODE		0x01
#define	CTS_SATA_VALID_BYTECOUNT	0x02
#define	CTS_SATA_VALID_REVISION		0x04
#define	CTS_SATA_VALID_PM		0x08
#define	CTS_SATA_VALID_TAGS		0x10
#define	CTS_SATA_VALID_ATAPI		0x20
#define	CTS_SATA_VALID_CAPS		0x40
	int	mode;		/* Legacy PATA mode */
	u_int	bytecount;	/* Length of PIO transaction */
	int	revision;	/* SATA revision */
	u_int	pm_present;	/* PM is present (XPT->SIM) */
	u_int	tags;		/* Number of allowed tags */
	u_int	atapi;		/* Length of ATAPI CDB */
	u_int	caps;		/* Device and host SATA caps. */
#define	CTS_SATA_CAPS_H			0x0000ffff
#define	CTS_SATA_CAPS_H_PMREQ		0x00000001
#define	CTS_SATA_CAPS_H_APST		0x00000002
#define	CTS_SATA_CAPS_H_DMAAA		0x00000010 /* Auto-activation */
#define	CTS_SATA_CAPS_H_AN		0x00000020 /* Async. notification */
#define	CTS_SATA_CAPS_D			0xffff0000
#define	CTS_SATA_CAPS_D_PMREQ		0x00010000
#define	CTS_SATA_CAPS_D_APST		0x00020000
};

struct ccb_trans_settings_nvme
{
	u_int	valid;		/* Which fields to honor */
#define	CTS_NVME_VALID_SPEC	0x01
#define	CTS_NVME_VALID_CAPS	0x02
#define	CTS_NVME_VALID_LINK	0x04
	uint32_t spec;		/* NVMe spec implemented -- same as vs register */
	uint32_t max_xfer;	/* Max transfer size (0 -> unlimited) */
	uint32_t caps;
	uint8_t	 lanes;		/* Number of PCIe lanes */
	uint8_t	 speed;		/* PCIe generation for each lane */
	uint8_t	 max_lanes;	/* Number of PCIe lanes */
	uint8_t	 max_speed;	/* PCIe generation for each lane */
};

#include <cam/mmc/mmc_bus.h>
struct ccb_trans_settings_mmc {
	struct mmc_ios ios;
#define	MMC_CLK		(1 << 1)
#define	MMC_VDD		(1 << 2)
#define	MMC_CS		(1 << 3)
#define	MMC_BW		(1 << 4)
#define	MMC_PM		(1 << 5)
#define	MMC_BT		(1 << 6)
#define	MMC_BM		(1 << 7)
	uint32_t ios_valid;
/* The following is used only for GET_TRAN_SETTINGS */
	uint32_t host_ocr;
	int host_f_min;
	int host_f_max;
#define	MMC_CAP_4_BIT_DATA	(1 << 0) /* Can do 4-bit data transfers */
#define	MMC_CAP_8_BIT_DATA	(1 << 1) /* Can do 8-bit data transfers */
#define	MMC_CAP_HSPEED		(1 << 2) /* Can do High Speed transfers */
	uint32_t host_caps;
};

/* Get/Set transfer rate/width/disconnection/tag queueing settings */
struct ccb_trans_settings {
	struct ccb_hdr	ccb_h;
	cts_type	type;		/* Current or User settings */
	cam_proto	protocol;
	u_int		protocol_version;
	cam_xport	transport;
	u_int		transport_version;
	union {
		u_int  valid;	/* Which fields to honor */
		struct ccb_trans_settings_ata ata;
		struct ccb_trans_settings_scsi scsi;
		struct ccb_trans_settings_nvme nvme;
		struct ccb_trans_settings_mmc mmc;
	} proto_specific;
	union {
		u_int  valid;	/* Which fields to honor */
		struct ccb_trans_settings_spi spi;
		struct ccb_trans_settings_fc fc;
		struct ccb_trans_settings_sas sas;
		struct ccb_trans_settings_pata ata;
		struct ccb_trans_settings_sata sata;
		struct ccb_trans_settings_nvme nvme;
	} xport_specific;
};
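
/*
 * Illustrative sketch (editor's addition): reading the current negotiated
 * settings for a device with XPT_GET_TRAN_SETTINGS.  The protocol/transport
 * discriminators tell the caller which union member is meaningful; the
 * "path" and "link_rate_mbps" names are hypothetical.
 *
 *	struct ccb_trans_settings cts;
 *
 *	memset(&cts, 0, sizeof(cts));
 *	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
 *	cts.type = CTS_TYPE_CURRENT_SETTINGS;
 *	xpt_action((union ccb *)&cts);
 *	if (cts.transport == XPORT_SAS &&
 *	    (cts.xport_specific.sas.valid & CTS_SAS_VALID_SPEED) != 0)
 *		link_rate_mbps = cts.xport_specific.sas.bitrate;
 */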

/*
 * Calculate the geometry parameters for a device
 * given the block size and volume size in blocks.
 */
struct ccb_calc_geometry {
	struct ccb_hdr	ccb_h;
	u_int32_t	block_size;
	u_int64_t	volume_size;
	u_int32_t	cylinders;
	u_int8_t	heads;
	u_int8_t	secs_per_track;
};

/*
 * Set or get SIM (and transport) specific knobs
 */

#define	KNOB_VALID_ADDRESS	0x1
#define	KNOB_VALID_ROLE		0x2

#define	KNOB_ROLE_NONE		0x0
#define	KNOB_ROLE_INITIATOR	0x1
#define	KNOB_ROLE_TARGET	0x2
#define	KNOB_ROLE_BOTH		0x3

struct ccb_sim_knob_settings_spi {
	u_int		valid;
	u_int		initiator_id;
	u_int		role;
};

struct ccb_sim_knob_settings_fc {
	u_int		valid;
	u_int64_t	wwnn;		/* world wide node name */
	u_int64_t	wwpn;		/* world wide port name */
	u_int		role;
};

struct ccb_sim_knob_settings_sas {
	u_int		valid;
	u_int64_t	wwnn;		/* world wide node name */
	u_int		role;
};
#define	KNOB_SETTINGS_SIZE	128

struct ccb_sim_knob {
	struct ccb_hdr	ccb_h;
	union {
		u_int  valid;	/* Which fields to honor */
		struct ccb_sim_knob_settings_spi spi;
		struct ccb_sim_knob_settings_fc fc;
		struct ccb_sim_knob_settings_sas sas;
		char pad[KNOB_SETTINGS_SIZE];
	} xport_specific;
};

/*
 * Rescan the given bus, or bus/target/lun
 */
struct ccb_rescan {
	struct ccb_hdr	ccb_h;
	cam_flags	flags;
};

/*
 * Turn on debugging for the given bus, bus/target, or bus/target/lun.
 */
struct ccb_debug {
	struct ccb_hdr	ccb_h;
	cam_debug_flags	flags;
};

/* Target mode structures. */

struct ccb_en_lun {
	struct ccb_hdr	ccb_h;
	u_int16_t	grp6_len;	/* Group 6 VU CDB length */
	u_int16_t	grp7_len;	/* Group 7 VU CDB length */
	u_int8_t	enable;
};

/* old, barely used immediate notify, binary compatibility */
struct ccb_immed_notify {
	struct ccb_hdr	ccb_h;
	struct scsi_sense_data sense_data;
	u_int8_t	sense_len;	/* Number of bytes in sense buffer */
	u_int8_t	initiator_id;	/* Id of initiator that selected */
	u_int8_t	message_args[7];	/* Message Arguments */
};

struct ccb_notify_ack {
	struct ccb_hdr	ccb_h;
	u_int16_t	seq_id;		/* Sequence identifier */
	u_int8_t	event;		/* Event flags */
};

struct ccb_immediate_notify {
	struct ccb_hdr	ccb_h;
	u_int		tag_id;		/* Tag for immediate notify */
	u_int		seq_id;		/* Tag for target of notify */
	u_int		initiator_id;	/* Initiator Identifier */
	u_int		arg;		/* Function specific */
};

struct ccb_notify_acknowledge {
	struct ccb_hdr	ccb_h;
	u_int		tag_id;		/* Tag for immediate notify */
	u_int		seq_id;		/* Tag for target of notify */
	u_int		initiator_id;	/* Initiator Identifier */
	u_int		arg;		/* Response information */
	/*
	 * Lower byte of arg is one of RESPONSE CODE values defined below
	 * (subset of response codes from SPL-4 and FCP-4 specifications),
	 * upper 3 bytes is code-specific ADDITIONAL RESPONSE INFORMATION.
	 */
#define	CAM_RSP_TMF_COMPLETE		0x00
#define	CAM_RSP_TMF_REJECTED		0x04
#define	CAM_RSP_TMF_FAILED		0x05
#define	CAM_RSP_TMF_SUCCEEDED		0x08
#define	CAM_RSP_TMF_INCORRECT_LUN	0x09
};

/* HBA engine structures. */

typedef enum {
	EIT_BUFFER,	/* Engine type: buffer memory */
	EIT_LOSSLESS,	/* Engine type: lossless compression */
	EIT_LOSSY,	/* Engine type: lossy compression */
	EIT_ENCRYPT	/* Engine type: encryption */
} ei_type;

typedef enum {
	EAD_VUNIQUE,	/* Engine algorithm ID: vendor unique */
	EAD_LZ1V1,	/* Engine algorithm ID: LZ1 var.1 */
	EAD_LZ2V1,	/* Engine algorithm ID: LZ2 var.1 */
	EAD_LZ2V2	/* Engine algorithm ID: LZ2 var.2 */
} ei_algo;

struct ccb_eng_inq {
	struct ccb_hdr	ccb_h;
	u_int16_t	eng_num;	/* The engine number for this inquiry */
	ei_type		eng_type;	/* Returned engine type */
	ei_algo		eng_algo;	/* Returned engine algorithm type */
	u_int32_t	eng_memeory;	/* Returned engine memory size */
};

struct ccb_eng_exec {	/* This structure must match SCSIIO size */
	struct ccb_hdr	ccb_h;
	u_int8_t	*pdrv_ptr;	/* Ptr used by the peripheral driver */
	u_int8_t	*req_map;	/* Ptr for mapping info on the req. */
	u_int8_t	*data_ptr;	/* Pointer to the data buf/SG list */
	u_int32_t	dxfer_len;	/* Data transfer length */
	u_int8_t	*engdata_ptr;	/* Pointer to the engine buffer data */
	u_int16_t	sglist_cnt;	/* Num of scatter gather list entries */
	u_int32_t	dmax_len;	/* Destination data maximum length */
	u_int32_t	dest_len;	/* Destination data length */
	int32_t		src_resid;	/* Source residual length: 2's comp */
	u_int32_t	timeout;	/* Timeout value */
	u_int16_t	eng_num;	/* Engine number for this request */
	u_int16_t	vu_flags;	/* Vendor Unique flags */
};

/*
 * Definitions for the timeout field in the SCSI I/O CCB.
 */
#define	CAM_TIME_DEFAULT	0x00000000	/* Use SIM default value */
#define	CAM_TIME_INFINITY	0xFFFFFFFF	/* Infinite timeout */

#define	CAM_SUCCESS	0	/* For signaling general success */
#define	CAM_FAILURE	1	/* For signaling general failure */

#define	CAM_FALSE	0
#define	CAM_TRUE	1

#define	XPT_CCB_INVALID	-1	/* for signaling a bad CCB to free */

/*
 * CCB for working with advanced device information.  This operates in a fashion
 * similar to XPT_GDEV_TYPE.  Specify the target in ccb_h, the buffer
 * type requested, and provide a buffer size/buffer to write to.  If the
 * buffer is too small, provsiz will be larger than bufsiz.
 */
struct ccb_dev_advinfo {
	struct ccb_hdr ccb_h;
	uint32_t flags;
#define	CDAI_FLAG_NONE		0x0	/* No flags set */
#define	CDAI_FLAG_STORE		0x1	/* If set, action becomes store */
	uint32_t buftype;		/* IN: Type of data being requested */
	/* NB: buftype is interpreted on a per-transport basis */
#define	CDAI_TYPE_SCSI_DEVID	1
#define	CDAI_TYPE_SERIAL_NUM	2
#define	CDAI_TYPE_PHYS_PATH	3
#define	CDAI_TYPE_RCAPLONG	4
#define	CDAI_TYPE_EXT_INQ	5
#define	CDAI_TYPE_NVME_CNTRL	6	/* NVMe Identify Controller data */
#define	CDAI_TYPE_NVME_NS	7	/* NVMe Identify Namespace data */
#define	CDAI_TYPE_MMC_PARAMS	8	/* MMC/SD ident */
	off_t bufsiz;			/* IN: Size of external buffer */
#define	CAM_SCSI_DEVID_MAXLEN	65536	/* length in buffer is an uint16_t */
	off_t provsiz;			/* OUT: Size required/used */
	uint8_t *buf;			/* IN/OUT: Buffer for requested data */
};
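
/*
 * Illustrative sketch (editor's addition): fetching a device's serial number
 * with XPT_DEV_ADVINFO.  If the supplied buffer is too small, provsiz still
 * reports the size the transport wanted, so the caller can retry with a
 * larger buffer.  The "path" variable and buffer size are assumptions.
 *
 *	struct ccb_dev_advinfo cdai;
 *	char serial[252];
 *
 *	memset(&cdai, 0, sizeof(cdai));
 *	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 *	cdai.flags = CDAI_FLAG_NONE;		// fetch, not store
 *	cdai.buftype = CDAI_TYPE_SERIAL_NUM;
 *	cdai.bufsiz = sizeof(serial);
 *	cdai.buf = (uint8_t *)serial;
 *	xpt_action((union ccb *)&cdai);
 *	// cdai.provsiz is the number of bytes the transport had available.
 */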

/*
 * CCB for sending async events
 */
struct ccb_async {
	struct ccb_hdr ccb_h;
	uint32_t async_code;
	off_t async_arg_size;
	void *async_arg_ptr;
};

/*
 * Union of all CCB types for kernel space allocation.  This union should
 * never be used for manipulating CCBs - its only use is for the allocation
 * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc
 * and the argument to xpt_ccb_free.
 */
union ccb {
	struct	ccb_hdr			ccb_h;	/* For convenience */
	struct	ccb_scsiio		csio;
	struct	ccb_getdev		cgd;
	struct	ccb_getdevlist		cgdl;
	struct	ccb_pathinq		cpi;
	struct	ccb_relsim		crs;
	struct	ccb_setasync		csa;
	struct	ccb_setdev		csd;
	struct	ccb_pathstats		cpis;
	struct	ccb_getdevstats		cgds;
	struct	ccb_dev_match		cdm;
	struct	ccb_trans_settings	cts;
	struct	ccb_calc_geometry	ccg;
	struct	ccb_sim_knob		knob;
	struct	ccb_abort		cab;
	struct	ccb_resetbus		crb;
	struct	ccb_resetdev		crd;
	struct	ccb_termio		tio;
	struct	ccb_accept_tio		atio;
	struct	ccb_scsiio		ctio;
	struct	ccb_en_lun		cel;
	struct	ccb_immed_notify	cin;
	struct	ccb_notify_ack		cna;
	struct	ccb_immediate_notify	cin1;
	struct	ccb_notify_acknowledge	cna2;
	struct	ccb_eng_inq		cei;
	struct	ccb_eng_exec		cee;
	struct	ccb_smpio		smpio;
	struct	ccb_rescan		crcn;
	struct	ccb_debug		cdbg;
	struct	ccb_ataio		ataio;
	struct	ccb_dev_advinfo		cdai;
	struct	ccb_async		casync;
	struct	ccb_nvmeio		nvmeio;
	struct	ccb_mmcio		mmcio;
};

#define	CCB_CLEAR_ALL_EXCEPT_HDR(ccbp)			\
	bzero((char *)(ccbp) + sizeof((ccbp)->ccb_h),	\
	    sizeof(*(ccbp)) - sizeof((ccbp)->ccb_h))

__BEGIN_DECLS
static __inline void
cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int8_t tag_action,
	      u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int8_t sense_len, u_int8_t cdb_len,
	      u_int32_t timeout);

static __inline void
cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int32_t timeout);

static __inline void
cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int tag_action, u_int tag_id,
	      u_int init_id, u_int scsi_status, u_int8_t *data_ptr,
	      u_int32_t dxfer_len, u_int32_t timeout);

static __inline void
cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int tag_action,
	      u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int32_t timeout);

static __inline void
cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries,
	       void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
	       uint8_t *smp_request, int smp_request_len,
	       uint8_t *smp_response, int smp_response_len,
	       uint32_t timeout);

static __inline void
cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries,
	       void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
	       uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags,
	       struct mmc_data *mmc_d,
	       uint32_t timeout);

static __inline void
cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int8_t tag_action,
	      u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int8_t sense_len, u_int8_t cdb_len,
	      u_int32_t timeout)
{
	csio->ccb_h.func_code = XPT_SCSI_IO;
	csio->ccb_h.flags = flags;
	csio->ccb_h.xflags = 0;
	csio->ccb_h.retry_count = retries;
	csio->ccb_h.cbfcnp = cbfcnp;
	csio->ccb_h.timeout = timeout;
	csio->data_ptr = data_ptr;
	csio->dxfer_len = dxfer_len;
	csio->sense_len = sense_len;
	csio->cdb_len = cdb_len;
	csio->tag_action = tag_action;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	csio->bio = NULL;
#endif
}
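
/*
 * Illustrative sketch (editor's addition): cam_fill_csio() only fills the
 * bookkeeping fields; the caller still supplies the CDB.  A hand-rolled
 * INQUIRY is shown here for compactness, although in-tree drivers normally
 * use the scsi_*() CDB builders from scsi_all.h.  The callback, buffer and
 * length names are hypothetical.
 *
 *	struct ccb_scsiio *csio = &ccb->csio;
 *	struct scsi_inquiry *cdb;
 *
 *	cam_fill_csio(csio,
 *		      1,			// retries
 *		      my_done,			// completion callback
 *		      CAM_DIR_IN,		// data-in, virtual buffer
 *		      MSG_SIMPLE_Q_TAG,		// tag action (scsi_message.h)
 *		      inq_buf,
 *		      inq_len,
 *		      SSD_FULL_SIZE,		// autosense length
 *		      sizeof(*cdb),
 *		      30 * 1000);		// timeout in milliseconds
 *	cdb = (struct scsi_inquiry *)&csio->cdb_io.cdb_bytes;
 *	bzero(cdb, sizeof(*cdb));
 *	cdb->opcode = INQUIRY;
 *	scsi_ulto2b(inq_len, cdb->length);
 *	// In the completion callback, cam_ccb_status(ccb) == CAM_REQ_CMP
 *	// indicates success; see cam_ccb_status() later in this header.
 */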

static __inline void
cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int tag_action, u_int tag_id,
	      u_int init_id, u_int scsi_status, u_int8_t *data_ptr,
	      u_int32_t dxfer_len, u_int32_t timeout)
{
	csio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	csio->ccb_h.flags = flags;
	csio->ccb_h.xflags = 0;
	csio->ccb_h.retry_count = retries;
	csio->ccb_h.cbfcnp = cbfcnp;
	csio->ccb_h.timeout = timeout;
	csio->data_ptr = data_ptr;
	csio->dxfer_len = dxfer_len;
	csio->scsi_status = scsi_status;
	csio->tag_action = tag_action;
	csio->tag_id = tag_id;
	csio->init_id = init_id;
}

static __inline void
cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int tag_action __unused,
	      u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int32_t timeout)
{
	ataio->ccb_h.func_code = XPT_ATA_IO;
	ataio->ccb_h.flags = flags;
	ataio->ccb_h.retry_count = retries;
	ataio->ccb_h.cbfcnp = cbfcnp;
	ataio->ccb_h.timeout = timeout;
	ataio->data_ptr = data_ptr;
	ataio->dxfer_len = dxfer_len;
	ataio->ata_flags = 0;
}

static __inline void
cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries,
	       void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
	       uint8_t *smp_request, int smp_request_len,
	       uint8_t *smp_response, int smp_response_len,
	       uint32_t timeout)
{
#ifdef _KERNEL
	KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH,
		("direction != CAM_DIR_BOTH"));
	KASSERT((smp_request != NULL) && (smp_response != NULL),
		("need valid request and response buffers"));
	KASSERT((smp_request_len != 0) && (smp_response_len != 0),
		("need non-zero request and response lengths"));
#endif /*_KERNEL*/
	smpio->ccb_h.func_code = XPT_SMP_IO;
	smpio->ccb_h.flags = flags;
	smpio->ccb_h.retry_count = retries;
	smpio->ccb_h.cbfcnp = cbfcnp;
	smpio->ccb_h.timeout = timeout;
	smpio->smp_request = smp_request;
	smpio->smp_request_len = smp_request_len;
	smpio->smp_response = smp_response;
	smpio->smp_response_len = smp_response_len;
}

static __inline void
cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries,
	       void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
	       uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags,
	       struct mmc_data *mmc_d,
	       uint32_t timeout)
{
	mmcio->ccb_h.func_code = XPT_MMC_IO;
	mmcio->ccb_h.flags = flags;
	mmcio->ccb_h.retry_count = retries;
	mmcio->ccb_h.cbfcnp = cbfcnp;
	mmcio->ccb_h.timeout = timeout;
	mmcio->cmd.opcode = mmc_opcode;
	mmcio->cmd.arg = mmc_arg;
	mmcio->cmd.flags = mmc_flags;
	mmcio->stop.opcode = 0;
	mmcio->stop.arg = 0;
	mmcio->stop.flags = 0;
	if (mmc_d != NULL) {
		mmcio->cmd.data = mmc_d;
	} else
		mmcio->cmd.data = NULL;
	mmcio->cmd.resp[0] = 0;
	mmcio->cmd.resp[1] = 0;
	mmcio->cmd.resp[2] = 0;
	mmcio->cmd.resp[3] = 0;
}

static __inline void
cam_set_ccbstatus(union ccb *ccb, cam_status status)
{
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= status;
}

static __inline cam_status
cam_ccb_status(union ccb *ccb)
{
	return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK));
}

void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static __inline void
cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int32_t timeout)
{
	nvmeio->ccb_h.func_code = XPT_NVME_IO;
	nvmeio->ccb_h.flags = flags;
	nvmeio->ccb_h.retry_count = retries;
	nvmeio->ccb_h.cbfcnp = cbfcnp;
	nvmeio->ccb_h.timeout = timeout;
	nvmeio->data_ptr = data_ptr;
	nvmeio->dxfer_len = dxfer_len;
}

static __inline void
cam_fill_nvmeadmin(struct ccb_nvmeio *nvmeio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len,
	      u_int32_t timeout)
{
	nvmeio->ccb_h.func_code = XPT_NVME_ADMIN;
	nvmeio->ccb_h.flags = flags;
	nvmeio->ccb_h.retry_count = retries;
	nvmeio->ccb_h.cbfcnp = cbfcnp;
	nvmeio->ccb_h.timeout = timeout;
	nvmeio->data_ptr = data_ptr;
	nvmeio->dxfer_len = dxfer_len;
}
__END_DECLS

#endif /* _CAM_CAM_CCB_H */