1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef TARGET_CORE_BASE_H 3 #define TARGET_CORE_BASE_H 4 5 #include <linux/configfs.h> /* struct config_group */ 6 #include <linux/dma-direction.h> /* enum dma_data_direction */ 7 #include <linux/sbitmap.h> 8 #include <linux/percpu-refcount.h> 9 #include <linux/semaphore.h> /* struct semaphore */ 10 #include <linux/completion.h> 11 12 #define TARGET_CORE_VERSION "v5.0" 13 14 /* 15 * Maximum size of a CDB that can be stored in se_cmd without allocating 16 * memory dynamically for the CDB. 17 */ 18 #define TCM_MAX_COMMAND_SIZE 32 19 /* 20 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently 21 * defined 96, but the real limit is 252 (or 260 including the header) 22 */ 23 #define TRANSPORT_SENSE_BUFFER 96 24 /* Used by transport_send_check_condition_and_sense() */ 25 #define SPC_SENSE_KEY_OFFSET 2 26 #define SPC_ADD_SENSE_LEN_OFFSET 7 27 #define SPC_DESC_TYPE_OFFSET 8 28 #define SPC_ADDITIONAL_DESC_LEN_OFFSET 9 29 #define SPC_VALIDITY_OFFSET 10 30 #define SPC_ASC_KEY_OFFSET 12 31 #define SPC_ASCQ_KEY_OFFSET 13 32 #define TRANSPORT_IQN_LEN 224 33 /* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */ 34 #define LU_GROUP_NAME_BUF 256 35 /* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */ 36 #define TG_PT_GROUP_NAME_BUF 256 37 /* Used to parse VPD into struct t10_vpd */ 38 #define VPD_TMP_BUF_SIZE 254 39 /* Used by transport_generic_cmd_sequencer() */ 40 #define READ_BLOCK_LEN 6 41 #define READ_CAP_LEN 8 42 #define READ_POSITION_LEN 20 43 #define INQUIRY_LEN 36 44 /* Used by transport_get_inquiry_vpd_serial() */ 45 #define INQUIRY_VPD_SERIAL_LEN 254 46 /* Used by transport_get_inquiry_vpd_device_ident() */ 47 #define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254 48 49 #define INQUIRY_VENDOR_LEN 8 50 #define INQUIRY_MODEL_LEN 16 51 #define INQUIRY_REVISION_LEN 4 52 53 /* Attempts before moving from SHORT to LONG */ 54 #define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3 55 #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */ 56 #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */ 57 58 #define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */ 59 60 /* struct se_dev_attrib sanity values */ 61 /* Default max_unmap_lba_count */ 62 #define DA_MAX_UNMAP_LBA_COUNT 0 63 /* Default max_unmap_block_desc_count */ 64 #define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0 65 /* Default unmap_granularity */ 66 #define DA_UNMAP_GRANULARITY_DEFAULT 0 67 /* Default unmap_granularity_alignment */ 68 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 69 /* Default unmap_zeroes_data */ 70 #define DA_UNMAP_ZEROES_DATA_DEFAULT 0 71 /* Default max_write_same_len, disabled by default */ 72 #define DA_MAX_WRITE_SAME_LEN 0 73 /* Use a model alias based on the configfs backend device name */ 74 #define DA_EMULATE_MODEL_ALIAS 0 75 /* Emulation for WriteCache and SYNCHRONIZE_CACHE */ 76 #define DA_EMULATE_WRITE_CACHE 0 77 /* Emulation for TASK_ABORTED status (TAS) by default */ 78 #define DA_EMULATE_TAS 1 79 /* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */ 80 #define DA_EMULATE_TPU 0 81 /* 82 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using 83 * block/blk-lib.c:blkdev_issue_discard() 84 */ 85 #define DA_EMULATE_TPWS 0 86 /* Emulation for CompareAndWrite (AtomicTestandSet) by default */ 87 #define DA_EMULATE_CAW 1 88 /* Emulation for 3rd Party Copy (ExtendedCopy) by default */ 89 #define DA_EMULATE_3PC 1 90 /* No Emulation for PSCSI by default */ 91 #define DA_EMULATE_ALUA 0 92 /* Emulate SCSI2 RESERVE/RELEASE and Persistent Reservations by default */ 93 #define DA_EMULATE_PR 1 94 /* Emulation for REPORT SUPPORTED OPERATION CODES */ 95 #define DA_EMULATE_RSOC 1 96 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ 97 #define DA_ENFORCE_PR_ISIDS 1 98 /* Force SPC-3 PR Activate Persistence across Target Power Loss */ 99 #define DA_FORCE_PR_APTPL 0 100 #define DA_STATUS_MAX_SECTORS_MIN 16 101 #define DA_STATUS_MAX_SECTORS_MAX 8192 102 /* By default don't report non-rotating (solid state) medium */ 103 #define DA_IS_NONROT 0 104 /* Queue Algorithm Modifier default for restricted reordering in control mode page */ 105 #define DA_EMULATE_REST_REORD 0 106 107 #define SE_INQUIRY_BUF 1024 108 #define SE_MODE_PAGE_BUF 512 109 #define SE_SENSE_BUF 96 110 111 /* Peripheral Device Text Identification Information */ 112 #define PD_TEXT_ID_INFO_LEN 256 113 114 enum target_compl_type { 115 /* Use the fabric driver's default completion type */ 116 TARGET_FABRIC_DEFAULT_COMPL, 117 /* Complete from the backend calling context */ 118 TARGET_DIRECT_COMPL, 119 /* Defer completion to the LIO workqueue */ 120 TARGET_QUEUE_COMPL, 121 }; 122 123 enum target_submit_type { 124 /* Use the fabric driver's default submission type */ 125 TARGET_FABRIC_DEFAULT_SUBMIT, 126 /* Submit from the calling context */ 127 TARGET_DIRECT_SUBMIT, 128 /* Defer submission to the LIO workqueue */ 129 TARGET_QUEUE_SUBMIT, 130 }; 131 132 /* struct se_hba->hba_flags */ 133 enum hba_flags_table { 134 HBA_FLAGS_INTERNAL_USE = 0x01, 135 HBA_FLAGS_PSCSI_MODE = 0x02, 136 }; 137 138 /* Special transport agnostic struct se_cmd->t_states */ 139 enum transport_state_table { 140 TRANSPORT_NO_STATE = 0, 141 TRANSPORT_NEW_CMD = 1, 142 TRANSPORT_WRITE_PENDING = 3, 143 TRANSPORT_PROCESSING = 5, 144 TRANSPORT_COMPLETE = 6, 145 TRANSPORT_ISTATE_PROCESSING = 11, 146 TRANSPORT_COMPLETE_QF_WP = 18, 147 TRANSPORT_COMPLETE_QF_OK = 19, 148 TRANSPORT_COMPLETE_QF_ERR = 20, 149 }; 150 151 /* Used for struct se_cmd->se_cmd_flags */ 152 enum se_cmd_flags_table { 153 SCF_SUPPORTED_SAM_OPCODE = (1 << 0), 154 SCF_TRANSPORT_TASK_SENSE = (1 << 1), 155 SCF_EMULATED_TASK_SENSE = (1 << 2), 156 SCF_SCSI_DATA_CDB = (1 << 3), 157 SCF_SCSI_TMR_CDB = (1 << 4), 158 SCF_FUA = (1 << 5), 159 SCF_SE_LUN_CMD = (1 << 6), 160 SCF_BIDI = (1 << 7), 161 SCF_SENT_CHECK_CONDITION = (1 << 8), 162 SCF_OVERFLOW_BIT = (1 << 9), 163 SCF_UNDERFLOW_BIT = (1 << 10), 164 SCF_ALUA_NON_OPTIMIZED = (1 << 11), 165 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = (1 << 12), 166 SCF_COMPARE_AND_WRITE = (1 << 13), 167 SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = (1 << 14), 168 SCF_ACK_KREF = (1 << 15), 169 SCF_USE_CPUID = (1 << 16), 170 SCF_TASK_ATTR_SET = (1 << 17), 171 SCF_TREAT_READ_AS_NORMAL = (1 << 18), 172 SCF_TASK_ORDERED_SYNC = (1 << 19), 173 SCF_ATOMIC = (1 << 20), 174 }; 175 176 /* 177 * Used by transport_send_check_condition_and_sense() 178 * to signal which ASC/ASCQ sense payload should be built. 179 */ 180 typedef unsigned __bitwise sense_reason_t; 181 182 enum tcm_sense_reason_table { 183 #define R(x) (__force sense_reason_t )(x) 184 TCM_NO_SENSE = R(0x00), 185 TCM_NON_EXISTENT_LUN = R(0x01), 186 TCM_UNSUPPORTED_SCSI_OPCODE = R(0x02), 187 TCM_INCORRECT_AMOUNT_OF_DATA = R(0x03), 188 TCM_UNEXPECTED_UNSOLICITED_DATA = R(0x04), 189 TCM_SERVICE_CRC_ERROR = R(0x05), 190 TCM_SNACK_REJECTED = R(0x06), 191 TCM_SECTOR_COUNT_TOO_MANY = R(0x07), 192 TCM_INVALID_CDB_FIELD = R(0x08), 193 TCM_INVALID_PARAMETER_LIST = R(0x09), 194 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = R(0x0a), 195 TCM_UNKNOWN_MODE_PAGE = R(0x0b), 196 TCM_WRITE_PROTECTED = R(0x0c), 197 TCM_CHECK_CONDITION_ABORT_CMD = R(0x0d), 198 TCM_CHECK_CONDITION_UNIT_ATTENTION = R(0x0e), 199 200 TCM_RESERVATION_CONFLICT = R(0x10), 201 TCM_ADDRESS_OUT_OF_RANGE = R(0x11), 202 TCM_OUT_OF_RESOURCES = R(0x12), 203 TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13), 204 TCM_MISCOMPARE_VERIFY = R(0x14), 205 TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15), 206 TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), 207 TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), 208 TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18), 209 TCM_TOO_MANY_TARGET_DESCS = R(0x19), 210 TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE = R(0x1a), 211 TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b), 212 TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c), 213 TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d), 214 TCM_LUN_BUSY = R(0x1e), 215 TCM_INVALID_FIELD_IN_COMMAND_IU = R(0x1f), 216 TCM_ALUA_TG_PT_STANDBY = R(0x20), 217 TCM_ALUA_TG_PT_UNAVAILABLE = R(0x21), 218 TCM_ALUA_STATE_TRANSITION = R(0x22), 219 TCM_ALUA_OFFLINE = R(0x23), 220 #undef R 221 }; 222 223 enum target_sc_flags_table { 224 TARGET_SCF_BIDI_OP = 0x01, 225 TARGET_SCF_ACK_KREF = 0x02, 226 TARGET_SCF_UNKNOWN_SIZE = 0x04, 227 TARGET_SCF_USE_CPUID = 0x08, 228 }; 229 230 /* fabric independent task management function values */ 231 enum tcm_tmreq_table { 232 TMR_ABORT_TASK = 1, 233 TMR_ABORT_TASK_SET = 2, 234 TMR_CLEAR_ACA = 3, 235 TMR_CLEAR_TASK_SET = 4, 236 TMR_LUN_RESET = 5, 237 TMR_TARGET_WARM_RESET = 6, 238 TMR_TARGET_COLD_RESET = 7, 239 TMR_LUN_RESET_PRO = 0x80, 240 TMR_UNKNOWN = 0xff, 241 }; 242 243 /* fabric independent task management response values */ 244 enum tcm_tmrsp_table { 245 TMR_FUNCTION_FAILED = 0, 246 TMR_FUNCTION_COMPLETE = 1, 247 TMR_TASK_DOES_NOT_EXIST = 2, 248 TMR_LUN_DOES_NOT_EXIST = 3, 249 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 4, 250 TMR_FUNCTION_REJECTED = 5, 251 }; 252 253 /* 254 * Used for target SCSI statistics 255 */ 256 typedef enum { 257 SCSI_INST_INDEX, 258 SCSI_AUTH_INTR_INDEX, 259 SCSI_INDEX_TYPE_MAX 260 } scsi_index_t; 261 262 struct se_cmd; 263 264 struct t10_alua_lba_map_member { 265 struct list_head lba_map_mem_list; 266 int lba_map_mem_alua_state; 267 int lba_map_mem_alua_pg_id; 268 }; 269 270 struct t10_alua_lba_map { 271 u64 lba_map_first_lba; 272 u64 lba_map_last_lba; 273 struct list_head lba_map_list; 274 struct list_head lba_map_mem_list; 275 }; 276 277 struct t10_alua { 278 /* ALUA Target Port Group ID */ 279 u16 alua_tg_pt_gps_counter; 280 u32 alua_tg_pt_gps_count; 281 /* Referrals support */ 282 spinlock_t lba_map_lock; 283 u32 lba_map_segment_size; 284 u32 lba_map_segment_multiplier; 285 struct list_head lba_map_list; 286 spinlock_t tg_pt_gps_lock; 287 struct se_device *t10_dev; 288 /* Used for default ALUA Target Port Group */ 289 struct t10_alua_tg_pt_gp *default_tg_pt_gp; 290 /* Used for default ALUA Target Port Group ConfigFS group */ 291 struct config_group alua_tg_pt_gps_group; 292 struct list_head tg_pt_gps_list; 293 }; 294 295 struct t10_alua_lu_gp { 296 u16 lu_gp_id; 297 int lu_gp_valid_id; 298 u32 lu_gp_members; 299 atomic_t lu_gp_ref_cnt; 300 spinlock_t lu_gp_lock; 301 struct config_group lu_gp_group; 302 struct list_head lu_gp_node; 303 struct list_head lu_gp_mem_list; 304 }; 305 306 struct t10_alua_lu_gp_member { 307 bool lu_gp_assoc; 308 atomic_t lu_gp_mem_ref_cnt; 309 spinlock_t lu_gp_mem_lock; 310 struct t10_alua_lu_gp *lu_gp; 311 struct se_device *lu_gp_mem_dev; 312 struct list_head lu_gp_mem_list; 313 }; 314 315 struct t10_alua_tg_pt_gp { 316 u16 tg_pt_gp_id; 317 int tg_pt_gp_valid_id; 318 int tg_pt_gp_alua_supported_states; 319 int tg_pt_gp_alua_access_status; 320 int tg_pt_gp_alua_access_type; 321 int tg_pt_gp_nonop_delay_msecs; 322 int tg_pt_gp_trans_delay_msecs; 323 int tg_pt_gp_implicit_trans_secs; 324 int tg_pt_gp_pref; 325 int tg_pt_gp_write_metadata; 326 u32 tg_pt_gp_members; 327 int tg_pt_gp_alua_access_state; 328 atomic_t tg_pt_gp_ref_cnt; 329 spinlock_t tg_pt_gp_lock; 330 struct mutex tg_pt_gp_transition_mutex; 331 struct se_device *tg_pt_gp_dev; 332 struct config_group tg_pt_gp_group; 333 struct list_head tg_pt_gp_list; 334 struct list_head tg_pt_gp_lun_list; 335 struct se_lun *tg_pt_gp_alua_lun; 336 struct se_node_acl *tg_pt_gp_alua_nacl; 337 }; 338 339 struct t10_vpd { 340 unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN]; 341 int protocol_identifier_set; 342 u32 protocol_identifier; 343 u32 device_identifier_code_set; 344 u32 association; 345 u32 device_identifier_type; 346 struct list_head vpd_list; 347 }; 348 349 struct t10_wwn { 350 /* 351 * SCSI left aligned strings may not be null terminated. +1 to ensure a 352 * null terminator is always present. 353 */ 354 char vendor[INQUIRY_VENDOR_LEN + 1]; 355 char model[INQUIRY_MODEL_LEN + 1]; 356 char revision[INQUIRY_REVISION_LEN + 1]; 357 char unit_serial[INQUIRY_VPD_SERIAL_LEN]; 358 u32 company_id; 359 spinlock_t t10_vpd_lock; 360 struct se_device *t10_dev; 361 struct config_group t10_wwn_group; 362 struct list_head t10_vpd_list; 363 char pd_text_id_info[PD_TEXT_ID_INFO_LEN]; 364 }; 365 366 struct t10_pr_registration { 367 /* Used for fabrics that contain WWN+ISID */ 368 #define PR_REG_ISID_LEN 16 369 /* PR_REG_ISID_LEN + ',i,0x' */ 370 #define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5) 371 char pr_reg_isid[PR_REG_ISID_LEN]; 372 /* Used during APTPL metadata reading */ 373 #define PR_APTPL_MAX_IPORT_LEN 256 374 unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN]; 375 /* Used during APTPL metadata reading */ 376 #define PR_APTPL_MAX_TPORT_LEN 256 377 unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN]; 378 u16 pr_aptpl_rpti; 379 u16 pr_reg_tpgt; 380 /* Reservation effects all target ports */ 381 int pr_reg_all_tg_pt; 382 /* Activate Persistence across Target Power Loss */ 383 int pr_reg_aptpl; 384 int pr_res_holder; 385 int pr_res_type; 386 int pr_res_scope; 387 /* Used for fabric initiator WWPNs using a ISID */ 388 bool isid_present_at_reg; 389 u64 pr_res_mapped_lun; 390 u64 pr_aptpl_target_lun; 391 u16 tg_pt_sep_rtpi; 392 u32 pr_res_generation; 393 u64 pr_reg_bin_isid; 394 u64 pr_res_key; 395 atomic_t pr_res_holders; 396 struct se_node_acl *pr_reg_nacl; 397 /* Used by ALL_TG_PT=1 registration with deve->pr_ref taken */ 398 struct se_dev_entry *pr_reg_deve; 399 struct list_head pr_reg_list; 400 struct list_head pr_reg_abort_list; 401 struct list_head pr_reg_aptpl_list; 402 struct list_head pr_reg_atp_list; 403 struct list_head pr_reg_atp_mem_list; 404 }; 405 406 struct t10_reservation { 407 /* Reservation effects all target ports */ 408 int pr_all_tg_pt; 409 /* Activate Persistence across Target Power Loss enabled 410 * for SCSI device */ 411 int pr_aptpl_active; 412 #define PR_APTPL_BUF_LEN 262144 413 u32 pr_generation; 414 spinlock_t registration_lock; 415 spinlock_t aptpl_reg_lock; 416 /* 417 * This will always be set by one individual I_T Nexus. 418 * However with all_tg_pt=1, other I_T Nexus from the 419 * same initiator can access PR reg/res info on a different 420 * target port. 421 * 422 * There is also the 'All Registrants' case, where there is 423 * a single *pr_res_holder of the reservation, but all 424 * registrations are considered reservation holders. 425 */ 426 struct se_node_acl *pr_res_holder; 427 struct list_head registration_list; 428 struct list_head aptpl_reg_list; 429 }; 430 431 struct se_tmr_req { 432 /* Task Management function to be performed */ 433 u8 function; 434 /* Task Management response to send */ 435 u8 response; 436 int call_transport; 437 /* Reference to ITT that Task Mgmt should be performed */ 438 u64 ref_task_tag; 439 void *fabric_tmr_ptr; 440 struct se_cmd *task_cmd; 441 struct se_device *tmr_dev; 442 struct list_head tmr_list; 443 }; 444 445 enum target_prot_op { 446 TARGET_PROT_NORMAL = 0, 447 TARGET_PROT_DIN_INSERT = (1 << 0), 448 TARGET_PROT_DOUT_INSERT = (1 << 1), 449 TARGET_PROT_DIN_STRIP = (1 << 2), 450 TARGET_PROT_DOUT_STRIP = (1 << 3), 451 TARGET_PROT_DIN_PASS = (1 << 4), 452 TARGET_PROT_DOUT_PASS = (1 << 5), 453 }; 454 455 #define TARGET_PROT_ALL TARGET_PROT_DIN_INSERT | TARGET_PROT_DOUT_INSERT | \ 456 TARGET_PROT_DIN_STRIP | TARGET_PROT_DOUT_STRIP | \ 457 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS 458 459 enum target_prot_type { 460 TARGET_DIF_TYPE0_PROT, 461 TARGET_DIF_TYPE1_PROT, 462 TARGET_DIF_TYPE2_PROT, 463 TARGET_DIF_TYPE3_PROT, 464 }; 465 466 /* Emulation for UNIT ATTENTION Interlock Control */ 467 enum target_ua_intlck_ctrl { 468 TARGET_UA_INTLCK_CTRL_CLEAR = 0, 469 TARGET_UA_INTLCK_CTRL_NO_CLEAR = 1, 470 TARGET_UA_INTLCK_CTRL_ESTABLISH_UA = 2, 471 }; 472 473 enum target_core_dif_check { 474 TARGET_DIF_CHECK_GUARD = 0x1 << 0, 475 TARGET_DIF_CHECK_APPTAG = 0x1 << 1, 476 TARGET_DIF_CHECK_REFTAG = 0x1 << 2, 477 }; 478 479 /* for sam_task_attr */ 480 #define TCM_SIMPLE_TAG 0x20 481 #define TCM_HEAD_TAG 0x21 482 #define TCM_ORDERED_TAG 0x22 483 #define TCM_ACA_TAG 0x24 484 485 struct se_cmd { 486 /* Used for fail with specific sense codes */ 487 sense_reason_t sense_reason; 488 /* SAM response code being sent to initiator */ 489 u8 scsi_status; 490 u16 scsi_sense_length; 491 unsigned unknown_data_length:1; 492 bool state_active:1; 493 u64 tag; /* SAM command identifier aka task tag */ 494 /* Delay for ALUA Active/NonOptimized state access in milliseconds */ 495 int alua_nonop_delay; 496 /* See include/linux/dma-mapping.h */ 497 enum dma_data_direction data_direction; 498 /* For SAM Task Attribute */ 499 int sam_task_attr; 500 /* Used for se_sess->sess_tag_pool */ 501 unsigned int map_tag; 502 int map_cpu; 503 /* Transport protocol dependent state, see transport_state_table */ 504 enum transport_state_table t_state; 505 /* See se_cmd_flags_table */ 506 u32 se_cmd_flags; 507 /* Total size in bytes associated with command */ 508 u32 data_length; 509 u32 residual_count; 510 u64 orig_fe_lun; 511 /* Persistent Reservation key */ 512 u64 pr_res_key; 513 /* Used for sense data */ 514 void *sense_buffer; 515 struct list_head se_delayed_node; 516 struct list_head se_qf_node; 517 struct se_device *se_dev; 518 struct se_lun *se_lun; 519 /* Only used for internal passthrough and legacy TCM fabric modules */ 520 struct se_session *se_sess; 521 struct target_cmd_counter *cmd_cnt; 522 struct se_tmr_req *se_tmr_req; 523 struct llist_node se_cmd_list; 524 struct completion *free_compl; 525 struct completion *abrt_compl; 526 const struct target_core_fabric_ops *se_tfo; 527 sense_reason_t (*execute_cmd)(struct se_cmd *); 528 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); 529 void *protocol_data; 530 531 unsigned char *t_task_cdb; 532 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; 533 unsigned long long t_task_lba; 534 unsigned int t_task_nolb; 535 unsigned int transport_state; 536 #define CMD_T_ABORTED (1 << 0) 537 #define CMD_T_ACTIVE (1 << 1) 538 #define CMD_T_COMPLETE (1 << 2) 539 #define CMD_T_SENT (1 << 4) 540 #define CMD_T_STOP (1 << 5) 541 #define CMD_T_TAS (1 << 10) 542 #define CMD_T_FABRIC_STOP (1 << 11) 543 spinlock_t t_state_lock; 544 struct kref cmd_kref; 545 struct completion t_transport_stop_comp; 546 547 struct work_struct work; 548 549 struct scatterlist *t_data_sg; 550 struct scatterlist *t_data_sg_orig; 551 unsigned int t_data_nents; 552 unsigned int t_data_nents_orig; 553 void *t_data_vmap; 554 struct scatterlist *t_bidi_data_sg; 555 unsigned int t_bidi_data_nents; 556 557 /* Used for lun->lun_ref counting */ 558 int lun_ref_active; 559 560 struct list_head state_list; 561 562 /* backend private data */ 563 void *priv; 564 565 /* DIF related members */ 566 enum target_prot_op prot_op; 567 enum target_prot_type prot_type; 568 u8 prot_checks; 569 bool prot_pto; 570 u32 prot_length; 571 u32 reftag_seed; 572 struct scatterlist *t_prot_sg; 573 unsigned int t_prot_nents; 574 sense_reason_t pi_err; 575 u64 sense_info; 576 /* 577 * CPU LIO will execute the cmd on. Defaults to the CPU the cmd is 578 * initialized on. Drivers can override. 579 */ 580 int cpuid; 581 }; 582 583 struct se_ua { 584 u8 ua_asc; 585 u8 ua_ascq; 586 struct list_head ua_nacl_list; 587 }; 588 589 struct se_node_acl { 590 char initiatorname[TRANSPORT_IQN_LEN]; 591 /* Used to signal demo mode created ACL, disabled by default */ 592 bool dynamic_node_acl; 593 bool dynamic_stop; 594 u32 queue_depth; 595 u32 acl_index; 596 enum target_prot_type saved_prot_type; 597 #define MAX_ACL_TAG_SIZE 64 598 char acl_tag[MAX_ACL_TAG_SIZE]; 599 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 600 atomic_t acl_pr_ref_count; 601 struct hlist_head lun_entry_hlist; 602 struct se_session *nacl_sess; 603 struct se_portal_group *se_tpg; 604 struct mutex lun_entry_mutex; 605 spinlock_t nacl_sess_lock; 606 struct config_group acl_group; 607 struct config_group acl_attrib_group; 608 struct config_group acl_auth_group; 609 struct config_group acl_param_group; 610 struct config_group acl_fabric_stat_group; 611 struct list_head acl_list; 612 struct list_head acl_sess_list; 613 struct completion acl_free_comp; 614 struct kref acl_kref; 615 }; 616 617 static inline struct se_node_acl *acl_to_nacl(struct config_item *item) 618 { 619 return container_of(to_config_group(item), struct se_node_acl, 620 acl_group); 621 } 622 623 static inline struct se_node_acl *attrib_to_nacl(struct config_item *item) 624 { 625 return container_of(to_config_group(item), struct se_node_acl, 626 acl_attrib_group); 627 } 628 629 static inline struct se_node_acl *auth_to_nacl(struct config_item *item) 630 { 631 return container_of(to_config_group(item), struct se_node_acl, 632 acl_auth_group); 633 } 634 635 static inline struct se_node_acl *param_to_nacl(struct config_item *item) 636 { 637 return container_of(to_config_group(item), struct se_node_acl, 638 acl_param_group); 639 } 640 641 static inline struct se_node_acl *fabric_stat_to_nacl(struct config_item *item) 642 { 643 return container_of(to_config_group(item), struct se_node_acl, 644 acl_fabric_stat_group); 645 } 646 647 struct target_cmd_counter { 648 struct percpu_ref refcnt; 649 wait_queue_head_t refcnt_wq; 650 struct completion stop_done; 651 atomic_t stopped; 652 }; 653 654 struct se_session { 655 u64 sess_bin_isid; 656 enum target_prot_op sup_prot_ops; 657 enum target_prot_type sess_prot_type; 658 struct se_node_acl *se_node_acl; 659 struct se_portal_group *se_tpg; 660 void *fabric_sess_ptr; 661 struct list_head sess_list; 662 struct list_head sess_acl_list; 663 spinlock_t sess_cmd_lock; 664 void *sess_cmd_map; 665 struct sbitmap_queue sess_tag_pool; 666 struct target_cmd_counter *cmd_cnt; 667 }; 668 669 struct se_device; 670 struct se_transform_info; 671 struct scatterlist; 672 673 struct se_ml_stat_grps { 674 struct config_group stat_group; 675 struct config_group scsi_auth_intr_group; 676 struct config_group scsi_att_intr_port_group; 677 }; 678 679 struct se_lun_acl { 680 u64 mapped_lun; 681 struct se_node_acl *se_lun_nacl; 682 struct se_lun *se_lun; 683 struct config_group se_lun_group; 684 struct se_ml_stat_grps ml_stat_grps; 685 }; 686 687 struct se_dev_entry_io_stats { 688 u64 total_cmds; 689 u64 read_bytes; 690 u64 write_bytes; 691 }; 692 693 struct se_dev_entry { 694 u64 mapped_lun; 695 u64 pr_res_key; 696 u64 creation_time; 697 bool lun_access_ro; 698 u32 attach_count; 699 struct se_dev_entry_io_stats __percpu *stats; 700 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 701 struct kref pr_kref; 702 struct completion pr_comp; 703 struct se_lun_acl *se_lun_acl; 704 spinlock_t ua_lock; 705 struct se_lun *se_lun; 706 #define DEF_PR_REG_ACTIVE 1 707 unsigned long deve_flags; 708 struct list_head alua_port_list; 709 struct list_head lun_link; 710 struct list_head ua_list; 711 struct hlist_node link; 712 struct rcu_head rcu_head; 713 }; 714 715 struct se_dev_attrib { 716 bool emulate_model_alias; 717 bool emulate_dpo; /* deprecated */ 718 bool emulate_fua_write; 719 bool emulate_fua_read; /* deprecated */ 720 bool emulate_write_cache; 721 enum target_ua_intlck_ctrl emulate_ua_intlck_ctrl; 722 bool emulate_tas; 723 bool emulate_tpu; 724 bool emulate_tpws; 725 bool emulate_caw; 726 bool emulate_3pc; 727 bool emulate_pr; 728 bool emulate_rsoc; 729 enum target_prot_type pi_prot_type; 730 enum target_prot_type hw_pi_prot_type; 731 bool pi_prot_verify; 732 bool enforce_pr_isids; 733 bool force_pr_aptpl; 734 bool is_nonrot; 735 bool emulate_rest_reord; 736 bool unmap_zeroes_data; 737 u32 hw_block_size; 738 u32 block_size; 739 u32 hw_max_sectors; 740 u32 optimal_sectors; 741 u32 hw_queue_depth; 742 u32 queue_depth; 743 u32 max_unmap_lba_count; 744 u32 max_unmap_block_desc_count; 745 u32 unmap_granularity; 746 u32 unmap_granularity_alignment; 747 u32 max_write_same_len; 748 u32 atomic_max_len; 749 u32 atomic_alignment; 750 u32 atomic_granularity; 751 u32 atomic_max_with_boundary; 752 u32 atomic_max_boundary; 753 u8 complete_type; 754 u8 submit_type; 755 struct se_device *da_dev; 756 struct config_group da_group; 757 }; 758 759 struct se_port_stat_grps { 760 struct config_group stat_group; 761 struct config_group scsi_port_group; 762 struct config_group scsi_tgt_port_group; 763 struct config_group scsi_transport_group; 764 }; 765 766 struct scsi_port_stats { 767 u64 cmd_pdus; 768 u64 tx_data_octets; 769 u64 rx_data_octets; 770 }; 771 772 struct se_lun { 773 u64 unpacked_lun; 774 bool lun_shutdown; 775 bool lun_access_ro; 776 u32 lun_index; 777 778 atomic_t lun_acl_count; 779 struct se_device __rcu *lun_se_dev; 780 781 struct list_head lun_deve_list; 782 spinlock_t lun_deve_lock; 783 784 /* ALUA state */ 785 int lun_tg_pt_secondary_stat; 786 int lun_tg_pt_secondary_write_md; 787 atomic_t lun_tg_pt_secondary_offline; 788 struct mutex lun_tg_pt_md_mutex; 789 790 /* ALUA target port group linkage */ 791 struct list_head lun_tg_pt_gp_link; 792 struct t10_alua_tg_pt_gp __rcu *lun_tg_pt_gp; 793 spinlock_t lun_tg_pt_gp_lock; 794 795 struct se_portal_group *lun_tpg; 796 struct scsi_port_stats __percpu *lun_stats; 797 struct config_group lun_group; 798 struct se_port_stat_grps port_stat_grps; 799 struct completion lun_shutdown_comp; 800 struct percpu_ref lun_ref; 801 struct list_head lun_dev_link; 802 struct hlist_node link; 803 struct rcu_head rcu_head; 804 }; 805 806 struct se_dev_stat_grps { 807 struct config_group stat_group; 808 struct config_group scsi_dev_group; 809 struct config_group scsi_tgt_dev_group; 810 struct config_group scsi_lu_group; 811 }; 812 813 struct se_cmd_queue { 814 struct llist_head cmd_list; 815 struct work_struct work; 816 }; 817 818 struct se_dev_plug { 819 struct se_device *se_dev; 820 }; 821 822 struct se_device_queue { 823 struct list_head state_list; 824 spinlock_t lock; 825 struct se_cmd_queue sq; 826 }; 827 828 struct se_dev_io_stats { 829 u64 total_cmds; 830 u64 read_bytes; 831 u64 write_bytes; 832 }; 833 834 struct se_device { 835 /* Used for SAM Task Attribute ordering */ 836 u32 dev_cur_ordered_id; 837 u32 dev_flags; 838 #define DF_CONFIGURED 0x00000001 839 #define DF_FIRMWARE_VPD_UNIT_SERIAL 0x00000002 840 #define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 841 #define DF_USING_UDEV_PATH 0x00000008 842 #define DF_USING_ALIAS 0x00000010 843 #define DF_READ_ONLY 0x00000020 844 u8 transport_flags; 845 /* Physical device queue depth */ 846 u32 queue_depth; 847 /* Used for SPC-2 reservations enforce of ISIDs */ 848 u64 dev_res_bin_isid; 849 /* Pointer to transport specific device structure */ 850 u32 dev_index; 851 u64 creation_time; 852 atomic_long_t num_resets; 853 atomic_long_t aborts_complete; 854 atomic_long_t aborts_no_task; 855 struct se_dev_io_stats __percpu *stats; 856 /* Active commands on this virtual SE device */ 857 struct percpu_ref non_ordered; 858 bool ordered_sync_in_progress; 859 atomic_t dev_qf_count; 860 u32 export_count; 861 spinlock_t delayed_cmd_lock; 862 spinlock_t dev_reservation_lock; 863 unsigned int dev_reservation_flags; 864 #define DRF_SPC2_RESERVATIONS 0x00000001 865 #define DRF_SPC2_RESERVATIONS_WITH_ISID 0x00000002 866 spinlock_t se_port_lock; 867 spinlock_t se_tmr_lock; 868 spinlock_t qf_cmd_lock; 869 struct semaphore caw_sem; 870 /* Used for legacy SPC-2 reservations */ 871 struct se_session *reservation_holder; 872 /* Used for ALUA Logical Unit Group membership */ 873 struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem; 874 /* Used for SPC-3 Persistent Reservations */ 875 struct t10_pr_registration *dev_pr_res_holder; 876 struct list_head dev_sep_list; 877 struct list_head dev_tmr_list; 878 struct work_struct qf_work_queue; 879 struct work_struct delayed_cmd_work; 880 struct list_head delayed_cmd_list; 881 struct list_head qf_cmd_list; 882 /* Pointer to associated SE HBA */ 883 struct se_hba *se_hba; 884 /* T10 Inquiry and VPD WWN Information */ 885 struct t10_wwn t10_wwn; 886 /* T10 Asymmetric Logical Unit Assignment for Target Ports */ 887 struct t10_alua t10_alua; 888 /* T10 SPC-2 + SPC-3 Reservations */ 889 struct t10_reservation t10_pr; 890 struct se_dev_attrib dev_attrib; 891 struct config_group dev_action_group; 892 struct config_group dev_group; 893 struct config_group dev_pr_group; 894 struct se_dev_stat_grps dev_stat_grps; 895 #define SE_DEV_ALIAS_LEN 512 /* must be less than PAGE_SIZE */ 896 unsigned char dev_alias[SE_DEV_ALIAS_LEN]; 897 #define SE_UDEV_PATH_LEN 512 /* must be less than PAGE_SIZE */ 898 unsigned char udev_path[SE_UDEV_PATH_LEN]; 899 /* Pointer to template of function pointers for transport */ 900 const struct target_backend_ops *transport; 901 struct se_lun xcopy_lun; 902 /* Protection Information */ 903 int prot_length; 904 /* For se_lun->lun_se_dev RCU read-side critical access */ 905 u32 hba_index; 906 struct rcu_head rcu_head; 907 int queue_cnt; 908 struct se_device_queue *queues; 909 struct mutex lun_reset_mutex; 910 }; 911 912 struct target_opcode_descriptor { 913 u8 support:3; 914 u8 serv_action_valid:1; 915 u8 opcode; 916 u16 service_action; 917 u32 cdb_size; 918 u8 specific_timeout; 919 u16 nominal_timeout; 920 u16 recommended_timeout; 921 bool (*enabled)(const struct target_opcode_descriptor *descr, 922 struct se_cmd *cmd); 923 void (*update_usage_bits)(u8 *usage_bits, 924 struct se_device *dev); 925 u8 usage_bits[]; 926 }; 927 928 struct se_hba { 929 u16 hba_tpgt; 930 u32 hba_id; 931 /* See hba_flags_table */ 932 u32 hba_flags; 933 /* Virtual iSCSI devices attached. */ 934 u32 dev_count; 935 u32 hba_index; 936 /* Pointer to transport specific host structure. */ 937 void *hba_ptr; 938 struct list_head hba_node; 939 spinlock_t device_lock; 940 struct config_group hba_group; 941 struct mutex hba_access_mutex; 942 struct target_backend *backend; 943 }; 944 945 struct se_tpg_np { 946 struct se_portal_group *tpg_np_parent; 947 struct config_group tpg_np_group; 948 }; 949 950 static inline struct se_tpg_np *to_tpg_np(struct config_item *item) 951 { 952 return container_of(to_config_group(item), struct se_tpg_np, 953 tpg_np_group); 954 } 955 956 struct se_portal_group { 957 /* 958 * PROTOCOL IDENTIFIER value per SPC4, 7.5.1. 959 * 960 * Negative values can be used by fabric drivers for internal use TPGs. 961 */ 962 int proto_id; 963 bool enabled; 964 /* RELATIVE TARGET PORT IDENTIFIER */ 965 u16 tpg_rtpi; 966 bool rtpi_manual; 967 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 968 atomic_t tpg_pr_ref_count; 969 /* Spinlock for adding/removing ACLed Nodes */ 970 struct mutex acl_node_mutex; 971 /* Spinlock for adding/removing sessions */ 972 spinlock_t session_lock; 973 struct mutex tpg_lun_mutex; 974 /* linked list for initiator ACL list */ 975 struct list_head acl_node_list; 976 struct hlist_head tpg_lun_hlist; 977 struct se_lun *tpg_virt_lun0; 978 /* List of TCM sessions associated wth this TPG */ 979 struct list_head tpg_sess_list; 980 /* Pointer to $FABRIC_MOD dependent code */ 981 const struct target_core_fabric_ops *se_tpg_tfo; 982 struct se_wwn *se_tpg_wwn; 983 struct config_group tpg_group; 984 struct config_group tpg_lun_group; 985 struct config_group tpg_np_group; 986 struct config_group tpg_acl_group; 987 struct config_group tpg_attrib_group; 988 struct config_group tpg_auth_group; 989 struct config_group tpg_param_group; 990 }; 991 992 static inline struct se_portal_group *to_tpg(struct config_item *item) 993 { 994 return container_of(to_config_group(item), struct se_portal_group, 995 tpg_group); 996 } 997 998 static inline struct se_portal_group *attrib_to_tpg(struct config_item *item) 999 { 1000 return container_of(to_config_group(item), struct se_portal_group, 1001 tpg_attrib_group); 1002 } 1003 1004 static inline struct se_portal_group *auth_to_tpg(struct config_item *item) 1005 { 1006 return container_of(to_config_group(item), struct se_portal_group, 1007 tpg_auth_group); 1008 } 1009 1010 static inline struct se_portal_group *param_to_tpg(struct config_item *item) 1011 { 1012 return container_of(to_config_group(item), struct se_portal_group, 1013 tpg_param_group); 1014 } 1015 1016 enum { 1017 /* Use se_cmd's cpuid for completion */ 1018 SE_COMPL_AFFINITY_CPUID = -1, 1019 /* Complete on current CPU */ 1020 SE_COMPL_AFFINITY_CURR_CPU = -2, 1021 }; 1022 1023 struct se_wwn { 1024 struct target_fabric_configfs *wwn_tf; 1025 void *priv; 1026 struct config_group wwn_group; 1027 struct config_group fabric_stat_group; 1028 struct config_group param_group; 1029 int cmd_compl_affinity; 1030 }; 1031 1032 static inline void atomic_inc_mb(atomic_t *v) 1033 { 1034 smp_mb__before_atomic(); 1035 atomic_inc(v); 1036 smp_mb__after_atomic(); 1037 } 1038 1039 static inline void atomic_dec_mb(atomic_t *v) 1040 { 1041 smp_mb__before_atomic(); 1042 atomic_dec(v); 1043 smp_mb__after_atomic(); 1044 } 1045 1046 static inline void target_free_tag(struct se_session *sess, struct se_cmd *cmd) 1047 { 1048 sbitmap_queue_clear(&sess->sess_tag_pool, cmd->map_tag, cmd->map_cpu); 1049 } 1050 1051 #endif /* TARGET_CORE_BASE_H */ 1052