/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org>
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_PER,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_da_verify_recovery_page verify_er_page_default = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_verify_recovery_page verify_er_page_changeable = {
	/*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
	/*byte3*/SMS_VER_PER,
	/*read_retry_count*/0,
	/*reserved*/{ 0, 0, 0, 0, 0, 0 },
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN	(sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
	/*subpage_code*/0x01,
	/*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
	/*flags*/0,
	/*prio*/0,
	/*max_sense*/0xff
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC,
	/*mrie*/SIEP_MRIE_NO,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 1}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST |
	    SIEP_FLAGS_LOGERR,
	/*mrie*/0x0f,
	/*interval_timer*/{0xff, 0xff, 0xff, 0xff},
	/*report_count*/{0xff, 0xff, 0xff, 0xff}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/SLBPP_SITUA,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0x3f,
	/*caps2*/0x00,
	/*caps3*/0xf0,
	/*caps4*/0x00,
	/*caps5*/0x29,
	/*caps6*/0x00,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{8, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0,
	/*caps2*/0,
	/*caps3*/0,
	/*caps4*/0,
	/*caps5*/0,
	/*caps6*/0,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{0, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");
static int ctl_lun_map_size = 1024;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
    &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
#ifdef CTL_TIME_IO
static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN,
    &ctl_time_io_secs, 0, "Log requests taking more seconds");
#endif

/*
 * Maximum number of LUNs we support.  MUST be a power of 2.
 */
#define CTL_DEFAULT_MAX_LUNS	1024
static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS;
TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN,
    &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs");

/*
 * Maximum number of ports registered at one time.
 */
#define CTL_DEFAULT_MAX_PORTS	1024
static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS;
TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN,
    &ctl_max_ports, CTL_DEFAULT_MAX_PORTS, "Maximum number of ports");

/*
 * Maximum number of initiators we support.
 */
#define CTL_MAX_INITIATORS	(CTL_MAX_INIT_PER_PORT * ctl_max_ports)

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92),
 * Block limits (0xB0), Block Device Characteristics (0xB1) and
 * Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	11

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
static int ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			       struct ctl_ooa *ooa_hdr,
			       struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_enable_lun(struct ctl_lun *lun);
static int ctl_disable_lun(struct ctl_lun *lun);
static int ctl_free_lun(struct ctl_lun *lun);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_seq_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, const uint8_t *serialize_row,
    union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io **starting_io);
static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
static void ctl_failover_lun(union ctl_io *io);
static void ctl_scsiio_precheck(struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_target_reset(union ctl_io *io);
static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx,
			 ctl_ua_type ua_type);
static int ctl_lun_reset(union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx,
			      ctl_ua_type ua_type);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io, bool samethr);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io, bool samethr);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static void ctl_process_done(union ctl_io *io);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);
static int ctl_ha_init(void);
static int ctl_ha_shutdown(void);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};

MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ha_frontend =
{
	.name = "ha",
	.init = ctl_ha_init,
	.shutdown = ctl_ha_shutdown,
};

static int
ctl_ha_init(void)
{
	struct ctl_softc *softc = control_softc;

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
			    &softc->othersc_pool) != 0)
		return (ENOMEM);
	if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
		ctl_pool_free(softc->othersc_pool);
		return (EIO);
	}
	if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
	    != CTL_HA_STATUS_SUCCESS) {
		ctl_ha_msg_destroy(softc);
		ctl_pool_free(softc->othersc_pool);
		return (EIO);
	}
	return (0);
}

static int
ctl_ha_shutdown(void)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_port *port;

	ctl_ha_msg_shutdown(softc);
	if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS)
		return (EIO);
	if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
		return (EIO);
	ctl_pool_free(softc->othersc_pool);
	while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) {
		ctl_port_deregister(port);
		free(port->port_name, M_CTL);
		free(port, M_CTL);
	}
	return (0);
}

static void
ctl_ha_datamove(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);
	struct ctl_sg_entry *sgl;
	union ctl_ha_msg msg;
	uint32_t sg_entries_sent;
	int do_sg_copy, i, j;

	memset(&msg.dt, 0, sizeof(msg.dt));
	msg.hdr.msg_type = CTL_MSG_DATAMOVE;
	msg.hdr.original_sc = io->io_hdr.remote_io;
	msg.hdr.serializing_sc = io;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.dt.flags = io->io_hdr.flags;

	/*
	 * We convert everything into a S/G list here.  We can't
	 * pass by reference, only by value between controllers.
	 * So we can't pass a pointer to the S/G list, only as many
	 * S/G entries as we can fit in here.  If it's possible for
	 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
	 * then we need to break this up into multiple transfers.
	 */
	if (io->scsiio.kern_sg_entries == 0) {
		msg.dt.kern_sg_entries = 1;
#if 0
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
		} else {
			/* XXX KDM use busdma here! */
			msg.dt.sg_list[0].addr =
			    (void *)vtophys(io->scsiio.kern_data_ptr);
		}
#else
		KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
		    ("HA does not support BUS_ADDR"));
		msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
#endif
		msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
		do_sg_copy = 0;
	} else {
		msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
		do_sg_copy = 1;
	}

	msg.dt.kern_data_len = io->scsiio.kern_data_len;
	msg.dt.kern_total_len = io->scsiio.kern_total_len;
	msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
	msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
	msg.dt.sg_sequence = 0;

	/*
	 * Loop until we've sent all of the S/G entries.  On the
	 * other end, we'll recompose these S/G entries into one
	 * contiguous list before processing.
	 */
	for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
	    msg.dt.sg_sequence++) {
		msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
		    sizeof(msg.dt.sg_list[0])),
		    msg.dt.kern_sg_entries - sg_entries_sent);
		if (do_sg_copy != 0) {
			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
			for (i = sg_entries_sent, j = 0;
			     i < msg.dt.cur_sg_entries; i++, j++) {
#if 0
				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
					msg.dt.sg_list[j].addr = sgl[i].addr;
				} else {
					/* XXX KDM use busdma here! */
					msg.dt.sg_list[j].addr =
					    (void *)vtophys(sgl[i].addr);
				}
#else
				KASSERT((io->io_hdr.flags &
				    CTL_FLAG_BUS_ADDR) == 0,
				    ("HA does not support BUS_ADDR"));
				msg.dt.sg_list[j].addr = sgl[i].addr;
#endif
				msg.dt.sg_list[j].len = sgl[i].len;
			}
		}

		sg_entries_sent += msg.dt.cur_sg_entries;
		msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
		    sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
		    M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
			io->io_hdr.port_status = 31341;
			ctl_datamove_done(io, true);
			return;
		}
		msg.dt.sent_sg_entries = sg_entries_sent;
	}

	/*
	 * Officially hand over the request from us to the peer.
	 * If failover has just happened, then we must return an error.
	 * If failover happens just after, it is not our problem.
	 */
	if (lun)
		mtx_lock(&lun->lun_lock);
	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		io->io_hdr.port_status = 31342;
		ctl_datamove_done(io, true);
		return;
	}
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
	io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
	if (lun)
		mtx_unlock(&lun->lun_lock);
}

static void
ctl_ha_done(union ctl_io *io)
{
	union ctl_ha_msg msg;

	if (io->io_hdr.io_type == CTL_IO_SCSI) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.remote_io;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		    io->scsiio.sense_len);
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
		    msg.scsi.sense_len, M_WAITOK);
	}
	ctl_free_io(io);
}

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       msg_info->scsi.sense_len);
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&lun->lun_lock);
	i = sizeof(msg->lun);
	if (lun->lun_devid)
		i += lun->lun_devid->len;
	i += sizeof(pr_key) * lun->pr_key_count;
alloc:
	mtx_unlock(&lun->lun_lock);
	msg = malloc(i, M_CTL, M_WAITOK);
	mtx_lock(&lun->lun_lock);
	k = sizeof(msg->lun);
	if (lun->lun_devid)
		k += lun->lun_devid->len;
	k += sizeof(pr_key) * lun->pr_key_count;
	if (i < k) {
		free(msg, M_CTL);
		i = k;
		goto alloc;
	}
	bzero(&msg->lun, sizeof(msg->lun));
	msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->lun.flags = lun->flags;
	msg->lun.pr_generation = lun->pr_generation;
	msg->lun.pr_res_idx = lun->pr_res_idx;
	msg->lun.pr_res_type = lun->pr_res_type;
	msg->lun.pr_key_count = lun->pr_key_count;
	i = 0;
	if (lun->lun_devid) {
		msg->lun.lun_devid_len = lun->lun_devid->len;
		memcpy(&msg->lun.data[i], lun->lun_devid->data,
		    msg->lun.lun_devid_len);
		i += msg->lun.lun_devid_len;
	}
	for (k = 0; k < CTL_MAX_INITIATORS; k++) {
		if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
			continue;
		pr_key.pr_iid = k;
		memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
		i += sizeof(pr_key);
	}
	mtx_unlock(&lun->lun_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->lun, sizeof(msg->lun) + i,
	    M_WAITOK);
	free(msg, M_CTL);

	if (lun->flags & CTL_LUN_PRIMARY_SC) {
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
	}
}

void
ctl_isc_announce_port(struct ctl_port *port)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	i = sizeof(msg->port) + strlen(port->port_name) + 1;
	if (port->lun_map)
		i += port->lun_map_size * sizeof(uint32_t);
	if (port->port_devid)
		i += port->port_devid->len;
	if (port->target_devid)
		i += port->target_devid->len;
	if (port->init_devid)
		i += port->init_devid->len;
	msg = malloc(i, M_CTL, M_WAITOK);
	bzero(&msg->port, sizeof(msg->port));
	msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->port.port_type = port->port_type;
	msg->port.physical_port = port->physical_port;
	msg->port.virtual_port = port->virtual_port;
	msg->port.status = port->status;
	i = 0;
	msg->port.name_len = sprintf(&msg->port.data[i],
	    "%d:%s", softc->ha_id, port->port_name) + 1;
	i += msg->port.name_len;
	if (port->lun_map) {
		msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
		memcpy(&msg->port.data[i], port->lun_map,
		    msg->port.lun_map_len);
		i += msg->port.lun_map_len;
	}
	if (port->port_devid) {
		msg->port.port_devid_len = port->port_devid->len;
		memcpy(&msg->port.data[i], port->port_devid->data,
		    msg->port.port_devid_len);
		i += msg->port.port_devid_len;
	}
	if (port->target_devid) {
		msg->port.target_devid_len = port->target_devid->len;
		memcpy(&msg->port.data[i], port->target_devid->data,
		    msg->port.target_devid_len);
		i += msg->port.target_devid_len;
	}
	if (port->init_devid) {
		msg->port.init_devid_len = port->init_devid->len;
		memcpy(&msg->port.data[i], port->init_devid->data,
		    msg->port.init_devid_len);
		i += msg->port.init_devid_len;
	}
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
	    M_WAITOK);
	free(msg, M_CTL);
}

void
ctl_isc_announce_iid(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = port->ctl_softc;
	union ctl_ha_msg *msg;
	int i, l;

	if (port->targ_port < softc->port_min ||
	    port->targ_port >= softc->port_max ||
	    softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&softc->ctl_lock);
	i = sizeof(msg->iid);
	l = 0;
	if (port->wwpn_iid[iid].name)
		l = strlen(port->wwpn_iid[iid].name) + 1;
	i += l;
	msg = malloc(i, M_CTL, M_NOWAIT);
	if (msg == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	bzero(&msg->iid, sizeof(msg->iid));
	msg->hdr.msg_type = CTL_MSG_IID_SYNC;
	msg->hdr.nexus.targ_port = port->targ_port;
	msg->hdr.nexus.initid = iid;
	msg->iid.in_use = port->wwpn_iid[iid].in_use;
	msg->iid.name_len = l;
	msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
	if (port->wwpn_iid[iid].name)
		strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
	mtx_unlock(&softc->ctl_lock);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
	free(msg, M_CTL);
}

void
ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
    uint8_t page, uint8_t subpage)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	u_int i, l;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    page && lun->mode_pages.index[i].subpage == subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES)
		return;

	/* Don't try to replicate pages not present on this device. */
	if (lun->mode_pages.index[i].page_data == NULL)
		return;

	l = sizeof(msg->mode) + lun->mode_pages.index[i].page_len;
	msg = malloc(l, M_CTL, M_WAITOK | M_ZERO);
	msg->hdr.msg_type = CTL_MSG_MODE_SYNC;
	msg->hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
	msg->hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->mode.page_code = page;
	msg->mode.subpage = subpage;
	msg->mode.page_len = lun->mode_pages.index[i].page_len;
	memcpy(msg->mode.data, lun->mode_pages.index[i].page_data,
	    msg->mode.page_len);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->mode, l, M_WAITOK);
	free(msg, M_CTL);
}

static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_ha_msg msg;
	int i;

	/*
	 * Announce this node's parameters to the peer for validation.
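	 * The peer runs the same checks in ctl_isc_login() and aborts the
	 * HA channel if the version, ha_mode, ha_id or limits disagree.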
	 */
	msg.login.msg_type = CTL_MSG_LOGIN;
	msg.login.version = CTL_HA_VERSION;
	msg.login.ha_mode = softc->ha_mode;
	msg.login.ha_id = softc->ha_id;
	msg.login.max_luns = ctl_max_luns;
	msg.login.max_ports = ctl_max_ports;
	msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
	    M_WAITOK);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		ctl_isc_announce_port(port);
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use)
				ctl_isc_announce_iid(port, i);
		}
	}
	STAILQ_FOREACH(lun, &softc->lun_list, links)
		ctl_isc_announce_lun(lun);
}

static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_io *io;
	int i;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		}
		mtx_unlock(&lun->lun_lock);

		mtx_unlock(&softc->ctl_lock);
		io = ctl_alloc_io(softc->othersc_pool);
		mtx_lock(&softc->ctl_lock);
		ctl_zero_io(io);
		io->io_hdr.msg_type = CTL_MSG_FAILOVER;
		io->io_hdr.nexus.targ_mapped_lun = lun->lun;
		ctl_enqueue_isc(io);
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port->targ_port >= softc->port_min &&
		    port->targ_port < softc->port_max)
			continue;
		port->status &= ~CTL_PORT_STATUS_ONLINE;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			port->wwpn_iid[i].in_use = 0;
			free(port->wwpn_iid[i].name, M_CTL);
			port->wwpn_iid[i].name = NULL;
		}
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	uint32_t iid;

	if (len < sizeof(msg->ua)) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, sizeof(msg->ua));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	mtx_lock(&softc->ctl_lock);
	if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
		memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
	iid = ctl_get_initindex(&msg->hdr.nexus);
	if (msg->ua.ua_all) {
		if (msg->ua.ua_set)
			ctl_est_ua_all(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
	} else {
		if (msg->ua.ua_set)
			ctl_est_ua(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua(lun, iid, msg->ua.ua_type);
	}
	mtx_unlock(&lun->lun_lock);
}

static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;
	ctl_lun_flags oflags;
	uint32_t targ_lun;

	if (len < offsetof(struct ctl_ha_msg_lun, data[0])) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_lun, data[0]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	i = msg->lun.lun_devid_len + msg->lun.pr_key_count * sizeof(pr_key);
	if (len < offsetof(struct ctl_ha_msg_lun, data[i])) {
		printf("%s: Received truncated message data %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_lun, data[i]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
	if (msg->lun.lun_devid_len != i || (i > 0 &&
	    memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
		mtx_unlock(&lun->lun_lock);
		printf("%s: Received conflicting HA LUN %d\n",
		    __func__, targ_lun);
		return;
	} else {
		/* Record whether peer is primary. */
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->pr_generation = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->pr_res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}

static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i, new;

	if (len < offsetof(struct ctl_ha_msg_port, data[0])) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_port, data[0]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	i = msg->port.name_len + msg->port.lun_map_len +
	    msg->port.port_devid_len + msg->port.target_devid_len +
	    msg->port.init_devid_len;
	if (len < offsetof(struct ctl_ha_msg_port, data[i])) {
		printf("%s: Received truncated message data %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_port, data[i]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 1;
		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
		port->frontend = &ha_frontend;
		port->targ_port = msg->hdr.nexus.targ_port;
		port->fe_datamove = ctl_ha_datamove;
		port->fe_done = ctl_ha_done;
	} else if (port->frontend == &ha_frontend) {
		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 0;
	} else {
		printf("%s: Received conflicting HA port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	port->port_type = msg->port.port_type;
	port->physical_port = msg->port.physical_port;
	port->virtual_port = msg->port.virtual_port;
	port->status = msg->port.status;
	i = 0;
	free(port->port_name, M_CTL);
	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
	    M_CTL);
	i += msg->port.name_len;
	if (msg->port.lun_map_len != 0) {
		if (port->lun_map == NULL ||
		    port->lun_map_size * sizeof(uint32_t) <
		    msg->port.lun_map_len) {
			port->lun_map_size = 0;
			free(port->lun_map, M_CTL);
			port->lun_map = malloc(msg->port.lun_map_len,
			    M_CTL, M_WAITOK);
		}
		memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
		port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
		i += msg->port.lun_map_len;
	} else {
		port->lun_map_size = 0;
		free(port->lun_map, M_CTL);
		port->lun_map = NULL;
	}
	if (msg->port.port_devid_len != 0) {
		if (port->port_devid == NULL ||
		    port->port_devid->len < msg->port.port_devid_len) {
			free(port->port_devid, M_CTL);
			port->port_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.port_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->port_devid->data, &msg->port.data[i],
		    msg->port.port_devid_len);
		port->port_devid->len = msg->port.port_devid_len;
		i += msg->port.port_devid_len;
	} else {
		free(port->port_devid, M_CTL);
		port->port_devid = NULL;
	}
	if (msg->port.target_devid_len != 0) {
		if (port->target_devid == NULL ||
		    port->target_devid->len < msg->port.target_devid_len) {
			free(port->target_devid, M_CTL);
			port->target_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.target_devid_len, M_CTL, M_WAITOK);
		}
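		/* Copy the peer-supplied target devid into the (re)sized buffer. */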
		memcpy(port->target_devid->data, &msg->port.data[i],
		    msg->port.target_devid_len);
		port->target_devid->len = msg->port.target_devid_len;
		i += msg->port.target_devid_len;
	} else {
		free(port->target_devid, M_CTL);
		port->target_devid = NULL;
	}
	if (msg->port.init_devid_len != 0) {
		if (port->init_devid == NULL ||
		    port->init_devid->len < msg->port.init_devid_len) {
			free(port->init_devid, M_CTL);
			port->init_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.init_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->init_devid->data, &msg->port.data[i],
		    msg->port.init_devid_len);
		port->init_devid->len = msg->port.init_devid_len;
		i += msg->port.init_devid_len;
	} else {
		free(port->init_devid, M_CTL);
		port->init_devid = NULL;
	}
	if (new) {
		if (ctl_port_register(port) != 0) {
			printf("%s: ctl_port_register() failed with error\n",
			    __func__);
		}
	}
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
			continue;
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	int i, iid;

	if (len < offsetof(struct ctl_ha_msg_iid, data[0])) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_iid, data[0]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	i = msg->iid.name_len;
	if (len < offsetof(struct ctl_ha_msg_iid, data[i])) {
		printf("%s: Received truncated message data %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_iid, data[i]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		printf("%s: Received IID for unknown port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	iid = msg->hdr.nexus.initid;
	if (port->wwpn_iid[iid].in_use != 0 &&
	    msg->iid.in_use == 0)
		ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
	port->wwpn_iid[iid].in_use = msg->iid.in_use;
	port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
	free(port->wwpn_iid[iid].name, M_CTL);
	if (msg->iid.name_len) {
		port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
		    msg->iid.name_len, M_CTL);
	} else
		port->wwpn_iid[iid].name = NULL;
}

static void
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

	if (len < sizeof(msg->login)) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, sizeof(msg->login));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	if (msg->login.version != CTL_HA_VERSION) {
		printf("CTL HA peers have different versions %d != %d\n",
		    msg->login.version, CTL_HA_VERSION);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_mode != softc->ha_mode) {
		printf("CTL HA peers have different ha_mode %d != %d\n",
		    msg->login.ha_mode, softc->ha_mode);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_id == softc->ha_id) {
		printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
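	/*
	 * HA sync messages carry raw LUN, port and initiator indices, so
	 * both nodes must be configured with identical limits for those
	 * indices to stay in range on the peer.
	 */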
	if (msg->login.max_luns != ctl_max_luns ||
	    msg->login.max_ports != ctl_max_ports ||
	    msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
		printf("CTL HA peers have different limits\n");
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
}

static void
ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	u_int i;
	uint32_t initidx, targ_lun;

	if (len < offsetof(struct ctl_ha_msg_mode, data[0])) {
		printf("%s: Received truncated message %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_mode, data[0]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	i = msg->mode.page_len;
	if (len < offsetof(struct ctl_ha_msg_mode, data[i])) {
		printf("%s: Received truncated message data %d < %zu\n",
		    __func__, len, offsetof(struct ctl_ha_msg_mode, data[i]));
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    msg->mode.page_code &&
		    lun->mode_pages.index[i].subpage == msg->mode.subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
	    min(lun->mode_pages.index[i].page_len, msg->mode.page_len));
	initidx = ctl_get_initindex(&msg->hdr.nexus);
	if (initidx != -1)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc = control_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg *msg, msgbuf;

		if (param > sizeof(msgbuf))
			msg = malloc(param, M_CTL, M_WAITOK);
		else
			msg = &msgbuf;
		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
		    M_WAITOK);
		if (isc_status != CTL_HA_STATUS_SUCCESS) {
			printf("%s: Error receiving message: %d\n",
			    __func__, isc_status);
			if (msg != &msgbuf)
				free(msg, M_CTL);
			return;
		}

		CTL_DEBUG_PRINT(("CTL: msg_type %d len %d\n",
		    msg->hdr.msg_type, param));
		switch (msg->hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
			io = ctl_alloc_io(softc->othersc_pool);
			ctl_zero_io(io);
			/* Populate ctsio from msg. */
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.remote_io = msg->hdr.original_sc;
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (softc->ha_mode != CTL_HA_MODE_XFER)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg->hdr.nexus;
			io->scsiio.priority = msg->scsi.priority;
			io->scsiio.tag_num = msg->scsi.tag_num;
			io->scsiio.tag_type = msg->scsi.tag_type;
#ifdef CTL_TIME_IO
			io->io_hdr.start_time = time_uptime;
			getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			io->scsiio.cdb_len = msg->scsi.cdb_len;
			memcpy(io->scsiio.cdb, msg->scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			if (msg->hdr.status == CTL_SUCCESS)
				io->io_hdr.status = msg->hdr.status;

			if (msg->dt.sg_sequence == 0) {
#ifdef CTL_TIME_IO
				getbinuptime(&io->io_hdr.dma_start_bt);
#endif
				i = msg->dt.kern_sg_entries +
				    msg->dt.kern_data_len /
				    CTL_HA_DATAMOVE_SEGMENT + 1;
				sgl = malloc(sizeof(*sgl) * i, M_CTL,
				    M_WAITOK | M_ZERO);
				CTL_RSGL(io) = sgl;
				CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries];

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg->dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg->dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg->dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg->dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg->dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg->dt.kern_rel_offset;
				io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
				io->io_hdr.flags |= msg->dt.flags &
				    CTL_FLAG_BUS_ADDR;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			for (i = msg->dt.sent_sg_entries, j = 0;
			     i < (msg->dt.sent_sg_entries +
			     msg->dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg->dt.sg_list[j].addr;
				sgl[i].len = msg->dt.sg_list[j].len;
			}

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg->dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg->hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.port_status;
			io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
				if (msg->hdr.status == CTL_SUCCESS)
					io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg->hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg->hdr.nexus;
			taskio->task_action = msg->task.task_action;
			taskio->tag_num = msg->task.tag_num;
			taskio->tag_type = msg->task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbinuptime(&taskio->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			presio->io_hdr.nexus = msg->hdr.nexus;
			presio->pr_msg = msg->pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_UA:
			ctl_isc_ua(softc, msg, param);
			break;
		case CTL_MSG_PORT_SYNC:
			ctl_isc_port_sync(softc, msg, param);
			break;
		case CTL_MSG_LUN_SYNC:
			ctl_isc_lun_sync(softc, msg, param);
			break;
		case CTL_MSG_IID_SYNC:
			ctl_isc_iid_sync(softc, msg, param);
			break;
		case CTL_MSG_LOGIN:
			ctl_isc_login(softc, msg, param);
			break;
		case CTL_MSG_MODE_SYNC:
			ctl_isc_mode_sync(softc, msg, param);
			break;
		default:
			printf("Received HA message of unknown type %d\n",
			    msg->hdr.msg_type);
			ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
			break;
		}
		if (msg != &msgbuf)
			free(msg, M_CTL);
	} else if (event == CTL_HA_EVT_LINK_CHANGE) {
		printf("CTL: HA link status changed from %d to %d\n",
		    softc->ha_link, param);
		if (param == softc->ha_link)
			return;
		if (softc->ha_link == CTL_HA_LINK_ONLINE) {
			softc->ha_link = param;
			ctl_isc_ha_link_down(softc);
		} else {
			softc->ha_link = param;
			if (softc->ha_link == CTL_HA_LINK_ONLINE)
				ctl_isc_ha_link_up(softc);
		}
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{

	memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
	    src->scsi.sense_len);
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

static void
ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
{

	memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
	    src->scsiio.sense_len);
	dest->scsi.scsi_status = src->scsiio.scsi_status;
	dest->scsi.sense_len = src->scsiio.sense_len;
	dest->hdr.status = src->io_hdr.status;
}

void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

void
ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
{
	int i;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	if (lun->pending_ua[port] == NULL)
		return;
	for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
		if (port * CTL_MAX_INIT_PER_PORT + i == except)
			continue;
		lun->pending_ua[port][i] |= ua;
	}
}

void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++)
		ctl_est_ua_port(lun, i, except, ua);
}

void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}

void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] &= ~ua;
		}
	}
}

void
ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
    ctl_ua_type ua_type)
{
	struct ctl_lun *lun;

	mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
	STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		ctl_clr_ua(lun, initidx, ua_type);
		mtx_unlock(&lun->lun_lock);
	}
}

static int
ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg1;
	struct ctl_lun *lun;
	struct ctl_lun_req ireq;
	int error, value;

	value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ?
0 : 1; 1902 error = sysctl_handle_int(oidp, &value, 0, req); 1903 if ((error != 0) || (req->newptr == NULL)) 1904 return (error); 1905 1906 mtx_lock(&softc->ctl_lock); 1907 if (value == 0) 1908 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1909 else 1910 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1911 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1912 mtx_unlock(&softc->ctl_lock); 1913 bzero(&ireq, sizeof(ireq)); 1914 ireq.reqtype = CTL_LUNREQ_MODIFY; 1915 ireq.reqdata.modify.lun_id = lun->lun; 1916 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1917 curthread); 1918 if (ireq.status != CTL_LUN_OK) { 1919 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1920 __func__, ireq.status, ireq.error_str); 1921 } 1922 mtx_lock(&softc->ctl_lock); 1923 } 1924 mtx_unlock(&softc->ctl_lock); 1925 return (0); 1926 } 1927 1928 static int 1929 ctl_init(void) 1930 { 1931 struct make_dev_args args; 1932 struct ctl_softc *softc; 1933 int i, error; 1934 1935 softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1936 M_WAITOK | M_ZERO); 1937 1938 make_dev_args_init(&args); 1939 args.mda_devsw = &ctl_cdevsw; 1940 args.mda_uid = UID_ROOT; 1941 args.mda_gid = GID_OPERATOR; 1942 args.mda_mode = 0600; 1943 args.mda_si_drv1 = softc; 1944 args.mda_si_drv2 = NULL; 1945 error = make_dev_s(&args, &softc->dev, "cam/ctl"); 1946 if (error != 0) { 1947 free(softc, M_DEVBUF); 1948 control_softc = NULL; 1949 return (error); 1950 } 1951 1952 sysctl_ctx_init(&softc->sysctl_ctx); 1953 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1954 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1955 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer"); 1956 1957 if (softc->sysctl_tree == NULL) { 1958 printf("%s: unable to allocate sysctl tree\n", __func__); 1959 destroy_dev(softc->dev); 1960 free(softc, M_DEVBUF); 1961 control_softc = NULL; 1962 return (ENOMEM); 1963 } 1964 1965 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1966 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1967 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1968 softc->flags = 0; 1969 1970 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1971 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1972 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1973 1974 if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { 1975 printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", 1976 ctl_max_luns, CTL_DEFAULT_MAX_LUNS); 1977 ctl_max_luns = CTL_DEFAULT_MAX_LUNS; 1978 } 1979 softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, 1980 M_DEVBUF, M_WAITOK | M_ZERO); 1981 softc->ctl_lun_mask = malloc(sizeof(uint32_t) * 1982 ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1983 if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { 1984 printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", 1985 ctl_max_ports, CTL_DEFAULT_MAX_PORTS); 1986 ctl_max_ports = CTL_DEFAULT_MAX_PORTS; 1987 } 1988 softc->ctl_port_mask = malloc(sizeof(uint32_t) * 1989 ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); 1990 softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, 1991 M_DEVBUF, M_WAITOK | M_ZERO); 1992 1993 /* 1994 * In Copan's HA scheme, the "master" and "slave" roles are 1995 * figured out through the slot the controller is in. Although it 1996 * is an active/active system, someone has to be in charge. 
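 *
 * For illustration only (both values are tunable, the numbers below are
 * just the usual defaults): with ctl_max_ports == 256 and
 * NUM_HA_SHELVES == 2, the head with ha_id 1 serves ports [0, 128) and
 * the head with ha_id 2 serves ports [128, 256), while ha_id 0 disables
 * HA and keeps the entire range.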
1997 */ 1998 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1999 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 2000 "HA head ID (0 - no HA)"); 2001 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { 2002 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 2003 softc->is_single = 1; 2004 softc->port_cnt = ctl_max_ports; 2005 softc->port_min = 0; 2006 } else { 2007 softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; 2008 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 2009 } 2010 softc->port_max = softc->port_min + softc->port_cnt; 2011 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 2012 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 2013 2014 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 2015 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 2016 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 2017 2018 STAILQ_INIT(&softc->lun_list); 2019 STAILQ_INIT(&softc->fe_list); 2020 STAILQ_INIT(&softc->port_list); 2021 STAILQ_INIT(&softc->be_list); 2022 ctl_tpc_init(softc); 2023 2024 if (worker_threads <= 0) 2025 worker_threads = max(1, mp_ncpus / 4); 2026 if (worker_threads > CTL_MAX_THREADS) 2027 worker_threads = CTL_MAX_THREADS; 2028 2029 for (i = 0; i < worker_threads; i++) { 2030 struct ctl_thread *thr = &softc->threads[i]; 2031 2032 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 2033 thr->ctl_softc = softc; 2034 STAILQ_INIT(&thr->incoming_queue); 2035 STAILQ_INIT(&thr->rtr_queue); 2036 STAILQ_INIT(&thr->done_queue); 2037 STAILQ_INIT(&thr->isc_queue); 2038 2039 error = kproc_kthread_add(ctl_work_thread, thr, 2040 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 2041 if (error != 0) { 2042 printf("error creating CTL work thread!\n"); 2043 return (error); 2044 } 2045 } 2046 error = kproc_kthread_add(ctl_thresh_thread, softc, 2047 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); 2048 if (error != 0) { 2049 printf("error creating CTL threshold thread!\n"); 2050 return (error); 2051 } 2052 2053 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 2054 OID_AUTO, "ha_role", 2055 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 2056 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 2057 2058 if (softc->is_single == 0) { 2059 if (ctl_frontend_register(&ha_frontend) != 0) 2060 softc->is_single = 1; 2061 } 2062 return (0); 2063 } 2064 2065 static int 2066 ctl_shutdown(void) 2067 { 2068 struct ctl_softc *softc = control_softc; 2069 int i; 2070 2071 if (softc->is_single == 0) 2072 ctl_frontend_deregister(&ha_frontend); 2073 2074 destroy_dev(softc->dev); 2075 2076 /* Shutdown CTL threads. 
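Each worker clears its thr->thread pointer when it exits, so keep waking and briefly pausing until every worker and the threshold thread are gone before tearing the rest down.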
*/ 2077 softc->shutdown = 1; 2078 for (i = 0; i < worker_threads; i++) { 2079 struct ctl_thread *thr = &softc->threads[i]; 2080 while (thr->thread != NULL) { 2081 wakeup(thr); 2082 if (thr->thread != NULL) 2083 pause("CTL thr shutdown", 1); 2084 } 2085 mtx_destroy(&thr->queue_lock); 2086 } 2087 while (softc->thresh_thread != NULL) { 2088 wakeup(softc->thresh_thread); 2089 if (softc->thresh_thread != NULL) 2090 pause("CTL thr shutdown", 1); 2091 } 2092 2093 ctl_tpc_shutdown(softc); 2094 uma_zdestroy(softc->io_zone); 2095 mtx_destroy(&softc->ctl_lock); 2096 2097 free(softc->ctl_luns, M_DEVBUF); 2098 free(softc->ctl_lun_mask, M_DEVBUF); 2099 free(softc->ctl_port_mask, M_DEVBUF); 2100 free(softc->ctl_ports, M_DEVBUF); 2101 2102 sysctl_ctx_free(&softc->sysctl_ctx); 2103 2104 free(softc, M_DEVBUF); 2105 control_softc = NULL; 2106 return (0); 2107 } 2108 2109 static int 2110 ctl_module_event_handler(module_t mod, int what, void *arg) 2111 { 2112 2113 switch (what) { 2114 case MOD_LOAD: 2115 return (ctl_init()); 2116 case MOD_UNLOAD: 2117 return (ctl_shutdown()); 2118 default: 2119 return (EOPNOTSUPP); 2120 } 2121 } 2122 2123 /* 2124 * XXX KDM should we do some access checks here? Bump a reference count to 2125 * prevent a CTL module from being unloaded while someone has it open? 2126 */ 2127 static int 2128 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2129 { 2130 return (0); 2131 } 2132 2133 static int 2134 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2135 { 2136 return (0); 2137 } 2138 2139 /* 2140 * Remove an initiator by port number and initiator ID. 2141 * Returns 0 for success, -1 for failure. 2142 */ 2143 int 2144 ctl_remove_initiator(struct ctl_port *port, int iid) 2145 { 2146 struct ctl_softc *softc = port->ctl_softc; 2147 int last; 2148 2149 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2150 2151 if (iid > CTL_MAX_INIT_PER_PORT) { 2152 printf("%s: initiator ID %u > maximum %u!\n", 2153 __func__, iid, CTL_MAX_INIT_PER_PORT); 2154 return (-1); 2155 } 2156 2157 mtx_lock(&softc->ctl_lock); 2158 last = (--port->wwpn_iid[iid].in_use == 0); 2159 port->wwpn_iid[iid].last_use = time_uptime; 2160 mtx_unlock(&softc->ctl_lock); 2161 if (last) 2162 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); 2163 ctl_isc_announce_iid(port, iid); 2164 2165 return (0); 2166 } 2167 2168 /* 2169 * Add an initiator to the initiator map. 2170 * Returns iid for success, < 0 for failure.
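 * If the requested iid is negative, one is chosen here: first by
 * matching the supplied wwpn or name, then by taking a completely
 * unused slot, and finally by recycling the least recently used
 * inactive slot.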
2171 */ 2172 int 2173 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 2174 { 2175 struct ctl_softc *softc = port->ctl_softc; 2176 time_t best_time; 2177 int i, best; 2178 2179 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2180 2181 if (iid >= CTL_MAX_INIT_PER_PORT) { 2182 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 2183 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 2184 free(name, M_CTL); 2185 return (-1); 2186 } 2187 2188 mtx_lock(&softc->ctl_lock); 2189 2190 if (iid < 0 && (wwpn != 0 || name != NULL)) { 2191 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2192 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 2193 iid = i; 2194 break; 2195 } 2196 if (name != NULL && port->wwpn_iid[i].name != NULL && 2197 strcmp(name, port->wwpn_iid[i].name) == 0) { 2198 iid = i; 2199 break; 2200 } 2201 } 2202 } 2203 2204 if (iid < 0) { 2205 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2206 if (port->wwpn_iid[i].in_use == 0 && 2207 port->wwpn_iid[i].wwpn == 0 && 2208 port->wwpn_iid[i].name == NULL) { 2209 iid = i; 2210 break; 2211 } 2212 } 2213 } 2214 2215 if (iid < 0) { 2216 best = -1; 2217 best_time = INT32_MAX; 2218 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 2219 if (port->wwpn_iid[i].in_use == 0) { 2220 if (port->wwpn_iid[i].last_use < best_time) { 2221 best = i; 2222 best_time = port->wwpn_iid[i].last_use; 2223 } 2224 } 2225 } 2226 iid = best; 2227 } 2228 2229 if (iid < 0) { 2230 mtx_unlock(&softc->ctl_lock); 2231 free(name, M_CTL); 2232 return (-2); 2233 } 2234 2235 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 2236 /* 2237 * This is not an error yet. 2238 */ 2239 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 2240 #if 0 2241 printf("%s: port %d iid %u WWPN %#jx arrived" 2242 " again\n", __func__, port->targ_port, 2243 iid, (uintmax_t)wwpn); 2244 #endif 2245 goto take; 2246 } 2247 if (name != NULL && port->wwpn_iid[iid].name != NULL && 2248 strcmp(name, port->wwpn_iid[iid].name) == 0) { 2249 #if 0 2250 printf("%s: port %d iid %u name '%s' arrived" 2251 " again\n", __func__, port->targ_port, 2252 iid, name); 2253 #endif 2254 goto take; 2255 } 2256 2257 /* 2258 * This is an error, but what do we do about it? The 2259 * driver is telling us we have a new WWPN for this 2260 * initiator ID, so we pretty much need to use it. 
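 * Log both the old and the new identity below, then take over the
 * slot for the new initiator.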
2261 */ 2262 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 2263 " but WWPN %#jx '%s' is still at that address\n", 2264 __func__, port->targ_port, iid, wwpn, name, 2265 (uintmax_t)port->wwpn_iid[iid].wwpn, 2266 port->wwpn_iid[iid].name); 2267 } 2268 take: 2269 free(port->wwpn_iid[iid].name, M_CTL); 2270 port->wwpn_iid[iid].name = name; 2271 port->wwpn_iid[iid].wwpn = wwpn; 2272 port->wwpn_iid[iid].in_use++; 2273 mtx_unlock(&softc->ctl_lock); 2274 ctl_isc_announce_iid(port, iid); 2275 2276 return (iid); 2277 } 2278 2279 static int 2280 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 2281 { 2282 int len; 2283 2284 switch (port->port_type) { 2285 case CTL_PORT_FC: 2286 { 2287 struct scsi_transportid_fcp *id = 2288 (struct scsi_transportid_fcp *)buf; 2289 if (port->wwpn_iid[iid].wwpn == 0) 2290 return (0); 2291 memset(id, 0, sizeof(*id)); 2292 id->format_protocol = SCSI_PROTO_FC; 2293 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 2294 return (sizeof(*id)); 2295 } 2296 case CTL_PORT_ISCSI: 2297 { 2298 struct scsi_transportid_iscsi_port *id = 2299 (struct scsi_transportid_iscsi_port *)buf; 2300 if (port->wwpn_iid[iid].name == NULL) 2301 return (0); 2302 memset(id, 0, 256); 2303 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 2304 SCSI_PROTO_ISCSI; 2305 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 2306 len = roundup2(min(len, 252), 4); 2307 scsi_ulto2b(len, id->additional_length); 2308 return (sizeof(*id) + len); 2309 } 2310 case CTL_PORT_SAS: 2311 { 2312 struct scsi_transportid_sas *id = 2313 (struct scsi_transportid_sas *)buf; 2314 if (port->wwpn_iid[iid].wwpn == 0) 2315 return (0); 2316 memset(id, 0, sizeof(*id)); 2317 id->format_protocol = SCSI_PROTO_SAS; 2318 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 2319 return (sizeof(*id)); 2320 } 2321 default: 2322 { 2323 struct scsi_transportid_spi *id = 2324 (struct scsi_transportid_spi *)buf; 2325 memset(id, 0, sizeof(*id)); 2326 id->format_protocol = SCSI_PROTO_SPI; 2327 scsi_ulto2b(iid, id->scsi_addr); 2328 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 2329 return (sizeof(*id)); 2330 } 2331 } 2332 } 2333 2334 /* 2335 * Serialize a command that went down the "wrong" side, and so was sent to 2336 * this controller for execution. The logic is a little different than the 2337 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 2338 * sent back to the other side, but in the success case, we execute the 2339 * command on this side (XFER mode) or tell the other side to execute it 2340 * (SER_ONLY mode). 2341 */ 2342 static void 2343 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 2344 { 2345 struct ctl_softc *softc = CTL_SOFTC(ctsio); 2346 struct ctl_port *port = CTL_PORT(ctsio); 2347 union ctl_ha_msg msg_info; 2348 struct ctl_lun *lun; 2349 const struct ctl_cmd_entry *entry; 2350 union ctl_io *bio; 2351 uint32_t targ_lun; 2352 2353 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 2354 2355 /* Make sure that we know about this port. */ 2356 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { 2357 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 2358 /*retry_count*/ 1); 2359 goto badjuju; 2360 } 2361 2362 /* Make sure that we know about this LUN. */ 2363 mtx_lock(&softc->ctl_lock); 2364 if (targ_lun >= ctl_max_luns || 2365 (lun = softc->ctl_luns[targ_lun]) == NULL) { 2366 mtx_unlock(&softc->ctl_lock); 2367 2368 /* 2369 * The other node would not send this request to us unless 2370 * received announce that we are primary node for this LUN. 
2371 * If this LUN does not exist now, it is probably the result of 2372 * a race, so respond to the initiator in the most opaque way. 2373 */ 2374 ctl_set_busy(ctsio); 2375 goto badjuju; 2376 } 2377 mtx_lock(&lun->lun_lock); 2378 mtx_unlock(&softc->ctl_lock); 2379 2380 /* 2381 * If the LUN is invalid, pretend that it doesn't exist. 2382 * It will go away as soon as all pending I/Os have completed. 2383 */ 2384 if (lun->flags & CTL_LUN_DISABLED) { 2385 mtx_unlock(&lun->lun_lock); 2386 ctl_set_busy(ctsio); 2387 goto badjuju; 2388 } 2389 2390 entry = ctl_get_cmd_entry(ctsio, NULL); 2391 ctsio->seridx = entry->seridx; 2392 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 2393 mtx_unlock(&lun->lun_lock); 2394 goto badjuju; 2395 } 2396 2397 CTL_LUN(ctsio) = lun; 2398 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 2399 2400 /* 2401 * Every I/O goes into the OOA queue for a 2402 * particular LUN, and stays there until completion. 2403 */ 2404 #ifdef CTL_TIME_IO 2405 if (LIST_EMPTY(&lun->ooa_queue)) 2406 lun->idle_time += getsbinuptime() - lun->last_busy; 2407 #endif 2408 LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 2409 2410 bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); 2411 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 2412 case CTL_ACTION_PASS: 2413 case CTL_ACTION_SKIP: 2414 if (softc->ha_mode == CTL_HA_MODE_XFER) { 2415 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 2416 ctl_enqueue_rtr((union ctl_io *)ctsio); 2417 mtx_unlock(&lun->lun_lock); 2418 } else { 2419 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 2420 mtx_unlock(&lun->lun_lock); 2421 2422 /* send msg back to other side */ 2423 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2424 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 2425 msg_info.hdr.msg_type = CTL_MSG_R2R; 2426 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2427 sizeof(msg_info.hdr), M_WAITOK); 2428 } 2429 break; 2430 case CTL_ACTION_BLOCK: 2431 ctsio->io_hdr.blocker = bio; 2432 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 2433 blocked_links); 2434 mtx_unlock(&lun->lun_lock); 2435 break; 2436 case CTL_ACTION_OVERLAP: 2437 LIST_REMOVE(&ctsio->io_hdr, ooa_links); 2438 mtx_unlock(&lun->lun_lock); 2439 ctl_set_overlapped_cmd(ctsio); 2440 goto badjuju; 2441 case CTL_ACTION_OVERLAP_TAG: 2442 LIST_REMOVE(&ctsio->io_hdr, ooa_links); 2443 mtx_unlock(&lun->lun_lock); 2444 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 2445 badjuju: 2446 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 2447 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; 2448 msg_info.hdr.serializing_sc = NULL; 2449 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 2450 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 2451 sizeof(msg_info.scsi), M_WAITOK); 2452 ctl_free_io((union ctl_io *)ctsio); 2453 break; 2454 default: 2455 __assert_unreachable(); 2456 } 2457 } 2458 2459 /* 2460 * Fill kern_entries with the commands currently on this LUN's OOA (Order Of Arrival) queue.
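 * Entries beyond ooa_hdr->alloc_num are only counted, not copied, so
 * the caller can tell how much space it actually needed. The caller
 * holds the softc ctl_lock; the per-LUN lock is taken here.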
2461 */ 2462 static void 2463 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2464 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2465 { 2466 struct ctl_io_hdr *ioh; 2467 2468 mtx_lock(&lun->lun_lock); 2469 ioh = LIST_FIRST(&lun->ooa_queue); 2470 if (ioh == NULL) { 2471 mtx_unlock(&lun->lun_lock); 2472 return; 2473 } 2474 while (LIST_NEXT(ioh, ooa_links) != NULL) 2475 ioh = LIST_NEXT(ioh, ooa_links); 2476 for ( ; ioh; ioh = LIST_PREV(ioh, &lun->ooa_queue, ctl_io_hdr, ooa_links)) { 2477 union ctl_io *io = (union ctl_io *)ioh; 2478 struct ctl_ooa_entry *entry; 2479 2480 /* 2481 * If we've got more than we can fit, just count the 2482 * remaining entries. 2483 */ 2484 if (*cur_fill_num >= ooa_hdr->alloc_num) { 2485 (*cur_fill_num)++; 2486 continue; 2487 } 2488 2489 entry = &kern_entries[*cur_fill_num]; 2490 2491 entry->tag_num = io->scsiio.tag_num; 2492 entry->tag_type = io->scsiio.tag_type; 2493 entry->lun_num = lun->lun; 2494 #ifdef CTL_TIME_IO 2495 entry->start_bt = io->io_hdr.start_bt; 2496 #endif 2497 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2498 entry->cdb_len = io->scsiio.cdb_len; 2499 if (io->io_hdr.blocker != NULL) 2500 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2501 2502 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2503 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2504 2505 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2506 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2507 2508 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2509 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2510 2511 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2512 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2513 2514 if (io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) 2515 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_QUEUED; 2516 2517 if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) 2518 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_SENT; 2519 (*cur_fill_num)++; 2520 } 2521 mtx_unlock(&lun->lun_lock); 2522 } 2523 2524 /* 2525 * Escape characters that are illegal or not recommended in XML. 
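 * Only '&', '<' and '>' are rewritten (to &amp;, &lt; and &gt;); any
 * other character, including quotes, is copied through unchanged.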
2526 */ 2527 int 2528 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2529 { 2530 char *end = str + size; 2531 int retval; 2532 2533 retval = 0; 2534 2535 for (; *str && str < end; str++) { 2536 switch (*str) { 2537 case '&': 2538 retval = sbuf_printf(sb, "&amp;"); 2539 break; 2540 case '>': 2541 retval = sbuf_printf(sb, "&gt;"); 2542 break; 2543 case '<': 2544 retval = sbuf_printf(sb, "&lt;"); 2545 break; 2546 default: 2547 retval = sbuf_putc(sb, *str); 2548 break; 2549 } 2550 2551 if (retval != 0) 2552 break; 2553 } 2554 2555 return (retval); 2556 } 2557 2558 static void 2559 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2560 { 2561 struct scsi_vpd_id_descriptor *desc; 2562 int i; 2563 2564 if (id == NULL || id->len < 4) 2565 return; 2566 desc = (struct scsi_vpd_id_descriptor *)id->data; 2567 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2568 case SVPD_ID_TYPE_T10: 2569 sbuf_printf(sb, "t10."); 2570 break; 2571 case SVPD_ID_TYPE_EUI64: 2572 sbuf_printf(sb, "eui."); 2573 break; 2574 case SVPD_ID_TYPE_NAA: 2575 sbuf_printf(sb, "naa."); 2576 break; 2577 case SVPD_ID_TYPE_SCSI_NAME: 2578 break; 2579 } 2580 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2581 case SVPD_ID_CODESET_BINARY: 2582 for (i = 0; i < desc->length; i++) 2583 sbuf_printf(sb, "%02x", desc->identifier[i]); 2584 break; 2585 case SVPD_ID_CODESET_ASCII: 2586 sbuf_printf(sb, "%.*s", (int)desc->length, 2587 (char *)desc->identifier); 2588 break; 2589 case SVPD_ID_CODESET_UTF8: 2590 sbuf_printf(sb, "%s", (char *)desc->identifier); 2591 break; 2592 } 2593 } 2594 2595 static int 2596 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2597 struct thread *td) 2598 { 2599 struct ctl_softc *softc = dev->si_drv1; 2600 struct ctl_port *port; 2601 struct ctl_lun *lun; 2602 int retval; 2603 2604 retval = 0; 2605 2606 switch (cmd) { 2607 case CTL_IO: 2608 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2609 break; 2610 case CTL_ENABLE_PORT: 2611 case CTL_DISABLE_PORT: 2612 case CTL_SET_PORT_WWNS: { 2613 struct ctl_port *port; 2614 struct ctl_port_entry *entry; 2615 2616 entry = (struct ctl_port_entry *)addr; 2617 2618 mtx_lock(&softc->ctl_lock); 2619 STAILQ_FOREACH(port, &softc->port_list, links) { 2620 int action, done; 2621 2622 if (port->targ_port < softc->port_min || 2623 port->targ_port >= softc->port_max) 2624 continue; 2625 2626 action = 0; 2627 done = 0; 2628 if ((entry->port_type == CTL_PORT_NONE) 2629 && (entry->targ_port == port->targ_port)) { 2630 /* 2631 * If the user only wants to enable or 2632 * disable or set WWNs on a specific port, 2633 * do the operation and we're done. 2634 */ 2635 action = 1; 2636 done = 1; 2637 } else if (entry->port_type & port->port_type) { 2638 /* 2639 * Compare the user's type mask with the 2640 * particular frontend type to see if we 2641 * have a match. 2642 */ 2643 action = 1; 2644 done = 0; 2645 2646 /* 2647 * Make sure the user isn't trying to set 2648 * WWNs on multiple ports at the same time. 2649 */ 2650 if (cmd == CTL_SET_PORT_WWNS) { 2651 printf("%s: Can't set WWNs on " 2652 "multiple ports\n", __func__); 2653 retval = EINVAL; 2654 break; 2655 } 2656 } 2657 if (action == 0) 2658 continue; 2659 2660 /* 2661 * XXX KDM we have to drop the lock here, because 2662 * the online/offline operations can potentially 2663 * block. 
We need to reference count the frontends 2664 * so they can't go away, 2665 */ 2666 if (cmd == CTL_ENABLE_PORT) { 2667 mtx_unlock(&softc->ctl_lock); 2668 ctl_port_online(port); 2669 mtx_lock(&softc->ctl_lock); 2670 } else if (cmd == CTL_DISABLE_PORT) { 2671 mtx_unlock(&softc->ctl_lock); 2672 ctl_port_offline(port); 2673 mtx_lock(&softc->ctl_lock); 2674 } else if (cmd == CTL_SET_PORT_WWNS) { 2675 ctl_port_set_wwns(port, 2676 (entry->flags & CTL_PORT_WWNN_VALID) ? 2677 1 : 0, entry->wwnn, 2678 (entry->flags & CTL_PORT_WWPN_VALID) ? 2679 1 : 0, entry->wwpn); 2680 } 2681 if (done != 0) 2682 break; 2683 } 2684 mtx_unlock(&softc->ctl_lock); 2685 break; 2686 } 2687 case CTL_GET_OOA: { 2688 struct ctl_ooa *ooa_hdr; 2689 struct ctl_ooa_entry *entries; 2690 uint32_t cur_fill_num; 2691 2692 ooa_hdr = (struct ctl_ooa *)addr; 2693 2694 if ((ooa_hdr->alloc_len == 0) 2695 || (ooa_hdr->alloc_num == 0)) { 2696 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2697 "must be non-zero\n", __func__, 2698 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2699 retval = EINVAL; 2700 break; 2701 } 2702 2703 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2704 sizeof(struct ctl_ooa_entry))) { 2705 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2706 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2707 __func__, ooa_hdr->alloc_len, 2708 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2709 retval = EINVAL; 2710 break; 2711 } 2712 2713 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2714 if (entries == NULL) { 2715 printf("%s: could not allocate %d bytes for OOA " 2716 "dump\n", __func__, ooa_hdr->alloc_len); 2717 retval = ENOMEM; 2718 break; 2719 } 2720 2721 mtx_lock(&softc->ctl_lock); 2722 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2723 (ooa_hdr->lun_num >= ctl_max_luns || 2724 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2725 mtx_unlock(&softc->ctl_lock); 2726 free(entries, M_CTL); 2727 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2728 __func__, (uintmax_t)ooa_hdr->lun_num); 2729 retval = EINVAL; 2730 break; 2731 } 2732 2733 cur_fill_num = 0; 2734 2735 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2736 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2737 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2738 ooa_hdr, entries); 2739 } 2740 } else { 2741 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2742 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2743 entries); 2744 } 2745 mtx_unlock(&softc->ctl_lock); 2746 2747 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2748 ooa_hdr->fill_len = ooa_hdr->fill_num * 2749 sizeof(struct ctl_ooa_entry); 2750 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2751 if (retval != 0) { 2752 printf("%s: error copying out %d bytes for OOA dump\n", 2753 __func__, ooa_hdr->fill_len); 2754 } 2755 2756 getbinuptime(&ooa_hdr->cur_bt); 2757 2758 if (cur_fill_num > ooa_hdr->alloc_num) { 2759 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2760 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2761 } else { 2762 ooa_hdr->dropped_num = 0; 2763 ooa_hdr->status = CTL_OOA_OK; 2764 } 2765 2766 free(entries, M_CTL); 2767 break; 2768 } 2769 case CTL_DELAY_IO: { 2770 struct ctl_io_delay_info *delay_info; 2771 2772 delay_info = (struct ctl_io_delay_info *)addr; 2773 2774 #ifdef CTL_IO_DELAY 2775 mtx_lock(&softc->ctl_lock); 2776 if (delay_info->lun_id >= ctl_max_luns || 2777 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2778 mtx_unlock(&softc->ctl_lock); 2779 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2780 break; 2781 } 2782 
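/* Hand off from the global ctl_lock to the per-LUN lock before touching delay_info. */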
mtx_lock(&lun->lun_lock); 2783 mtx_unlock(&softc->ctl_lock); 2784 delay_info->status = CTL_DELAY_STATUS_OK; 2785 switch (delay_info->delay_type) { 2786 case CTL_DELAY_TYPE_CONT: 2787 case CTL_DELAY_TYPE_ONESHOT: 2788 break; 2789 default: 2790 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2791 break; 2792 } 2793 switch (delay_info->delay_loc) { 2794 case CTL_DELAY_LOC_DATAMOVE: 2795 lun->delay_info.datamove_type = delay_info->delay_type; 2796 lun->delay_info.datamove_delay = delay_info->delay_secs; 2797 break; 2798 case CTL_DELAY_LOC_DONE: 2799 lun->delay_info.done_type = delay_info->delay_type; 2800 lun->delay_info.done_delay = delay_info->delay_secs; 2801 break; 2802 default: 2803 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2804 break; 2805 } 2806 mtx_unlock(&lun->lun_lock); 2807 #else 2808 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2809 #endif /* CTL_IO_DELAY */ 2810 break; 2811 } 2812 case CTL_ERROR_INJECT: { 2813 struct ctl_error_desc *err_desc, *new_err_desc; 2814 2815 err_desc = (struct ctl_error_desc *)addr; 2816 2817 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2818 M_WAITOK | M_ZERO); 2819 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2820 2821 mtx_lock(&softc->ctl_lock); 2822 if (err_desc->lun_id >= ctl_max_luns || 2823 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2824 mtx_unlock(&softc->ctl_lock); 2825 free(new_err_desc, M_CTL); 2826 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2827 __func__, (uintmax_t)err_desc->lun_id); 2828 retval = EINVAL; 2829 break; 2830 } 2831 mtx_lock(&lun->lun_lock); 2832 mtx_unlock(&softc->ctl_lock); 2833 2834 /* 2835 * We could do some checking here to verify the validity 2836 * of the request, but given the complexity of error 2837 * injection requests, the checking logic would be fairly 2838 * complex. 2839 * 2840 * For now, if the request is invalid, it just won't get 2841 * executed and might get deleted. 2842 */ 2843 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2844 2845 /* 2846 * XXX KDM check to make sure the serial number is unique, 2847 * in case we somehow manage to wrap. That shouldn't 2848 * happen for a very long time, but it's the right thing to 2849 * do. 
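 * (Both the in-kernel descriptor and the user-visible copy get the same
 * serial, which is what CTL_ERROR_INJECT_DELETE matches on later.)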
2850 */ 2851 new_err_desc->serial = lun->error_serial; 2852 err_desc->serial = lun->error_serial; 2853 lun->error_serial++; 2854 2855 mtx_unlock(&lun->lun_lock); 2856 break; 2857 } 2858 case CTL_ERROR_INJECT_DELETE: { 2859 struct ctl_error_desc *delete_desc, *desc, *desc2; 2860 int delete_done; 2861 2862 delete_desc = (struct ctl_error_desc *)addr; 2863 delete_done = 0; 2864 2865 mtx_lock(&softc->ctl_lock); 2866 if (delete_desc->lun_id >= ctl_max_luns || 2867 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2868 mtx_unlock(&softc->ctl_lock); 2869 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2870 __func__, (uintmax_t)delete_desc->lun_id); 2871 retval = EINVAL; 2872 break; 2873 } 2874 mtx_lock(&lun->lun_lock); 2875 mtx_unlock(&softc->ctl_lock); 2876 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2877 if (desc->serial != delete_desc->serial) 2878 continue; 2879 2880 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2881 links); 2882 free(desc, M_CTL); 2883 delete_done = 1; 2884 } 2885 mtx_unlock(&lun->lun_lock); 2886 if (delete_done == 0) { 2887 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2888 "error serial %ju on LUN %u\n", __func__, 2889 delete_desc->serial, delete_desc->lun_id); 2890 retval = EINVAL; 2891 break; 2892 } 2893 break; 2894 } 2895 case CTL_DUMP_STRUCTS: { 2896 int j, k; 2897 struct ctl_port *port; 2898 struct ctl_frontend *fe; 2899 2900 mtx_lock(&softc->ctl_lock); 2901 printf("CTL Persistent Reservation information start:\n"); 2902 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2903 mtx_lock(&lun->lun_lock); 2904 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2905 mtx_unlock(&lun->lun_lock); 2906 continue; 2907 } 2908 2909 for (j = 0; j < ctl_max_ports; j++) { 2910 if (lun->pr_keys[j] == NULL) 2911 continue; 2912 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2913 if (lun->pr_keys[j][k] == 0) 2914 continue; 2915 printf(" LUN %ju port %d iid %d key " 2916 "%#jx\n", lun->lun, j, k, 2917 (uintmax_t)lun->pr_keys[j][k]); 2918 } 2919 } 2920 mtx_unlock(&lun->lun_lock); 2921 } 2922 printf("CTL Persistent Reservation information end\n"); 2923 printf("CTL Ports:\n"); 2924 STAILQ_FOREACH(port, &softc->port_list, links) { 2925 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2926 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2927 port->frontend->name, port->port_type, 2928 port->physical_port, port->virtual_port, 2929 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2930 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2931 if (port->wwpn_iid[j].in_use == 0 && 2932 port->wwpn_iid[j].wwpn == 0 && 2933 port->wwpn_iid[j].name == NULL) 2934 continue; 2935 2936 printf(" iid %u use %d WWPN %#jx '%s'\n", 2937 j, port->wwpn_iid[j].in_use, 2938 (uintmax_t)port->wwpn_iid[j].wwpn, 2939 port->wwpn_iid[j].name); 2940 } 2941 } 2942 printf("CTL Port information end\n"); 2943 mtx_unlock(&softc->ctl_lock); 2944 /* 2945 * XXX KDM calling this without a lock. We'd likely want 2946 * to drop the lock before calling the frontend's dump 2947 * routine anyway. 
2948 */ 2949 printf("CTL Frontends:\n"); 2950 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2951 printf(" Frontend '%s'\n", fe->name); 2952 if (fe->fe_dump != NULL) 2953 fe->fe_dump(); 2954 } 2955 printf("CTL Frontend information end\n"); 2956 break; 2957 } 2958 case CTL_LUN_REQ: { 2959 struct ctl_lun_req *lun_req; 2960 struct ctl_backend_driver *backend; 2961 void *packed; 2962 nvlist_t *tmp_args_nvl; 2963 size_t packed_len; 2964 2965 lun_req = (struct ctl_lun_req *)addr; 2966 tmp_args_nvl = lun_req->args_nvl; 2967 2968 backend = ctl_backend_find(lun_req->backend); 2969 if (backend == NULL) { 2970 lun_req->status = CTL_LUN_ERROR; 2971 snprintf(lun_req->error_str, 2972 sizeof(lun_req->error_str), 2973 "Backend \"%s\" not found.", 2974 lun_req->backend); 2975 break; 2976 } 2977 2978 if (lun_req->args != NULL) { 2979 if (lun_req->args_len > CTL_MAX_ARGS_LEN) { 2980 lun_req->status = CTL_LUN_ERROR; 2981 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2982 "Too big args."); 2983 break; 2984 } 2985 packed = malloc(lun_req->args_len, M_CTL, M_WAITOK); 2986 if (copyin(lun_req->args, packed, lun_req->args_len) != 0) { 2987 free(packed, M_CTL); 2988 lun_req->status = CTL_LUN_ERROR; 2989 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 2990 "Cannot copyin args."); 2991 break; 2992 } 2993 lun_req->args_nvl = nvlist_unpack(packed, 2994 lun_req->args_len, 0); 2995 free(packed, M_CTL); 2996 2997 if (lun_req->args_nvl == NULL) { 2998 lun_req->status = CTL_LUN_ERROR; 2999 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 3000 "Cannot unpack args nvlist."); 3001 break; 3002 } 3003 } else 3004 lun_req->args_nvl = nvlist_create(0); 3005 3006 lun_req->result_nvl = NULL; 3007 retval = backend->ioctl(dev, cmd, addr, flag, td); 3008 nvlist_destroy(lun_req->args_nvl); 3009 lun_req->args_nvl = tmp_args_nvl; 3010 3011 if (lun_req->result_nvl != NULL) { 3012 if (lun_req->result != NULL) { 3013 packed = nvlist_pack(lun_req->result_nvl, 3014 &packed_len); 3015 if (packed == NULL) { 3016 lun_req->status = CTL_LUN_ERROR; 3017 snprintf(lun_req->error_str, 3018 sizeof(lun_req->error_str), 3019 "Cannot pack result nvlist."); 3020 break; 3021 } 3022 3023 if (packed_len > lun_req->result_len) { 3024 lun_req->status = CTL_LUN_ERROR; 3025 snprintf(lun_req->error_str, 3026 sizeof(lun_req->error_str), 3027 "Result nvlist too large."); 3028 free(packed, M_NVLIST); 3029 break; 3030 } 3031 3032 if (copyout(packed, lun_req->result, packed_len)) { 3033 lun_req->status = CTL_LUN_ERROR; 3034 snprintf(lun_req->error_str, 3035 sizeof(lun_req->error_str), 3036 "Cannot copyout() the result."); 3037 free(packed, M_NVLIST); 3038 break; 3039 } 3040 3041 lun_req->result_len = packed_len; 3042 free(packed, M_NVLIST); 3043 } 3044 3045 nvlist_destroy(lun_req->result_nvl); 3046 } 3047 break; 3048 } 3049 case CTL_LUN_LIST: { 3050 struct sbuf *sb; 3051 struct ctl_lun_list *list; 3052 const char *name, *value; 3053 void *cookie; 3054 int type; 3055 3056 list = (struct ctl_lun_list *)addr; 3057 3058 /* 3059 * Allocate a fixed length sbuf here, based on the length 3060 * of the user's buffer. We could allocate an auto-extending 3061 * buffer, and then tell the user how much larger our 3062 * amount of data is than his buffer, but that presents 3063 * some problems: 3064 * 3065 * 1. The sbuf(9) routines use a blocking malloc, and so 3066 * we can't hold a lock while calling them with an 3067 * auto-extending buffer. 3068 * 3069 * 2. 
There is not currently a LUN reference counting 3070 * mechanism, outside of outstanding transactions on 3071 * the LUN's OOA queue. So a LUN could go away on us 3072 * while we're getting the LUN number, backend-specific 3073 * information, etc. Thus, given the way things 3074 * currently work, we need to hold the CTL lock while 3075 * grabbing LUN information. 3076 * 3077 * So, from the user's standpoint, the best thing to do is 3078 * allocate what he thinks is a reasonable buffer length, 3079 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3080 * double the buffer length and try again. (And repeat 3081 * that until he succeeds.) 3082 */ 3083 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3084 if (sb == NULL) { 3085 list->status = CTL_LUN_LIST_ERROR; 3086 snprintf(list->error_str, sizeof(list->error_str), 3087 "Unable to allocate %d bytes for LUN list", 3088 list->alloc_len); 3089 break; 3090 } 3091 3092 sbuf_printf(sb, "<ctllunlist>\n"); 3093 3094 mtx_lock(&softc->ctl_lock); 3095 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3096 mtx_lock(&lun->lun_lock); 3097 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3098 (uintmax_t)lun->lun); 3099 3100 /* 3101 * Bail out as soon as we see that we've overfilled 3102 * the buffer. 3103 */ 3104 if (retval != 0) 3105 break; 3106 3107 retval = sbuf_printf(sb, "\t<backend_type>%s" 3108 "</backend_type>\n", 3109 (lun->backend == NULL) ? "none" : 3110 lun->backend->name); 3111 3112 if (retval != 0) 3113 break; 3114 3115 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3116 lun->be_lun->lun_type); 3117 3118 if (retval != 0) 3119 break; 3120 3121 if (lun->backend == NULL) { 3122 retval = sbuf_printf(sb, "</lun>\n"); 3123 if (retval != 0) 3124 break; 3125 continue; 3126 } 3127 3128 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3129 (lun->be_lun->maxlba > 0) ? 
3130 lun->be_lun->maxlba + 1 : 0); 3131 3132 if (retval != 0) 3133 break; 3134 3135 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3136 lun->be_lun->blocksize); 3137 3138 if (retval != 0) 3139 break; 3140 3141 retval = sbuf_printf(sb, "\t<serial_number>"); 3142 3143 if (retval != 0) 3144 break; 3145 3146 retval = ctl_sbuf_printf_esc(sb, 3147 lun->be_lun->serial_num, 3148 sizeof(lun->be_lun->serial_num)); 3149 3150 if (retval != 0) 3151 break; 3152 3153 retval = sbuf_printf(sb, "</serial_number>\n"); 3154 3155 if (retval != 0) 3156 break; 3157 3158 retval = sbuf_printf(sb, "\t<device_id>"); 3159 3160 if (retval != 0) 3161 break; 3162 3163 retval = ctl_sbuf_printf_esc(sb, 3164 lun->be_lun->device_id, 3165 sizeof(lun->be_lun->device_id)); 3166 3167 if (retval != 0) 3168 break; 3169 3170 retval = sbuf_printf(sb, "</device_id>\n"); 3171 3172 if (retval != 0) 3173 break; 3174 3175 if (lun->backend->lun_info != NULL) { 3176 retval = lun->backend->lun_info(lun->be_lun, sb); 3177 if (retval != 0) 3178 break; 3179 } 3180 3181 cookie = NULL; 3182 while ((name = nvlist_next(lun->be_lun->options, &type, 3183 &cookie)) != NULL) { 3184 sbuf_printf(sb, "\t<%s>", name); 3185 3186 if (type == NV_TYPE_STRING) { 3187 value = dnvlist_get_string( 3188 lun->be_lun->options, name, NULL); 3189 if (value != NULL) 3190 sbuf_printf(sb, "%s", value); 3191 } 3192 3193 sbuf_printf(sb, "</%s>\n", name); 3194 } 3195 3196 retval = sbuf_printf(sb, "</lun>\n"); 3197 3198 if (retval != 0) 3199 break; 3200 mtx_unlock(&lun->lun_lock); 3201 } 3202 if (lun != NULL) 3203 mtx_unlock(&lun->lun_lock); 3204 mtx_unlock(&softc->ctl_lock); 3205 3206 if ((retval != 0) 3207 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3208 retval = 0; 3209 sbuf_delete(sb); 3210 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3211 snprintf(list->error_str, sizeof(list->error_str), 3212 "Out of space, %d bytes is too small", 3213 list->alloc_len); 3214 break; 3215 } 3216 3217 sbuf_finish(sb); 3218 3219 retval = copyout(sbuf_data(sb), list->lun_xml, 3220 sbuf_len(sb) + 1); 3221 3222 list->fill_len = sbuf_len(sb) + 1; 3223 list->status = CTL_LUN_LIST_OK; 3224 sbuf_delete(sb); 3225 break; 3226 } 3227 case CTL_ISCSI: { 3228 struct ctl_iscsi *ci; 3229 struct ctl_frontend *fe; 3230 3231 ci = (struct ctl_iscsi *)addr; 3232 3233 fe = ctl_frontend_find("iscsi"); 3234 if (fe == NULL) { 3235 ci->status = CTL_ISCSI_ERROR; 3236 snprintf(ci->error_str, sizeof(ci->error_str), 3237 "Frontend \"iscsi\" not found."); 3238 break; 3239 } 3240 3241 retval = fe->ioctl(dev, cmd, addr, flag, td); 3242 break; 3243 } 3244 case CTL_PORT_REQ: { 3245 struct ctl_req *req; 3246 struct ctl_frontend *fe; 3247 void *packed; 3248 nvlist_t *tmp_args_nvl; 3249 size_t packed_len; 3250 3251 req = (struct ctl_req *)addr; 3252 tmp_args_nvl = req->args_nvl; 3253 3254 fe = ctl_frontend_find(req->driver); 3255 if (fe == NULL) { 3256 req->status = CTL_LUN_ERROR; 3257 snprintf(req->error_str, sizeof(req->error_str), 3258 "Frontend \"%s\" not found.", req->driver); 3259 break; 3260 } 3261 3262 if (req->args != NULL) { 3263 if (req->args_len > CTL_MAX_ARGS_LEN) { 3264 req->status = CTL_LUN_ERROR; 3265 snprintf(req->error_str, sizeof(req->error_str), 3266 "Too big args."); 3267 break; 3268 } 3269 packed = malloc(req->args_len, M_CTL, M_WAITOK); 3270 if (copyin(req->args, packed, req->args_len) != 0) { 3271 free(packed, M_CTL); 3272 req->status = CTL_LUN_ERROR; 3273 snprintf(req->error_str, sizeof(req->error_str), 3274 "Cannot copyin args."); 3275 break; 3276 } 3277 req->args_nvl = 
nvlist_unpack(packed, 3278 req->args_len, 0); 3279 free(packed, M_CTL); 3280 3281 if (req->args_nvl == NULL) { 3282 req->status = CTL_LUN_ERROR; 3283 snprintf(req->error_str, sizeof(req->error_str), 3284 "Cannot unpack args nvlist."); 3285 break; 3286 } 3287 } else 3288 req->args_nvl = nvlist_create(0); 3289 3290 req->result_nvl = NULL; 3291 if (fe->ioctl) 3292 retval = fe->ioctl(dev, cmd, addr, flag, td); 3293 else 3294 retval = ENODEV; 3295 3296 nvlist_destroy(req->args_nvl); 3297 req->args_nvl = tmp_args_nvl; 3298 3299 if (req->result_nvl != NULL) { 3300 if (req->result != NULL) { 3301 packed = nvlist_pack(req->result_nvl, 3302 &packed_len); 3303 if (packed == NULL) { 3304 req->status = CTL_LUN_ERROR; 3305 snprintf(req->error_str, 3306 sizeof(req->error_str), 3307 "Cannot pack result nvlist."); 3308 break; 3309 } 3310 3311 if (packed_len > req->result_len) { 3312 req->status = CTL_LUN_ERROR; 3313 snprintf(req->error_str, 3314 sizeof(req->error_str), 3315 "Result nvlist too large."); 3316 free(packed, M_NVLIST); 3317 break; 3318 } 3319 3320 if (copyout(packed, req->result, packed_len)) { 3321 req->status = CTL_LUN_ERROR; 3322 snprintf(req->error_str, 3323 sizeof(req->error_str), 3324 "Cannot copyout() the result."); 3325 free(packed, M_NVLIST); 3326 break; 3327 } 3328 3329 req->result_len = packed_len; 3330 free(packed, M_NVLIST); 3331 } 3332 3333 nvlist_destroy(req->result_nvl); 3334 } 3335 break; 3336 } 3337 case CTL_PORT_LIST: { 3338 struct sbuf *sb; 3339 struct ctl_port *port; 3340 struct ctl_lun_list *list; 3341 const char *name, *value; 3342 void *cookie; 3343 int j, type; 3344 uint32_t plun; 3345 3346 list = (struct ctl_lun_list *)addr; 3347 3348 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3349 if (sb == NULL) { 3350 list->status = CTL_LUN_LIST_ERROR; 3351 snprintf(list->error_str, sizeof(list->error_str), 3352 "Unable to allocate %d bytes for LUN list", 3353 list->alloc_len); 3354 break; 3355 } 3356 3357 sbuf_printf(sb, "<ctlportlist>\n"); 3358 3359 mtx_lock(&softc->ctl_lock); 3360 STAILQ_FOREACH(port, &softc->port_list, links) { 3361 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3362 (uintmax_t)port->targ_port); 3363 3364 /* 3365 * Bail out as soon as we see that we've overfilled 3366 * the buffer. 3367 */ 3368 if (retval != 0) 3369 break; 3370 3371 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3372 "</frontend_type>\n", port->frontend->name); 3373 if (retval != 0) 3374 break; 3375 3376 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3377 port->port_type); 3378 if (retval != 0) 3379 break; 3380 3381 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3382 (port->status & CTL_PORT_STATUS_ONLINE) ? 
"YES" : "NO"); 3383 if (retval != 0) 3384 break; 3385 3386 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3387 port->port_name); 3388 if (retval != 0) 3389 break; 3390 3391 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3392 port->physical_port); 3393 if (retval != 0) 3394 break; 3395 3396 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3397 port->virtual_port); 3398 if (retval != 0) 3399 break; 3400 3401 if (port->target_devid != NULL) { 3402 sbuf_printf(sb, "\t<target>"); 3403 ctl_id_sbuf(port->target_devid, sb); 3404 sbuf_printf(sb, "</target>\n"); 3405 } 3406 3407 if (port->port_devid != NULL) { 3408 sbuf_printf(sb, "\t<port>"); 3409 ctl_id_sbuf(port->port_devid, sb); 3410 sbuf_printf(sb, "</port>\n"); 3411 } 3412 3413 if (port->port_info != NULL) { 3414 retval = port->port_info(port->onoff_arg, sb); 3415 if (retval != 0) 3416 break; 3417 } 3418 3419 cookie = NULL; 3420 while ((name = nvlist_next(port->options, &type, 3421 &cookie)) != NULL) { 3422 sbuf_printf(sb, "\t<%s>", name); 3423 3424 if (type == NV_TYPE_STRING) { 3425 value = dnvlist_get_string(port->options, 3426 name, NULL); 3427 if (value != NULL) 3428 sbuf_printf(sb, "%s", value); 3429 } 3430 3431 sbuf_printf(sb, "</%s>\n", name); 3432 } 3433 3434 if (port->lun_map != NULL) { 3435 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3436 for (j = 0; j < port->lun_map_size; j++) { 3437 plun = ctl_lun_map_from_port(port, j); 3438 if (plun == UINT32_MAX) 3439 continue; 3440 sbuf_printf(sb, 3441 "\t<lun id=\"%u\">%u</lun>\n", 3442 j, plun); 3443 } 3444 } 3445 3446 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3447 if (port->wwpn_iid[j].in_use == 0 || 3448 (port->wwpn_iid[j].wwpn == 0 && 3449 port->wwpn_iid[j].name == NULL)) 3450 continue; 3451 3452 if (port->wwpn_iid[j].name != NULL) 3453 retval = sbuf_printf(sb, 3454 "\t<initiator id=\"%u\">%s</initiator>\n", 3455 j, port->wwpn_iid[j].name); 3456 else 3457 retval = sbuf_printf(sb, 3458 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3459 j, port->wwpn_iid[j].wwpn); 3460 if (retval != 0) 3461 break; 3462 } 3463 if (retval != 0) 3464 break; 3465 3466 retval = sbuf_printf(sb, "</targ_port>\n"); 3467 if (retval != 0) 3468 break; 3469 } 3470 mtx_unlock(&softc->ctl_lock); 3471 3472 if ((retval != 0) 3473 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3474 retval = 0; 3475 sbuf_delete(sb); 3476 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3477 snprintf(list->error_str, sizeof(list->error_str), 3478 "Out of space, %d bytes is too small", 3479 list->alloc_len); 3480 break; 3481 } 3482 3483 sbuf_finish(sb); 3484 3485 retval = copyout(sbuf_data(sb), list->lun_xml, 3486 sbuf_len(sb) + 1); 3487 3488 list->fill_len = sbuf_len(sb) + 1; 3489 list->status = CTL_LUN_LIST_OK; 3490 sbuf_delete(sb); 3491 break; 3492 } 3493 case CTL_LUN_MAP: { 3494 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3495 struct ctl_port *port; 3496 3497 mtx_lock(&softc->ctl_lock); 3498 if (lm->port < softc->port_min || 3499 lm->port >= softc->port_max || 3500 (port = softc->ctl_ports[lm->port]) == NULL) { 3501 mtx_unlock(&softc->ctl_lock); 3502 return (ENXIO); 3503 } 3504 if (port->status & CTL_PORT_STATUS_ONLINE) { 3505 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3506 if (ctl_lun_map_to_port(port, lun->lun) == 3507 UINT32_MAX) 3508 continue; 3509 mtx_lock(&lun->lun_lock); 3510 ctl_est_ua_port(lun, lm->port, -1, 3511 CTL_UA_LUN_CHANGE); 3512 mtx_unlock(&lun->lun_lock); 3513 } 3514 } 3515 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3516 if 
(lm->plun != UINT32_MAX) { 3517 if (lm->lun == UINT32_MAX) 3518 retval = ctl_lun_map_unset(port, lm->plun); 3519 else if (lm->lun < ctl_max_luns && 3520 softc->ctl_luns[lm->lun] != NULL) 3521 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3522 else 3523 return (ENXIO); 3524 } else { 3525 if (lm->lun == UINT32_MAX) 3526 retval = ctl_lun_map_deinit(port); 3527 else 3528 retval = ctl_lun_map_init(port); 3529 } 3530 if (port->status & CTL_PORT_STATUS_ONLINE) 3531 ctl_isc_announce_port(port); 3532 break; 3533 } 3534 case CTL_GET_LUN_STATS: { 3535 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3536 int i; 3537 3538 /* 3539 * XXX KDM no locking here. If the LUN list changes, 3540 * things can blow up. 3541 */ 3542 i = 0; 3543 stats->status = CTL_SS_OK; 3544 stats->fill_len = 0; 3545 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3546 if (lun->lun < stats->first_item) 3547 continue; 3548 if (stats->fill_len + sizeof(lun->stats) > 3549 stats->alloc_len) { 3550 stats->status = CTL_SS_NEED_MORE_SPACE; 3551 break; 3552 } 3553 retval = copyout(&lun->stats, &stats->stats[i++], 3554 sizeof(lun->stats)); 3555 if (retval != 0) 3556 break; 3557 stats->fill_len += sizeof(lun->stats); 3558 } 3559 stats->num_items = softc->num_luns; 3560 stats->flags = CTL_STATS_FLAG_NONE; 3561 #ifdef CTL_TIME_IO 3562 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3563 #endif 3564 getnanouptime(&stats->timestamp); 3565 break; 3566 } 3567 case CTL_GET_PORT_STATS: { 3568 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3569 int i; 3570 3571 /* 3572 * XXX KDM no locking here. If the LUN list changes, 3573 * things can blow up. 3574 */ 3575 i = 0; 3576 stats->status = CTL_SS_OK; 3577 stats->fill_len = 0; 3578 STAILQ_FOREACH(port, &softc->port_list, links) { 3579 if (port->targ_port < stats->first_item) 3580 continue; 3581 if (stats->fill_len + sizeof(port->stats) > 3582 stats->alloc_len) { 3583 stats->status = CTL_SS_NEED_MORE_SPACE; 3584 break; 3585 } 3586 retval = copyout(&port->stats, &stats->stats[i++], 3587 sizeof(port->stats)); 3588 if (retval != 0) 3589 break; 3590 stats->fill_len += sizeof(port->stats); 3591 } 3592 stats->num_items = softc->num_ports; 3593 stats->flags = CTL_STATS_FLAG_NONE; 3594 #ifdef CTL_TIME_IO 3595 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3596 #endif 3597 getnanouptime(&stats->timestamp); 3598 break; 3599 } 3600 default: { 3601 /* XXX KDM should we fix this? */ 3602 #if 0 3603 struct ctl_backend_driver *backend; 3604 unsigned int type; 3605 int found; 3606 3607 found = 0; 3608 3609 /* 3610 * We encode the backend type as the ioctl type for backend 3611 * ioctls. So parse it out here, and then search for a 3612 * backend of this type. 
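 * (This fallback is compiled out with #if 0, so unknown ioctls simply
 * return ENOTTY below.)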
3613 */ 3614 type = _IOC_TYPE(cmd); 3615 3616 STAILQ_FOREACH(backend, &softc->be_list, links) { 3617 if (backend->type == type) { 3618 found = 1; 3619 break; 3620 } 3621 } 3622 if (found == 0) { 3623 printf("ctl: unknown ioctl command %#lx or backend " 3624 "%d\n", cmd, type); 3625 retval = EINVAL; 3626 break; 3627 } 3628 retval = backend->ioctl(dev, cmd, addr, flag, td); 3629 #endif 3630 retval = ENOTTY; 3631 break; 3632 } 3633 } 3634 return (retval); 3635 } 3636 3637 uint32_t 3638 ctl_get_initindex(struct ctl_nexus *nexus) 3639 { 3640 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3641 } 3642 3643 int 3644 ctl_lun_map_init(struct ctl_port *port) 3645 { 3646 struct ctl_softc *softc = port->ctl_softc; 3647 struct ctl_lun *lun; 3648 int size = ctl_lun_map_size; 3649 uint32_t i; 3650 3651 if (port->lun_map == NULL || port->lun_map_size < size) { 3652 port->lun_map_size = 0; 3653 free(port->lun_map, M_CTL); 3654 port->lun_map = malloc(size * sizeof(uint32_t), 3655 M_CTL, M_NOWAIT); 3656 } 3657 if (port->lun_map == NULL) 3658 return (ENOMEM); 3659 for (i = 0; i < size; i++) 3660 port->lun_map[i] = UINT32_MAX; 3661 port->lun_map_size = size; 3662 if (port->status & CTL_PORT_STATUS_ONLINE) { 3663 if (port->lun_disable != NULL) { 3664 STAILQ_FOREACH(lun, &softc->lun_list, links) 3665 port->lun_disable(port->targ_lun_arg, lun->lun); 3666 } 3667 ctl_isc_announce_port(port); 3668 } 3669 return (0); 3670 } 3671 3672 int 3673 ctl_lun_map_deinit(struct ctl_port *port) 3674 { 3675 struct ctl_softc *softc = port->ctl_softc; 3676 struct ctl_lun *lun; 3677 3678 if (port->lun_map == NULL) 3679 return (0); 3680 port->lun_map_size = 0; 3681 free(port->lun_map, M_CTL); 3682 port->lun_map = NULL; 3683 if (port->status & CTL_PORT_STATUS_ONLINE) { 3684 if (port->lun_enable != NULL) { 3685 STAILQ_FOREACH(lun, &softc->lun_list, links) 3686 port->lun_enable(port->targ_lun_arg, lun->lun); 3687 } 3688 ctl_isc_announce_port(port); 3689 } 3690 return (0); 3691 } 3692 3693 int 3694 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3695 { 3696 int status; 3697 uint32_t old; 3698 3699 if (port->lun_map == NULL) { 3700 status = ctl_lun_map_init(port); 3701 if (status != 0) 3702 return (status); 3703 } 3704 if (plun >= port->lun_map_size) 3705 return (EINVAL); 3706 old = port->lun_map[plun]; 3707 port->lun_map[plun] = glun; 3708 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3709 if (port->lun_enable != NULL) 3710 port->lun_enable(port->targ_lun_arg, plun); 3711 ctl_isc_announce_port(port); 3712 } 3713 return (0); 3714 } 3715 3716 int 3717 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3718 { 3719 uint32_t old; 3720 3721 if (port->lun_map == NULL || plun >= port->lun_map_size) 3722 return (0); 3723 old = port->lun_map[plun]; 3724 port->lun_map[plun] = UINT32_MAX; 3725 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3726 if (port->lun_disable != NULL) 3727 port->lun_disable(port->targ_lun_arg, plun); 3728 ctl_isc_announce_port(port); 3729 } 3730 return (0); 3731 } 3732 3733 uint32_t 3734 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3735 { 3736 3737 if (port == NULL) 3738 return (UINT32_MAX); 3739 if (port->lun_map == NULL) 3740 return (lun_id); 3741 if (lun_id > port->lun_map_size) 3742 return (UINT32_MAX); 3743 return (port->lun_map[lun_id]); 3744 } 3745 3746 uint32_t 3747 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3748 { 3749 uint32_t i; 3750 3751 if (port == NULL) 3752 return (UINT32_MAX); 3753 if 
(port->lun_map == NULL) 3754 return (lun_id); 3755 for (i = 0; i < port->lun_map_size; i++) { 3756 if (port->lun_map[i] == lun_id) 3757 return (i); 3758 } 3759 return (UINT32_MAX); 3760 } 3761 3762 uint32_t 3763 ctl_decode_lun(uint64_t encoded) 3764 { 3765 uint8_t lun[8]; 3766 uint32_t result = 0xffffffff; 3767 3768 be64enc(lun, encoded); 3769 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3770 case RPL_LUNDATA_ATYP_PERIPH: 3771 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && 3772 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3773 result = lun[1]; 3774 break; 3775 case RPL_LUNDATA_ATYP_FLAT: 3776 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3777 lun[6] == 0 && lun[7] == 0) 3778 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3779 break; 3780 case RPL_LUNDATA_ATYP_EXTLUN: 3781 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3782 case 0x02: 3783 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3784 case 0x00: 3785 result = lun[1]; 3786 break; 3787 case 0x10: 3788 result = (lun[1] << 16) + (lun[2] << 8) + 3789 lun[3]; 3790 break; 3791 case 0x20: 3792 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3793 result = (lun[2] << 24) + 3794 (lun[3] << 16) + (lun[4] << 8) + 3795 lun[5]; 3796 break; 3797 } 3798 break; 3799 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3800 result = 0xffffffff; 3801 break; 3802 } 3803 break; 3804 } 3805 return (result); 3806 } 3807 3808 uint64_t 3809 ctl_encode_lun(uint32_t decoded) 3810 { 3811 uint64_t l = decoded; 3812 3813 if (l <= 0xff) 3814 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3815 if (l <= 0x3fff) 3816 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3817 if (l <= 0xffffff) 3818 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3819 (l << 32)); 3820 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3821 } 3822 3823 int 3824 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3825 { 3826 int i; 3827 3828 for (i = first; i < last; i++) { 3829 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3830 return (i); 3831 } 3832 return (-1); 3833 } 3834 3835 int 3836 ctl_set_mask(uint32_t *mask, uint32_t bit) 3837 { 3838 uint32_t chunk, piece; 3839 3840 chunk = bit >> 5; 3841 piece = bit % (sizeof(uint32_t) * 8); 3842 3843 if ((mask[chunk] & (1 << piece)) != 0) 3844 return (-1); 3845 else 3846 mask[chunk] |= (1 << piece); 3847 3848 return (0); 3849 } 3850 3851 int 3852 ctl_clear_mask(uint32_t *mask, uint32_t bit) 3853 { 3854 uint32_t chunk, piece; 3855 3856 chunk = bit >> 5; 3857 piece = bit % (sizeof(uint32_t) * 8); 3858 3859 if ((mask[chunk] & (1 << piece)) == 0) 3860 return (-1); 3861 else 3862 mask[chunk] &= ~(1 << piece); 3863 3864 return (0); 3865 } 3866 3867 int 3868 ctl_is_set(uint32_t *mask, uint32_t bit) 3869 { 3870 uint32_t chunk, piece; 3871 3872 chunk = bit >> 5; 3873 piece = bit % (sizeof(uint32_t) * 8); 3874 3875 if ((mask[chunk] & (1 << piece)) == 0) 3876 return (0); 3877 else 3878 return (1); 3879 } 3880 3881 static uint64_t 3882 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3883 { 3884 uint64_t *t; 3885 3886 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3887 if (t == NULL) 3888 return (0); 3889 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3890 } 3891 3892 static void 3893 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3894 { 3895 uint64_t *t; 3896 3897 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3898 if (t == NULL) 3899 return; 3900 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3901 } 3902 3903 static void 3904 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3905 
{ 3906 uint64_t *p; 3907 u_int i; 3908 3909 i = residx/CTL_MAX_INIT_PER_PORT; 3910 if (lun->pr_keys[i] != NULL) 3911 return; 3912 mtx_unlock(&lun->lun_lock); 3913 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3914 M_WAITOK | M_ZERO); 3915 mtx_lock(&lun->lun_lock); 3916 if (lun->pr_keys[i] == NULL) 3917 lun->pr_keys[i] = p; 3918 else 3919 free(p, M_CTL); 3920 } 3921 3922 static void 3923 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3924 { 3925 uint64_t *t; 3926 3927 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3928 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3929 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3930 } 3931 3932 /* 3933 * ctl_softc, pool_name, total_ctl_io are passed in. 3934 * npool is passed out. 3935 */ 3936 int 3937 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3938 uint32_t total_ctl_io, void **npool) 3939 { 3940 struct ctl_io_pool *pool; 3941 3942 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3943 M_NOWAIT | M_ZERO); 3944 if (pool == NULL) 3945 return (ENOMEM); 3946 3947 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3948 pool->ctl_softc = ctl_softc; 3949 #ifdef IO_POOLS 3950 pool->zone = uma_zsecond_create(pool->name, NULL, 3951 NULL, NULL, NULL, ctl_softc->io_zone); 3952 /* uma_prealloc(pool->zone, total_ctl_io); */ 3953 #else 3954 pool->zone = ctl_softc->io_zone; 3955 #endif 3956 3957 *npool = pool; 3958 return (0); 3959 } 3960 3961 void 3962 ctl_pool_free(struct ctl_io_pool *pool) 3963 { 3964 3965 if (pool == NULL) 3966 return; 3967 3968 #ifdef IO_POOLS 3969 uma_zdestroy(pool->zone); 3970 #endif 3971 free(pool, M_CTL); 3972 } 3973 3974 union ctl_io * 3975 ctl_alloc_io(void *pool_ref) 3976 { 3977 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3978 union ctl_io *io; 3979 3980 io = uma_zalloc(pool->zone, M_WAITOK); 3981 if (io != NULL) { 3982 io->io_hdr.pool = pool_ref; 3983 CTL_SOFTC(io) = pool->ctl_softc; 3984 TAILQ_INIT(&io->io_hdr.blocked_queue); 3985 } 3986 return (io); 3987 } 3988 3989 union ctl_io * 3990 ctl_alloc_io_nowait(void *pool_ref) 3991 { 3992 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3993 union ctl_io *io; 3994 3995 io = uma_zalloc(pool->zone, M_NOWAIT); 3996 if (io != NULL) { 3997 io->io_hdr.pool = pool_ref; 3998 CTL_SOFTC(io) = pool->ctl_softc; 3999 TAILQ_INIT(&io->io_hdr.blocked_queue); 4000 } 4001 return (io); 4002 } 4003 4004 void 4005 ctl_free_io(union ctl_io *io) 4006 { 4007 struct ctl_io_pool *pool; 4008 4009 if (io == NULL) 4010 return; 4011 4012 pool = (struct ctl_io_pool *)io->io_hdr.pool; 4013 uma_zfree(pool->zone, io); 4014 } 4015 4016 void 4017 ctl_zero_io(union ctl_io *io) 4018 { 4019 struct ctl_io_pool *pool; 4020 4021 if (io == NULL) 4022 return; 4023 4024 /* 4025 * May need to preserve linked list pointers at some point too. 
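 * Right now only the pool pointer is carried across the memset(); the
 * softc back-pointer and the blocked_queue head are simply
 * re-initialized afterwards.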
4026 */ 4027 pool = io->io_hdr.pool; 4028 memset(io, 0, sizeof(*io)); 4029 io->io_hdr.pool = pool; 4030 CTL_SOFTC(io) = pool->ctl_softc; 4031 TAILQ_INIT(&io->io_hdr.blocked_queue); 4032 } 4033 4034 int 4035 ctl_expand_number(const char *buf, uint64_t *num) 4036 { 4037 char *endptr; 4038 uint64_t number; 4039 unsigned shift; 4040 4041 number = strtoq(buf, &endptr, 0); 4042 4043 switch (tolower((unsigned char)*endptr)) { 4044 case 'e': 4045 shift = 60; 4046 break; 4047 case 'p': 4048 shift = 50; 4049 break; 4050 case 't': 4051 shift = 40; 4052 break; 4053 case 'g': 4054 shift = 30; 4055 break; 4056 case 'm': 4057 shift = 20; 4058 break; 4059 case 'k': 4060 shift = 10; 4061 break; 4062 case 'b': 4063 case '\0': /* No unit. */ 4064 *num = number; 4065 return (0); 4066 default: 4067 /* Unrecognized unit. */ 4068 return (-1); 4069 } 4070 4071 if ((number << shift) >> shift != number) { 4072 /* Overflow */ 4073 return (-1); 4074 } 4075 *num = number << shift; 4076 return (0); 4077 } 4078 4079 /* 4080 * This routine could be used in the future to load default and/or saved 4081 * mode page parameters for a particuar lun. 4082 */ 4083 static int 4084 ctl_init_page_index(struct ctl_lun *lun) 4085 { 4086 int i, page_code; 4087 struct ctl_page_index *page_index; 4088 const char *value; 4089 uint64_t ival; 4090 4091 memcpy(&lun->mode_pages.index, page_index_template, 4092 sizeof(page_index_template)); 4093 4094 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4095 page_index = &lun->mode_pages.index[i]; 4096 if (lun->be_lun->lun_type == T_DIRECT && 4097 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4098 continue; 4099 if (lun->be_lun->lun_type == T_PROCESSOR && 4100 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4101 continue; 4102 if (lun->be_lun->lun_type == T_CDROM && 4103 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4104 continue; 4105 4106 page_code = page_index->page_code & SMPH_PC_MASK; 4107 switch (page_code) { 4108 case SMS_RW_ERROR_RECOVERY_PAGE: { 4109 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4110 ("subpage %#x for page %#x is incorrect!", 4111 page_index->subpage, page_code)); 4112 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 4113 &rw_er_page_default, 4114 sizeof(rw_er_page_default)); 4115 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 4116 &rw_er_page_changeable, 4117 sizeof(rw_er_page_changeable)); 4118 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 4119 &rw_er_page_default, 4120 sizeof(rw_er_page_default)); 4121 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 4122 &rw_er_page_default, 4123 sizeof(rw_er_page_default)); 4124 page_index->page_data = 4125 (uint8_t *)lun->mode_pages.rw_er_page; 4126 break; 4127 } 4128 case SMS_FORMAT_DEVICE_PAGE: { 4129 struct scsi_format_page *format_page; 4130 4131 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4132 ("subpage %#x for page %#x is incorrect!", 4133 page_index->subpage, page_code)); 4134 4135 /* 4136 * Sectors per track are set above. Bytes per 4137 * sector need to be set here on a per-LUN basis. 
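 * For example, a LUN with a 4096-byte blocksize ends up with
 * bytes_per_sector == {0x10, 0x00} after the scsi_ulto2b() calls
 * below.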
4138 */ 4139 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4140 &format_page_default, 4141 sizeof(format_page_default)); 4142 memcpy(&lun->mode_pages.format_page[ 4143 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4144 sizeof(format_page_changeable)); 4145 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4146 &format_page_default, 4147 sizeof(format_page_default)); 4148 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4149 &format_page_default, 4150 sizeof(format_page_default)); 4151 4152 format_page = &lun->mode_pages.format_page[ 4153 CTL_PAGE_CURRENT]; 4154 scsi_ulto2b(lun->be_lun->blocksize, 4155 format_page->bytes_per_sector); 4156 4157 format_page = &lun->mode_pages.format_page[ 4158 CTL_PAGE_DEFAULT]; 4159 scsi_ulto2b(lun->be_lun->blocksize, 4160 format_page->bytes_per_sector); 4161 4162 format_page = &lun->mode_pages.format_page[ 4163 CTL_PAGE_SAVED]; 4164 scsi_ulto2b(lun->be_lun->blocksize, 4165 format_page->bytes_per_sector); 4166 4167 page_index->page_data = 4168 (uint8_t *)lun->mode_pages.format_page; 4169 break; 4170 } 4171 case SMS_RIGID_DISK_PAGE: { 4172 struct scsi_rigid_disk_page *rigid_disk_page; 4173 uint32_t sectors_per_cylinder; 4174 uint64_t cylinders; 4175 #ifndef __XSCALE__ 4176 int shift; 4177 #endif /* !__XSCALE__ */ 4178 4179 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4180 ("subpage %#x for page %#x is incorrect!", 4181 page_index->subpage, page_code)); 4182 4183 /* 4184 * Rotation rate and sectors per track are set 4185 * above. We calculate the cylinders here based on 4186 * capacity. Due to the number of heads and 4187 * sectors per track we're using, smaller arrays 4188 * may turn out to have 0 cylinders. Linux and 4189 * FreeBSD don't pay attention to these mode pages 4190 * to figure out capacity, but Solaris does. It 4191 * seems to deal with 0 cylinders just fine, and 4192 * works out a fake geometry based on the capacity. 4193 */ 4194 memcpy(&lun->mode_pages.rigid_disk_page[ 4195 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4196 sizeof(rigid_disk_page_default)); 4197 memcpy(&lun->mode_pages.rigid_disk_page[ 4198 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4199 sizeof(rigid_disk_page_changeable)); 4200 4201 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4202 CTL_DEFAULT_HEADS; 4203 4204 /* 4205 * The divide method here will be more accurate, 4206 * probably, but results in floating point being 4207 * used in the kernel on i386 (__udivdi3()). On the 4208 * XScale, though, __udivdi3() is implemented in 4209 * software. 4210 * 4211 * The shift method for cylinder calculation is 4212 * accurate if sectors_per_cylinder is a power of 4213 * 2. Otherwise it might be slightly off -- you 4214 * might have a bit of a truncation problem. 4215 */ 4216 #ifdef __XSCALE__ 4217 cylinders = (lun->be_lun->maxlba + 1) / 4218 sectors_per_cylinder; 4219 #else 4220 for (shift = 31; shift > 0; shift--) { 4221 if (sectors_per_cylinder & (1 << shift)) 4222 break; 4223 } 4224 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4225 #endif 4226 4227 /* 4228 * We've basically got 3 bytes, or 24 bits for the 4229 * cylinder size in the mode page. If we're over, 4230 * just round down to 2^24. 
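 * ("2^24" here really means 0xffffff, the largest value that fits in
 * the 3-byte field.)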
4231 */ 4232 if (cylinders > 0xffffff) 4233 cylinders = 0xffffff; 4234 4235 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4236 CTL_PAGE_DEFAULT]; 4237 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4238 4239 if ((value = dnvlist_get_string(lun->be_lun->options, 4240 "rpm", NULL)) != NULL) { 4241 scsi_ulto2b(strtol(value, NULL, 0), 4242 rigid_disk_page->rotation_rate); 4243 } 4244 4245 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 4246 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4247 sizeof(rigid_disk_page_default)); 4248 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 4249 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 4250 sizeof(rigid_disk_page_default)); 4251 4252 page_index->page_data = 4253 (uint8_t *)lun->mode_pages.rigid_disk_page; 4254 break; 4255 } 4256 case SMS_VERIFY_ERROR_RECOVERY_PAGE: { 4257 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4258 ("subpage %#x for page %#x is incorrect!", 4259 page_index->subpage, page_code)); 4260 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], 4261 &verify_er_page_default, 4262 sizeof(verify_er_page_default)); 4263 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], 4264 &verify_er_page_changeable, 4265 sizeof(verify_er_page_changeable)); 4266 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], 4267 &verify_er_page_default, 4268 sizeof(verify_er_page_default)); 4269 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], 4270 &verify_er_page_default, 4271 sizeof(verify_er_page_default)); 4272 page_index->page_data = 4273 (uint8_t *)lun->mode_pages.verify_er_page; 4274 break; 4275 } 4276 case SMS_CACHING_PAGE: { 4277 struct scsi_caching_page *caching_page; 4278 4279 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4280 ("subpage %#x for page %#x is incorrect!", 4281 page_index->subpage, page_code)); 4282 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4283 &caching_page_default, 4284 sizeof(caching_page_default)); 4285 memcpy(&lun->mode_pages.caching_page[ 4286 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4287 sizeof(caching_page_changeable)); 4288 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4289 &caching_page_default, 4290 sizeof(caching_page_default)); 4291 caching_page = &lun->mode_pages.caching_page[ 4292 CTL_PAGE_SAVED]; 4293 value = dnvlist_get_string(lun->be_lun->options, 4294 "writecache", NULL); 4295 if (value != NULL && strcmp(value, "off") == 0) 4296 caching_page->flags1 &= ~SCP_WCE; 4297 value = dnvlist_get_string(lun->be_lun->options, 4298 "readcache", NULL); 4299 if (value != NULL && strcmp(value, "off") == 0) 4300 caching_page->flags1 |= SCP_RCD; 4301 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4302 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4303 sizeof(caching_page_default)); 4304 page_index->page_data = 4305 (uint8_t *)lun->mode_pages.caching_page; 4306 break; 4307 } 4308 case SMS_CONTROL_MODE_PAGE: { 4309 switch (page_index->subpage) { 4310 case SMS_SUBPAGE_PAGE_0: { 4311 struct scsi_control_page *control_page; 4312 4313 memcpy(&lun->mode_pages.control_page[ 4314 CTL_PAGE_DEFAULT], 4315 &control_page_default, 4316 sizeof(control_page_default)); 4317 memcpy(&lun->mode_pages.control_page[ 4318 CTL_PAGE_CHANGEABLE], 4319 &control_page_changeable, 4320 sizeof(control_page_changeable)); 4321 memcpy(&lun->mode_pages.control_page[ 4322 CTL_PAGE_SAVED], 4323 &control_page_default, 4324 sizeof(control_page_default)); 4325 control_page = &lun->mode_pages.control_page[ 4326 CTL_PAGE_SAVED]; 4327 value = 
dnvlist_get_string(lun->be_lun->options, 4328 "reordering", NULL); 4329 if (value != NULL && 4330 strcmp(value, "unrestricted") == 0) { 4331 control_page->queue_flags &= 4332 ~SCP_QUEUE_ALG_MASK; 4333 control_page->queue_flags |= 4334 SCP_QUEUE_ALG_UNRESTRICTED; 4335 } 4336 memcpy(&lun->mode_pages.control_page[ 4337 CTL_PAGE_CURRENT], 4338 &lun->mode_pages.control_page[ 4339 CTL_PAGE_SAVED], 4340 sizeof(control_page_default)); 4341 page_index->page_data = 4342 (uint8_t *)lun->mode_pages.control_page; 4343 break; 4344 } 4345 case 0x01: 4346 memcpy(&lun->mode_pages.control_ext_page[ 4347 CTL_PAGE_DEFAULT], 4348 &control_ext_page_default, 4349 sizeof(control_ext_page_default)); 4350 memcpy(&lun->mode_pages.control_ext_page[ 4351 CTL_PAGE_CHANGEABLE], 4352 &control_ext_page_changeable, 4353 sizeof(control_ext_page_changeable)); 4354 memcpy(&lun->mode_pages.control_ext_page[ 4355 CTL_PAGE_SAVED], 4356 &control_ext_page_default, 4357 sizeof(control_ext_page_default)); 4358 memcpy(&lun->mode_pages.control_ext_page[ 4359 CTL_PAGE_CURRENT], 4360 &lun->mode_pages.control_ext_page[ 4361 CTL_PAGE_SAVED], 4362 sizeof(control_ext_page_default)); 4363 page_index->page_data = 4364 (uint8_t *)lun->mode_pages.control_ext_page; 4365 break; 4366 default: 4367 panic("subpage %#x for page %#x is incorrect!", 4368 page_index->subpage, page_code); 4369 } 4370 break; 4371 } 4372 case SMS_INFO_EXCEPTIONS_PAGE: { 4373 switch (page_index->subpage) { 4374 case SMS_SUBPAGE_PAGE_0: 4375 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 4376 &ie_page_default, 4377 sizeof(ie_page_default)); 4378 memcpy(&lun->mode_pages.ie_page[ 4379 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4380 sizeof(ie_page_changeable)); 4381 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4382 &ie_page_default, 4383 sizeof(ie_page_default)); 4384 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4385 &ie_page_default, 4386 sizeof(ie_page_default)); 4387 page_index->page_data = 4388 (uint8_t *)lun->mode_pages.ie_page; 4389 break; 4390 case 0x02: { 4391 struct ctl_logical_block_provisioning_page *page; 4392 4393 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4394 &lbp_page_default, 4395 sizeof(lbp_page_default)); 4396 memcpy(&lun->mode_pages.lbp_page[ 4397 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4398 sizeof(lbp_page_changeable)); 4399 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4400 &lbp_page_default, 4401 sizeof(lbp_page_default)); 4402 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4403 value = dnvlist_get_string(lun->be_lun->options, 4404 "avail-threshold", NULL); 4405 if (value != NULL && 4406 ctl_expand_number(value, &ival) == 0) { 4407 page->descr[0].flags |= SLBPPD_ENABLED | 4408 SLBPPD_ARMING_DEC; 4409 if (lun->be_lun->blocksize) 4410 ival /= lun->be_lun->blocksize; 4411 else 4412 ival /= 512; 4413 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4414 page->descr[0].count); 4415 } 4416 value = dnvlist_get_string(lun->be_lun->options, 4417 "used-threshold", NULL); 4418 if (value != NULL && 4419 ctl_expand_number(value, &ival) == 0) { 4420 page->descr[1].flags |= SLBPPD_ENABLED | 4421 SLBPPD_ARMING_INC; 4422 if (lun->be_lun->blocksize) 4423 ival /= lun->be_lun->blocksize; 4424 else 4425 ival /= 512; 4426 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4427 page->descr[1].count); 4428 } 4429 value = dnvlist_get_string(lun->be_lun->options, 4430 "pool-avail-threshold", NULL); 4431 if (value != NULL && 4432 ctl_expand_number(value, &ival) == 0) { 4433 page->descr[2].flags |= SLBPPD_ENABLED | 4434 SLBPPD_ARMING_DEC; 4435 if (lun->be_lun->blocksize) 
4436 ival /= lun->be_lun->blocksize; 4437 else 4438 ival /= 512; 4439 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4440 page->descr[2].count); 4441 } 4442 value = dnvlist_get_string(lun->be_lun->options, 4443 "pool-used-threshold", NULL); 4444 if (value != NULL && 4445 ctl_expand_number(value, &ival) == 0) { 4446 page->descr[3].flags |= SLBPPD_ENABLED | 4447 SLBPPD_ARMING_INC; 4448 if (lun->be_lun->blocksize) 4449 ival /= lun->be_lun->blocksize; 4450 else 4451 ival /= 512; 4452 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4453 page->descr[3].count); 4454 } 4455 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4456 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4457 sizeof(lbp_page_default)); 4458 page_index->page_data = 4459 (uint8_t *)lun->mode_pages.lbp_page; 4460 break; 4461 } 4462 default: 4463 panic("subpage %#x for page %#x is incorrect!", 4464 page_index->subpage, page_code); 4465 } 4466 break; 4467 } 4468 case SMS_CDDVD_CAPS_PAGE:{ 4469 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, 4470 ("subpage %#x for page %#x is incorrect!", 4471 page_index->subpage, page_code)); 4472 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], 4473 &cddvd_page_default, 4474 sizeof(cddvd_page_default)); 4475 memcpy(&lun->mode_pages.cddvd_page[ 4476 CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, 4477 sizeof(cddvd_page_changeable)); 4478 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4479 &cddvd_page_default, 4480 sizeof(cddvd_page_default)); 4481 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], 4482 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], 4483 sizeof(cddvd_page_default)); 4484 page_index->page_data = 4485 (uint8_t *)lun->mode_pages.cddvd_page; 4486 break; 4487 } 4488 default: 4489 panic("invalid page code value %#x", page_code); 4490 } 4491 } 4492 4493 return (CTL_RETVAL_COMPLETE); 4494 } 4495 4496 static int 4497 ctl_init_log_page_index(struct ctl_lun *lun) 4498 { 4499 struct ctl_page_index *page_index; 4500 int i, j, k, prev; 4501 4502 memcpy(&lun->log_pages.index, log_page_index_template, 4503 sizeof(log_page_index_template)); 4504 4505 prev = -1; 4506 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4507 page_index = &lun->log_pages.index[i]; 4508 if (lun->be_lun->lun_type == T_DIRECT && 4509 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 4510 continue; 4511 if (lun->be_lun->lun_type == T_PROCESSOR && 4512 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 4513 continue; 4514 if (lun->be_lun->lun_type == T_CDROM && 4515 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 4516 continue; 4517 4518 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4519 lun->backend->lun_attr == NULL) 4520 continue; 4521 4522 if (page_index->page_code != prev) { 4523 lun->log_pages.pages_page[j] = page_index->page_code; 4524 prev = page_index->page_code; 4525 j++; 4526 } 4527 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4528 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4529 k++; 4530 } 4531 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4532 lun->log_pages.index[0].page_len = j; 4533 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4534 lun->log_pages.index[1].page_len = k * 2; 4535 lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page; 4536 lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page); 4537 lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0]; 4538 lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS; 4539 lun->log_pages.index[4].page_data = (uint8_t 
*)&lun->log_pages.stat_page; 4540 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page); 4541 lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page; 4542 lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page); 4543 4544 return (CTL_RETVAL_COMPLETE); 4545 } 4546 4547 static int 4548 hex2bin(const char *str, uint8_t *buf, int buf_size) 4549 { 4550 int i; 4551 u_char c; 4552 4553 memset(buf, 0, buf_size); 4554 while (isspace(str[0])) 4555 str++; 4556 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4557 str += 2; 4558 buf_size *= 2; 4559 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4560 while (str[i] == '-') /* Skip dashes in UUIDs. */ 4561 str++; 4562 c = str[i]; 4563 if (isdigit(c)) 4564 c -= '0'; 4565 else if (isalpha(c)) 4566 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4567 else 4568 break; 4569 if (c >= 16) 4570 break; 4571 if ((i & 1) == 0) 4572 buf[i / 2] |= (c << 4); 4573 else 4574 buf[i / 2] |= c; 4575 } 4576 return ((i + 1) / 2); 4577 } 4578 4579 /* 4580 * Add LUN. 4581 * 4582 * Returns 0 for success, non-zero (errno) for failure. 4583 */ 4584 int 4585 ctl_add_lun(struct ctl_be_lun *be_lun) 4586 { 4587 struct ctl_softc *ctl_softc = control_softc; 4588 struct ctl_lun *nlun, *lun; 4589 struct scsi_vpd_id_descriptor *desc; 4590 struct scsi_vpd_id_t10 *t10id; 4591 const char *eui, *naa, *scsiname, *uuid, *vendor, *value; 4592 int lun_number; 4593 int devidlen, idlen1, idlen2 = 0, len; 4594 4595 /* 4596 * We support only Direct Access, CD-ROM or Processor LUN types. 4597 */ 4598 switch (be_lun->lun_type) { 4599 case T_DIRECT: 4600 case T_PROCESSOR: 4601 case T_CDROM: 4602 break; 4603 case T_SEQUENTIAL: 4604 case T_CHANGER: 4605 default: 4606 return (EINVAL); 4607 } 4608 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK | M_ZERO); 4609 4610 lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * 4611 ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); 4612 lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, 4613 M_DEVBUF, M_WAITOK | M_ZERO); 4614 lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, 4615 M_DEVBUF, M_WAITOK | M_ZERO); 4616 4617 /* Generate LUN ID. 
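 * The device ID consists of a mandatory T10 vendor ID descriptor
 * built from the backend-supplied device_id (and optional "vendor"
 * option), plus optional SCSI name, EUI-64, NAA and UUID descriptors
 * taken from the "scsiname", "eui", "naa" and "uuid" LUN options.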
*/ 4618 devidlen = max(CTL_DEVID_MIN_LEN, 4619 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4620 idlen1 = sizeof(*t10id) + devidlen; 4621 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4622 scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); 4623 if (scsiname != NULL) { 4624 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4625 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4626 } 4627 eui = dnvlist_get_string(be_lun->options, "eui", NULL); 4628 if (eui != NULL) { 4629 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4630 } 4631 naa = dnvlist_get_string(be_lun->options, "naa", NULL); 4632 if (naa != NULL) { 4633 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4634 } 4635 uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); 4636 if (uuid != NULL) { 4637 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4638 } 4639 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4640 M_CTL, M_WAITOK | M_ZERO); 4641 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4642 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4643 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4644 desc->length = idlen1; 4645 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4646 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4647 if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { 4648 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4649 } else { 4650 strncpy(t10id->vendor, vendor, 4651 min(sizeof(t10id->vendor), strlen(vendor))); 4652 } 4653 strncpy((char *)t10id->vendor_spec_id, 4654 (char *)be_lun->device_id, devidlen); 4655 if (scsiname != NULL) { 4656 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4657 desc->length); 4658 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4659 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4660 SVPD_ID_TYPE_SCSI_NAME; 4661 desc->length = idlen2; 4662 strlcpy(desc->identifier, scsiname, idlen2); 4663 } 4664 if (eui != NULL) { 4665 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4666 desc->length); 4667 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4668 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4669 SVPD_ID_TYPE_EUI64; 4670 desc->length = hex2bin(eui, desc->identifier, 16); 4671 desc->length = desc->length > 12 ? 16 : 4672 (desc->length > 8 ? 12 : 8); 4673 len -= 16 - desc->length; 4674 } 4675 if (naa != NULL) { 4676 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4677 desc->length); 4678 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4679 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4680 SVPD_ID_TYPE_NAA; 4681 desc->length = hex2bin(naa, desc->identifier, 16); 4682 desc->length = desc->length > 8 ? 16 : 8; 4683 len -= 16 - desc->length; 4684 } 4685 if (uuid != NULL) { 4686 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4687 desc->length); 4688 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4689 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4690 SVPD_ID_TYPE_UUID; 4691 desc->identifier[0] = 0x10; 4692 hex2bin(uuid, &desc->identifier[2], 16); 4693 desc->length = 18; 4694 } 4695 lun->lun_devid->len = len; 4696 4697 mtx_lock(&ctl_softc->ctl_lock); 4698 /* 4699 * See if the caller requested a particular LUN number. If so, see 4700 * if it is available. Otherwise, allocate the first available LUN. 
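 * A requested ID must be below ctl_max_luns and must not already be
 * set in ctl_lun_mask; when no ID was requested, ctl_ffz() picks the
 * first free bit in that mask.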
4701 */ 4702 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4703 if ((be_lun->req_lun_id > (ctl_max_luns - 1)) 4704 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4705 mtx_unlock(&ctl_softc->ctl_lock); 4706 if (be_lun->req_lun_id > (ctl_max_luns - 1)) { 4707 printf("ctl: requested LUN ID %d is higher " 4708 "than ctl_max_luns - 1 (%d)\n", 4709 be_lun->req_lun_id, ctl_max_luns - 1); 4710 } else { 4711 /* 4712 * XXX KDM return an error, or just assign 4713 * another LUN ID in this case?? 4714 */ 4715 printf("ctl: requested LUN ID %d is already " 4716 "in use\n", be_lun->req_lun_id); 4717 } 4718 fail: 4719 free(lun->lun_devid, M_CTL); 4720 free(lun, M_CTL); 4721 return (ENOSPC); 4722 } 4723 lun_number = be_lun->req_lun_id; 4724 } else { 4725 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); 4726 if (lun_number == -1) { 4727 mtx_unlock(&ctl_softc->ctl_lock); 4728 printf("ctl: can't allocate LUN, out of LUNs\n"); 4729 goto fail; 4730 } 4731 } 4732 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4733 mtx_unlock(&ctl_softc->ctl_lock); 4734 4735 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4736 lun->lun = lun_number; 4737 lun->be_lun = be_lun; 4738 /* 4739 * The processor LUN is always enabled. Disk LUNs come on line 4740 * disabled, and must be enabled by the backend. 4741 */ 4742 lun->flags |= CTL_LUN_DISABLED; 4743 lun->backend = be_lun->be; 4744 be_lun->ctl_lun = lun; 4745 be_lun->lun_id = lun_number; 4746 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4747 lun->flags |= CTL_LUN_EJECTED; 4748 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4749 lun->flags |= CTL_LUN_NO_MEDIA; 4750 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4751 lun->flags |= CTL_LUN_STOPPED; 4752 4753 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4754 lun->flags |= CTL_LUN_PRIMARY_SC; 4755 4756 value = dnvlist_get_string(be_lun->options, "removable", NULL); 4757 if (value != NULL) { 4758 if (strcmp(value, "on") == 0) 4759 lun->flags |= CTL_LUN_REMOVABLE; 4760 } else if (be_lun->lun_type == T_CDROM) 4761 lun->flags |= CTL_LUN_REMOVABLE; 4762 4763 lun->ctl_softc = ctl_softc; 4764 #ifdef CTL_TIME_IO 4765 lun->last_busy = getsbinuptime(); 4766 #endif 4767 LIST_INIT(&lun->ooa_queue); 4768 STAILQ_INIT(&lun->error_list); 4769 lun->ie_reported = 1; 4770 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); 4771 ctl_tpc_lun_init(lun); 4772 if (lun->flags & CTL_LUN_REMOVABLE) { 4773 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4774 M_CTL, M_WAITOK); 4775 } 4776 4777 /* 4778 * Initialize the mode and log page index. 4779 */ 4780 ctl_init_page_index(lun); 4781 ctl_init_log_page_index(lun); 4782 4783 /* Setup statistics gathering */ 4784 lun->stats.item = lun_number; 4785 4786 /* 4787 * Now, before we insert this lun on the lun list, set the lun 4788 * inventory changed UA for all other luns. 4789 */ 4790 mtx_lock(&ctl_softc->ctl_lock); 4791 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4792 mtx_lock(&nlun->lun_lock); 4793 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4794 mtx_unlock(&nlun->lun_lock); 4795 } 4796 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4797 ctl_softc->ctl_luns[lun_number] = lun; 4798 ctl_softc->num_luns++; 4799 mtx_unlock(&ctl_softc->ctl_lock); 4800 4801 /* 4802 * We successfully added the LUN, attempt to enable it. 
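 * If enabling fails, the LUN is unlinked from lun_list again, its bit
 * is cleared in ctl_lun_mask, and EIO is returned to the backend.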
4803 */ 4804 if (ctl_enable_lun(lun) != 0) { 4805 printf("%s: ctl_enable_lun() failed!\n", __func__); 4806 mtx_lock(&ctl_softc->ctl_lock); 4807 STAILQ_REMOVE(&ctl_softc->lun_list, lun, ctl_lun, links); 4808 ctl_clear_mask(ctl_softc->ctl_lun_mask, lun_number); 4809 ctl_softc->ctl_luns[lun_number] = NULL; 4810 ctl_softc->num_luns--; 4811 mtx_unlock(&ctl_softc->ctl_lock); 4812 free(lun->lun_devid, M_CTL); 4813 free(lun, M_CTL); 4814 return (EIO); 4815 } 4816 4817 return (0); 4818 } 4819 4820 /* 4821 * Free LUN that has no active requests. 4822 */ 4823 static int 4824 ctl_free_lun(struct ctl_lun *lun) 4825 { 4826 struct ctl_softc *softc = lun->ctl_softc; 4827 struct ctl_lun *nlun; 4828 int i; 4829 4830 KASSERT(LIST_EMPTY(&lun->ooa_queue), 4831 ("Freeing a LUN %p with outstanding I/O!\n", lun)); 4832 4833 mtx_lock(&softc->ctl_lock); 4834 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4835 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4836 softc->ctl_luns[lun->lun] = NULL; 4837 softc->num_luns--; 4838 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4839 mtx_lock(&nlun->lun_lock); 4840 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4841 mtx_unlock(&nlun->lun_lock); 4842 } 4843 mtx_unlock(&softc->ctl_lock); 4844 4845 /* 4846 * Tell the backend to free resources, if this LUN has a backend. 4847 */ 4848 lun->be_lun->lun_shutdown(lun->be_lun); 4849 4850 lun->ie_reportcnt = UINT32_MAX; 4851 callout_drain(&lun->ie_callout); 4852 ctl_tpc_lun_shutdown(lun); 4853 mtx_destroy(&lun->lun_lock); 4854 free(lun->lun_devid, M_CTL); 4855 for (i = 0; i < ctl_max_ports; i++) 4856 free(lun->pending_ua[i], M_CTL); 4857 free(lun->pending_ua, M_DEVBUF); 4858 for (i = 0; i < ctl_max_ports; i++) 4859 free(lun->pr_keys[i], M_CTL); 4860 free(lun->pr_keys, M_DEVBUF); 4861 free(lun->write_buffer, M_CTL); 4862 free(lun->prevent, M_CTL); 4863 free(lun, M_CTL); 4864 4865 return (0); 4866 } 4867 4868 static int 4869 ctl_enable_lun(struct ctl_lun *lun) 4870 { 4871 struct ctl_softc *softc; 4872 struct ctl_port *port, *nport; 4873 int retval; 4874 4875 softc = lun->ctl_softc; 4876 4877 mtx_lock(&softc->ctl_lock); 4878 mtx_lock(&lun->lun_lock); 4879 KASSERT((lun->flags & CTL_LUN_DISABLED) != 0, 4880 ("%s: LUN not disabled", __func__)); 4881 lun->flags &= ~CTL_LUN_DISABLED; 4882 mtx_unlock(&lun->lun_lock); 4883 4884 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { 4885 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4886 port->lun_map != NULL || port->lun_enable == NULL) 4887 continue; 4888 4889 /* 4890 * Drop the lock while we call the FETD's enable routine. 4891 * This can lead to a callback into CTL (at least in the 4892 * case of the internal initiator frontend. 
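 * The port list can change while the lock is dropped, which is
 * presumably why the surrounding loop uses STAILQ_FOREACH_SAFE.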
4893 */ 4894 mtx_unlock(&softc->ctl_lock); 4895 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4896 mtx_lock(&softc->ctl_lock); 4897 if (retval != 0) { 4898 printf("%s: FETD %s port %d returned error " 4899 "%d for lun_enable on lun %jd\n", 4900 __func__, port->port_name, port->targ_port, 4901 retval, (intmax_t)lun->lun); 4902 } 4903 } 4904 4905 mtx_unlock(&softc->ctl_lock); 4906 ctl_isc_announce_lun(lun); 4907 4908 return (0); 4909 } 4910 4911 static int 4912 ctl_disable_lun(struct ctl_lun *lun) 4913 { 4914 struct ctl_softc *softc; 4915 struct ctl_port *port; 4916 int retval; 4917 4918 softc = lun->ctl_softc; 4919 4920 mtx_lock(&softc->ctl_lock); 4921 mtx_lock(&lun->lun_lock); 4922 KASSERT((lun->flags & CTL_LUN_DISABLED) == 0, 4923 ("%s: LUN not enabled", __func__)); 4924 lun->flags |= CTL_LUN_DISABLED; 4925 mtx_unlock(&lun->lun_lock); 4926 4927 STAILQ_FOREACH(port, &softc->port_list, links) { 4928 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4929 port->lun_map != NULL || port->lun_disable == NULL) 4930 continue; 4931 4932 /* 4933 * Drop the lock before we call the frontend's disable 4934 * routine, to avoid lock order reversals. 4935 * 4936 * XXX KDM what happens if the frontend list changes while 4937 * we're traversing it? It's unlikely, but should be handled. 4938 */ 4939 mtx_unlock(&softc->ctl_lock); 4940 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4941 mtx_lock(&softc->ctl_lock); 4942 if (retval != 0) { 4943 printf("%s: FETD %s port %d returned error " 4944 "%d for lun_disable on lun %jd\n", 4945 __func__, port->port_name, port->targ_port, 4946 retval, (intmax_t)lun->lun); 4947 } 4948 } 4949 4950 mtx_unlock(&softc->ctl_lock); 4951 ctl_isc_announce_lun(lun); 4952 4953 return (0); 4954 } 4955 4956 int 4957 ctl_start_lun(struct ctl_be_lun *be_lun) 4958 { 4959 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4960 4961 mtx_lock(&lun->lun_lock); 4962 lun->flags &= ~CTL_LUN_STOPPED; 4963 mtx_unlock(&lun->lun_lock); 4964 return (0); 4965 } 4966 4967 int 4968 ctl_stop_lun(struct ctl_be_lun *be_lun) 4969 { 4970 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4971 4972 mtx_lock(&lun->lun_lock); 4973 lun->flags |= CTL_LUN_STOPPED; 4974 mtx_unlock(&lun->lun_lock); 4975 return (0); 4976 } 4977 4978 int 4979 ctl_lun_no_media(struct ctl_be_lun *be_lun) 4980 { 4981 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4982 4983 mtx_lock(&lun->lun_lock); 4984 lun->flags |= CTL_LUN_NO_MEDIA; 4985 mtx_unlock(&lun->lun_lock); 4986 return (0); 4987 } 4988 4989 int 4990 ctl_lun_has_media(struct ctl_be_lun *be_lun) 4991 { 4992 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4993 union ctl_ha_msg msg; 4994 4995 mtx_lock(&lun->lun_lock); 4996 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); 4997 if (lun->flags & CTL_LUN_REMOVABLE) 4998 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); 4999 mtx_unlock(&lun->lun_lock); 5000 if ((lun->flags & CTL_LUN_REMOVABLE) && 5001 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5002 bzero(&msg.ua, sizeof(msg.ua)); 5003 msg.hdr.msg_type = CTL_MSG_UA; 5004 msg.hdr.nexus.initid = -1; 5005 msg.hdr.nexus.targ_port = -1; 5006 msg.hdr.nexus.targ_lun = lun->lun; 5007 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5008 msg.ua.ua_all = 1; 5009 msg.ua.ua_set = 1; 5010 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; 5011 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5012 M_WAITOK); 5013 } 5014 return (0); 5015 } 5016 5017 int 5018 ctl_lun_ejected(struct ctl_be_lun *be_lun) 5019 { 5020 struct ctl_lun *lun = (struct ctl_lun 
*)be_lun->ctl_lun; 5021 5022 mtx_lock(&lun->lun_lock); 5023 lun->flags |= CTL_LUN_EJECTED; 5024 mtx_unlock(&lun->lun_lock); 5025 return (0); 5026 } 5027 5028 int 5029 ctl_lun_primary(struct ctl_be_lun *be_lun) 5030 { 5031 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5032 5033 mtx_lock(&lun->lun_lock); 5034 lun->flags |= CTL_LUN_PRIMARY_SC; 5035 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 5036 mtx_unlock(&lun->lun_lock); 5037 ctl_isc_announce_lun(lun); 5038 return (0); 5039 } 5040 5041 int 5042 ctl_lun_secondary(struct ctl_be_lun *be_lun) 5043 { 5044 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5045 5046 mtx_lock(&lun->lun_lock); 5047 lun->flags &= ~CTL_LUN_PRIMARY_SC; 5048 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 5049 mtx_unlock(&lun->lun_lock); 5050 ctl_isc_announce_lun(lun); 5051 return (0); 5052 } 5053 5054 /* 5055 * Remove LUN. If there are active requests, wait for completion. 5056 * 5057 * Returns 0 for success, non-zero (errno) for failure. 5058 * Completion is reported to backed via the lun_shutdown() method. 5059 */ 5060 int 5061 ctl_remove_lun(struct ctl_be_lun *be_lun) 5062 { 5063 struct ctl_lun *lun; 5064 5065 lun = (struct ctl_lun *)be_lun->ctl_lun; 5066 5067 ctl_disable_lun(lun); 5068 5069 mtx_lock(&lun->lun_lock); 5070 lun->flags |= CTL_LUN_INVALID; 5071 5072 /* 5073 * If there is nothing in the OOA queue, go ahead and free the LUN. 5074 * If we have something in the OOA queue, we'll free it when the 5075 * last I/O completes. 5076 */ 5077 if (LIST_EMPTY(&lun->ooa_queue)) { 5078 mtx_unlock(&lun->lun_lock); 5079 ctl_free_lun(lun); 5080 } else 5081 mtx_unlock(&lun->lun_lock); 5082 5083 return (0); 5084 } 5085 5086 void 5087 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5088 { 5089 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5090 union ctl_ha_msg msg; 5091 5092 mtx_lock(&lun->lun_lock); 5093 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); 5094 mtx_unlock(&lun->lun_lock); 5095 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 5096 /* Send msg to other side. */ 5097 bzero(&msg.ua, sizeof(msg.ua)); 5098 msg.hdr.msg_type = CTL_MSG_UA; 5099 msg.hdr.nexus.initid = -1; 5100 msg.hdr.nexus.targ_port = -1; 5101 msg.hdr.nexus.targ_lun = lun->lun; 5102 msg.hdr.nexus.targ_mapped_lun = lun->lun; 5103 msg.ua.ua_all = 1; 5104 msg.ua.ua_set = 1; 5105 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; 5106 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 5107 M_WAITOK); 5108 } 5109 } 5110 5111 /* 5112 * Backend "memory move is complete" callback for requests that never 5113 * make it down to say RAIDCore's configuration code. 5114 */ 5115 int 5116 ctl_config_move_done(union ctl_io *io, bool samethr) 5117 { 5118 int retval; 5119 5120 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5121 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5122 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 5123 5124 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5125 ctl_data_print(io); 5126 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5127 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5128 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5129 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5130 /* 5131 * XXX KDM just assuming a single pointer here, and not a 5132 * S/G list. If we start using S/G lists for config data, 5133 * we'll need to know how to clean them up here as well. 
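 * For the single-pointer case the buffer is freed just below when
 * CTL_FLAG_ALLOCATED is set.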
5134 */ 5135 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5136 free(io->scsiio.kern_data_ptr, M_CTL); 5137 ctl_done(io); 5138 retval = CTL_RETVAL_COMPLETE; 5139 } else { 5140 /* 5141 * XXX KDM now we need to continue data movement. Some 5142 * options: 5143 * - call ctl_scsiio() again? We don't do this for data 5144 * writes, because for those at least we know ahead of 5145 * time where the write will go and how long it is. For 5146 * config writes, though, that information is largely 5147 * contained within the write itself, thus we need to 5148 * parse out the data again. 5149 * 5150 * - Call some other function once the data is in? 5151 */ 5152 5153 /* 5154 * XXX KDM call ctl_scsiio() again for now, and check flag 5155 * bits to see whether we're allocated or not. 5156 */ 5157 retval = ctl_scsiio(&io->scsiio); 5158 } 5159 return (retval); 5160 } 5161 5162 /* 5163 * This gets called by a backend driver when it is done with a 5164 * data_submit method. 5165 */ 5166 void 5167 ctl_data_submit_done(union ctl_io *io) 5168 { 5169 /* 5170 * If the IO_CONT flag is set, we need to call the supplied 5171 * function to continue processing the I/O, instead of completing 5172 * the I/O just yet. 5173 * 5174 * If there is an error, though, we don't want to keep processing. 5175 * Instead, just send status back to the initiator. 5176 */ 5177 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5178 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5179 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5180 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5181 io->scsiio.io_cont(io); 5182 return; 5183 } 5184 ctl_done(io); 5185 } 5186 5187 /* 5188 * This gets called by a backend driver when it is done with a 5189 * configuration write. 5190 */ 5191 void 5192 ctl_config_write_done(union ctl_io *io) 5193 { 5194 uint8_t *buf; 5195 5196 /* 5197 * If the IO_CONT flag is set, we need to call the supplied 5198 * function to continue processing the I/O, instead of completing 5199 * the I/O just yet. 5200 * 5201 * If there is an error, though, we don't want to keep processing. 5202 * Instead, just send status back to the initiator. 5203 */ 5204 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5205 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5206 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5207 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5208 io->scsiio.io_cont(io); 5209 return; 5210 } 5211 /* 5212 * Since a configuration write can be done for commands that actually 5213 * have data allocated, like write buffer, and commands that have 5214 * no data, like start/stop unit, we need to check here. 5215 */ 5216 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5217 buf = io->scsiio.kern_data_ptr; 5218 else 5219 buf = NULL; 5220 ctl_done(io); 5221 if (buf) 5222 free(buf, M_CTL); 5223 } 5224 5225 void 5226 ctl_config_read_done(union ctl_io *io) 5227 { 5228 uint8_t *buf; 5229 5230 /* 5231 * If there is some error -- we are done, skip data transfer. 5232 */ 5233 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5234 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5235 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5236 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5237 buf = io->scsiio.kern_data_ptr; 5238 else 5239 buf = NULL; 5240 ctl_done(io); 5241 if (buf) 5242 free(buf, M_CTL); 5243 return; 5244 } 5245 5246 /* 5247 * If the IO_CONT flag is set, we need to call the supplied 5248 * function to continue processing the I/O, instead of completing 5249 * the I/O just yet. 
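 * Otherwise we fall through to ctl_datamove() to start moving the
 * just-read data back to the initiator.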
5250 */ 5251 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5252 io->scsiio.io_cont(io); 5253 return; 5254 } 5255 5256 ctl_datamove(io); 5257 } 5258 5259 /* 5260 * SCSI release command. 5261 */ 5262 int 5263 ctl_scsi_release(struct ctl_scsiio *ctsio) 5264 { 5265 struct ctl_lun *lun = CTL_LUN(ctsio); 5266 uint32_t residx; 5267 5268 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5269 5270 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5271 5272 /* 5273 * XXX KDM right now, we only support LUN reservation. We don't 5274 * support 3rd party reservations, or extent reservations, which 5275 * might actually need the parameter list. If we've gotten this 5276 * far, we've got a LUN reservation. Anything else got kicked out 5277 * above. So, according to SPC, ignore the length. 5278 */ 5279 5280 mtx_lock(&lun->lun_lock); 5281 5282 /* 5283 * According to SPC, it is not an error for an intiator to attempt 5284 * to release a reservation on a LUN that isn't reserved, or that 5285 * is reserved by another initiator. The reservation can only be 5286 * released, though, by the initiator who made it or by one of 5287 * several reset type events. 5288 */ 5289 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5290 lun->flags &= ~CTL_LUN_RESERVED; 5291 5292 mtx_unlock(&lun->lun_lock); 5293 5294 ctl_set_success(ctsio); 5295 ctl_done((union ctl_io *)ctsio); 5296 return (CTL_RETVAL_COMPLETE); 5297 } 5298 5299 int 5300 ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5301 { 5302 struct ctl_lun *lun = CTL_LUN(ctsio); 5303 uint32_t residx; 5304 5305 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5306 5307 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5308 5309 /* 5310 * XXX KDM right now, we only support LUN reservation. We don't 5311 * support 3rd party reservations, or extent reservations, which 5312 * might actually need the parameter list. If we've gotten this 5313 * far, we've got a LUN reservation. Anything else got kicked out 5314 * above. So, according to SPC, ignore the length. 5315 */ 5316 5317 mtx_lock(&lun->lun_lock); 5318 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5319 ctl_set_reservation_conflict(ctsio); 5320 goto bailout; 5321 } 5322 5323 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. 
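 * In particular, a RESERVE addressed to a LUN that already has a
 * persistent reservation completes with GOOD status but does not
 * establish an SPC-2 reservation (the CTL_LUN_PR_RESERVED case
 * below).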
*/ 5324 if (lun->flags & CTL_LUN_PR_RESERVED) { 5325 ctl_set_success(ctsio); 5326 goto bailout; 5327 } 5328 5329 lun->flags |= CTL_LUN_RESERVED; 5330 lun->res_idx = residx; 5331 ctl_set_success(ctsio); 5332 5333 bailout: 5334 mtx_unlock(&lun->lun_lock); 5335 ctl_done((union ctl_io *)ctsio); 5336 return (CTL_RETVAL_COMPLETE); 5337 } 5338 5339 int 5340 ctl_start_stop(struct ctl_scsiio *ctsio) 5341 { 5342 struct ctl_lun *lun = CTL_LUN(ctsio); 5343 struct scsi_start_stop_unit *cdb; 5344 int retval; 5345 5346 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5347 5348 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5349 5350 if ((cdb->how & SSS_PC_MASK) == 0) { 5351 if ((lun->flags & CTL_LUN_PR_RESERVED) && 5352 (cdb->how & SSS_START) == 0) { 5353 uint32_t residx; 5354 5355 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5356 if (ctl_get_prkey(lun, residx) == 0 || 5357 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { 5358 ctl_set_reservation_conflict(ctsio); 5359 ctl_done((union ctl_io *)ctsio); 5360 return (CTL_RETVAL_COMPLETE); 5361 } 5362 } 5363 5364 if ((cdb->how & SSS_LOEJ) && 5365 (lun->flags & CTL_LUN_REMOVABLE) == 0) { 5366 ctl_set_invalid_field(ctsio, 5367 /*sks_valid*/ 1, 5368 /*command*/ 1, 5369 /*field*/ 4, 5370 /*bit_valid*/ 1, 5371 /*bit*/ 1); 5372 ctl_done((union ctl_io *)ctsio); 5373 return (CTL_RETVAL_COMPLETE); 5374 } 5375 5376 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && 5377 lun->prevent_count > 0) { 5378 /* "Medium removal prevented" */ 5379 ctl_set_sense(ctsio, /*current_error*/ 1, 5380 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 5381 SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, 5382 /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); 5383 ctl_done((union ctl_io *)ctsio); 5384 return (CTL_RETVAL_COMPLETE); 5385 } 5386 } 5387 5388 retval = lun->backend->config_write((union ctl_io *)ctsio); 5389 return (retval); 5390 } 5391 5392 int 5393 ctl_prevent_allow(struct ctl_scsiio *ctsio) 5394 { 5395 struct ctl_lun *lun = CTL_LUN(ctsio); 5396 struct scsi_prevent *cdb; 5397 int retval; 5398 uint32_t initidx; 5399 5400 CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); 5401 5402 cdb = (struct scsi_prevent *)ctsio->cdb; 5403 5404 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { 5405 ctl_set_invalid_opcode(ctsio); 5406 ctl_done((union ctl_io *)ctsio); 5407 return (CTL_RETVAL_COMPLETE); 5408 } 5409 5410 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5411 mtx_lock(&lun->lun_lock); 5412 if ((cdb->how & PR_PREVENT) && 5413 ctl_is_set(lun->prevent, initidx) == 0) { 5414 ctl_set_mask(lun->prevent, initidx); 5415 lun->prevent_count++; 5416 } else if ((cdb->how & PR_PREVENT) == 0 && 5417 ctl_is_set(lun->prevent, initidx)) { 5418 ctl_clear_mask(lun->prevent, initidx); 5419 lun->prevent_count--; 5420 } 5421 mtx_unlock(&lun->lun_lock); 5422 retval = lun->backend->config_write((union ctl_io *)ctsio); 5423 return (retval); 5424 } 5425 5426 /* 5427 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5428 * we don't really do anything with the LBA and length fields if the user 5429 * passes them in. Instead we'll just flush out the cache for the entire 5430 * LUN. 
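 * The LBA and length are still range-checked and handed to the
 * backend in the lbalen private area, so a backend is free to use
 * them if it wants to.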
5431 */ 5432 int 5433 ctl_sync_cache(struct ctl_scsiio *ctsio) 5434 { 5435 struct ctl_lun *lun = CTL_LUN(ctsio); 5436 struct ctl_lba_len_flags *lbalen; 5437 uint64_t starting_lba; 5438 uint32_t block_count; 5439 int retval; 5440 uint8_t byte2; 5441 5442 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5443 5444 retval = 0; 5445 5446 switch (ctsio->cdb[0]) { 5447 case SYNCHRONIZE_CACHE: { 5448 struct scsi_sync_cache *cdb; 5449 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5450 5451 starting_lba = scsi_4btoul(cdb->begin_lba); 5452 block_count = scsi_2btoul(cdb->lb_count); 5453 byte2 = cdb->byte2; 5454 break; 5455 } 5456 case SYNCHRONIZE_CACHE_16: { 5457 struct scsi_sync_cache_16 *cdb; 5458 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5459 5460 starting_lba = scsi_8btou64(cdb->begin_lba); 5461 block_count = scsi_4btoul(cdb->lb_count); 5462 byte2 = cdb->byte2; 5463 break; 5464 } 5465 default: 5466 ctl_set_invalid_opcode(ctsio); 5467 ctl_done((union ctl_io *)ctsio); 5468 goto bailout; 5469 break; /* NOTREACHED */ 5470 } 5471 5472 /* 5473 * We check the LBA and length, but don't do anything with them. 5474 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5475 * get flushed. This check will just help satisfy anyone who wants 5476 * to see an error for an out of range LBA. 5477 */ 5478 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5479 ctl_set_lba_out_of_range(ctsio, 5480 MAX(starting_lba, lun->be_lun->maxlba + 1)); 5481 ctl_done((union ctl_io *)ctsio); 5482 goto bailout; 5483 } 5484 5485 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5486 lbalen->lba = starting_lba; 5487 lbalen->len = block_count; 5488 lbalen->flags = byte2; 5489 retval = lun->backend->config_write((union ctl_io *)ctsio); 5490 5491 bailout: 5492 return (retval); 5493 } 5494 5495 int 5496 ctl_format(struct ctl_scsiio *ctsio) 5497 { 5498 struct scsi_format *cdb; 5499 int length, defect_list_len; 5500 5501 CTL_DEBUG_PRINT(("ctl_format\n")); 5502 5503 cdb = (struct scsi_format *)ctsio->cdb; 5504 5505 length = 0; 5506 if (cdb->byte2 & SF_FMTDATA) { 5507 if (cdb->byte2 & SF_LONGLIST) 5508 length = sizeof(struct scsi_format_header_long); 5509 else 5510 length = sizeof(struct scsi_format_header_short); 5511 } 5512 5513 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5514 && (length > 0)) { 5515 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5516 ctsio->kern_data_len = length; 5517 ctsio->kern_total_len = length; 5518 ctsio->kern_rel_offset = 0; 5519 ctsio->kern_sg_entries = 0; 5520 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5521 ctsio->be_move_done = ctl_config_move_done; 5522 ctl_datamove((union ctl_io *)ctsio); 5523 5524 return (CTL_RETVAL_COMPLETE); 5525 } 5526 5527 defect_list_len = 0; 5528 5529 if (cdb->byte2 & SF_FMTDATA) { 5530 if (cdb->byte2 & SF_LONGLIST) { 5531 struct scsi_format_header_long *header; 5532 5533 header = (struct scsi_format_header_long *) 5534 ctsio->kern_data_ptr; 5535 5536 defect_list_len = scsi_4btoul(header->defect_list_len); 5537 if (defect_list_len != 0) { 5538 ctl_set_invalid_field(ctsio, 5539 /*sks_valid*/ 1, 5540 /*command*/ 0, 5541 /*field*/ 2, 5542 /*bit_valid*/ 0, 5543 /*bit*/ 0); 5544 goto bailout; 5545 } 5546 } else { 5547 struct scsi_format_header_short *header; 5548 5549 header = (struct scsi_format_header_short *) 5550 ctsio->kern_data_ptr; 5551 5552 defect_list_len = scsi_2btoul(header->defect_list_len); 5553 if (defect_list_len != 0) { 5554 ctl_set_invalid_field(ctsio, 5555 /*sks_valid*/ 1, 5556 /*command*/ 0, 5557 
/*field*/ 2, 5558 /*bit_valid*/ 0, 5559 /*bit*/ 0); 5560 goto bailout; 5561 } 5562 } 5563 } 5564 5565 ctl_set_success(ctsio); 5566 bailout: 5567 5568 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5569 free(ctsio->kern_data_ptr, M_CTL); 5570 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5571 } 5572 5573 ctl_done((union ctl_io *)ctsio); 5574 return (CTL_RETVAL_COMPLETE); 5575 } 5576 5577 int 5578 ctl_read_buffer(struct ctl_scsiio *ctsio) 5579 { 5580 struct ctl_lun *lun = CTL_LUN(ctsio); 5581 uint64_t buffer_offset; 5582 uint32_t len; 5583 uint8_t byte2; 5584 static uint8_t descr[4]; 5585 static uint8_t echo_descr[4] = { 0 }; 5586 5587 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5588 5589 switch (ctsio->cdb[0]) { 5590 case READ_BUFFER: { 5591 struct scsi_read_buffer *cdb; 5592 5593 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5594 buffer_offset = scsi_3btoul(cdb->offset); 5595 len = scsi_3btoul(cdb->length); 5596 byte2 = cdb->byte2; 5597 break; 5598 } 5599 case READ_BUFFER_16: { 5600 struct scsi_read_buffer_16 *cdb; 5601 5602 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; 5603 buffer_offset = scsi_8btou64(cdb->offset); 5604 len = scsi_4btoul(cdb->length); 5605 byte2 = cdb->byte2; 5606 break; 5607 } 5608 default: /* This shouldn't happen. */ 5609 ctl_set_invalid_opcode(ctsio); 5610 ctl_done((union ctl_io *)ctsio); 5611 return (CTL_RETVAL_COMPLETE); 5612 } 5613 5614 if (buffer_offset > CTL_WRITE_BUFFER_SIZE || 5615 buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5616 ctl_set_invalid_field(ctsio, 5617 /*sks_valid*/ 1, 5618 /*command*/ 1, 5619 /*field*/ 6, 5620 /*bit_valid*/ 0, 5621 /*bit*/ 0); 5622 ctl_done((union ctl_io *)ctsio); 5623 return (CTL_RETVAL_COMPLETE); 5624 } 5625 5626 if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5627 descr[0] = 0; 5628 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5629 ctsio->kern_data_ptr = descr; 5630 len = min(len, sizeof(descr)); 5631 } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5632 ctsio->kern_data_ptr = echo_descr; 5633 len = min(len, sizeof(echo_descr)); 5634 } else { 5635 if (lun->write_buffer == NULL) { 5636 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5637 M_CTL, M_WAITOK); 5638 } 5639 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5640 } 5641 ctsio->kern_data_len = len; 5642 ctsio->kern_total_len = len; 5643 ctsio->kern_rel_offset = 0; 5644 ctsio->kern_sg_entries = 0; 5645 ctl_set_success(ctsio); 5646 ctsio->be_move_done = ctl_config_move_done; 5647 ctl_datamove((union ctl_io *)ctsio); 5648 return (CTL_RETVAL_COMPLETE); 5649 } 5650 5651 int 5652 ctl_write_buffer(struct ctl_scsiio *ctsio) 5653 { 5654 struct ctl_lun *lun = CTL_LUN(ctsio); 5655 struct scsi_write_buffer *cdb; 5656 int buffer_offset, len; 5657 5658 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5659 5660 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5661 5662 len = scsi_3btoul(cdb->length); 5663 buffer_offset = scsi_3btoul(cdb->offset); 5664 5665 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5666 ctl_set_invalid_field(ctsio, 5667 /*sks_valid*/ 1, 5668 /*command*/ 1, 5669 /*field*/ 6, 5670 /*bit_valid*/ 0, 5671 /*bit*/ 0); 5672 ctl_done((union ctl_io *)ctsio); 5673 return (CTL_RETVAL_COMPLETE); 5674 } 5675 5676 /* 5677 * If we've got a kernel request that hasn't been malloced yet, 5678 * malloc it and tell the caller the data buffer is here. 
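 * This function is therefore entered twice: the first pass points the
 * I/O at the (lazily allocated) per-LUN write buffer and starts the
 * data move, and the second pass, once the data has arrived, simply
 * reports success.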
5679 */ 5680 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5681 if (lun->write_buffer == NULL) { 5682 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5683 M_CTL, M_WAITOK); 5684 } 5685 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5686 ctsio->kern_data_len = len; 5687 ctsio->kern_total_len = len; 5688 ctsio->kern_rel_offset = 0; 5689 ctsio->kern_sg_entries = 0; 5690 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5691 ctsio->be_move_done = ctl_config_move_done; 5692 ctl_datamove((union ctl_io *)ctsio); 5693 5694 return (CTL_RETVAL_COMPLETE); 5695 } 5696 5697 ctl_set_success(ctsio); 5698 ctl_done((union ctl_io *)ctsio); 5699 return (CTL_RETVAL_COMPLETE); 5700 } 5701 5702 static int 5703 ctl_write_same_cont(union ctl_io *io) 5704 { 5705 struct ctl_lun *lun = CTL_LUN(io); 5706 struct ctl_scsiio *ctsio; 5707 struct ctl_lba_len_flags *lbalen; 5708 int retval; 5709 5710 ctsio = &io->scsiio; 5711 ctsio->io_hdr.status = CTL_STATUS_NONE; 5712 lbalen = (struct ctl_lba_len_flags *) 5713 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5714 lbalen->lba += lbalen->len; 5715 if ((lun->be_lun->maxlba + 1) - lbalen->lba <= UINT32_MAX) { 5716 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 5717 lbalen->len = (lun->be_lun->maxlba + 1) - lbalen->lba; 5718 } 5719 5720 CTL_DEBUG_PRINT(("ctl_write_same_cont: calling config_write()\n")); 5721 retval = lun->backend->config_write((union ctl_io *)ctsio); 5722 return (retval); 5723 } 5724 5725 int 5726 ctl_write_same(struct ctl_scsiio *ctsio) 5727 { 5728 struct ctl_lun *lun = CTL_LUN(ctsio); 5729 struct ctl_lba_len_flags *lbalen; 5730 const char *val; 5731 uint64_t lba, ival; 5732 uint32_t num_blocks; 5733 int len, retval; 5734 uint8_t byte2; 5735 5736 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5737 5738 switch (ctsio->cdb[0]) { 5739 case WRITE_SAME_10: { 5740 struct scsi_write_same_10 *cdb; 5741 5742 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5743 5744 lba = scsi_4btoul(cdb->addr); 5745 num_blocks = scsi_2btoul(cdb->length); 5746 byte2 = cdb->byte2; 5747 break; 5748 } 5749 case WRITE_SAME_16: { 5750 struct scsi_write_same_16 *cdb; 5751 5752 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5753 5754 lba = scsi_8btou64(cdb->addr); 5755 num_blocks = scsi_4btoul(cdb->length); 5756 byte2 = cdb->byte2; 5757 break; 5758 } 5759 default: 5760 /* 5761 * We got a command we don't support. This shouldn't 5762 * happen, commands should be filtered out above us. 5763 */ 5764 ctl_set_invalid_opcode(ctsio); 5765 ctl_done((union ctl_io *)ctsio); 5766 5767 return (CTL_RETVAL_COMPLETE); 5768 break; /* NOTREACHED */ 5769 } 5770 5771 /* ANCHOR flag can be used only together with UNMAP */ 5772 if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { 5773 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5774 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5775 ctl_done((union ctl_io *)ctsio); 5776 return (CTL_RETVAL_COMPLETE); 5777 } 5778 5779 /* 5780 * The first check is to make sure we're in bounds, the second 5781 * check is to catch wrap-around problems. If the lba + num blocks 5782 * is less than the lba, then we've wrapped around and the block 5783 * range is invalid anyway. 
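 * For example (illustrative values): with maxlba 999, an lba of 990
 * and num_blocks of 20 ends at LBA 1009 and fails the first check,
 * while an lba of 0xfffffffffffffff0 with num_blocks of 0x20 wraps
 * past zero and is caught by the second check.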
5784 */ 5785 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5786 || ((lba + num_blocks) < lba)) { 5787 ctl_set_lba_out_of_range(ctsio, 5788 MAX(lba, lun->be_lun->maxlba + 1)); 5789 ctl_done((union ctl_io *)ctsio); 5790 return (CTL_RETVAL_COMPLETE); 5791 } 5792 5793 /* Zero number of blocks means "to the last logical block" */ 5794 if (num_blocks == 0) { 5795 ival = UINT64_MAX; 5796 val = dnvlist_get_string(lun->be_lun->options, 5797 "write_same_max_lba", NULL); 5798 if (val != NULL) 5799 ctl_expand_number(val, &ival); 5800 if ((lun->be_lun->maxlba + 1) - lba > ival) { 5801 ctl_set_invalid_field(ctsio, 5802 /*sks_valid*/ 1, /*command*/ 1, 5803 /*field*/ ctsio->cdb[0] == WRITE_SAME_10 ? 7 : 10, 5804 /*bit_valid*/ 0, /*bit*/ 0); 5805 ctl_done((union ctl_io *)ctsio); 5806 return (CTL_RETVAL_COMPLETE); 5807 } 5808 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5809 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 5810 ctsio->io_cont = ctl_write_same_cont; 5811 num_blocks = 1 << 31; 5812 } else 5813 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5814 } 5815 5816 len = lun->be_lun->blocksize; 5817 5818 /* 5819 * If we've got a kernel request that hasn't been malloced yet, 5820 * malloc it and tell the caller the data buffer is here. 5821 */ 5822 if ((byte2 & SWS_NDOB) == 0 && 5823 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5824 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5825 ctsio->kern_data_len = len; 5826 ctsio->kern_total_len = len; 5827 ctsio->kern_rel_offset = 0; 5828 ctsio->kern_sg_entries = 0; 5829 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5830 ctsio->be_move_done = ctl_config_move_done; 5831 ctl_datamove((union ctl_io *)ctsio); 5832 5833 return (CTL_RETVAL_COMPLETE); 5834 } 5835 5836 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5837 lbalen->lba = lba; 5838 lbalen->len = num_blocks; 5839 lbalen->flags = byte2; 5840 retval = lun->backend->config_write((union ctl_io *)ctsio); 5841 5842 return (retval); 5843 } 5844 5845 int 5846 ctl_unmap(struct ctl_scsiio *ctsio) 5847 { 5848 struct ctl_lun *lun = CTL_LUN(ctsio); 5849 struct scsi_unmap *cdb; 5850 struct ctl_ptr_len_flags *ptrlen; 5851 struct scsi_unmap_header *hdr; 5852 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5853 uint64_t lba; 5854 uint32_t num_blocks; 5855 int len, retval; 5856 uint8_t byte2; 5857 5858 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5859 5860 cdb = (struct scsi_unmap *)ctsio->cdb; 5861 len = scsi_2btoul(cdb->length); 5862 byte2 = cdb->byte2; 5863 5864 /* 5865 * If we've got a kernel request that hasn't been malloced yet, 5866 * malloc it and tell the caller the data buffer is here. 
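 * The data fetched here is the UNMAP parameter list: an 8-byte header
 * followed by 16-byte block descriptors. It is validated below once
 * the transfer completes and this function is entered a second time.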
5867 */ 5868 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5869 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5870 ctsio->kern_data_len = len; 5871 ctsio->kern_total_len = len; 5872 ctsio->kern_rel_offset = 0; 5873 ctsio->kern_sg_entries = 0; 5874 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5875 ctsio->be_move_done = ctl_config_move_done; 5876 ctl_datamove((union ctl_io *)ctsio); 5877 5878 return (CTL_RETVAL_COMPLETE); 5879 } 5880 5881 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5882 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5883 if (len < sizeof (*hdr) || 5884 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5885 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5886 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5887 ctl_set_invalid_field(ctsio, 5888 /*sks_valid*/ 0, 5889 /*command*/ 0, 5890 /*field*/ 0, 5891 /*bit_valid*/ 0, 5892 /*bit*/ 0); 5893 goto done; 5894 } 5895 len = scsi_2btoul(hdr->desc_length); 5896 buf = (struct scsi_unmap_desc *)(hdr + 1); 5897 end = buf + len / sizeof(*buf); 5898 5899 endnz = buf; 5900 for (range = buf; range < end; range++) { 5901 lba = scsi_8btou64(range->lba); 5902 num_blocks = scsi_4btoul(range->length); 5903 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5904 || ((lba + num_blocks) < lba)) { 5905 ctl_set_lba_out_of_range(ctsio, 5906 MAX(lba, lun->be_lun->maxlba + 1)); 5907 ctl_done((union ctl_io *)ctsio); 5908 return (CTL_RETVAL_COMPLETE); 5909 } 5910 if (num_blocks != 0) 5911 endnz = range + 1; 5912 } 5913 5914 /* 5915 * Block backend can not handle zero last range. 5916 * Filter it out and return if there is nothing left. 5917 */ 5918 len = (uint8_t *)endnz - (uint8_t *)buf; 5919 if (len == 0) { 5920 ctl_set_success(ctsio); 5921 goto done; 5922 } 5923 5924 mtx_lock(&lun->lun_lock); 5925 ptrlen = (struct ctl_ptr_len_flags *) 5926 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5927 ptrlen->ptr = (void *)buf; 5928 ptrlen->len = len; 5929 ptrlen->flags = byte2; 5930 ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE); 5931 mtx_unlock(&lun->lun_lock); 5932 5933 retval = lun->backend->config_write((union ctl_io *)ctsio); 5934 return (retval); 5935 5936 done: 5937 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5938 free(ctsio->kern_data_ptr, M_CTL); 5939 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5940 } 5941 ctl_done((union ctl_io *)ctsio); 5942 return (CTL_RETVAL_COMPLETE); 5943 } 5944 5945 int 5946 ctl_default_page_handler(struct ctl_scsiio *ctsio, 5947 struct ctl_page_index *page_index, uint8_t *page_ptr) 5948 { 5949 struct ctl_lun *lun = CTL_LUN(ctsio); 5950 uint8_t *current_cp; 5951 int set_ua; 5952 uint32_t initidx; 5953 5954 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5955 set_ua = 0; 5956 5957 current_cp = (page_index->page_data + (page_index->page_len * 5958 CTL_PAGE_CURRENT)); 5959 5960 mtx_lock(&lun->lun_lock); 5961 if (memcmp(current_cp, page_ptr, page_index->page_len)) { 5962 memcpy(current_cp, page_ptr, page_index->page_len); 5963 set_ua = 1; 5964 } 5965 if (set_ua != 0) 5966 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5967 mtx_unlock(&lun->lun_lock); 5968 if (set_ua) { 5969 ctl_isc_announce_mode(lun, 5970 ctl_get_initindex(&ctsio->io_hdr.nexus), 5971 page_index->page_code, page_index->subpage); 5972 } 5973 return (CTL_RETVAL_COMPLETE); 5974 } 5975 5976 static void 5977 ctl_ie_timer(void *arg) 5978 { 5979 struct ctl_lun *lun = arg; 5980 uint64_t t; 5981 5982 if (lun->ie_asc == 0) 5983 return; 5984 5985 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) 5986 
ctl_est_ua_all(lun, -1, CTL_UA_IE); 5987 else 5988 lun->ie_reported = 0; 5989 5990 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { 5991 lun->ie_reportcnt++; 5992 t = scsi_4btoul(lun->MODE_IE.interval_timer); 5993 if (t == 0 || t == UINT32_MAX) 5994 t = 3000; /* 5 min */ 5995 callout_schedule_sbt(&lun->ie_callout, SBT_1S / 10 * t, 5996 SBT_1S / 10, 0); 5997 } 5998 } 5999 6000 int 6001 ctl_ie_page_handler(struct ctl_scsiio *ctsio, 6002 struct ctl_page_index *page_index, uint8_t *page_ptr) 6003 { 6004 struct ctl_lun *lun = CTL_LUN(ctsio); 6005 struct scsi_info_exceptions_page *pg; 6006 uint64_t t; 6007 6008 (void)ctl_default_page_handler(ctsio, page_index, page_ptr); 6009 6010 pg = (struct scsi_info_exceptions_page *)page_ptr; 6011 mtx_lock(&lun->lun_lock); 6012 if (pg->info_flags & SIEP_FLAGS_TEST) { 6013 lun->ie_asc = 0x5d; 6014 lun->ie_ascq = 0xff; 6015 if (pg->mrie == SIEP_MRIE_UA) { 6016 ctl_est_ua_all(lun, -1, CTL_UA_IE); 6017 lun->ie_reported = 1; 6018 } else { 6019 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 6020 lun->ie_reported = -1; 6021 } 6022 lun->ie_reportcnt = 1; 6023 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { 6024 lun->ie_reportcnt++; 6025 t = scsi_4btoul(pg->interval_timer); 6026 if (t == 0 || t == UINT32_MAX) 6027 t = 3000; /* 5 min */ 6028 callout_reset_sbt(&lun->ie_callout, SBT_1S / 10 * t, 6029 SBT_1S / 10, ctl_ie_timer, lun, 0); 6030 } 6031 } else { 6032 lun->ie_asc = 0; 6033 lun->ie_ascq = 0; 6034 lun->ie_reported = 1; 6035 ctl_clr_ua_all(lun, -1, CTL_UA_IE); 6036 lun->ie_reportcnt = UINT32_MAX; 6037 callout_stop(&lun->ie_callout); 6038 } 6039 mtx_unlock(&lun->lun_lock); 6040 return (CTL_RETVAL_COMPLETE); 6041 } 6042 6043 static int 6044 ctl_do_mode_select(union ctl_io *io) 6045 { 6046 struct ctl_lun *lun = CTL_LUN(io); 6047 struct scsi_mode_page_header *page_header; 6048 struct ctl_page_index *page_index; 6049 struct ctl_scsiio *ctsio; 6050 int page_len, page_len_offset, page_len_size; 6051 union ctl_modepage_info *modepage_info; 6052 uint16_t *len_left, *len_used; 6053 int retval, i; 6054 6055 ctsio = &io->scsiio; 6056 page_index = NULL; 6057 page_len = 0; 6058 6059 modepage_info = (union ctl_modepage_info *) 6060 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6061 len_left = &modepage_info->header.len_left; 6062 len_used = &modepage_info->header.len_used; 6063 6064 do_next_page: 6065 6066 page_header = (struct scsi_mode_page_header *) 6067 (ctsio->kern_data_ptr + *len_used); 6068 6069 if (*len_left == 0) { 6070 free(ctsio->kern_data_ptr, M_CTL); 6071 ctl_set_success(ctsio); 6072 ctl_done((union ctl_io *)ctsio); 6073 return (CTL_RETVAL_COMPLETE); 6074 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6075 free(ctsio->kern_data_ptr, M_CTL); 6076 ctl_set_param_len_error(ctsio); 6077 ctl_done((union ctl_io *)ctsio); 6078 return (CTL_RETVAL_COMPLETE); 6079 6080 } else if ((page_header->page_code & SMPH_SPF) 6081 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6082 free(ctsio->kern_data_ptr, M_CTL); 6083 ctl_set_param_len_error(ctsio); 6084 ctl_done((union ctl_io *)ctsio); 6085 return (CTL_RETVAL_COMPLETE); 6086 } 6087 6088 /* 6089 * XXX KDM should we do something with the block descriptor? 
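 * (Any block descriptor was already skipped over when len_used was
 * initialized in ctl_mode_select(), so page_header above always
 * points at a mode page header.)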
6090 */ 6091 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6092 page_index = &lun->mode_pages.index[i]; 6093 if (lun->be_lun->lun_type == T_DIRECT && 6094 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6095 continue; 6096 if (lun->be_lun->lun_type == T_PROCESSOR && 6097 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6098 continue; 6099 if (lun->be_lun->lun_type == T_CDROM && 6100 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6101 continue; 6102 6103 if ((page_index->page_code & SMPH_PC_MASK) != 6104 (page_header->page_code & SMPH_PC_MASK)) 6105 continue; 6106 6107 /* 6108 * If neither page has a subpage code, then we've got a 6109 * match. 6110 */ 6111 if (((page_index->page_code & SMPH_SPF) == 0) 6112 && ((page_header->page_code & SMPH_SPF) == 0)) { 6113 page_len = page_header->page_length; 6114 break; 6115 } 6116 6117 /* 6118 * If both pages have subpages, then the subpage numbers 6119 * have to match. 6120 */ 6121 if ((page_index->page_code & SMPH_SPF) 6122 && (page_header->page_code & SMPH_SPF)) { 6123 struct scsi_mode_page_header_sp *sph; 6124 6125 sph = (struct scsi_mode_page_header_sp *)page_header; 6126 if (page_index->subpage == sph->subpage) { 6127 page_len = scsi_2btoul(sph->page_length); 6128 break; 6129 } 6130 } 6131 } 6132 6133 /* 6134 * If we couldn't find the page, or if we don't have a mode select 6135 * handler for it, send back an error to the user. 6136 */ 6137 if ((i >= CTL_NUM_MODE_PAGES) 6138 || (page_index->select_handler == NULL)) { 6139 ctl_set_invalid_field(ctsio, 6140 /*sks_valid*/ 1, 6141 /*command*/ 0, 6142 /*field*/ *len_used, 6143 /*bit_valid*/ 0, 6144 /*bit*/ 0); 6145 free(ctsio->kern_data_ptr, M_CTL); 6146 ctl_done((union ctl_io *)ctsio); 6147 return (CTL_RETVAL_COMPLETE); 6148 } 6149 6150 if (page_index->page_code & SMPH_SPF) { 6151 page_len_offset = 2; 6152 page_len_size = 2; 6153 } else { 6154 page_len_size = 1; 6155 page_len_offset = 1; 6156 } 6157 6158 /* 6159 * If the length the initiator gives us isn't the one we specify in 6160 * the mode page header, or if they didn't specify enough data in 6161 * the CDB to avoid truncating this page, kick out the request. 6162 */ 6163 if (page_len != page_index->page_len - page_len_offset - page_len_size) { 6164 ctl_set_invalid_field(ctsio, 6165 /*sks_valid*/ 1, 6166 /*command*/ 0, 6167 /*field*/ *len_used + page_len_offset, 6168 /*bit_valid*/ 0, 6169 /*bit*/ 0); 6170 free(ctsio->kern_data_ptr, M_CTL); 6171 ctl_done((union ctl_io *)ctsio); 6172 return (CTL_RETVAL_COMPLETE); 6173 } 6174 if (*len_left < page_index->page_len) { 6175 free(ctsio->kern_data_ptr, M_CTL); 6176 ctl_set_param_len_error(ctsio); 6177 ctl_done((union ctl_io *)ctsio); 6178 return (CTL_RETVAL_COMPLETE); 6179 } 6180 6181 /* 6182 * Run through the mode page, checking to make sure that the bits 6183 * the user changed are actually legal for him to change. 6184 */ 6185 for (i = 0; i < page_index->page_len; i++) { 6186 uint8_t *user_byte, *change_mask, *current_byte; 6187 int bad_bit; 6188 int j; 6189 6190 user_byte = (uint8_t *)page_header + i; 6191 change_mask = page_index->page_data + 6192 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6193 current_byte = page_index->page_data + 6194 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6195 6196 /* 6197 * Check to see whether the user set any bits in this byte 6198 * that he is not allowed to set. 6199 */ 6200 if ((*user_byte & ~(*change_mask)) == 6201 (*current_byte & ~(*change_mask))) 6202 continue; 6203 6204 /* 6205 * Go through bit by bit to determine which one is illegal. 
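 * A byte only gets here when the masked comparison above found a
 * difference in a non-changeable bit, so the request is rejected with
 * the sense-key specific data pointing at the offending byte of the
 * parameter list.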
6206 */ 6207 bad_bit = 0; 6208 for (j = 7; j >= 0; j--) { 6209 if ((((1 << i) & ~(*change_mask)) & *user_byte) != 6210 (((1 << i) & ~(*change_mask)) & *current_byte)) { 6211 bad_bit = i; 6212 break; 6213 } 6214 } 6215 ctl_set_invalid_field(ctsio, 6216 /*sks_valid*/ 1, 6217 /*command*/ 0, 6218 /*field*/ *len_used + i, 6219 /*bit_valid*/ 1, 6220 /*bit*/ bad_bit); 6221 free(ctsio->kern_data_ptr, M_CTL); 6222 ctl_done((union ctl_io *)ctsio); 6223 return (CTL_RETVAL_COMPLETE); 6224 } 6225 6226 /* 6227 * Decrement these before we call the page handler, since we may 6228 * end up getting called back one way or another before the handler 6229 * returns to this context. 6230 */ 6231 *len_left -= page_index->page_len; 6232 *len_used += page_index->page_len; 6233 6234 retval = page_index->select_handler(ctsio, page_index, 6235 (uint8_t *)page_header); 6236 6237 /* 6238 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6239 * wait until this queued command completes to finish processing 6240 * the mode page. If it returns anything other than 6241 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6242 * already set the sense information, freed the data pointer, and 6243 * completed the io for us. 6244 */ 6245 if (retval != CTL_RETVAL_COMPLETE) 6246 goto bailout_no_done; 6247 6248 /* 6249 * If the initiator sent us more than one page, parse the next one. 6250 */ 6251 if (*len_left > 0) 6252 goto do_next_page; 6253 6254 ctl_set_success(ctsio); 6255 free(ctsio->kern_data_ptr, M_CTL); 6256 ctl_done((union ctl_io *)ctsio); 6257 6258 bailout_no_done: 6259 6260 return (CTL_RETVAL_COMPLETE); 6261 6262 } 6263 6264 int 6265 ctl_mode_select(struct ctl_scsiio *ctsio) 6266 { 6267 struct ctl_lun *lun = CTL_LUN(ctsio); 6268 union ctl_modepage_info *modepage_info; 6269 int bd_len, i, header_size, param_len, rtd; 6270 uint32_t initidx; 6271 6272 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6273 switch (ctsio->cdb[0]) { 6274 case MODE_SELECT_6: { 6275 struct scsi_mode_select_6 *cdb; 6276 6277 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6278 6279 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6280 param_len = cdb->length; 6281 header_size = sizeof(struct scsi_mode_header_6); 6282 break; 6283 } 6284 case MODE_SELECT_10: { 6285 struct scsi_mode_select_10 *cdb; 6286 6287 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6288 6289 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; 6290 param_len = scsi_2btoul(cdb->length); 6291 header_size = sizeof(struct scsi_mode_header_10); 6292 break; 6293 } 6294 default: 6295 ctl_set_invalid_opcode(ctsio); 6296 ctl_done((union ctl_io *)ctsio); 6297 return (CTL_RETVAL_COMPLETE); 6298 } 6299 6300 if (rtd) { 6301 if (param_len != 0) { 6302 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, 6303 /*command*/ 1, /*field*/ 0, 6304 /*bit_valid*/ 0, /*bit*/ 0); 6305 ctl_done((union ctl_io *)ctsio); 6306 return (CTL_RETVAL_COMPLETE); 6307 } 6308 6309 /* Revert to defaults. */ 6310 ctl_init_page_index(lun); 6311 mtx_lock(&lun->lun_lock); 6312 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6313 mtx_unlock(&lun->lun_lock); 6314 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6315 ctl_isc_announce_mode(lun, -1, 6316 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 6317 lun->mode_pages.index[i].subpage); 6318 } 6319 ctl_set_success(ctsio); 6320 ctl_done((union ctl_io *)ctsio); 6321 return (CTL_RETVAL_COMPLETE); 6322 } 6323 6324 /* 6325 * From SPC-3: 6326 * "A parameter list length of zero indicates that the Data-Out Buffer 6327 * shall be empty. 
This condition shall not be considered as an error." 6328 */ 6329 if (param_len == 0) { 6330 ctl_set_success(ctsio); 6331 ctl_done((union ctl_io *)ctsio); 6332 return (CTL_RETVAL_COMPLETE); 6333 } 6334 6335 /* 6336 * Since we'll hit this the first time through, prior to 6337 * allocation, we don't need to free a data buffer here. 6338 */ 6339 if (param_len < header_size) { 6340 ctl_set_param_len_error(ctsio); 6341 ctl_done((union ctl_io *)ctsio); 6342 return (CTL_RETVAL_COMPLETE); 6343 } 6344 6345 /* 6346 * Allocate the data buffer and grab the user's data. In theory, 6347 * we shouldn't have to sanity check the parameter list length here 6348 * because the maximum size is 64K. We should be able to malloc 6349 * that much without too many problems. 6350 */ 6351 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6352 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6353 ctsio->kern_data_len = param_len; 6354 ctsio->kern_total_len = param_len; 6355 ctsio->kern_rel_offset = 0; 6356 ctsio->kern_sg_entries = 0; 6357 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6358 ctsio->be_move_done = ctl_config_move_done; 6359 ctl_datamove((union ctl_io *)ctsio); 6360 6361 return (CTL_RETVAL_COMPLETE); 6362 } 6363 6364 switch (ctsio->cdb[0]) { 6365 case MODE_SELECT_6: { 6366 struct scsi_mode_header_6 *mh6; 6367 6368 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6369 bd_len = mh6->blk_desc_len; 6370 break; 6371 } 6372 case MODE_SELECT_10: { 6373 struct scsi_mode_header_10 *mh10; 6374 6375 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6376 bd_len = scsi_2btoul(mh10->blk_desc_len); 6377 break; 6378 } 6379 default: 6380 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6381 } 6382 6383 if (param_len < (header_size + bd_len)) { 6384 free(ctsio->kern_data_ptr, M_CTL); 6385 ctl_set_param_len_error(ctsio); 6386 ctl_done((union ctl_io *)ctsio); 6387 return (CTL_RETVAL_COMPLETE); 6388 } 6389 6390 /* 6391 * Set the IO_CONT flag, so that if this I/O gets passed to 6392 * ctl_config_write_done(), it'll get passed back to 6393 * ctl_do_mode_select() for further processing, or completion if 6394 * we're all done. 
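 * The running len_left/len_used counters live in the per-I/O
 * modepage_info area, so each pass of ctl_do_mode_select() resumes
 * parsing exactly where the previous page (and any I/O its handler
 * queued) left off.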
6395 */ 6396 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6397 ctsio->io_cont = ctl_do_mode_select; 6398 6399 modepage_info = (union ctl_modepage_info *) 6400 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6401 memset(modepage_info, 0, sizeof(*modepage_info)); 6402 modepage_info->header.len_left = param_len - header_size - bd_len; 6403 modepage_info->header.len_used = header_size + bd_len; 6404 6405 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6406 } 6407 6408 int 6409 ctl_mode_sense(struct ctl_scsiio *ctsio) 6410 { 6411 struct ctl_lun *lun = CTL_LUN(ctsio); 6412 int pc, page_code, llba, subpage; 6413 int alloc_len, page_len, header_len, bd_len, total_len; 6414 void *block_desc; 6415 struct ctl_page_index *page_index; 6416 6417 llba = 0; 6418 6419 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6420 6421 switch (ctsio->cdb[0]) { 6422 case MODE_SENSE_6: { 6423 struct scsi_mode_sense_6 *cdb; 6424 6425 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6426 6427 header_len = sizeof(struct scsi_mode_hdr_6); 6428 if (cdb->byte2 & SMS_DBD) 6429 bd_len = 0; 6430 else 6431 bd_len = sizeof(struct scsi_mode_block_descr); 6432 header_len += bd_len; 6433 6434 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6435 page_code = cdb->page & SMS_PAGE_CODE; 6436 subpage = cdb->subpage; 6437 alloc_len = cdb->length; 6438 break; 6439 } 6440 case MODE_SENSE_10: { 6441 struct scsi_mode_sense_10 *cdb; 6442 6443 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6444 6445 header_len = sizeof(struct scsi_mode_hdr_10); 6446 if (cdb->byte2 & SMS_DBD) { 6447 bd_len = 0; 6448 } else if (lun->be_lun->lun_type == T_DIRECT) { 6449 if (cdb->byte2 & SMS10_LLBAA) { 6450 llba = 1; 6451 bd_len = sizeof(struct scsi_mode_block_descr_dlong); 6452 } else 6453 bd_len = sizeof(struct scsi_mode_block_descr_dshort); 6454 } else 6455 bd_len = sizeof(struct scsi_mode_block_descr); 6456 header_len += bd_len; 6457 6458 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6459 page_code = cdb->page & SMS_PAGE_CODE; 6460 subpage = cdb->subpage; 6461 alloc_len = scsi_2btoul(cdb->length); 6462 break; 6463 } 6464 default: 6465 ctl_set_invalid_opcode(ctsio); 6466 ctl_done((union ctl_io *)ctsio); 6467 return (CTL_RETVAL_COMPLETE); 6468 break; /* NOTREACHED */ 6469 } 6470 6471 /* 6472 * We have to make a first pass through to calculate the size of 6473 * the pages that match the user's query. Then we allocate enough 6474 * memory to hold it, and actually copy the data into the buffer. 6475 */ 6476 switch (page_code) { 6477 case SMS_ALL_PAGES_PAGE: { 6478 u_int i; 6479 6480 page_len = 0; 6481 6482 /* 6483 * At the moment, values other than 0 and 0xff here are 6484 * reserved according to SPC-3. 
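 * (A subpage of 0x00 requests only the page_0 format pages, while
 * 0xff requests all pages and all subpages; anything else is rejected
 * just below with INVALID FIELD IN CDB.)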
6485 */ 6486 if ((subpage != SMS_SUBPAGE_PAGE_0) 6487 && (subpage != SMS_SUBPAGE_ALL)) { 6488 ctl_set_invalid_field(ctsio, 6489 /*sks_valid*/ 1, 6490 /*command*/ 1, 6491 /*field*/ 3, 6492 /*bit_valid*/ 0, 6493 /*bit*/ 0); 6494 ctl_done((union ctl_io *)ctsio); 6495 return (CTL_RETVAL_COMPLETE); 6496 } 6497 6498 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6499 page_index = &lun->mode_pages.index[i]; 6500 6501 /* Make sure the page is supported for this dev type */ 6502 if (lun->be_lun->lun_type == T_DIRECT && 6503 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6504 continue; 6505 if (lun->be_lun->lun_type == T_PROCESSOR && 6506 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6507 continue; 6508 if (lun->be_lun->lun_type == T_CDROM && 6509 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6510 continue; 6511 6512 /* 6513 * We don't use this subpage if the user didn't 6514 * request all subpages. 6515 */ 6516 if ((page_index->subpage != 0) 6517 && (subpage == SMS_SUBPAGE_PAGE_0)) 6518 continue; 6519 6520 page_len += page_index->page_len; 6521 } 6522 break; 6523 } 6524 default: { 6525 u_int i; 6526 6527 page_len = 0; 6528 6529 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6530 page_index = &lun->mode_pages.index[i]; 6531 6532 /* Make sure the page is supported for this dev type */ 6533 if (lun->be_lun->lun_type == T_DIRECT && 6534 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6535 continue; 6536 if (lun->be_lun->lun_type == T_PROCESSOR && 6537 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6538 continue; 6539 if (lun->be_lun->lun_type == T_CDROM && 6540 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6541 continue; 6542 6543 /* Look for the right page code */ 6544 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6545 continue; 6546 6547 /* Look for the right subpage or the subpage wildcard*/ 6548 if ((page_index->subpage != subpage) 6549 && (subpage != SMS_SUBPAGE_ALL)) 6550 continue; 6551 6552 page_len += page_index->page_len; 6553 } 6554 6555 if (page_len == 0) { 6556 ctl_set_invalid_field(ctsio, 6557 /*sks_valid*/ 1, 6558 /*command*/ 1, 6559 /*field*/ 2, 6560 /*bit_valid*/ 1, 6561 /*bit*/ 5); 6562 ctl_done((union ctl_io *)ctsio); 6563 return (CTL_RETVAL_COMPLETE); 6564 } 6565 break; 6566 } 6567 } 6568 6569 total_len = header_len + page_len; 6570 6571 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6572 ctsio->kern_sg_entries = 0; 6573 ctsio->kern_rel_offset = 0; 6574 ctsio->kern_data_len = min(total_len, alloc_len); 6575 ctsio->kern_total_len = ctsio->kern_data_len; 6576 6577 switch (ctsio->cdb[0]) { 6578 case MODE_SENSE_6: { 6579 struct scsi_mode_hdr_6 *header; 6580 6581 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6582 6583 header->datalen = MIN(total_len - 1, 254); 6584 if (lun->be_lun->lun_type == T_DIRECT) { 6585 header->dev_specific = 0x10; /* DPOFUA */ 6586 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6587 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6588 header->dev_specific |= 0x80; /* WP */ 6589 } 6590 header->block_descr_len = bd_len; 6591 block_desc = &header[1]; 6592 break; 6593 } 6594 case MODE_SENSE_10: { 6595 struct scsi_mode_hdr_10 *header; 6596 int datalen; 6597 6598 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6599 6600 datalen = MIN(total_len - 2, 65533); 6601 scsi_ulto2b(datalen, header->datalen); 6602 if (lun->be_lun->lun_type == T_DIRECT) { 6603 header->dev_specific = 0x10; /* DPOFUA */ 6604 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6605 (lun->MODE_CTRL.eca_and_aen & 
SCP_SWP) != 0) 6606 header->dev_specific |= 0x80; /* WP */ 6607 } 6608 if (llba) 6609 header->flags |= SMH_LONGLBA; 6610 scsi_ulto2b(bd_len, header->block_descr_len); 6611 block_desc = &header[1]; 6612 break; 6613 } 6614 default: 6615 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6616 } 6617 6618 /* 6619 * If we've got a disk, use its blocksize in the block 6620 * descriptor. Otherwise, just set it to 0. 6621 */ 6622 if (bd_len > 0) { 6623 if (lun->be_lun->lun_type == T_DIRECT) { 6624 if (llba) { 6625 struct scsi_mode_block_descr_dlong *bd = block_desc; 6626 if (lun->be_lun->maxlba != 0) 6627 scsi_u64to8b(lun->be_lun->maxlba + 1, 6628 bd->num_blocks); 6629 scsi_ulto4b(lun->be_lun->blocksize, 6630 bd->block_len); 6631 } else { 6632 struct scsi_mode_block_descr_dshort *bd = block_desc; 6633 if (lun->be_lun->maxlba != 0) 6634 scsi_ulto4b(MIN(lun->be_lun->maxlba+1, 6635 UINT32_MAX), bd->num_blocks); 6636 scsi_ulto3b(lun->be_lun->blocksize, 6637 bd->block_len); 6638 } 6639 } else { 6640 struct scsi_mode_block_descr *bd = block_desc; 6641 scsi_ulto3b(0, bd->block_len); 6642 } 6643 } 6644 6645 switch (page_code) { 6646 case SMS_ALL_PAGES_PAGE: { 6647 int i, data_used; 6648 6649 data_used = header_len; 6650 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6651 struct ctl_page_index *page_index; 6652 6653 page_index = &lun->mode_pages.index[i]; 6654 if (lun->be_lun->lun_type == T_DIRECT && 6655 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6656 continue; 6657 if (lun->be_lun->lun_type == T_PROCESSOR && 6658 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6659 continue; 6660 if (lun->be_lun->lun_type == T_CDROM && 6661 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6662 continue; 6663 6664 /* 6665 * We don't use this subpage if the user didn't 6666 * request all subpages. We already checked (above) 6667 * to make sure the user only specified a subpage 6668 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6669 */ 6670 if ((page_index->subpage != 0) 6671 && (subpage == SMS_SUBPAGE_PAGE_0)) 6672 continue; 6673 6674 /* 6675 * Call the handler, if it exists, to update the 6676 * page to the latest values. 6677 */ 6678 if (page_index->sense_handler != NULL) 6679 page_index->sense_handler(ctsio, page_index,pc); 6680 6681 memcpy(ctsio->kern_data_ptr + data_used, 6682 page_index->page_data + 6683 (page_index->page_len * pc), 6684 page_index->page_len); 6685 data_used += page_index->page_len; 6686 } 6687 break; 6688 } 6689 default: { 6690 int i, data_used; 6691 6692 data_used = header_len; 6693 6694 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6695 struct ctl_page_index *page_index; 6696 6697 page_index = &lun->mode_pages.index[i]; 6698 6699 /* Look for the right page code */ 6700 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6701 continue; 6702 6703 /* Look for the right subpage or the subpage wildcard*/ 6704 if ((page_index->subpage != subpage) 6705 && (subpage != SMS_SUBPAGE_ALL)) 6706 continue; 6707 6708 /* Make sure the page is supported for this dev type */ 6709 if (lun->be_lun->lun_type == T_DIRECT && 6710 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6711 continue; 6712 if (lun->be_lun->lun_type == T_PROCESSOR && 6713 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6714 continue; 6715 if (lun->be_lun->lun_type == T_CDROM && 6716 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6717 continue; 6718 6719 /* 6720 * Call the handler, if it exists, to update the 6721 * page to the latest values. 
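 * The page control value from the CDB (pc) then selects which copy of
 * the page data the memcpy below returns: current, changeable,
 * default or saved values.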
6722 */ 6723 if (page_index->sense_handler != NULL) 6724 page_index->sense_handler(ctsio, page_index,pc); 6725 6726 memcpy(ctsio->kern_data_ptr + data_used, 6727 page_index->page_data + 6728 (page_index->page_len * pc), 6729 page_index->page_len); 6730 data_used += page_index->page_len; 6731 } 6732 break; 6733 } 6734 } 6735 6736 ctl_set_success(ctsio); 6737 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6738 ctsio->be_move_done = ctl_config_move_done; 6739 ctl_datamove((union ctl_io *)ctsio); 6740 return (CTL_RETVAL_COMPLETE); 6741 } 6742 6743 int 6744 ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio, 6745 struct ctl_page_index *page_index, 6746 int pc) 6747 { 6748 struct ctl_lun *lun = CTL_LUN(ctsio); 6749 struct scsi_log_temperature *data; 6750 const char *value; 6751 6752 data = (struct scsi_log_temperature *)page_index->page_data; 6753 6754 scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code); 6755 data->hdr.param_control = SLP_LBIN; 6756 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6757 sizeof(struct scsi_log_param_header); 6758 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", 6759 NULL)) != NULL) 6760 data->temperature = strtol(value, NULL, 0); 6761 else 6762 data->temperature = 0xff; 6763 data++; 6764 6765 scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code); 6766 data->hdr.param_control = SLP_LBIN; 6767 data->hdr.param_len = sizeof(struct scsi_log_temperature) - 6768 sizeof(struct scsi_log_param_header); 6769 if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature", 6770 NULL)) != NULL) 6771 data->temperature = strtol(value, NULL, 0); 6772 else 6773 data->temperature = 0xff; 6774 return (0); 6775 } 6776 6777 int 6778 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6779 struct ctl_page_index *page_index, 6780 int pc) 6781 { 6782 struct ctl_lun *lun = CTL_LUN(ctsio); 6783 struct scsi_log_param_header *phdr; 6784 uint8_t *data; 6785 uint64_t val; 6786 6787 data = page_index->page_data; 6788 6789 if (lun->backend->lun_attr != NULL && 6790 (val = lun->backend->lun_attr(lun->be_lun, "blocksavail")) 6791 != UINT64_MAX) { 6792 phdr = (struct scsi_log_param_header *)data; 6793 scsi_ulto2b(0x0001, phdr->param_code); 6794 phdr->param_control = SLP_LBIN | SLP_LP; 6795 phdr->param_len = 8; 6796 data = (uint8_t *)(phdr + 1); 6797 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6798 data[4] = 0x02; /* per-pool */ 6799 data += phdr->param_len; 6800 } 6801 6802 if (lun->backend->lun_attr != NULL && 6803 (val = lun->backend->lun_attr(lun->be_lun, "blocksused")) 6804 != UINT64_MAX) { 6805 phdr = (struct scsi_log_param_header *)data; 6806 scsi_ulto2b(0x0002, phdr->param_code); 6807 phdr->param_control = SLP_LBIN | SLP_LP; 6808 phdr->param_len = 8; 6809 data = (uint8_t *)(phdr + 1); 6810 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6811 data[4] = 0x01; /* per-LUN */ 6812 data += phdr->param_len; 6813 } 6814 6815 if (lun->backend->lun_attr != NULL && 6816 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksavail")) 6817 != UINT64_MAX) { 6818 phdr = (struct scsi_log_param_header *)data; 6819 scsi_ulto2b(0x00f1, phdr->param_code); 6820 phdr->param_control = SLP_LBIN | SLP_LP; 6821 phdr->param_len = 8; 6822 data = (uint8_t *)(phdr + 1); 6823 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6824 data[4] = 0x02; /* per-pool */ 6825 data += phdr->param_len; 6826 } 6827 6828 if (lun->backend->lun_attr != NULL && 6829 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksused")) 6830 != UINT64_MAX) { 6831 phdr = (struct scsi_log_param_header *)data; 6832 
scsi_ulto2b(0x00f2, phdr->param_code); 6833 phdr->param_control = SLP_LBIN | SLP_LP; 6834 phdr->param_len = 8; 6835 data = (uint8_t *)(phdr + 1); 6836 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6837 data[4] = 0x02; /* per-pool */ 6838 data += phdr->param_len; 6839 } 6840 6841 page_index->page_len = data - page_index->page_data; 6842 return (0); 6843 } 6844 6845 int 6846 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6847 struct ctl_page_index *page_index, 6848 int pc) 6849 { 6850 struct ctl_lun *lun = CTL_LUN(ctsio); 6851 struct stat_page *data; 6852 struct bintime *t; 6853 6854 data = (struct stat_page *)page_index->page_data; 6855 6856 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6857 data->sap.hdr.param_control = SLP_LBIN; 6858 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6859 sizeof(struct scsi_log_param_header); 6860 scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], 6861 data->sap.read_num); 6862 scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], 6863 data->sap.write_num); 6864 if (lun->be_lun->blocksize > 0) { 6865 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / 6866 lun->be_lun->blocksize, data->sap.recvieved_lba); 6867 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / 6868 lun->be_lun->blocksize, data->sap.transmitted_lba); 6869 } 6870 t = &lun->stats.time[CTL_STATS_READ]; 6871 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6872 data->sap.read_int); 6873 t = &lun->stats.time[CTL_STATS_WRITE]; 6874 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6875 data->sap.write_int); 6876 scsi_u64to8b(0, data->sap.weighted_num); 6877 scsi_u64to8b(0, data->sap.weighted_int); 6878 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6879 data->it.hdr.param_control = SLP_LBIN; 6880 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6881 sizeof(struct scsi_log_param_header); 6882 #ifdef CTL_TIME_IO 6883 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6884 #endif 6885 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6886 data->it.hdr.param_control = SLP_LBIN; 6887 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6888 sizeof(struct scsi_log_param_header); 6889 scsi_ulto4b(3, data->ti.exponent); 6890 scsi_ulto4b(1, data->ti.integer); 6891 return (0); 6892 } 6893 6894 int 6895 ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, 6896 struct ctl_page_index *page_index, 6897 int pc) 6898 { 6899 struct ctl_lun *lun = CTL_LUN(ctsio); 6900 struct scsi_log_informational_exceptions *data; 6901 const char *value; 6902 6903 data = (struct scsi_log_informational_exceptions *)page_index->page_data; 6904 6905 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); 6906 data->hdr.param_control = SLP_LBIN; 6907 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - 6908 sizeof(struct scsi_log_param_header); 6909 data->ie_asc = lun->ie_asc; 6910 data->ie_ascq = lun->ie_ascq; 6911 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", 6912 NULL)) != NULL) 6913 data->temperature = strtol(value, NULL, 0); 6914 else 6915 data->temperature = 0xff; 6916 return (0); 6917 } 6918 6919 int 6920 ctl_log_sense(struct ctl_scsiio *ctsio) 6921 { 6922 struct ctl_lun *lun = CTL_LUN(ctsio); 6923 int i, pc, page_code, subpage; 6924 int alloc_len, total_len; 6925 struct ctl_page_index *page_index; 6926 struct scsi_log_sense *cdb; 6927 struct scsi_log_header *header; 6928 6929 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6930 6931 cdb = (struct scsi_log_sense *)ctsio->cdb; 6932 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 
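 /*
  * Note that the log page handlers implemented here ignore the page
  * control field and always report the current cumulative values.
  */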
6933 page_code = cdb->page & SLS_PAGE_CODE; 6934 subpage = cdb->subpage; 6935 alloc_len = scsi_2btoul(cdb->length); 6936 6937 page_index = NULL; 6938 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6939 page_index = &lun->log_pages.index[i]; 6940 6941 /* Look for the right page code */ 6942 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6943 continue; 6944 6945 /* Look for the right subpage or the subpage wildcard*/ 6946 if (page_index->subpage != subpage) 6947 continue; 6948 6949 break; 6950 } 6951 if (i >= CTL_NUM_LOG_PAGES) { 6952 ctl_set_invalid_field(ctsio, 6953 /*sks_valid*/ 1, 6954 /*command*/ 1, 6955 /*field*/ 2, 6956 /*bit_valid*/ 0, 6957 /*bit*/ 0); 6958 ctl_done((union ctl_io *)ctsio); 6959 return (CTL_RETVAL_COMPLETE); 6960 } 6961 6962 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6963 6964 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6965 ctsio->kern_sg_entries = 0; 6966 ctsio->kern_rel_offset = 0; 6967 ctsio->kern_data_len = min(total_len, alloc_len); 6968 ctsio->kern_total_len = ctsio->kern_data_len; 6969 6970 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6971 header->page = page_index->page_code; 6972 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6973 header->page |= SL_DS; 6974 if (page_index->subpage) { 6975 header->page |= SL_SPF; 6976 header->subpage = page_index->subpage; 6977 } 6978 scsi_ulto2b(page_index->page_len, header->datalen); 6979 6980 /* 6981 * Call the handler, if it exists, to update the 6982 * page to the latest values. 6983 */ 6984 if (page_index->sense_handler != NULL) 6985 page_index->sense_handler(ctsio, page_index, pc); 6986 6987 memcpy(header + 1, page_index->page_data, page_index->page_len); 6988 6989 ctl_set_success(ctsio); 6990 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6991 ctsio->be_move_done = ctl_config_move_done; 6992 ctl_datamove((union ctl_io *)ctsio); 6993 return (CTL_RETVAL_COMPLETE); 6994 } 6995 6996 int 6997 ctl_read_capacity(struct ctl_scsiio *ctsio) 6998 { 6999 struct ctl_lun *lun = CTL_LUN(ctsio); 7000 struct scsi_read_capacity *cdb; 7001 struct scsi_read_capacity_data *data; 7002 uint32_t lba; 7003 7004 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 7005 7006 cdb = (struct scsi_read_capacity *)ctsio->cdb; 7007 7008 lba = scsi_4btoul(cdb->addr); 7009 if (((cdb->pmi & SRC_PMI) == 0) 7010 && (lba != 0)) { 7011 ctl_set_invalid_field(/*ctsio*/ ctsio, 7012 /*sks_valid*/ 1, 7013 /*command*/ 1, 7014 /*field*/ 2, 7015 /*bit_valid*/ 0, 7016 /*bit*/ 0); 7017 ctl_done((union ctl_io *)ctsio); 7018 return (CTL_RETVAL_COMPLETE); 7019 } 7020 7021 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7022 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 7023 ctsio->kern_data_len = sizeof(*data); 7024 ctsio->kern_total_len = sizeof(*data); 7025 ctsio->kern_rel_offset = 0; 7026 ctsio->kern_sg_entries = 0; 7027 7028 /* 7029 * If the maximum LBA is greater than 0xfffffffe, the user must 7030 * issue a SERVICE ACTION IN (16) command, with the read capacity 7031 * service action set. 7032 */ 7033 if (lun->be_lun->maxlba > 0xfffffffe) 7034 scsi_ulto4b(0xffffffff, data->addr); 7035 else 7036 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 7037 7038 /* 7039 * XXX KDM this may not be 512 bytes... 
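 * The value reported is whatever blocksize the backend configured for
 * this LUN (for example 4096 for a LUN with 4KiB logical blocks).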
7040 */ 7041 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7042 7043 ctl_set_success(ctsio); 7044 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7045 ctsio->be_move_done = ctl_config_move_done; 7046 ctl_datamove((union ctl_io *)ctsio); 7047 return (CTL_RETVAL_COMPLETE); 7048 } 7049 7050 int 7051 ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7052 { 7053 struct ctl_lun *lun = CTL_LUN(ctsio); 7054 struct scsi_read_capacity_16 *cdb; 7055 struct scsi_read_capacity_data_long *data; 7056 uint64_t lba; 7057 uint32_t alloc_len; 7058 7059 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7060 7061 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7062 7063 alloc_len = scsi_4btoul(cdb->alloc_len); 7064 lba = scsi_8btou64(cdb->addr); 7065 7066 if ((cdb->reladr & SRC16_PMI) 7067 && (lba != 0)) { 7068 ctl_set_invalid_field(/*ctsio*/ ctsio, 7069 /*sks_valid*/ 1, 7070 /*command*/ 1, 7071 /*field*/ 2, 7072 /*bit_valid*/ 0, 7073 /*bit*/ 0); 7074 ctl_done((union ctl_io *)ctsio); 7075 return (CTL_RETVAL_COMPLETE); 7076 } 7077 7078 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7079 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7080 ctsio->kern_rel_offset = 0; 7081 ctsio->kern_sg_entries = 0; 7082 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 7083 ctsio->kern_total_len = ctsio->kern_data_len; 7084 7085 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7086 /* XXX KDM this may not be 512 bytes... */ 7087 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7088 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7089 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7090 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7091 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7092 7093 ctl_set_success(ctsio); 7094 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7095 ctsio->be_move_done = ctl_config_move_done; 7096 ctl_datamove((union ctl_io *)ctsio); 7097 return (CTL_RETVAL_COMPLETE); 7098 } 7099 7100 int 7101 ctl_get_lba_status(struct ctl_scsiio *ctsio) 7102 { 7103 struct ctl_lun *lun = CTL_LUN(ctsio); 7104 struct scsi_get_lba_status *cdb; 7105 struct scsi_get_lba_status_data *data; 7106 struct ctl_lba_len_flags *lbalen; 7107 uint64_t lba; 7108 uint32_t alloc_len, total_len; 7109 int retval; 7110 7111 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7112 7113 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7114 lba = scsi_8btou64(cdb->addr); 7115 alloc_len = scsi_4btoul(cdb->alloc_len); 7116 7117 if (lba > lun->be_lun->maxlba) { 7118 ctl_set_lba_out_of_range(ctsio, lba); 7119 ctl_done((union ctl_io *)ctsio); 7120 return (CTL_RETVAL_COMPLETE); 7121 } 7122 7123 total_len = sizeof(*data) + sizeof(data->descr[0]); 7124 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7125 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7126 ctsio->kern_rel_offset = 0; 7127 ctsio->kern_sg_entries = 0; 7128 ctsio->kern_data_len = min(total_len, alloc_len); 7129 ctsio->kern_total_len = ctsio->kern_data_len; 7130 7131 /* Fill dummy data in case backend can't tell anything. */ 7132 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7133 scsi_u64to8b(lba, data->descr[0].addr); 7134 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7135 data->descr[0].length); 7136 data->descr[0].status = 0; /* Mapped or unknown. 
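 * The backend's config_read() callback below may overwrite this
 * descriptor with the LUN's real provisioning state.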
*/ 7137 7138 ctl_set_success(ctsio); 7139 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7140 ctsio->be_move_done = ctl_config_move_done; 7141 7142 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7143 lbalen->lba = lba; 7144 lbalen->len = total_len; 7145 lbalen->flags = 0; 7146 retval = lun->backend->config_read((union ctl_io *)ctsio); 7147 return (retval); 7148 } 7149 7150 int 7151 ctl_read_defect(struct ctl_scsiio *ctsio) 7152 { 7153 struct scsi_read_defect_data_10 *ccb10; 7154 struct scsi_read_defect_data_12 *ccb12; 7155 struct scsi_read_defect_data_hdr_10 *data10; 7156 struct scsi_read_defect_data_hdr_12 *data12; 7157 uint32_t alloc_len, data_len; 7158 uint8_t format; 7159 7160 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7161 7162 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7163 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7164 format = ccb10->format; 7165 alloc_len = scsi_2btoul(ccb10->alloc_length); 7166 data_len = sizeof(*data10); 7167 } else { 7168 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7169 format = ccb12->format; 7170 alloc_len = scsi_4btoul(ccb12->alloc_length); 7171 data_len = sizeof(*data12); 7172 } 7173 if (alloc_len == 0) { 7174 ctl_set_success(ctsio); 7175 ctl_done((union ctl_io *)ctsio); 7176 return (CTL_RETVAL_COMPLETE); 7177 } 7178 7179 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7180 ctsio->kern_rel_offset = 0; 7181 ctsio->kern_sg_entries = 0; 7182 ctsio->kern_data_len = min(data_len, alloc_len); 7183 ctsio->kern_total_len = ctsio->kern_data_len; 7184 7185 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7186 data10 = (struct scsi_read_defect_data_hdr_10 *) 7187 ctsio->kern_data_ptr; 7188 data10->format = format; 7189 scsi_ulto2b(0, data10->length); 7190 } else { 7191 data12 = (struct scsi_read_defect_data_hdr_12 *) 7192 ctsio->kern_data_ptr; 7193 data12->format = format; 7194 scsi_ulto2b(0, data12->generation); 7195 scsi_ulto4b(0, data12->length); 7196 } 7197 7198 ctl_set_success(ctsio); 7199 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7200 ctsio->be_move_done = ctl_config_move_done; 7201 ctl_datamove((union ctl_io *)ctsio); 7202 return (CTL_RETVAL_COMPLETE); 7203 } 7204 7205 int 7206 ctl_report_ident_info(struct ctl_scsiio *ctsio) 7207 { 7208 struct ctl_lun *lun = CTL_LUN(ctsio); 7209 struct scsi_report_ident_info *cdb; 7210 struct scsi_report_ident_info_data *rii_ptr; 7211 struct scsi_report_ident_info_descr *riid_ptr; 7212 const char *oii, *otii; 7213 int retval, alloc_len, total_len = 0, len = 0; 7214 7215 CTL_DEBUG_PRINT(("ctl_report_ident_info\n")); 7216 7217 cdb = (struct scsi_report_ident_info *)ctsio->cdb; 7218 retval = CTL_RETVAL_COMPLETE; 7219 7220 total_len = sizeof(struct scsi_report_ident_info_data); 7221 switch (cdb->type) { 7222 case RII_LUII: 7223 oii = dnvlist_get_string(lun->be_lun->options, 7224 "ident_info", NULL); 7225 if (oii) 7226 len = strlen(oii); /* Approximately */ 7227 break; 7228 case RII_LUTII: 7229 otii = dnvlist_get_string(lun->be_lun->options, 7230 "text_ident_info", NULL); 7231 if (otii) 7232 len = strlen(otii) + 1; /* NULL-terminated */ 7233 break; 7234 case RII_IIS: 7235 len = 2 * sizeof(struct scsi_report_ident_info_descr); 7236 break; 7237 default: 7238 ctl_set_invalid_field(/*ctsio*/ ctsio, 7239 /*sks_valid*/ 1, 7240 /*command*/ 1, 7241 /*field*/ 11, 7242 /*bit_valid*/ 1, 7243 /*bit*/ 2); 7244 ctl_done((union ctl_io *)ctsio); 7245 return(retval); 7246 } 7247 total_len += len; 7248 alloc_len = scsi_4btoul(cdb->length); 7249 7250 ctsio->kern_data_ptr = 
malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7251 ctsio->kern_sg_entries = 0; 7252 ctsio->kern_rel_offset = 0; 7253 ctsio->kern_data_len = min(total_len, alloc_len); 7254 ctsio->kern_total_len = ctsio->kern_data_len; 7255 7256 rii_ptr = (struct scsi_report_ident_info_data *)ctsio->kern_data_ptr; 7257 switch (cdb->type) { 7258 case RII_LUII: 7259 if (oii) { 7260 if (oii[0] == '0' && oii[1] == 'x') 7261 len = hex2bin(oii, (uint8_t *)(rii_ptr + 1), len); 7262 else 7263 strncpy((uint8_t *)(rii_ptr + 1), oii, len); 7264 } 7265 break; 7266 case RII_LUTII: 7267 if (otii) 7268 strlcpy((uint8_t *)(rii_ptr + 1), otii, len); 7269 break; 7270 case RII_IIS: 7271 riid_ptr = (struct scsi_report_ident_info_descr *)(rii_ptr + 1); 7272 riid_ptr->type = RII_LUII; 7273 scsi_ulto2b(0xffff, riid_ptr->length); 7274 riid_ptr++; 7275 riid_ptr->type = RII_LUTII; 7276 scsi_ulto2b(0xffff, riid_ptr->length); 7277 } 7278 scsi_ulto2b(len, rii_ptr->length); 7279 7280 ctl_set_success(ctsio); 7281 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7282 ctsio->be_move_done = ctl_config_move_done; 7283 ctl_datamove((union ctl_io *)ctsio); 7284 return(retval); 7285 } 7286 7287 int 7288 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7289 { 7290 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7291 struct ctl_lun *lun = CTL_LUN(ctsio); 7292 struct scsi_maintenance_in *cdb; 7293 int retval; 7294 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7295 int num_ha_groups, num_target_ports, shared_group; 7296 struct ctl_port *port; 7297 struct scsi_target_group_data *rtg_ptr; 7298 struct scsi_target_group_data_extended *rtg_ext_ptr; 7299 struct scsi_target_port_group_descriptor *tpg_desc; 7300 7301 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7302 7303 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7304 retval = CTL_RETVAL_COMPLETE; 7305 7306 switch (cdb->byte2 & STG_PDF_MASK) { 7307 case STG_PDF_LENGTH: 7308 ext = 0; 7309 break; 7310 case STG_PDF_EXTENDED: 7311 ext = 1; 7312 break; 7313 default: 7314 ctl_set_invalid_field(/*ctsio*/ ctsio, 7315 /*sks_valid*/ 1, 7316 /*command*/ 1, 7317 /*field*/ 2, 7318 /*bit_valid*/ 1, 7319 /*bit*/ 5); 7320 ctl_done((union ctl_io *)ctsio); 7321 return(retval); 7322 } 7323 7324 num_target_ports = 0; 7325 shared_group = (softc->is_single != 0); 7326 mtx_lock(&softc->ctl_lock); 7327 STAILQ_FOREACH(port, &softc->port_list, links) { 7328 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7329 continue; 7330 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7331 continue; 7332 num_target_ports++; 7333 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7334 shared_group = 1; 7335 } 7336 mtx_unlock(&softc->ctl_lock); 7337 num_ha_groups = (softc->is_single) ? 
0 : NUM_HA_SHELVES; 7338 7339 if (ext) 7340 total_len = sizeof(struct scsi_target_group_data_extended); 7341 else 7342 total_len = sizeof(struct scsi_target_group_data); 7343 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7344 (shared_group + num_ha_groups) + 7345 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7346 7347 alloc_len = scsi_4btoul(cdb->length); 7348 7349 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7350 ctsio->kern_sg_entries = 0; 7351 ctsio->kern_rel_offset = 0; 7352 ctsio->kern_data_len = min(total_len, alloc_len); 7353 ctsio->kern_total_len = ctsio->kern_data_len; 7354 7355 if (ext) { 7356 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7357 ctsio->kern_data_ptr; 7358 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7359 rtg_ext_ptr->format_type = 0x10; 7360 rtg_ext_ptr->implicit_transition_time = 0; 7361 tpg_desc = &rtg_ext_ptr->groups[0]; 7362 } else { 7363 rtg_ptr = (struct scsi_target_group_data *) 7364 ctsio->kern_data_ptr; 7365 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7366 tpg_desc = &rtg_ptr->groups[0]; 7367 } 7368 7369 mtx_lock(&softc->ctl_lock); 7370 pg = softc->port_min / softc->port_cnt; 7371 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7372 /* Some shelf is known to be primary. */ 7373 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7374 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7375 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7376 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7377 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7378 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7379 else 7380 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7381 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7382 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7383 } else { 7384 ts = os; 7385 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7386 } 7387 } else { 7388 /* No known primary shelf. */ 7389 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7390 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7391 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7392 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7393 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7394 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7395 } else { 7396 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7397 } 7398 } 7399 if (shared_group) { 7400 tpg_desc->pref_state = ts; 7401 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7402 TPG_U_SUP | TPG_T_SUP; 7403 scsi_ulto2b(1, tpg_desc->target_port_group); 7404 tpg_desc->status = TPG_IMPLICIT; 7405 pc = 0; 7406 STAILQ_FOREACH(port, &softc->port_list, links) { 7407 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7408 continue; 7409 if (!softc->is_single && 7410 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7411 continue; 7412 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7413 continue; 7414 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7415 relative_target_port_identifier); 7416 pc++; 7417 } 7418 tpg_desc->target_port_count = pc; 7419 tpg_desc = (struct scsi_target_port_group_descriptor *) 7420 &tpg_desc->descriptors[pc]; 7421 } 7422 for (g = 0; g < num_ha_groups; g++) { 7423 tpg_desc->pref_state = (g == pg) ? 
ts : os; 7424 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7425 TPG_U_SUP | TPG_T_SUP; 7426 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7427 tpg_desc->status = TPG_IMPLICIT; 7428 pc = 0; 7429 STAILQ_FOREACH(port, &softc->port_list, links) { 7430 if (port->targ_port < g * softc->port_cnt || 7431 port->targ_port >= (g + 1) * softc->port_cnt) 7432 continue; 7433 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7434 continue; 7435 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7436 continue; 7437 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7438 continue; 7439 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7440 relative_target_port_identifier); 7441 pc++; 7442 } 7443 tpg_desc->target_port_count = pc; 7444 tpg_desc = (struct scsi_target_port_group_descriptor *) 7445 &tpg_desc->descriptors[pc]; 7446 } 7447 mtx_unlock(&softc->ctl_lock); 7448 7449 ctl_set_success(ctsio); 7450 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7451 ctsio->be_move_done = ctl_config_move_done; 7452 ctl_datamove((union ctl_io *)ctsio); 7453 return(retval); 7454 } 7455 7456 int 7457 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7458 { 7459 struct ctl_lun *lun = CTL_LUN(ctsio); 7460 struct scsi_report_supported_opcodes *cdb; 7461 const struct ctl_cmd_entry *entry, *sentry; 7462 struct scsi_report_supported_opcodes_all *all; 7463 struct scsi_report_supported_opcodes_descr *descr; 7464 struct scsi_report_supported_opcodes_one *one; 7465 int retval; 7466 int alloc_len, total_len; 7467 int opcode, service_action, i, j, num; 7468 7469 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7470 7471 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7472 retval = CTL_RETVAL_COMPLETE; 7473 7474 opcode = cdb->requested_opcode; 7475 service_action = scsi_2btoul(cdb->requested_service_action); 7476 switch (cdb->options & RSO_OPTIONS_MASK) { 7477 case RSO_OPTIONS_ALL: 7478 num = 0; 7479 for (i = 0; i < 256; i++) { 7480 entry = &ctl_cmd_table[i]; 7481 if (entry->flags & CTL_CMD_FLAG_SA5) { 7482 for (j = 0; j < 32; j++) { 7483 sentry = &((const struct ctl_cmd_entry *) 7484 entry->execute)[j]; 7485 if (ctl_cmd_applicable( 7486 lun->be_lun->lun_type, sentry)) 7487 num++; 7488 } 7489 } else { 7490 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7491 entry)) 7492 num++; 7493 } 7494 } 7495 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7496 num * sizeof(struct scsi_report_supported_opcodes_descr); 7497 break; 7498 case RSO_OPTIONS_OC: 7499 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7500 ctl_set_invalid_field(/*ctsio*/ ctsio, 7501 /*sks_valid*/ 1, 7502 /*command*/ 1, 7503 /*field*/ 2, 7504 /*bit_valid*/ 1, 7505 /*bit*/ 2); 7506 ctl_done((union ctl_io *)ctsio); 7507 return (CTL_RETVAL_COMPLETE); 7508 } 7509 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7510 break; 7511 case RSO_OPTIONS_OC_SA: 7512 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7513 service_action >= 32) { 7514 ctl_set_invalid_field(/*ctsio*/ ctsio, 7515 /*sks_valid*/ 1, 7516 /*command*/ 1, 7517 /*field*/ 2, 7518 /*bit_valid*/ 1, 7519 /*bit*/ 2); 7520 ctl_done((union ctl_io *)ctsio); 7521 return (CTL_RETVAL_COMPLETE); 7522 } 7523 /* FALLTHROUGH */ 7524 case RSO_OPTIONS_OC_ASA: 7525 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7526 break; 7527 default: 7528 ctl_set_invalid_field(/*ctsio*/ ctsio, 7529 /*sks_valid*/ 1, 7530 /*command*/ 1, 7531 /*field*/ 2, 7532 /*bit_valid*/ 1, 7533 /*bit*/ 2); 7534 ctl_done((union ctl_io *)ctsio); 7535 return 
(CTL_RETVAL_COMPLETE); 7536 } 7537 7538 alloc_len = scsi_4btoul(cdb->length); 7539 7540 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7541 ctsio->kern_sg_entries = 0; 7542 ctsio->kern_rel_offset = 0; 7543 ctsio->kern_data_len = min(total_len, alloc_len); 7544 ctsio->kern_total_len = ctsio->kern_data_len; 7545 7546 switch (cdb->options & RSO_OPTIONS_MASK) { 7547 case RSO_OPTIONS_ALL: 7548 all = (struct scsi_report_supported_opcodes_all *) 7549 ctsio->kern_data_ptr; 7550 num = 0; 7551 for (i = 0; i < 256; i++) { 7552 entry = &ctl_cmd_table[i]; 7553 if (entry->flags & CTL_CMD_FLAG_SA5) { 7554 for (j = 0; j < 32; j++) { 7555 sentry = &((const struct ctl_cmd_entry *) 7556 entry->execute)[j]; 7557 if (!ctl_cmd_applicable( 7558 lun->be_lun->lun_type, sentry)) 7559 continue; 7560 descr = &all->descr[num++]; 7561 descr->opcode = i; 7562 scsi_ulto2b(j, descr->service_action); 7563 descr->flags = RSO_SERVACTV; 7564 scsi_ulto2b(sentry->length, 7565 descr->cdb_length); 7566 } 7567 } else { 7568 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7569 entry)) 7570 continue; 7571 descr = &all->descr[num++]; 7572 descr->opcode = i; 7573 scsi_ulto2b(0, descr->service_action); 7574 descr->flags = 0; 7575 scsi_ulto2b(entry->length, descr->cdb_length); 7576 } 7577 } 7578 scsi_ulto4b( 7579 num * sizeof(struct scsi_report_supported_opcodes_descr), 7580 all->length); 7581 break; 7582 case RSO_OPTIONS_OC: 7583 one = (struct scsi_report_supported_opcodes_one *) 7584 ctsio->kern_data_ptr; 7585 entry = &ctl_cmd_table[opcode]; 7586 goto fill_one; 7587 case RSO_OPTIONS_OC_SA: 7588 one = (struct scsi_report_supported_opcodes_one *) 7589 ctsio->kern_data_ptr; 7590 entry = &ctl_cmd_table[opcode]; 7591 entry = &((const struct ctl_cmd_entry *) 7592 entry->execute)[service_action]; 7593 fill_one: 7594 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7595 one->support = 3; 7596 scsi_ulto2b(entry->length, one->cdb_length); 7597 one->cdb_usage[0] = opcode; 7598 memcpy(&one->cdb_usage[1], entry->usage, 7599 entry->length - 1); 7600 } else 7601 one->support = 1; 7602 break; 7603 case RSO_OPTIONS_OC_ASA: 7604 one = (struct scsi_report_supported_opcodes_one *) 7605 ctsio->kern_data_ptr; 7606 entry = &ctl_cmd_table[opcode]; 7607 if (entry->flags & CTL_CMD_FLAG_SA5) { 7608 entry = &((const struct ctl_cmd_entry *) 7609 entry->execute)[service_action]; 7610 } else if (service_action != 0) { 7611 one->support = 1; 7612 break; 7613 } 7614 goto fill_one; 7615 } 7616 7617 ctl_set_success(ctsio); 7618 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7619 ctsio->be_move_done = ctl_config_move_done; 7620 ctl_datamove((union ctl_io *)ctsio); 7621 return(retval); 7622 } 7623 7624 int 7625 ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7626 { 7627 struct scsi_report_supported_tmf *cdb; 7628 struct scsi_report_supported_tmf_ext_data *data; 7629 int retval; 7630 int alloc_len, total_len; 7631 7632 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7633 7634 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7635 7636 retval = CTL_RETVAL_COMPLETE; 7637 7638 if (cdb->options & RST_REPD) 7639 total_len = sizeof(struct scsi_report_supported_tmf_ext_data); 7640 else 7641 total_len = sizeof(struct scsi_report_supported_tmf_data); 7642 alloc_len = scsi_4btoul(cdb->length); 7643 7644 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7645 ctsio->kern_sg_entries = 0; 7646 ctsio->kern_rel_offset = 0; 7647 ctsio->kern_data_len = min(total_len, alloc_len); 7648 ctsio->kern_total_len = ctsio->kern_data_len; 7649 7650 
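	/*
	 * A sketch of the intent here (assumption, not stated in the code):
	 * the short and extended REPORT SUPPORTED TMF parameter data formats
	 * share their leading bytes, so the buffer is filled through the
	 * extended structure in both cases; kern_data_len, clamped above to
	 * the allocation length, limits how much is actually returned.
	 */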
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; 7651 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7652 RST_TRS; 7653 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7654 data->length = total_len - 4; 7655 7656 ctl_set_success(ctsio); 7657 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7658 ctsio->be_move_done = ctl_config_move_done; 7659 ctl_datamove((union ctl_io *)ctsio); 7660 return (retval); 7661 } 7662 7663 int 7664 ctl_report_timestamp(struct ctl_scsiio *ctsio) 7665 { 7666 struct scsi_report_timestamp *cdb; 7667 struct scsi_report_timestamp_data *data; 7668 struct timeval tv; 7669 int64_t timestamp; 7670 int retval; 7671 int alloc_len, total_len; 7672 7673 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7674 7675 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7676 7677 retval = CTL_RETVAL_COMPLETE; 7678 7679 total_len = sizeof(struct scsi_report_timestamp_data); 7680 alloc_len = scsi_4btoul(cdb->length); 7681 7682 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7683 ctsio->kern_sg_entries = 0; 7684 ctsio->kern_rel_offset = 0; 7685 ctsio->kern_data_len = min(total_len, alloc_len); 7686 ctsio->kern_total_len = ctsio->kern_data_len; 7687 7688 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7689 scsi_ulto2b(sizeof(*data) - 2, data->length); 7690 data->origin = RTS_ORIG_OUTSIDE; 7691 getmicrotime(&tv); 7692 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7693 scsi_ulto4b(timestamp >> 16, data->timestamp); 7694 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7695 7696 ctl_set_success(ctsio); 7697 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7698 ctsio->be_move_done = ctl_config_move_done; 7699 ctl_datamove((union ctl_io *)ctsio); 7700 return (retval); 7701 } 7702 7703 int 7704 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7705 { 7706 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7707 struct ctl_lun *lun = CTL_LUN(ctsio); 7708 struct scsi_per_res_in *cdb; 7709 int alloc_len, total_len = 0; 7710 /* struct scsi_per_res_in_rsrv in_data; */ 7711 uint64_t key; 7712 7713 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7714 7715 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7716 7717 alloc_len = scsi_2btoul(cdb->length); 7718 7719 retry: 7720 mtx_lock(&lun->lun_lock); 7721 switch (cdb->action) { 7722 case SPRI_RK: /* read keys */ 7723 total_len = sizeof(struct scsi_per_res_in_keys) + 7724 lun->pr_key_count * 7725 sizeof(struct scsi_per_res_key); 7726 break; 7727 case SPRI_RR: /* read reservation */ 7728 if (lun->flags & CTL_LUN_PR_RESERVED) 7729 total_len = sizeof(struct scsi_per_res_in_rsrv); 7730 else 7731 total_len = sizeof(struct scsi_per_res_in_header); 7732 break; 7733 case SPRI_RC: /* report capabilities */ 7734 total_len = sizeof(struct scsi_per_res_cap); 7735 break; 7736 case SPRI_RS: /* read full status */ 7737 total_len = sizeof(struct scsi_per_res_in_header) + 7738 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7739 lun->pr_key_count; 7740 break; 7741 default: 7742 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7743 } 7744 mtx_unlock(&lun->lun_lock); 7745 7746 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7747 ctsio->kern_rel_offset = 0; 7748 ctsio->kern_sg_entries = 0; 7749 ctsio->kern_data_len = min(total_len, alloc_len); 7750 ctsio->kern_total_len = ctsio->kern_data_len; 7751 7752 mtx_lock(&lun->lun_lock); 7753 switch (cdb->action) { 7754 case SPRI_RK: { // read keys 7755 struct scsi_per_res_in_keys *res_keys; 7756 int i, key_count; 7757 7758 
res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7759 7760 /* 7761 * We had to drop the lock to allocate our buffer, which 7762 * leaves time for someone to come in with another 7763 * persistent reservation. (That is unlikely, though, 7764 * since this should be the only persistent reservation 7765 * command active right now.) 7766 */ 7767 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7768 (lun->pr_key_count * 7769 sizeof(struct scsi_per_res_key)))){ 7770 mtx_unlock(&lun->lun_lock); 7771 free(ctsio->kern_data_ptr, M_CTL); 7772 printf("%s: reservation length changed, retrying\n", 7773 __func__); 7774 goto retry; 7775 } 7776 7777 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); 7778 7779 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7780 lun->pr_key_count, res_keys->header.length); 7781 7782 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7783 if ((key = ctl_get_prkey(lun, i)) == 0) 7784 continue; 7785 7786 /* 7787 * We used lun->pr_key_count to calculate the 7788 * size to allocate. If it turns out the number of 7789 * initiators with the registered flag set is 7790 * larger than that (i.e. they haven't been kept in 7791 * sync), we've got a problem. 7792 */ 7793 if (key_count >= lun->pr_key_count) { 7794 key_count++; 7795 continue; 7796 } 7797 scsi_u64to8b(key, res_keys->keys[key_count].key); 7798 key_count++; 7799 } 7800 break; 7801 } 7802 case SPRI_RR: { // read reservation 7803 struct scsi_per_res_in_rsrv *res; 7804 int tmp_len, header_only; 7805 7806 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7807 7808 scsi_ulto4b(lun->pr_generation, res->header.generation); 7809 7810 if (lun->flags & CTL_LUN_PR_RESERVED) 7811 { 7812 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7813 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7814 res->header.length); 7815 header_only = 0; 7816 } else { 7817 tmp_len = sizeof(struct scsi_per_res_in_header); 7818 scsi_ulto4b(0, res->header.length); 7819 header_only = 1; 7820 } 7821 7822 /* 7823 * We had to drop the lock to allocate our buffer, which 7824 * leaves time for someone to come in with another 7825 * persistent reservation. (That is unlikely, though, 7826 * since this should be the only persistent reservation 7827 * command active right now.) 7828 */ 7829 if (tmp_len != total_len) { 7830 mtx_unlock(&lun->lun_lock); 7831 free(ctsio->kern_data_ptr, M_CTL); 7832 printf("%s: reservation status changed, retrying\n", 7833 __func__); 7834 goto retry; 7835 } 7836 7837 /* 7838 * No reservation held, so we're done. 7839 */ 7840 if (header_only != 0) 7841 break; 7842 7843 /* 7844 * If the registration is an All Registrants type, the key 7845 * is 0, since it doesn't really matter. 
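	 * (The buffer was allocated with M_ZERO, so skipping the copy below
	 * leaves the RESERVATION KEY field zeroed.)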
7846 */ 7847 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7848 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7849 res->data.reservation); 7850 } 7851 res->data.scopetype = lun->pr_res_type; 7852 break; 7853 } 7854 case SPRI_RC: //report capabilities 7855 { 7856 struct scsi_per_res_cap *res_cap; 7857 uint16_t type_mask; 7858 7859 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7860 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7861 res_cap->flags1 = SPRI_CRH; 7862 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; 7863 type_mask = SPRI_TM_WR_EX_AR | 7864 SPRI_TM_EX_AC_RO | 7865 SPRI_TM_WR_EX_RO | 7866 SPRI_TM_EX_AC | 7867 SPRI_TM_WR_EX | 7868 SPRI_TM_EX_AC_AR; 7869 scsi_ulto2b(type_mask, res_cap->type_mask); 7870 break; 7871 } 7872 case SPRI_RS: { // read full status 7873 struct scsi_per_res_in_full *res_status; 7874 struct scsi_per_res_in_full_desc *res_desc; 7875 struct ctl_port *port; 7876 int i, len; 7877 7878 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7879 7880 /* 7881 * We had to drop the lock to allocate our buffer, which 7882 * leaves time for someone to come in with another 7883 * persistent reservation. (That is unlikely, though, 7884 * since this should be the only persistent reservation 7885 * command active right now.) 7886 */ 7887 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7888 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7889 lun->pr_key_count)){ 7890 mtx_unlock(&lun->lun_lock); 7891 free(ctsio->kern_data_ptr, M_CTL); 7892 printf("%s: reservation length changed, retrying\n", 7893 __func__); 7894 goto retry; 7895 } 7896 7897 scsi_ulto4b(lun->pr_generation, res_status->header.generation); 7898 7899 res_desc = &res_status->desc[0]; 7900 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7901 if ((key = ctl_get_prkey(lun, i)) == 0) 7902 continue; 7903 7904 scsi_u64to8b(key, res_desc->res_key.key); 7905 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7906 (lun->pr_res_idx == i || 7907 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7908 res_desc->flags = SPRI_FULL_R_HOLDER; 7909 res_desc->scopetype = lun->pr_res_type; 7910 } 7911 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7912 res_desc->rel_trgt_port_id); 7913 len = 0; 7914 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7915 if (port != NULL) 7916 len = ctl_create_iid(port, 7917 i % CTL_MAX_INIT_PER_PORT, 7918 res_desc->transport_id); 7919 scsi_ulto4b(len, res_desc->additional_length); 7920 res_desc = (struct scsi_per_res_in_full_desc *) 7921 &res_desc->transport_id[len]; 7922 } 7923 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7924 res_status->header.length); 7925 break; 7926 } 7927 default: 7928 panic("%s: Invalid PR type %#x", __func__, cdb->action); 7929 } 7930 mtx_unlock(&lun->lun_lock); 7931 7932 ctl_set_success(ctsio); 7933 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7934 ctsio->be_move_done = ctl_config_move_done; 7935 ctl_datamove((union ctl_io *)ctsio); 7936 return (CTL_RETVAL_COMPLETE); 7937 } 7938 7939 /* 7940 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7941 * it should return. 
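 *
 * res_key is the key the preempting initiator registered with, and
 * sa_res_key (the SERVICE ACTION RESERVATION KEY from the parameter list)
 * names the registration(s) being preempted.  A sa_res_key of zero is only
 * accepted for an all-registrants reservation, where it preempts every
 * other registrant.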
7942 */ 7943 static int 7944 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7945 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7946 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7947 struct scsi_per_res_out_parms* param) 7948 { 7949 union ctl_ha_msg persis_io; 7950 int i; 7951 7952 mtx_lock(&lun->lun_lock); 7953 if (sa_res_key == 0) { 7954 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7955 /* validate scope and type */ 7956 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7957 SPR_LU_SCOPE) { 7958 mtx_unlock(&lun->lun_lock); 7959 ctl_set_invalid_field(/*ctsio*/ ctsio, 7960 /*sks_valid*/ 1, 7961 /*command*/ 1, 7962 /*field*/ 2, 7963 /*bit_valid*/ 1, 7964 /*bit*/ 4); 7965 ctl_done((union ctl_io *)ctsio); 7966 return (1); 7967 } 7968 7969 if (type>8 || type==2 || type==4 || type==0) { 7970 mtx_unlock(&lun->lun_lock); 7971 ctl_set_invalid_field(/*ctsio*/ ctsio, 7972 /*sks_valid*/ 1, 7973 /*command*/ 1, 7974 /*field*/ 2, 7975 /*bit_valid*/ 1, 7976 /*bit*/ 0); 7977 ctl_done((union ctl_io *)ctsio); 7978 return (1); 7979 } 7980 7981 /* 7982 * Unregister everybody else and build UA for 7983 * them 7984 */ 7985 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7986 if (i == residx || ctl_get_prkey(lun, i) == 0) 7987 continue; 7988 7989 ctl_clr_prkey(lun, i); 7990 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7991 } 7992 lun->pr_key_count = 1; 7993 lun->pr_res_type = type; 7994 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7995 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7996 lun->pr_res_idx = residx; 7997 lun->pr_generation++; 7998 mtx_unlock(&lun->lun_lock); 7999 8000 /* send msg to other side */ 8001 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8002 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8003 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8004 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8005 persis_io.pr.pr_info.res_type = type; 8006 memcpy(persis_io.pr.pr_info.sa_res_key, 8007 param->serv_act_res_key, 8008 sizeof(param->serv_act_res_key)); 8009 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8010 sizeof(persis_io.pr), M_WAITOK); 8011 } else { 8012 /* not all registrants */ 8013 mtx_unlock(&lun->lun_lock); 8014 free(ctsio->kern_data_ptr, M_CTL); 8015 ctl_set_invalid_field(ctsio, 8016 /*sks_valid*/ 1, 8017 /*command*/ 0, 8018 /*field*/ 8, 8019 /*bit_valid*/ 0, 8020 /*bit*/ 0); 8021 ctl_done((union ctl_io *)ctsio); 8022 return (1); 8023 } 8024 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8025 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 8026 int found = 0; 8027 8028 if (res_key == sa_res_key) { 8029 /* special case */ 8030 /* 8031 * The spec implies this is not good but doesn't 8032 * say what to do. There are two choices either 8033 * generate a res conflict or check condition 8034 * with illegal field in parameter data. Since 8035 * that is what is done when the sa_res_key is 8036 * zero I'll take that approach since this has 8037 * to do with the sa_res_key. 
8038 */ 8039 mtx_unlock(&lun->lun_lock); 8040 free(ctsio->kern_data_ptr, M_CTL); 8041 ctl_set_invalid_field(ctsio, 8042 /*sks_valid*/ 1, 8043 /*command*/ 0, 8044 /*field*/ 8, 8045 /*bit_valid*/ 0, 8046 /*bit*/ 0); 8047 ctl_done((union ctl_io *)ctsio); 8048 return (1); 8049 } 8050 8051 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8052 if (ctl_get_prkey(lun, i) != sa_res_key) 8053 continue; 8054 8055 found = 1; 8056 ctl_clr_prkey(lun, i); 8057 lun->pr_key_count--; 8058 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8059 } 8060 if (!found) { 8061 mtx_unlock(&lun->lun_lock); 8062 free(ctsio->kern_data_ptr, M_CTL); 8063 ctl_set_reservation_conflict(ctsio); 8064 ctl_done((union ctl_io *)ctsio); 8065 return (CTL_RETVAL_COMPLETE); 8066 } 8067 lun->pr_generation++; 8068 mtx_unlock(&lun->lun_lock); 8069 8070 /* send msg to other side */ 8071 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8072 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8073 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8074 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8075 persis_io.pr.pr_info.res_type = type; 8076 memcpy(persis_io.pr.pr_info.sa_res_key, 8077 param->serv_act_res_key, 8078 sizeof(param->serv_act_res_key)); 8079 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8080 sizeof(persis_io.pr), M_WAITOK); 8081 } else { 8082 /* Reserved but not all registrants */ 8083 /* sa_res_key is res holder */ 8084 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 8085 /* validate scope and type */ 8086 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8087 SPR_LU_SCOPE) { 8088 mtx_unlock(&lun->lun_lock); 8089 ctl_set_invalid_field(/*ctsio*/ ctsio, 8090 /*sks_valid*/ 1, 8091 /*command*/ 1, 8092 /*field*/ 2, 8093 /*bit_valid*/ 1, 8094 /*bit*/ 4); 8095 ctl_done((union ctl_io *)ctsio); 8096 return (1); 8097 } 8098 8099 if (type>8 || type==2 || type==4 || type==0) { 8100 mtx_unlock(&lun->lun_lock); 8101 ctl_set_invalid_field(/*ctsio*/ ctsio, 8102 /*sks_valid*/ 1, 8103 /*command*/ 1, 8104 /*field*/ 2, 8105 /*bit_valid*/ 1, 8106 /*bit*/ 0); 8107 ctl_done((union ctl_io *)ctsio); 8108 return (1); 8109 } 8110 8111 /* 8112 * Do the following: 8113 * if sa_res_key != res_key remove all 8114 * registrants w/sa_res_key and generate UA 8115 * for these registrants(Registrations 8116 * Preempted) if it wasn't an exclusive 8117 * reservation generate UA(Reservations 8118 * Preempted) for all other registered nexuses 8119 * if the type has changed. Establish the new 8120 * reservation and holder. If res_key and 8121 * sa_res_key are the same do the above 8122 * except don't unregister the res holder. 
8123 */ 8124 8125 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8126 if (i == residx || ctl_get_prkey(lun, i) == 0) 8127 continue; 8128 8129 if (sa_res_key == ctl_get_prkey(lun, i)) { 8130 ctl_clr_prkey(lun, i); 8131 lun->pr_key_count--; 8132 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8133 } else if (type != lun->pr_res_type && 8134 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8135 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8136 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8137 } 8138 } 8139 lun->pr_res_type = type; 8140 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8141 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8142 lun->pr_res_idx = residx; 8143 else 8144 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8145 lun->pr_generation++; 8146 mtx_unlock(&lun->lun_lock); 8147 8148 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8149 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8150 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8151 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8152 persis_io.pr.pr_info.res_type = type; 8153 memcpy(persis_io.pr.pr_info.sa_res_key, 8154 param->serv_act_res_key, 8155 sizeof(param->serv_act_res_key)); 8156 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8157 sizeof(persis_io.pr), M_WAITOK); 8158 } else { 8159 /* 8160 * sa_res_key is not the res holder just 8161 * remove registrants 8162 */ 8163 int found=0; 8164 8165 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8166 if (sa_res_key != ctl_get_prkey(lun, i)) 8167 continue; 8168 8169 found = 1; 8170 ctl_clr_prkey(lun, i); 8171 lun->pr_key_count--; 8172 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8173 } 8174 8175 if (!found) { 8176 mtx_unlock(&lun->lun_lock); 8177 free(ctsio->kern_data_ptr, M_CTL); 8178 ctl_set_reservation_conflict(ctsio); 8179 ctl_done((union ctl_io *)ctsio); 8180 return (1); 8181 } 8182 lun->pr_generation++; 8183 mtx_unlock(&lun->lun_lock); 8184 8185 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8186 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8187 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8188 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8189 persis_io.pr.pr_info.res_type = type; 8190 memcpy(persis_io.pr.pr_info.sa_res_key, 8191 param->serv_act_res_key, 8192 sizeof(param->serv_act_res_key)); 8193 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8194 sizeof(persis_io.pr), M_WAITOK); 8195 } 8196 } 8197 return (0); 8198 } 8199 8200 static void 8201 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8202 { 8203 uint64_t sa_res_key; 8204 int i; 8205 8206 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8207 8208 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8209 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8210 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8211 if (sa_res_key == 0) { 8212 /* 8213 * Unregister everybody else and build UA for 8214 * them 8215 */ 8216 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8217 if (i == msg->pr.pr_info.residx || 8218 ctl_get_prkey(lun, i) == 0) 8219 continue; 8220 8221 ctl_clr_prkey(lun, i); 8222 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8223 } 8224 8225 lun->pr_key_count = 1; 8226 lun->pr_res_type = msg->pr.pr_info.res_type; 8227 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8228 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8229 lun->pr_res_idx = msg->pr.pr_info.residx; 8230 } else { 8231 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8232 if (sa_res_key == ctl_get_prkey(lun, i)) 8233 continue; 8234 8235 ctl_clr_prkey(lun, i); 8236 lun->pr_key_count--; 8237 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8238 } 8239 } 8240 } else { 8241 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8242 if (i == 
msg->pr.pr_info.residx || 8243 ctl_get_prkey(lun, i) == 0) 8244 continue; 8245 8246 if (sa_res_key == ctl_get_prkey(lun, i)) { 8247 ctl_clr_prkey(lun, i); 8248 lun->pr_key_count--; 8249 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8250 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8251 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8252 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8253 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8254 } 8255 } 8256 lun->pr_res_type = msg->pr.pr_info.res_type; 8257 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8258 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8259 lun->pr_res_idx = msg->pr.pr_info.residx; 8260 else 8261 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8262 } 8263 lun->pr_generation++; 8264 8265 } 8266 8267 int 8268 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8269 { 8270 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8271 struct ctl_lun *lun = CTL_LUN(ctsio); 8272 int retval; 8273 uint32_t param_len; 8274 struct scsi_per_res_out *cdb; 8275 struct scsi_per_res_out_parms* param; 8276 uint32_t residx; 8277 uint64_t res_key, sa_res_key, key; 8278 uint8_t type; 8279 union ctl_ha_msg persis_io; 8280 int i; 8281 8282 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8283 8284 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8285 retval = CTL_RETVAL_COMPLETE; 8286 8287 /* 8288 * We only support whole-LUN scope. The scope & type are ignored for 8289 * register, register and ignore existing key and clear. 8290 * We sometimes ignore scope and type on preempts too!! 8291 * Verify reservation type here as well. 8292 */ 8293 type = cdb->scope_type & SPR_TYPE_MASK; 8294 if ((cdb->action == SPRO_RESERVE) 8295 || (cdb->action == SPRO_RELEASE)) { 8296 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8297 ctl_set_invalid_field(/*ctsio*/ ctsio, 8298 /*sks_valid*/ 1, 8299 /*command*/ 1, 8300 /*field*/ 2, 8301 /*bit_valid*/ 1, 8302 /*bit*/ 4); 8303 ctl_done((union ctl_io *)ctsio); 8304 return (CTL_RETVAL_COMPLETE); 8305 } 8306 8307 if (type>8 || type==2 || type==4 || type==0) { 8308 ctl_set_invalid_field(/*ctsio*/ ctsio, 8309 /*sks_valid*/ 1, 8310 /*command*/ 1, 8311 /*field*/ 2, 8312 /*bit_valid*/ 1, 8313 /*bit*/ 0); 8314 ctl_done((union ctl_io *)ctsio); 8315 return (CTL_RETVAL_COMPLETE); 8316 } 8317 } 8318 8319 param_len = scsi_4btoul(cdb->length); 8320 8321 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8322 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8323 ctsio->kern_data_len = param_len; 8324 ctsio->kern_total_len = param_len; 8325 ctsio->kern_rel_offset = 0; 8326 ctsio->kern_sg_entries = 0; 8327 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8328 ctsio->be_move_done = ctl_config_move_done; 8329 ctl_datamove((union ctl_io *)ctsio); 8330 8331 return (CTL_RETVAL_COMPLETE); 8332 } 8333 8334 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8335 8336 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8337 res_key = scsi_8btou64(param->res_key.key); 8338 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8339 8340 /* 8341 * Validate the reservation key here except for SPRO_REG_IGNO 8342 * This must be done for all other service actions 8343 */ 8344 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8345 mtx_lock(&lun->lun_lock); 8346 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8347 if (res_key != key) { 8348 /* 8349 * The current key passed in doesn't match 8350 * the one the initiator previously 8351 * registered. 
8352 */ 8353 mtx_unlock(&lun->lun_lock); 8354 free(ctsio->kern_data_ptr, M_CTL); 8355 ctl_set_reservation_conflict(ctsio); 8356 ctl_done((union ctl_io *)ctsio); 8357 return (CTL_RETVAL_COMPLETE); 8358 } 8359 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8360 /* 8361 * We are not registered 8362 */ 8363 mtx_unlock(&lun->lun_lock); 8364 free(ctsio->kern_data_ptr, M_CTL); 8365 ctl_set_reservation_conflict(ctsio); 8366 ctl_done((union ctl_io *)ctsio); 8367 return (CTL_RETVAL_COMPLETE); 8368 } else if (res_key != 0) { 8369 /* 8370 * We are not registered and trying to register but 8371 * the register key isn't zero. 8372 */ 8373 mtx_unlock(&lun->lun_lock); 8374 free(ctsio->kern_data_ptr, M_CTL); 8375 ctl_set_reservation_conflict(ctsio); 8376 ctl_done((union ctl_io *)ctsio); 8377 return (CTL_RETVAL_COMPLETE); 8378 } 8379 mtx_unlock(&lun->lun_lock); 8380 } 8381 8382 switch (cdb->action & SPRO_ACTION_MASK) { 8383 case SPRO_REGISTER: 8384 case SPRO_REG_IGNO: { 8385 /* 8386 * We don't support any of these options, as we report in 8387 * the read capabilities request (see 8388 * ctl_persistent_reserve_in(), above). 8389 */ 8390 if ((param->flags & SPR_SPEC_I_PT) 8391 || (param->flags & SPR_ALL_TG_PT) 8392 || (param->flags & SPR_APTPL)) { 8393 int bit_ptr; 8394 8395 if (param->flags & SPR_APTPL) 8396 bit_ptr = 0; 8397 else if (param->flags & SPR_ALL_TG_PT) 8398 bit_ptr = 2; 8399 else /* SPR_SPEC_I_PT */ 8400 bit_ptr = 3; 8401 8402 free(ctsio->kern_data_ptr, M_CTL); 8403 ctl_set_invalid_field(ctsio, 8404 /*sks_valid*/ 1, 8405 /*command*/ 0, 8406 /*field*/ 20, 8407 /*bit_valid*/ 1, 8408 /*bit*/ bit_ptr); 8409 ctl_done((union ctl_io *)ctsio); 8410 return (CTL_RETVAL_COMPLETE); 8411 } 8412 8413 mtx_lock(&lun->lun_lock); 8414 8415 /* 8416 * The initiator wants to clear the 8417 * key/unregister. 8418 */ 8419 if (sa_res_key == 0) { 8420 if ((res_key == 0 8421 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8422 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8423 && ctl_get_prkey(lun, residx) == 0)) { 8424 mtx_unlock(&lun->lun_lock); 8425 goto done; 8426 } 8427 8428 ctl_clr_prkey(lun, residx); 8429 lun->pr_key_count--; 8430 8431 if (residx == lun->pr_res_idx) { 8432 lun->flags &= ~CTL_LUN_PR_RESERVED; 8433 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8434 8435 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8436 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8437 lun->pr_key_count) { 8438 /* 8439 * If the reservation is a registrants 8440 * only type we need to generate a UA 8441 * for other registered inits. The 8442 * sense code should be RESERVATIONS 8443 * RELEASED 8444 */ 8445 8446 for (i = softc->init_min; i < softc->init_max; i++){ 8447 if (ctl_get_prkey(lun, i) == 0) 8448 continue; 8449 ctl_est_ua(lun, i, 8450 CTL_UA_RES_RELEASE); 8451 } 8452 } 8453 lun->pr_res_type = 0; 8454 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8455 if (lun->pr_key_count==0) { 8456 lun->flags &= ~CTL_LUN_PR_RESERVED; 8457 lun->pr_res_type = 0; 8458 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8459 } 8460 } 8461 lun->pr_generation++; 8462 mtx_unlock(&lun->lun_lock); 8463 8464 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8465 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8466 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8467 persis_io.pr.pr_info.residx = residx; 8468 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8469 sizeof(persis_io.pr), M_WAITOK); 8470 } else /* sa_res_key != 0 */ { 8471 /* 8472 * If we aren't registered currently then increment 8473 * the key count and set the registered flag. 
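	 * (ctl_get_prkey() returning zero means no key is registered yet for
	 * this initiator index, so the count only grows on a first-time
	 * registration; re-registering just overwrites the existing key.)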
8474 */ 8475 ctl_alloc_prkey(lun, residx); 8476 if (ctl_get_prkey(lun, residx) == 0) 8477 lun->pr_key_count++; 8478 ctl_set_prkey(lun, residx, sa_res_key); 8479 lun->pr_generation++; 8480 mtx_unlock(&lun->lun_lock); 8481 8482 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8483 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8484 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8485 persis_io.pr.pr_info.residx = residx; 8486 memcpy(persis_io.pr.pr_info.sa_res_key, 8487 param->serv_act_res_key, 8488 sizeof(param->serv_act_res_key)); 8489 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8490 sizeof(persis_io.pr), M_WAITOK); 8491 } 8492 8493 break; 8494 } 8495 case SPRO_RESERVE: 8496 mtx_lock(&lun->lun_lock); 8497 if (lun->flags & CTL_LUN_PR_RESERVED) { 8498 /* 8499 * if this isn't the reservation holder and it's 8500 * not a "all registrants" type or if the type is 8501 * different then we have a conflict 8502 */ 8503 if ((lun->pr_res_idx != residx 8504 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8505 || lun->pr_res_type != type) { 8506 mtx_unlock(&lun->lun_lock); 8507 free(ctsio->kern_data_ptr, M_CTL); 8508 ctl_set_reservation_conflict(ctsio); 8509 ctl_done((union ctl_io *)ctsio); 8510 return (CTL_RETVAL_COMPLETE); 8511 } 8512 mtx_unlock(&lun->lun_lock); 8513 } else /* create a reservation */ { 8514 /* 8515 * If it's not an "all registrants" type record 8516 * reservation holder 8517 */ 8518 if (type != SPR_TYPE_WR_EX_AR 8519 && type != SPR_TYPE_EX_AC_AR) 8520 lun->pr_res_idx = residx; /* Res holder */ 8521 else 8522 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8523 8524 lun->flags |= CTL_LUN_PR_RESERVED; 8525 lun->pr_res_type = type; 8526 8527 mtx_unlock(&lun->lun_lock); 8528 8529 /* send msg to other side */ 8530 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8531 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8532 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8533 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8534 persis_io.pr.pr_info.res_type = type; 8535 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8536 sizeof(persis_io.pr), M_WAITOK); 8537 } 8538 break; 8539 8540 case SPRO_RELEASE: 8541 mtx_lock(&lun->lun_lock); 8542 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8543 /* No reservation exists return good status */ 8544 mtx_unlock(&lun->lun_lock); 8545 goto done; 8546 } 8547 /* 8548 * Is this nexus a reservation holder? 8549 */ 8550 if (lun->pr_res_idx != residx 8551 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8552 /* 8553 * not a res holder return good status but 8554 * do nothing 8555 */ 8556 mtx_unlock(&lun->lun_lock); 8557 goto done; 8558 } 8559 8560 if (lun->pr_res_type != type) { 8561 mtx_unlock(&lun->lun_lock); 8562 free(ctsio->kern_data_ptr, M_CTL); 8563 ctl_set_illegal_pr_release(ctsio); 8564 ctl_done((union ctl_io *)ctsio); 8565 return (CTL_RETVAL_COMPLETE); 8566 } 8567 8568 /* okay to release */ 8569 lun->flags &= ~CTL_LUN_PR_RESERVED; 8570 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8571 lun->pr_res_type = 0; 8572 8573 /* 8574 * If this isn't an exclusive access reservation and NUAR 8575 * is not set, generate UA for all other registrants. 
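	 * (SCP_NUAR is the NUAR bit from the Control mode page; when it is
	 * set, these unit attentions are suppressed.)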
8576 */ 8577 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && 8578 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8579 for (i = softc->init_min; i < softc->init_max; i++) { 8580 if (i == residx || ctl_get_prkey(lun, i) == 0) 8581 continue; 8582 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8583 } 8584 } 8585 mtx_unlock(&lun->lun_lock); 8586 8587 /* Send msg to other side */ 8588 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8589 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8590 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8591 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8592 sizeof(persis_io.pr), M_WAITOK); 8593 break; 8594 8595 case SPRO_CLEAR: 8596 /* send msg to other side */ 8597 8598 mtx_lock(&lun->lun_lock); 8599 lun->flags &= ~CTL_LUN_PR_RESERVED; 8600 lun->pr_res_type = 0; 8601 lun->pr_key_count = 0; 8602 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8603 8604 ctl_clr_prkey(lun, residx); 8605 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8606 if (ctl_get_prkey(lun, i) != 0) { 8607 ctl_clr_prkey(lun, i); 8608 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8609 } 8610 lun->pr_generation++; 8611 mtx_unlock(&lun->lun_lock); 8612 8613 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8614 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8615 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8616 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8617 sizeof(persis_io.pr), M_WAITOK); 8618 break; 8619 8620 case SPRO_PREEMPT: 8621 case SPRO_PRE_ABO: { 8622 int nretval; 8623 8624 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8625 residx, ctsio, cdb, param); 8626 if (nretval != 0) 8627 return (CTL_RETVAL_COMPLETE); 8628 break; 8629 } 8630 default: 8631 panic("%s: Invalid PR type %#x", __func__, cdb->action); 8632 } 8633 8634 done: 8635 free(ctsio->kern_data_ptr, M_CTL); 8636 ctl_set_success(ctsio); 8637 ctl_done((union ctl_io *)ctsio); 8638 8639 return (retval); 8640 } 8641 8642 /* 8643 * This routine is for handling a message from the other SC pertaining to 8644 * persistent reserve out. All the error checking will have been done 8645 * so only performing the action need be done here to keep the two 8646 * in sync. 8647 */ 8648 static void 8649 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8650 { 8651 struct ctl_softc *softc = CTL_SOFTC(io); 8652 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8653 struct ctl_lun *lun; 8654 int i; 8655 uint32_t residx, targ_lun; 8656 8657 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8658 mtx_lock(&softc->ctl_lock); 8659 if (targ_lun >= ctl_max_luns || 8660 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8661 mtx_unlock(&softc->ctl_lock); 8662 return; 8663 } 8664 mtx_lock(&lun->lun_lock); 8665 mtx_unlock(&softc->ctl_lock); 8666 if (lun->flags & CTL_LUN_DISABLED) { 8667 mtx_unlock(&lun->lun_lock); 8668 return; 8669 } 8670 residx = ctl_get_initindex(&msg->hdr.nexus); 8671 switch(msg->pr.pr_info.action) { 8672 case CTL_PR_REG_KEY: 8673 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8674 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8675 lun->pr_key_count++; 8676 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8677 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8678 lun->pr_generation++; 8679 break; 8680 8681 case CTL_PR_UNREG_KEY: 8682 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8683 lun->pr_key_count--; 8684 8685 /* XXX Need to see if the reservation has been released */ 8686 /* if so do we need to generate UA? 
*/ 8687 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8688 lun->flags &= ~CTL_LUN_PR_RESERVED; 8689 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8690 8691 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8692 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8693 lun->pr_key_count) { 8694 /* 8695 * If the reservation is a registrants 8696 * only type we need to generate a UA 8697 * for other registered inits. The 8698 * sense code should be RESERVATIONS 8699 * RELEASED 8700 */ 8701 8702 for (i = softc->init_min; i < softc->init_max; i++) { 8703 if (ctl_get_prkey(lun, i) == 0) 8704 continue; 8705 8706 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8707 } 8708 } 8709 lun->pr_res_type = 0; 8710 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8711 if (lun->pr_key_count==0) { 8712 lun->flags &= ~CTL_LUN_PR_RESERVED; 8713 lun->pr_res_type = 0; 8714 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8715 } 8716 } 8717 lun->pr_generation++; 8718 break; 8719 8720 case CTL_PR_RESERVE: 8721 lun->flags |= CTL_LUN_PR_RESERVED; 8722 lun->pr_res_type = msg->pr.pr_info.res_type; 8723 lun->pr_res_idx = msg->pr.pr_info.residx; 8724 8725 break; 8726 8727 case CTL_PR_RELEASE: 8728 /* 8729 * If this isn't an exclusive access reservation and NUAR 8730 * is not set, generate UA for all other registrants. 8731 */ 8732 if (lun->pr_res_type != SPR_TYPE_EX_AC && 8733 lun->pr_res_type != SPR_TYPE_WR_EX && 8734 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { 8735 for (i = softc->init_min; i < softc->init_max; i++) { 8736 if (i == residx || ctl_get_prkey(lun, i) == 0) 8737 continue; 8738 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8739 } 8740 } 8741 8742 lun->flags &= ~CTL_LUN_PR_RESERVED; 8743 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8744 lun->pr_res_type = 0; 8745 break; 8746 8747 case CTL_PR_PREEMPT: 8748 ctl_pro_preempt_other(lun, msg); 8749 break; 8750 case CTL_PR_CLEAR: 8751 lun->flags &= ~CTL_LUN_PR_RESERVED; 8752 lun->pr_res_type = 0; 8753 lun->pr_key_count = 0; 8754 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8755 8756 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8757 if (ctl_get_prkey(lun, i) == 0) 8758 continue; 8759 ctl_clr_prkey(lun, i); 8760 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8761 } 8762 lun->pr_generation++; 8763 break; 8764 } 8765 8766 mtx_unlock(&lun->lun_lock); 8767 } 8768 8769 int 8770 ctl_read_write(struct ctl_scsiio *ctsio) 8771 { 8772 struct ctl_lun *lun = CTL_LUN(ctsio); 8773 struct ctl_lba_len_flags *lbalen; 8774 uint64_t lba; 8775 uint32_t num_blocks; 8776 int flags, retval; 8777 int isread; 8778 8779 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8780 8781 flags = 0; 8782 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8783 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8784 switch (ctsio->cdb[0]) { 8785 case READ_6: 8786 case WRITE_6: { 8787 struct scsi_rw_6 *cdb; 8788 8789 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8790 8791 lba = scsi_3btoul(cdb->addr); 8792 /* only 5 bits are valid in the most significant address byte */ 8793 lba &= 0x1fffff; 8794 num_blocks = cdb->length; 8795 /* 8796 * This is correct according to SBC-2. 
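	 * The 8-bit TRANSFER LENGTH field of READ(6)/WRITE(6) cannot encode
	 * 256, so a value of zero is defined to mean 256 blocks rather than
	 * a zero-length transfer.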
8797 */ 8798 if (num_blocks == 0) 8799 num_blocks = 256; 8800 break; 8801 } 8802 case READ_10: 8803 case WRITE_10: { 8804 struct scsi_rw_10 *cdb; 8805 8806 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8807 if (cdb->byte2 & SRW10_FUA) 8808 flags |= CTL_LLF_FUA; 8809 if (cdb->byte2 & SRW10_DPO) 8810 flags |= CTL_LLF_DPO; 8811 lba = scsi_4btoul(cdb->addr); 8812 num_blocks = scsi_2btoul(cdb->length); 8813 break; 8814 } 8815 case WRITE_VERIFY_10: { 8816 struct scsi_write_verify_10 *cdb; 8817 8818 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8819 flags |= CTL_LLF_FUA; 8820 if (cdb->byte2 & SWV_DPO) 8821 flags |= CTL_LLF_DPO; 8822 lba = scsi_4btoul(cdb->addr); 8823 num_blocks = scsi_2btoul(cdb->length); 8824 break; 8825 } 8826 case READ_12: 8827 case WRITE_12: { 8828 struct scsi_rw_12 *cdb; 8829 8830 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8831 if (cdb->byte2 & SRW12_FUA) 8832 flags |= CTL_LLF_FUA; 8833 if (cdb->byte2 & SRW12_DPO) 8834 flags |= CTL_LLF_DPO; 8835 lba = scsi_4btoul(cdb->addr); 8836 num_blocks = scsi_4btoul(cdb->length); 8837 break; 8838 } 8839 case WRITE_VERIFY_12: { 8840 struct scsi_write_verify_12 *cdb; 8841 8842 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8843 flags |= CTL_LLF_FUA; 8844 if (cdb->byte2 & SWV_DPO) 8845 flags |= CTL_LLF_DPO; 8846 lba = scsi_4btoul(cdb->addr); 8847 num_blocks = scsi_4btoul(cdb->length); 8848 break; 8849 } 8850 case READ_16: 8851 case WRITE_16: { 8852 struct scsi_rw_16 *cdb; 8853 8854 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8855 if (cdb->byte2 & SRW12_FUA) 8856 flags |= CTL_LLF_FUA; 8857 if (cdb->byte2 & SRW12_DPO) 8858 flags |= CTL_LLF_DPO; 8859 lba = scsi_8btou64(cdb->addr); 8860 num_blocks = scsi_4btoul(cdb->length); 8861 break; 8862 } 8863 case WRITE_ATOMIC_16: { 8864 struct scsi_write_atomic_16 *cdb; 8865 8866 if (lun->be_lun->atomicblock == 0) { 8867 ctl_set_invalid_opcode(ctsio); 8868 ctl_done((union ctl_io *)ctsio); 8869 return (CTL_RETVAL_COMPLETE); 8870 } 8871 8872 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; 8873 if (cdb->byte2 & SRW12_FUA) 8874 flags |= CTL_LLF_FUA; 8875 if (cdb->byte2 & SRW12_DPO) 8876 flags |= CTL_LLF_DPO; 8877 lba = scsi_8btou64(cdb->addr); 8878 num_blocks = scsi_2btoul(cdb->length); 8879 if (num_blocks > lun->be_lun->atomicblock) { 8880 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8881 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8882 /*bit*/ 0); 8883 ctl_done((union ctl_io *)ctsio); 8884 return (CTL_RETVAL_COMPLETE); 8885 } 8886 break; 8887 } 8888 case WRITE_VERIFY_16: { 8889 struct scsi_write_verify_16 *cdb; 8890 8891 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8892 flags |= CTL_LLF_FUA; 8893 if (cdb->byte2 & SWV_DPO) 8894 flags |= CTL_LLF_DPO; 8895 lba = scsi_8btou64(cdb->addr); 8896 num_blocks = scsi_4btoul(cdb->length); 8897 break; 8898 } 8899 default: 8900 /* 8901 * We got a command we don't support. This shouldn't 8902 * happen, commands should be filtered out above us. 8903 */ 8904 ctl_set_invalid_opcode(ctsio); 8905 ctl_done((union ctl_io *)ctsio); 8906 8907 return (CTL_RETVAL_COMPLETE); 8908 break; /* NOTREACHED */ 8909 } 8910 8911 /* 8912 * The first check is to make sure we're in bounds, the second 8913 * check is to catch wrap-around problems. If the lba + num blocks 8914 * is less than the lba, then we've wrapped around and the block 8915 * range is invalid anyway. 
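	 * For example, lba = 0xfffffffffffffff0 with num_blocks = 0x20
	 * wraps to lba + num_blocks = 0x10, which the first comparison
	 * would accept on any reasonably sized LUN; the second comparison
	 * is what rejects it.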
8916 */ 8917 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8918 || ((lba + num_blocks) < lba)) { 8919 ctl_set_lba_out_of_range(ctsio, 8920 MAX(lba, lun->be_lun->maxlba + 1)); 8921 ctl_done((union ctl_io *)ctsio); 8922 return (CTL_RETVAL_COMPLETE); 8923 } 8924 8925 /* 8926 * According to SBC-3, a transfer length of 0 is not an error. 8927 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8928 * translates to 256 blocks for those commands. 8929 */ 8930 if (num_blocks == 0) { 8931 ctl_set_success(ctsio); 8932 ctl_done((union ctl_io *)ctsio); 8933 return (CTL_RETVAL_COMPLETE); 8934 } 8935 8936 /* Set FUA and/or DPO if caches are disabled. */ 8937 if (isread) { 8938 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) 8939 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8940 } else { 8941 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 8942 flags |= CTL_LLF_FUA; 8943 } 8944 8945 lbalen = (struct ctl_lba_len_flags *) 8946 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8947 lbalen->lba = lba; 8948 lbalen->len = num_blocks; 8949 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8950 8951 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8952 ctsio->kern_rel_offset = 0; 8953 8954 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8955 8956 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8957 return (retval); 8958 } 8959 8960 static int 8961 ctl_cnw_cont(union ctl_io *io) 8962 { 8963 struct ctl_lun *lun = CTL_LUN(io); 8964 struct ctl_scsiio *ctsio; 8965 struct ctl_lba_len_flags *lbalen; 8966 int retval; 8967 8968 ctsio = &io->scsiio; 8969 ctsio->io_hdr.status = CTL_STATUS_NONE; 8970 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8971 lbalen = (struct ctl_lba_len_flags *) 8972 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8973 lbalen->flags &= ~CTL_LLF_COMPARE; 8974 lbalen->flags |= CTL_LLF_WRITE; 8975 8976 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8977 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8978 return (retval); 8979 } 8980 8981 int 8982 ctl_cnw(struct ctl_scsiio *ctsio) 8983 { 8984 struct ctl_lun *lun = CTL_LUN(ctsio); 8985 struct ctl_lba_len_flags *lbalen; 8986 uint64_t lba; 8987 uint32_t num_blocks; 8988 int flags, retval; 8989 8990 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8991 8992 flags = 0; 8993 switch (ctsio->cdb[0]) { 8994 case COMPARE_AND_WRITE: { 8995 struct scsi_compare_and_write *cdb; 8996 8997 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8998 if (cdb->byte2 & SRW10_FUA) 8999 flags |= CTL_LLF_FUA; 9000 if (cdb->byte2 & SRW10_DPO) 9001 flags |= CTL_LLF_DPO; 9002 lba = scsi_8btou64(cdb->addr); 9003 num_blocks = cdb->length; 9004 break; 9005 } 9006 default: 9007 /* 9008 * We got a command we don't support. This shouldn't 9009 * happen, commands should be filtered out above us. 9010 */ 9011 ctl_set_invalid_opcode(ctsio); 9012 ctl_done((union ctl_io *)ctsio); 9013 9014 return (CTL_RETVAL_COMPLETE); 9015 break; /* NOTREACHED */ 9016 } 9017 9018 /* 9019 * The first check is to make sure we're in bounds, the second 9020 * check is to catch wrap-around problems. If the lba + num blocks 9021 * is less than the lba, then we've wrapped around and the block 9022 * range is invalid anyway. 
9023 */ 9024 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9025 || ((lba + num_blocks) < lba)) { 9026 ctl_set_lba_out_of_range(ctsio, 9027 MAX(lba, lun->be_lun->maxlba + 1)); 9028 ctl_done((union ctl_io *)ctsio); 9029 return (CTL_RETVAL_COMPLETE); 9030 } 9031 9032 /* 9033 * According to SBC-3, a transfer length of 0 is not an error. 9034 */ 9035 if (num_blocks == 0) { 9036 ctl_set_success(ctsio); 9037 ctl_done((union ctl_io *)ctsio); 9038 return (CTL_RETVAL_COMPLETE); 9039 } 9040 9041 /* Set FUA if write cache is disabled. */ 9042 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) 9043 flags |= CTL_LLF_FUA; 9044 9045 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 9046 ctsio->kern_rel_offset = 0; 9047 9048 /* 9049 * Set the IO_CONT flag, so that if this I/O gets passed to 9050 * ctl_data_submit_done(), it'll get passed back to 9051 * ctl_ctl_cnw_cont() for further processing. 9052 */ 9053 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 9054 ctsio->io_cont = ctl_cnw_cont; 9055 9056 lbalen = (struct ctl_lba_len_flags *) 9057 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9058 lbalen->lba = lba; 9059 lbalen->len = num_blocks; 9060 lbalen->flags = CTL_LLF_COMPARE | flags; 9061 9062 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 9063 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9064 return (retval); 9065 } 9066 9067 int 9068 ctl_verify(struct ctl_scsiio *ctsio) 9069 { 9070 struct ctl_lun *lun = CTL_LUN(ctsio); 9071 struct ctl_lba_len_flags *lbalen; 9072 uint64_t lba; 9073 uint32_t num_blocks; 9074 int bytchk, flags; 9075 int retval; 9076 9077 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 9078 9079 bytchk = 0; 9080 flags = CTL_LLF_FUA; 9081 switch (ctsio->cdb[0]) { 9082 case VERIFY_10: { 9083 struct scsi_verify_10 *cdb; 9084 9085 cdb = (struct scsi_verify_10 *)ctsio->cdb; 9086 if (cdb->byte2 & SVFY_BYTCHK) 9087 bytchk = 1; 9088 if (cdb->byte2 & SVFY_DPO) 9089 flags |= CTL_LLF_DPO; 9090 lba = scsi_4btoul(cdb->addr); 9091 num_blocks = scsi_2btoul(cdb->length); 9092 break; 9093 } 9094 case VERIFY_12: { 9095 struct scsi_verify_12 *cdb; 9096 9097 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9098 if (cdb->byte2 & SVFY_BYTCHK) 9099 bytchk = 1; 9100 if (cdb->byte2 & SVFY_DPO) 9101 flags |= CTL_LLF_DPO; 9102 lba = scsi_4btoul(cdb->addr); 9103 num_blocks = scsi_4btoul(cdb->length); 9104 break; 9105 } 9106 case VERIFY_16: { 9107 struct scsi_rw_16 *cdb; 9108 9109 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9110 if (cdb->byte2 & SVFY_BYTCHK) 9111 bytchk = 1; 9112 if (cdb->byte2 & SVFY_DPO) 9113 flags |= CTL_LLF_DPO; 9114 lba = scsi_8btou64(cdb->addr); 9115 num_blocks = scsi_4btoul(cdb->length); 9116 break; 9117 } 9118 default: 9119 /* 9120 * We got a command we don't support. This shouldn't 9121 * happen, commands should be filtered out above us. 9122 */ 9123 ctl_set_invalid_opcode(ctsio); 9124 ctl_done((union ctl_io *)ctsio); 9125 return (CTL_RETVAL_COMPLETE); 9126 } 9127 9128 /* 9129 * The first check is to make sure we're in bounds, the second 9130 * check is to catch wrap-around problems. If the lba + num blocks 9131 * is less than the lba, then we've wrapped around and the block 9132 * range is invalid anyway. 9133 */ 9134 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9135 || ((lba + num_blocks) < lba)) { 9136 ctl_set_lba_out_of_range(ctsio, 9137 MAX(lba, lun->be_lun->maxlba + 1)); 9138 ctl_done((union ctl_io *)ctsio); 9139 return (CTL_RETVAL_COMPLETE); 9140 } 9141 9142 /* 9143 * According to SBC-3, a transfer length of 0 is not an error. 
9144 */ 9145 if (num_blocks == 0) { 9146 ctl_set_success(ctsio); 9147 ctl_done((union ctl_io *)ctsio); 9148 return (CTL_RETVAL_COMPLETE); 9149 } 9150 9151 lbalen = (struct ctl_lba_len_flags *) 9152 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9153 lbalen->lba = lba; 9154 lbalen->len = num_blocks; 9155 if (bytchk) { 9156 lbalen->flags = CTL_LLF_COMPARE | flags; 9157 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9158 } else { 9159 lbalen->flags = CTL_LLF_VERIFY | flags; 9160 ctsio->kern_total_len = 0; 9161 } 9162 ctsio->kern_rel_offset = 0; 9163 9164 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9165 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9166 return (retval); 9167 } 9168 9169 int 9170 ctl_report_luns(struct ctl_scsiio *ctsio) 9171 { 9172 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9173 struct ctl_port *port = CTL_PORT(ctsio); 9174 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); 9175 struct scsi_report_luns *cdb; 9176 struct scsi_report_luns_data *lun_data; 9177 int num_filled, num_luns, num_port_luns, retval; 9178 uint32_t alloc_len, lun_datalen; 9179 uint32_t initidx, targ_lun_id, lun_id; 9180 9181 retval = CTL_RETVAL_COMPLETE; 9182 cdb = (struct scsi_report_luns *)ctsio->cdb; 9183 9184 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9185 9186 num_luns = 0; 9187 num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns; 9188 mtx_lock(&softc->ctl_lock); 9189 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { 9190 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) 9191 num_luns++; 9192 } 9193 mtx_unlock(&softc->ctl_lock); 9194 9195 switch (cdb->select_report) { 9196 case RPL_REPORT_DEFAULT: 9197 case RPL_REPORT_ALL: 9198 case RPL_REPORT_NONSUBSID: 9199 break; 9200 case RPL_REPORT_WELLKNOWN: 9201 case RPL_REPORT_ADMIN: 9202 case RPL_REPORT_CONGLOM: 9203 num_luns = 0; 9204 break; 9205 default: 9206 ctl_set_invalid_field(ctsio, 9207 /*sks_valid*/ 1, 9208 /*command*/ 1, 9209 /*field*/ 2, 9210 /*bit_valid*/ 0, 9211 /*bit*/ 0); 9212 ctl_done((union ctl_io *)ctsio); 9213 return (retval); 9214 break; /* NOTREACHED */ 9215 } 9216 9217 alloc_len = scsi_4btoul(cdb->length); 9218 /* 9219 * The initiator has to allocate at least 16 bytes for this request, 9220 * so he can at least get the header and the first LUN. Otherwise 9221 * we reject the request (per SPC-3 rev 14, section 6.21). 
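	 * The 16 bytes are the 8-byte report header plus a single 8-byte
	 * LUN entry, which is what the sizeof() sum below works out to.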
9222 */ 9223 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9224 sizeof(struct scsi_report_luns_lundata))) { 9225 ctl_set_invalid_field(ctsio, 9226 /*sks_valid*/ 1, 9227 /*command*/ 1, 9228 /*field*/ 6, 9229 /*bit_valid*/ 0, 9230 /*bit*/ 0); 9231 ctl_done((union ctl_io *)ctsio); 9232 return (retval); 9233 } 9234 9235 lun_datalen = sizeof(*lun_data) + 9236 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9237 9238 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9239 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9240 ctsio->kern_sg_entries = 0; 9241 9242 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9243 9244 mtx_lock(&softc->ctl_lock); 9245 for (targ_lun_id = 0, num_filled = 0; 9246 targ_lun_id < num_port_luns && num_filled < num_luns; 9247 targ_lun_id++) { 9248 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9249 if (lun_id == UINT32_MAX) 9250 continue; 9251 lun = softc->ctl_luns[lun_id]; 9252 if (lun == NULL) 9253 continue; 9254 9255 be64enc(lun_data->luns[num_filled++].lundata, 9256 ctl_encode_lun(targ_lun_id)); 9257 9258 /* 9259 * According to SPC-3, rev 14 section 6.21: 9260 * 9261 * "The execution of a REPORT LUNS command to any valid and 9262 * installed logical unit shall clear the REPORTED LUNS DATA 9263 * HAS CHANGED unit attention condition for all logical 9264 * units of that target with respect to the requesting 9265 * initiator. A valid and installed logical unit is one 9266 * having a PERIPHERAL QUALIFIER of 000b in the standard 9267 * INQUIRY data (see 6.4.2)." 9268 * 9269 * If request_lun is NULL, the LUN this report luns command 9270 * was issued to is either disabled or doesn't exist. In that 9271 * case, we shouldn't clear any pending lun change unit 9272 * attention. 9273 */ 9274 if (request_lun != NULL) { 9275 mtx_lock(&lun->lun_lock); 9276 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9277 mtx_unlock(&lun->lun_lock); 9278 } 9279 } 9280 mtx_unlock(&softc->ctl_lock); 9281 9282 /* 9283 * It's quite possible that we've returned fewer LUNs than we allocated 9284 * space for. Trim it. 9285 */ 9286 lun_datalen = sizeof(*lun_data) + 9287 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9288 ctsio->kern_rel_offset = 0; 9289 ctsio->kern_sg_entries = 0; 9290 ctsio->kern_data_len = min(lun_datalen, alloc_len); 9291 ctsio->kern_total_len = ctsio->kern_data_len; 9292 9293 /* 9294 * We set this to the actual data length, regardless of how much 9295 * space we actually have to return results. If the user looks at 9296 * this value, he'll know whether or not he allocated enough space 9297 * and reissue the command if necessary. We don't support well 9298 * known logical units, so if the user asks for that, return none. 9299 */ 9300 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9301 9302 /* 9303 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9304 * this request. 
9305 */ 9306 ctl_set_success(ctsio); 9307 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9308 ctsio->be_move_done = ctl_config_move_done; 9309 ctl_datamove((union ctl_io *)ctsio); 9310 return (retval); 9311 } 9312 9313 int 9314 ctl_request_sense(struct ctl_scsiio *ctsio) 9315 { 9316 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9317 struct ctl_lun *lun = CTL_LUN(ctsio); 9318 struct scsi_request_sense *cdb; 9319 struct scsi_sense_data *sense_ptr, *ps; 9320 uint32_t initidx; 9321 int have_error; 9322 u_int sense_len = SSD_FULL_SIZE; 9323 scsi_sense_data_type sense_format; 9324 ctl_ua_type ua_type; 9325 uint8_t asc = 0, ascq = 0; 9326 9327 cdb = (struct scsi_request_sense *)ctsio->cdb; 9328 9329 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9330 9331 /* 9332 * Determine which sense format the user wants. 9333 */ 9334 if (cdb->byte2 & SRS_DESC) 9335 sense_format = SSD_TYPE_DESC; 9336 else 9337 sense_format = SSD_TYPE_FIXED; 9338 9339 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9340 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9341 ctsio->kern_sg_entries = 0; 9342 ctsio->kern_rel_offset = 0; 9343 9344 /* 9345 * struct scsi_sense_data, which is currently set to 256 bytes, is 9346 * larger than the largest allowed value for the length field in the 9347 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9348 */ 9349 ctsio->kern_data_len = cdb->length; 9350 ctsio->kern_total_len = cdb->length; 9351 9352 /* 9353 * If we don't have a LUN, we don't have any pending sense. 9354 */ 9355 if (lun == NULL || 9356 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 9357 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { 9358 /* "Logical unit not supported" */ 9359 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, 9360 /*current_error*/ 1, 9361 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 9362 /*asc*/ 0x25, 9363 /*ascq*/ 0x00, 9364 SSD_ELEM_NONE); 9365 goto send; 9366 } 9367 9368 have_error = 0; 9369 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9370 /* 9371 * Check for pending sense, and then for pending unit attentions. 9372 * Pending sense gets returned first, then pending unit attentions. 9373 */ 9374 mtx_lock(&lun->lun_lock); 9375 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 9376 if (ps != NULL) 9377 ps += initidx % CTL_MAX_INIT_PER_PORT; 9378 if (ps != NULL && ps->error_code != 0) { 9379 scsi_sense_data_type stored_format; 9380 9381 /* 9382 * Check to see which sense format was used for the stored 9383 * sense data. 9384 */ 9385 stored_format = scsi_sense_type(ps); 9386 9387 /* 9388 * If the user requested a different sense format than the 9389 * one we stored, then we need to convert it to the other 9390 * format. If we're going from descriptor to fixed format 9391 * sense data, we may lose things in translation, depending 9392 * on what options were used. 9393 * 9394 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9395 * for some reason we'll just copy it out as-is. 
9396 */ 9397 if ((stored_format == SSD_TYPE_FIXED) 9398 && (sense_format == SSD_TYPE_DESC)) 9399 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9400 ps, (struct scsi_sense_data_desc *)sense_ptr); 9401 else if ((stored_format == SSD_TYPE_DESC) 9402 && (sense_format == SSD_TYPE_FIXED)) 9403 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9404 ps, (struct scsi_sense_data_fixed *)sense_ptr); 9405 else 9406 memcpy(sense_ptr, ps, sizeof(*sense_ptr)); 9407 9408 ps->error_code = 0; 9409 have_error = 1; 9410 } else { 9411 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, 9412 sense_format); 9413 if (ua_type != CTL_UA_NONE) 9414 have_error = 1; 9415 } 9416 if (have_error == 0) { 9417 /* 9418 * Report informational exception if have one and allowed. 9419 */ 9420 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { 9421 asc = lun->ie_asc; 9422 ascq = lun->ie_ascq; 9423 } 9424 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, 9425 /*current_error*/ 1, 9426 /*sense_key*/ SSD_KEY_NO_SENSE, 9427 /*asc*/ asc, 9428 /*ascq*/ ascq, 9429 SSD_ELEM_NONE); 9430 } 9431 mtx_unlock(&lun->lun_lock); 9432 9433 send: 9434 /* 9435 * We report the SCSI status as OK, since the status of the command 9436 * itself is OK. We're reporting sense as parameter data. 9437 */ 9438 ctl_set_success(ctsio); 9439 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9440 ctsio->be_move_done = ctl_config_move_done; 9441 ctl_datamove((union ctl_io *)ctsio); 9442 return (CTL_RETVAL_COMPLETE); 9443 } 9444 9445 int 9446 ctl_tur(struct ctl_scsiio *ctsio) 9447 { 9448 9449 CTL_DEBUG_PRINT(("ctl_tur\n")); 9450 9451 ctl_set_success(ctsio); 9452 ctl_done((union ctl_io *)ctsio); 9453 9454 return (CTL_RETVAL_COMPLETE); 9455 } 9456 9457 /* 9458 * SCSI VPD page 0x00, the Supported VPD Pages page. 9459 */ 9460 static int 9461 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9462 { 9463 struct ctl_lun *lun = CTL_LUN(ctsio); 9464 struct scsi_vpd_supported_pages *pages; 9465 int sup_page_size; 9466 int p; 9467 9468 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9469 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9470 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9471 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9472 ctsio->kern_rel_offset = 0; 9473 ctsio->kern_sg_entries = 0; 9474 ctsio->kern_data_len = min(sup_page_size, alloc_len); 9475 ctsio->kern_total_len = ctsio->kern_data_len; 9476 9477 /* 9478 * The control device is always connected. The disk device, on the 9479 * other hand, may not be online all the time. Need to change this 9480 * to figure out whether the disk device is actually online or not. 
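	 * Byte 0 of every VPD page carries the same PERIPHERAL QUALIFIER
	 * (bits 7-5) and PERIPHERAL DEVICE TYPE (bits 4-0) as the standard
	 * INQUIRY data, which is why the qualifier is shifted left by 5
	 * below.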
9481 */ 9482 if (lun != NULL) 9483 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9484 lun->be_lun->lun_type; 9485 else 9486 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9487 9488 p = 0; 9489 /* Supported VPD pages */ 9490 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9491 /* Serial Number */ 9492 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9493 /* Device Identification */ 9494 pages->page_list[p++] = SVPD_DEVICE_ID; 9495 /* Extended INQUIRY Data */ 9496 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9497 /* Mode Page Policy */ 9498 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9499 /* SCSI Ports */ 9500 pages->page_list[p++] = SVPD_SCSI_PORTS; 9501 /* Third-party Copy */ 9502 pages->page_list[p++] = SVPD_SCSI_TPC; 9503 /* SCSI Feature Sets */ 9504 pages->page_list[p++] = SVPD_SCSI_SFS; 9505 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9506 /* Block limits */ 9507 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9508 /* Block Device Characteristics */ 9509 pages->page_list[p++] = SVPD_BDC; 9510 /* Logical Block Provisioning */ 9511 pages->page_list[p++] = SVPD_LBP; 9512 } 9513 pages->length = p; 9514 9515 ctl_set_success(ctsio); 9516 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9517 ctsio->be_move_done = ctl_config_move_done; 9518 ctl_datamove((union ctl_io *)ctsio); 9519 return (CTL_RETVAL_COMPLETE); 9520 } 9521 9522 /* 9523 * SCSI VPD page 0x80, the Unit Serial Number page. 9524 */ 9525 static int 9526 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9527 { 9528 struct ctl_lun *lun = CTL_LUN(ctsio); 9529 struct scsi_vpd_unit_serial_number *sn_ptr; 9530 int data_len; 9531 9532 data_len = 4 + CTL_SN_LEN; 9533 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9534 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9535 ctsio->kern_rel_offset = 0; 9536 ctsio->kern_sg_entries = 0; 9537 ctsio->kern_data_len = min(data_len, alloc_len); 9538 ctsio->kern_total_len = ctsio->kern_data_len; 9539 9540 /* 9541 * The control device is always connected. The disk device, on the 9542 * other hand, may not be online all the time. Need to change this 9543 * to figure out whether the disk device is actually online or not. 9544 */ 9545 if (lun != NULL) 9546 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9547 lun->be_lun->lun_type; 9548 else 9549 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9550 9551 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9552 sn_ptr->length = CTL_SN_LEN; 9553 /* 9554 * If we don't have a LUN, we just leave the serial number as 9555 * all spaces. 9556 */ 9557 if (lun != NULL) { 9558 strncpy((char *)sn_ptr->serial_num, 9559 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9560 } else 9561 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9562 9563 ctl_set_success(ctsio); 9564 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9565 ctsio->be_move_done = ctl_config_move_done; 9566 ctl_datamove((union ctl_io *)ctsio); 9567 return (CTL_RETVAL_COMPLETE); 9568 } 9569 9570 /* 9571 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
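 * This page advertises optional behaviour to the initiator: which task
 * tag types are supported, whether a volatile cache is present, the
 * LUICLR unit attention semantics, and RTD support in MODE SELECT.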
9572 */ 9573 static int 9574 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9575 { 9576 struct ctl_lun *lun = CTL_LUN(ctsio); 9577 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9578 int data_len; 9579 9580 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9581 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9582 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9583 ctsio->kern_sg_entries = 0; 9584 ctsio->kern_rel_offset = 0; 9585 ctsio->kern_data_len = min(data_len, alloc_len); 9586 ctsio->kern_total_len = ctsio->kern_data_len; 9587 9588 /* 9589 * The control device is always connected. The disk device, on the 9590 * other hand, may not be online all the time. 9591 */ 9592 if (lun != NULL) 9593 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9594 lun->be_lun->lun_type; 9595 else 9596 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9597 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9598 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9599 /* 9600 * We support head of queue, ordered and simple tags. 9601 */ 9602 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9603 /* 9604 * Volatile cache supported. 9605 */ 9606 eid_ptr->flags3 = SVPD_EID_V_SUP; 9607 9608 /* 9609 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9610 * attention for a particular IT nexus on all LUNs once we report 9611 * it to that nexus once. This bit is required as of SPC-4. 9612 */ 9613 eid_ptr->flags4 = SVPD_EID_LUICLR; 9614 9615 /* 9616 * We support revert to defaults (RTD) bit in MODE SELECT. 9617 */ 9618 eid_ptr->flags5 = SVPD_EID_RTD_SUP; 9619 9620 /* 9621 * XXX KDM in order to correctly answer this, we would need 9622 * information from the SIM to determine how much sense data it 9623 * can send. So this would really be a path inquiry field, most 9624 * likely. This can be set to a maximum of 252 according to SPC-4, 9625 * but the hardware may or may not be able to support that much. 9626 * 0 just means that the maximum sense data length is not reported. 9627 */ 9628 eid_ptr->max_sense_length = 0; 9629 9630 ctl_set_success(ctsio); 9631 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9632 ctsio->be_move_done = ctl_config_move_done; 9633 ctl_datamove((union ctl_io *)ctsio); 9634 return (CTL_RETVAL_COMPLETE); 9635 } 9636 9637 static int 9638 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9639 { 9640 struct ctl_lun *lun = CTL_LUN(ctsio); 9641 struct scsi_vpd_mode_page_policy *mpp_ptr; 9642 int data_len; 9643 9644 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9645 sizeof(struct scsi_vpd_mode_page_policy_descr); 9646 9647 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9648 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9649 ctsio->kern_rel_offset = 0; 9650 ctsio->kern_sg_entries = 0; 9651 ctsio->kern_data_len = min(data_len, alloc_len); 9652 ctsio->kern_total_len = ctsio->kern_data_len; 9653 9654 /* 9655 * The control device is always connected. The disk device, on the 9656 * other hand, may not be online all the time. 
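	 * The single descriptor emitted below uses the wildcard page code
	 * 0x3f and subpage code 0xff, declaring every mode page "shared":
	 * one copy of the parameters is maintained for all I_T nexuses
	 * rather than per initiator.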
9657 */ 9658 if (lun != NULL) 9659 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9660 lun->be_lun->lun_type; 9661 else 9662 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9663 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9664 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9665 mpp_ptr->descr[0].page_code = 0x3f; 9666 mpp_ptr->descr[0].subpage_code = 0xff; 9667 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9668 9669 ctl_set_success(ctsio); 9670 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9671 ctsio->be_move_done = ctl_config_move_done; 9672 ctl_datamove((union ctl_io *)ctsio); 9673 return (CTL_RETVAL_COMPLETE); 9674 } 9675 9676 /* 9677 * SCSI VPD page 0x83, the Device Identification page. 9678 */ 9679 static int 9680 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9681 { 9682 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9683 struct ctl_port *port = CTL_PORT(ctsio); 9684 struct ctl_lun *lun = CTL_LUN(ctsio); 9685 struct scsi_vpd_device_id *devid_ptr; 9686 struct scsi_vpd_id_descriptor *desc; 9687 int data_len, g; 9688 uint8_t proto; 9689 9690 data_len = sizeof(struct scsi_vpd_device_id) + 9691 sizeof(struct scsi_vpd_id_descriptor) + 9692 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9693 sizeof(struct scsi_vpd_id_descriptor) + 9694 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9695 if (lun && lun->lun_devid) 9696 data_len += lun->lun_devid->len; 9697 if (port && port->port_devid) 9698 data_len += port->port_devid->len; 9699 if (port && port->target_devid) 9700 data_len += port->target_devid->len; 9701 9702 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9703 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9704 ctsio->kern_sg_entries = 0; 9705 ctsio->kern_rel_offset = 0; 9706 ctsio->kern_sg_entries = 0; 9707 ctsio->kern_data_len = min(data_len, alloc_len); 9708 ctsio->kern_total_len = ctsio->kern_data_len; 9709 9710 /* 9711 * The control device is always connected. The disk device, on the 9712 * other hand, may not be online all the time. 9713 */ 9714 if (lun != NULL) 9715 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9716 lun->be_lun->lun_type; 9717 else 9718 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9719 devid_ptr->page_code = SVPD_DEVICE_ID; 9720 scsi_ulto2b(data_len - 4, devid_ptr->length); 9721 9722 if (port && port->port_type == CTL_PORT_FC) 9723 proto = SCSI_PROTO_FC << 4; 9724 else if (port && port->port_type == CTL_PORT_SAS) 9725 proto = SCSI_PROTO_SAS << 4; 9726 else if (port && port->port_type == CTL_PORT_ISCSI) 9727 proto = SCSI_PROTO_ISCSI << 4; 9728 else 9729 proto = SCSI_PROTO_SPI << 4; 9730 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9731 9732 /* 9733 * We're using a LUN association here. i.e., this device ID is a 9734 * per-LUN identifier. 9735 */ 9736 if (lun && lun->lun_devid) { 9737 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9738 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9739 lun->lun_devid->len); 9740 } 9741 9742 /* 9743 * This is for the WWPN which is a port association. 
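	 * The descriptor arrives preformatted from the frontend port
	 * driver and is copied out verbatim; for Fibre Channel ports this
	 * is typically an NAA-format WWPN.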
9744 */ 9745 if (port && port->port_devid) { 9746 memcpy(desc, port->port_devid->data, port->port_devid->len); 9747 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9748 port->port_devid->len); 9749 } 9750 9751 /* 9752 * This is for the Relative Target Port(type 4h) identifier 9753 */ 9754 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9755 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9756 SVPD_ID_TYPE_RELTARG; 9757 desc->length = 4; 9758 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9759 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9760 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9761 9762 /* 9763 * This is for the Target Port Group(type 5h) identifier 9764 */ 9765 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9766 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9767 SVPD_ID_TYPE_TPORTGRP; 9768 desc->length = 4; 9769 if (softc->is_single || 9770 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9771 g = 1; 9772 else 9773 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9774 scsi_ulto2b(g, &desc->identifier[2]); 9775 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9776 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9777 9778 /* 9779 * This is for the Target identifier 9780 */ 9781 if (port && port->target_devid) { 9782 memcpy(desc, port->target_devid->data, port->target_devid->len); 9783 } 9784 9785 ctl_set_success(ctsio); 9786 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9787 ctsio->be_move_done = ctl_config_move_done; 9788 ctl_datamove((union ctl_io *)ctsio); 9789 return (CTL_RETVAL_COMPLETE); 9790 } 9791 9792 static int 9793 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9794 { 9795 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9796 struct ctl_lun *lun = CTL_LUN(ctsio); 9797 struct scsi_vpd_scsi_ports *sp; 9798 struct scsi_vpd_port_designation *pd; 9799 struct scsi_vpd_port_designation_cont *pdc; 9800 struct ctl_port *port; 9801 int data_len, num_target_ports, iid_len, id_len; 9802 9803 num_target_ports = 0; 9804 iid_len = 0; 9805 id_len = 0; 9806 mtx_lock(&softc->ctl_lock); 9807 STAILQ_FOREACH(port, &softc->port_list, links) { 9808 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9809 continue; 9810 if (lun != NULL && 9811 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9812 continue; 9813 num_target_ports++; 9814 if (port->init_devid) 9815 iid_len += port->init_devid->len; 9816 if (port->port_devid) 9817 id_len += port->port_devid->len; 9818 } 9819 mtx_unlock(&softc->ctl_lock); 9820 9821 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9822 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9823 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9824 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9825 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9826 ctsio->kern_sg_entries = 0; 9827 ctsio->kern_rel_offset = 0; 9828 ctsio->kern_sg_entries = 0; 9829 ctsio->kern_data_len = min(data_len, alloc_len); 9830 ctsio->kern_total_len = ctsio->kern_data_len; 9831 9832 /* 9833 * The control device is always connected. The disk device, on the 9834 * other hand, may not be online all the time. Need to change this 9835 * to figure out whether the disk device is actually online or not. 
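	 * The port list was already walked once above purely to size the
	 * buffer; the loop below walks it a second time to fill in the
	 * per-port designation descriptors.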
9836 */ 9837 if (lun != NULL) 9838 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9839 lun->be_lun->lun_type; 9840 else 9841 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9842 9843 sp->page_code = SVPD_SCSI_PORTS; 9844 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9845 sp->page_length); 9846 pd = &sp->design[0]; 9847 9848 mtx_lock(&softc->ctl_lock); 9849 STAILQ_FOREACH(port, &softc->port_list, links) { 9850 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9851 continue; 9852 if (lun != NULL && 9853 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9854 continue; 9855 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9856 if (port->init_devid) { 9857 iid_len = port->init_devid->len; 9858 memcpy(pd->initiator_transportid, 9859 port->init_devid->data, port->init_devid->len); 9860 } else 9861 iid_len = 0; 9862 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9863 pdc = (struct scsi_vpd_port_designation_cont *) 9864 (&pd->initiator_transportid[iid_len]); 9865 if (port->port_devid) { 9866 id_len = port->port_devid->len; 9867 memcpy(pdc->target_port_descriptors, 9868 port->port_devid->data, port->port_devid->len); 9869 } else 9870 id_len = 0; 9871 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9872 pd = (struct scsi_vpd_port_designation *) 9873 ((uint8_t *)pdc->target_port_descriptors + id_len); 9874 } 9875 mtx_unlock(&softc->ctl_lock); 9876 9877 ctl_set_success(ctsio); 9878 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9879 ctsio->be_move_done = ctl_config_move_done; 9880 ctl_datamove((union ctl_io *)ctsio); 9881 return (CTL_RETVAL_COMPLETE); 9882 } 9883 9884 static int 9885 ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len) 9886 { 9887 struct ctl_lun *lun = CTL_LUN(ctsio); 9888 struct scsi_vpd_sfs *sfs_ptr; 9889 int sfs_page_size, n; 9890 9891 sfs_page_size = sizeof(*sfs_ptr) + 5 * 2; 9892 ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO); 9893 sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr; 9894 ctsio->kern_sg_entries = 0; 9895 ctsio->kern_rel_offset = 0; 9896 ctsio->kern_sg_entries = 0; 9897 ctsio->kern_data_len = min(sfs_page_size, alloc_len); 9898 ctsio->kern_total_len = ctsio->kern_data_len; 9899 9900 /* 9901 * The control device is always connected. The disk device, on the 9902 * other hand, may not be online all the time. Need to change this 9903 * to figure out whether the disk device is actually online or not. 
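	 * Each 2-byte code emitted below names one SCSI feature set (for
	 * example 0x0001, "Discovery 2016") that this logical unit claims
	 * to implement in full.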
9904 */ 9905 if (lun != NULL) 9906 sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9907 lun->be_lun->lun_type; 9908 else 9909 sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9910 9911 sfs_ptr->page_code = SVPD_SCSI_SFS; 9912 n = 0; 9913 /* Discovery 2016 */ 9914 scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]); 9915 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9916 /* SBC Base 2016 */ 9917 scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]); 9918 /* SBC Base 2010 */ 9919 scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]); 9920 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9921 /* Basic Provisioning 2016 */ 9922 scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]); 9923 } 9924 /* Drive Maintenance 2016 */ 9925 //scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]); 9926 } 9927 scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length); 9928 9929 ctl_set_success(ctsio); 9930 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9931 ctsio->be_move_done = ctl_config_move_done; 9932 ctl_datamove((union ctl_io *)ctsio); 9933 return (CTL_RETVAL_COMPLETE); 9934 } 9935 9936 static int 9937 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9938 { 9939 struct ctl_lun *lun = CTL_LUN(ctsio); 9940 struct scsi_vpd_block_limits *bl_ptr; 9941 const char *val; 9942 uint64_t ival; 9943 9944 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9945 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9946 ctsio->kern_sg_entries = 0; 9947 ctsio->kern_rel_offset = 0; 9948 ctsio->kern_sg_entries = 0; 9949 ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); 9950 ctsio->kern_total_len = ctsio->kern_data_len; 9951 9952 /* 9953 * The control device is always connected. The disk device, on the 9954 * other hand, may not be online all the time. Need to change this 9955 * to figure out whether the disk device is actually online or not. 
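	 * Most of the limits reported below come straight from the backend
	 * LUN configuration; the "unmap_max_lba", "unmap_max_descr" and
	 * "write_same_max_lba" LUN options override the all-ones defaults.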
9956 */ 9957 if (lun != NULL) 9958 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9959 lun->be_lun->lun_type; 9960 else 9961 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9962 9963 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9964 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9965 bl_ptr->max_cmp_write_len = 0xff; 9966 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9967 if (lun != NULL) { 9968 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9969 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9970 ival = 0xffffffff; 9971 val = dnvlist_get_string(lun->be_lun->options, 9972 "unmap_max_lba", NULL); 9973 if (val != NULL) 9974 ctl_expand_number(val, &ival); 9975 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); 9976 ival = 0xffffffff; 9977 val = dnvlist_get_string(lun->be_lun->options, 9978 "unmap_max_descr", NULL); 9979 if (val != NULL) 9980 ctl_expand_number(val, &ival); 9981 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); 9982 if (lun->be_lun->ublockexp != 0) { 9983 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9984 bl_ptr->opt_unmap_grain); 9985 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9986 bl_ptr->unmap_grain_align); 9987 } 9988 } 9989 scsi_ulto4b(lun->be_lun->atomicblock, 9990 bl_ptr->max_atomic_transfer_length); 9991 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9992 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9993 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); 9994 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); 9995 ival = UINT64_MAX; 9996 val = dnvlist_get_string(lun->be_lun->options, 9997 "write_same_max_lba", NULL); 9998 if (val != NULL) 9999 ctl_expand_number(val, &ival); 10000 scsi_u64to8b(ival, bl_ptr->max_write_same_length); 10001 if (lun->be_lun->maxlba + 1 > ival) 10002 bl_ptr->flags |= SVPD_BL_WSNZ; 10003 } 10004 10005 ctl_set_success(ctsio); 10006 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10007 ctsio->be_move_done = ctl_config_move_done; 10008 ctl_datamove((union ctl_io *)ctsio); 10009 return (CTL_RETVAL_COMPLETE); 10010 } 10011 10012 static int 10013 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 10014 { 10015 struct ctl_lun *lun = CTL_LUN(ctsio); 10016 struct scsi_vpd_block_device_characteristics *bdc_ptr; 10017 const char *value; 10018 u_int i; 10019 10020 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 10021 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 10022 ctsio->kern_sg_entries = 0; 10023 ctsio->kern_rel_offset = 0; 10024 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); 10025 ctsio->kern_total_len = ctsio->kern_data_len; 10026 10027 /* 10028 * The control device is always connected. The disk device, on the 10029 * other hand, may not be online all the time. Need to change this 10030 * to figure out whether the disk device is actually online or not. 
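	 * The rotation rate and form factor reported below come from the
	 * "rpm" and "formfactor" LUN options when present; a rate of
	 * 0x0001 marks the LUN as non-rotating (solid state) to initiators.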
10031 */ 10032 if (lun != NULL) 10033 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10034 lun->be_lun->lun_type; 10035 else 10036 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10037 bdc_ptr->page_code = SVPD_BDC; 10038 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 10039 if (lun != NULL && 10040 (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) 10041 i = strtol(value, NULL, 0); 10042 else 10043 i = CTL_DEFAULT_ROTATION_RATE; 10044 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 10045 if (lun != NULL && 10046 (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL) 10047 i = strtol(value, NULL, 0); 10048 else 10049 i = 0; 10050 bdc_ptr->wab_wac_ff = (i & 0x0f); 10051 bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS; 10052 10053 ctl_set_success(ctsio); 10054 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10055 ctsio->be_move_done = ctl_config_move_done; 10056 ctl_datamove((union ctl_io *)ctsio); 10057 return (CTL_RETVAL_COMPLETE); 10058 } 10059 10060 static int 10061 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 10062 { 10063 struct ctl_lun *lun = CTL_LUN(ctsio); 10064 struct scsi_vpd_logical_block_prov *lbp_ptr; 10065 const char *value; 10066 10067 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 10068 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 10069 ctsio->kern_sg_entries = 0; 10070 ctsio->kern_rel_offset = 0; 10071 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); 10072 ctsio->kern_total_len = ctsio->kern_data_len; 10073 10074 /* 10075 * The control device is always connected. The disk device, on the 10076 * other hand, may not be online all the time. Need to change this 10077 * to figure out whether the disk device is actually online or not. 10078 */ 10079 if (lun != NULL) 10080 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10081 lun->be_lun->lun_type; 10082 else 10083 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10084 10085 lbp_ptr->page_code = SVPD_LBP; 10086 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 10087 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 10088 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10089 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10090 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10091 value = dnvlist_get_string(lun->be_lun->options, 10092 "provisioning_type", NULL); 10093 if (value != NULL) { 10094 if (strcmp(value, "resource") == 0) 10095 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 10096 else if (strcmp(value, "thin") == 0) 10097 lbp_ptr->prov_type = SVPD_LBP_THIN; 10098 } else 10099 lbp_ptr->prov_type = SVPD_LBP_THIN; 10100 } 10101 10102 ctl_set_success(ctsio); 10103 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10104 ctsio->be_move_done = ctl_config_move_done; 10105 ctl_datamove((union ctl_io *)ctsio); 10106 return (CTL_RETVAL_COMPLETE); 10107 } 10108 10109 /* 10110 * INQUIRY with the EVPD bit set. 
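 * Dispatches on the PAGE CODE byte of the CDB to one of the VPD page
 * builders above; an unsupported page code (or a block-device page on a
 * non-direct-access LUN) is answered with ILLEGAL REQUEST, invalid field
 * in CDB.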
10111 */ 10112 static int 10113 ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10114 { 10115 struct ctl_lun *lun = CTL_LUN(ctsio); 10116 struct scsi_inquiry *cdb; 10117 int alloc_len, retval; 10118 10119 cdb = (struct scsi_inquiry *)ctsio->cdb; 10120 alloc_len = scsi_2btoul(cdb->length); 10121 10122 switch (cdb->page_code) { 10123 case SVPD_SUPPORTED_PAGES: 10124 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10125 break; 10126 case SVPD_UNIT_SERIAL_NUMBER: 10127 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10128 break; 10129 case SVPD_DEVICE_ID: 10130 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10131 break; 10132 case SVPD_EXTENDED_INQUIRY_DATA: 10133 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10134 break; 10135 case SVPD_MODE_PAGE_POLICY: 10136 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10137 break; 10138 case SVPD_SCSI_PORTS: 10139 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10140 break; 10141 case SVPD_SCSI_TPC: 10142 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10143 break; 10144 case SVPD_SCSI_SFS: 10145 retval = ctl_inquiry_evpd_sfs(ctsio, alloc_len); 10146 break; 10147 case SVPD_BLOCK_LIMITS: 10148 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10149 goto err; 10150 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10151 break; 10152 case SVPD_BDC: 10153 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10154 goto err; 10155 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10156 break; 10157 case SVPD_LBP: 10158 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10159 goto err; 10160 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10161 break; 10162 default: 10163 err: 10164 ctl_set_invalid_field(ctsio, 10165 /*sks_valid*/ 1, 10166 /*command*/ 1, 10167 /*field*/ 2, 10168 /*bit_valid*/ 0, 10169 /*bit*/ 0); 10170 ctl_done((union ctl_io *)ctsio); 10171 retval = CTL_RETVAL_COMPLETE; 10172 break; 10173 } 10174 10175 return (retval); 10176 } 10177 10178 /* 10179 * Standard INQUIRY data. 10180 */ 10181 static int 10182 ctl_inquiry_std(struct ctl_scsiio *ctsio) 10183 { 10184 struct ctl_softc *softc = CTL_SOFTC(ctsio); 10185 struct ctl_port *port = CTL_PORT(ctsio); 10186 struct ctl_lun *lun = CTL_LUN(ctsio); 10187 struct scsi_inquiry_data *inq_ptr; 10188 struct scsi_inquiry *cdb; 10189 const char *val; 10190 uint32_t alloc_len, data_len; 10191 ctl_port_type port_type; 10192 10193 port_type = port->port_type; 10194 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10195 port_type = CTL_PORT_SCSI; 10196 10197 cdb = (struct scsi_inquiry *)ctsio->cdb; 10198 alloc_len = scsi_2btoul(cdb->length); 10199 10200 /* 10201 * We malloc the full inquiry data size here and fill it 10202 * in. If the user only asks for less, we'll give him 10203 * that much. 
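	 * The length malloc'ed below stops at the vendor_specific1 field,
	 * i.e. the fixed portion of the standard INQUIRY data with no
	 * vendor-specific bytes appended.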
10204 */ 10205 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10206 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10207 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10208 ctsio->kern_sg_entries = 0; 10209 ctsio->kern_rel_offset = 0; 10210 ctsio->kern_data_len = min(data_len, alloc_len); 10211 ctsio->kern_total_len = ctsio->kern_data_len; 10212 10213 if (lun != NULL) { 10214 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10215 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10216 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10217 lun->be_lun->lun_type; 10218 } else { 10219 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10220 lun->be_lun->lun_type; 10221 } 10222 if (lun->flags & CTL_LUN_REMOVABLE) 10223 inq_ptr->dev_qual2 |= SID_RMB; 10224 } else 10225 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10226 10227 /* RMB in byte 2 is 0 */ 10228 inq_ptr->version = SCSI_REV_SPC5; 10229 10230 /* 10231 * According to SAM-3, even if a device only supports a single 10232 * level of LUN addressing, it should still set the HISUP bit: 10233 * 10234 * 4.9.1 Logical unit numbers overview 10235 * 10236 * All logical unit number formats described in this standard are 10237 * hierarchical in structure even when only a single level in that 10238 * hierarchy is used. The HISUP bit shall be set to one in the 10239 * standard INQUIRY data (see SPC-2) when any logical unit number 10240 * format described in this standard is used. Non-hierarchical 10241 * formats are outside the scope of this standard. 10242 * 10243 * Therefore we set the HiSup bit here. 10244 * 10245 * The response format is 2, per SPC-3. 10246 */ 10247 inq_ptr->response_format = SID_HiSup | 2; 10248 10249 inq_ptr->additional_length = data_len - 10250 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10251 CTL_DEBUG_PRINT(("additional_length = %d\n", 10252 inq_ptr->additional_length)); 10253 10254 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10255 if (port_type == CTL_PORT_SCSI) 10256 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10257 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10258 inq_ptr->flags = SID_CmdQue; 10259 if (port_type == CTL_PORT_SCSI) 10260 inq_ptr->flags |= SID_WBus16 | SID_Sync; 10261 10262 /* 10263 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10264 * We have 8 bytes for the vendor name, and 16 bytes for the device 10265 * name and 4 bytes for the revision. 
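	 * When a LUN option overrides one of these strings, the field is
	 * filled with spaces first and then only strlen(val) bytes are
	 * copied in, which yields the right-padding described above.
	 */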
10266 */ 10267 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10268 "vendor", NULL)) == NULL) { 10269 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10270 } else { 10271 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10272 strncpy(inq_ptr->vendor, val, 10273 min(sizeof(inq_ptr->vendor), strlen(val))); 10274 } 10275 if (lun == NULL) { 10276 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10277 sizeof(inq_ptr->product)); 10278 } else if ((val = dnvlist_get_string(lun->be_lun->options, "product", 10279 NULL)) == NULL) { 10280 switch (lun->be_lun->lun_type) { 10281 case T_DIRECT: 10282 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10283 sizeof(inq_ptr->product)); 10284 break; 10285 case T_PROCESSOR: 10286 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10287 sizeof(inq_ptr->product)); 10288 break; 10289 case T_CDROM: 10290 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, 10291 sizeof(inq_ptr->product)); 10292 break; 10293 default: 10294 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10295 sizeof(inq_ptr->product)); 10296 break; 10297 } 10298 } else { 10299 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10300 strncpy(inq_ptr->product, val, 10301 min(sizeof(inq_ptr->product), strlen(val))); 10302 } 10303 10304 /* 10305 * XXX make this a macro somewhere so it automatically gets 10306 * incremented when we make changes. 10307 */ 10308 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, 10309 "revision", NULL)) == NULL) { 10310 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10311 } else { 10312 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10313 strncpy(inq_ptr->revision, val, 10314 min(sizeof(inq_ptr->revision), strlen(val))); 10315 } 10316 10317 /* 10318 * For parallel SCSI, we support double transition and single 10319 * transition clocking. We also support QAS (Quick Arbitration 10320 * and Selection) and Information Unit transfers on both the 10321 * control and array devices. 
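	 * These bits are only meaningful on parallel SCSI transports, so
	 * they are reported only when the port type is CTL_PORT_SCSI.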
10322 */ 10323 if (port_type == CTL_PORT_SCSI) 10324 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10325 SID_SPI_IUS; 10326 10327 /* SAM-6 (no version claimed) */ 10328 scsi_ulto2b(0x00C0, inq_ptr->version1); 10329 /* SPC-5 (no version claimed) */ 10330 scsi_ulto2b(0x05C0, inq_ptr->version2); 10331 if (port_type == CTL_PORT_FC) { 10332 /* FCP-2 ANSI INCITS.350:2003 */ 10333 scsi_ulto2b(0x0917, inq_ptr->version3); 10334 } else if (port_type == CTL_PORT_SCSI) { 10335 /* SPI-4 ANSI INCITS.362:200x */ 10336 scsi_ulto2b(0x0B56, inq_ptr->version3); 10337 } else if (port_type == CTL_PORT_ISCSI) { 10338 /* iSCSI (no version claimed) */ 10339 scsi_ulto2b(0x0960, inq_ptr->version3); 10340 } else if (port_type == CTL_PORT_SAS) { 10341 /* SAS (no version claimed) */ 10342 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10343 } else if (port_type == CTL_PORT_UMASS) { 10344 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ 10345 scsi_ulto2b(0x1730, inq_ptr->version3); 10346 } 10347 10348 if (lun == NULL) { 10349 /* SBC-4 (no version claimed) */ 10350 scsi_ulto2b(0x0600, inq_ptr->version4); 10351 } else { 10352 switch (lun->be_lun->lun_type) { 10353 case T_DIRECT: 10354 /* SBC-4 (no version claimed) */ 10355 scsi_ulto2b(0x0600, inq_ptr->version4); 10356 break; 10357 case T_PROCESSOR: 10358 break; 10359 case T_CDROM: 10360 /* MMC-6 (no version claimed) */ 10361 scsi_ulto2b(0x04E0, inq_ptr->version4); 10362 break; 10363 default: 10364 break; 10365 } 10366 } 10367 10368 ctl_set_success(ctsio); 10369 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10370 ctsio->be_move_done = ctl_config_move_done; 10371 ctl_datamove((union ctl_io *)ctsio); 10372 return (CTL_RETVAL_COMPLETE); 10373 } 10374 10375 int 10376 ctl_inquiry(struct ctl_scsiio *ctsio) 10377 { 10378 struct scsi_inquiry *cdb; 10379 int retval; 10380 10381 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10382 10383 cdb = (struct scsi_inquiry *)ctsio->cdb; 10384 if (cdb->byte2 & SI_EVPD) 10385 retval = ctl_inquiry_evpd(ctsio); 10386 else if (cdb->page_code == 0) 10387 retval = ctl_inquiry_std(ctsio); 10388 else { 10389 ctl_set_invalid_field(ctsio, 10390 /*sks_valid*/ 1, 10391 /*command*/ 1, 10392 /*field*/ 2, 10393 /*bit_valid*/ 0, 10394 /*bit*/ 0); 10395 ctl_done((union ctl_io *)ctsio); 10396 return (CTL_RETVAL_COMPLETE); 10397 } 10398 10399 return (retval); 10400 } 10401 10402 int 10403 ctl_get_config(struct ctl_scsiio *ctsio) 10404 { 10405 struct ctl_lun *lun = CTL_LUN(ctsio); 10406 struct scsi_get_config_header *hdr; 10407 struct scsi_get_config_feature *feature; 10408 struct scsi_get_config *cdb; 10409 uint32_t alloc_len, data_len; 10410 int rt, starting; 10411 10412 cdb = (struct scsi_get_config *)ctsio->cdb; 10413 rt = (cdb->rt & SGC_RT_MASK); 10414 starting = scsi_2btoul(cdb->starting_feature); 10415 alloc_len = scsi_2btoul(cdb->length); 10416 10417 data_len = sizeof(struct scsi_get_config_header) + 10418 sizeof(struct scsi_get_config_feature) + 8 + 10419 sizeof(struct scsi_get_config_feature) + 8 + 10420 sizeof(struct scsi_get_config_feature) + 4 + 10421 sizeof(struct scsi_get_config_feature) + 4 + 10422 sizeof(struct scsi_get_config_feature) + 8 + 10423 sizeof(struct scsi_get_config_feature) + 10424 sizeof(struct scsi_get_config_feature) + 4 + 10425 sizeof(struct scsi_get_config_feature) + 4 + 10426 sizeof(struct scsi_get_config_feature) + 4 + 10427 sizeof(struct scsi_get_config_feature) + 4 + 10428 sizeof(struct scsi_get_config_feature) + 4 + 10429 sizeof(struct scsi_get_config_feature) + 4; 10430 ctsio->kern_data_ptr = malloc(data_len, M_CTL, 
M_WAITOK | M_ZERO); 10431 ctsio->kern_sg_entries = 0; 10432 ctsio->kern_rel_offset = 0; 10433 10434 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; 10435 if (lun->flags & CTL_LUN_NO_MEDIA) 10436 scsi_ulto2b(0x0000, hdr->current_profile); 10437 else 10438 scsi_ulto2b(0x0010, hdr->current_profile); 10439 feature = (struct scsi_get_config_feature *)(hdr + 1); 10440 10441 if (starting > 0x003b) 10442 goto done; 10443 if (starting > 0x003a) 10444 goto f3b; 10445 if (starting > 0x002b) 10446 goto f3a; 10447 if (starting > 0x002a) 10448 goto f2b; 10449 if (starting > 0x001f) 10450 goto f2a; 10451 if (starting > 0x001e) 10452 goto f1f; 10453 if (starting > 0x001d) 10454 goto f1e; 10455 if (starting > 0x0010) 10456 goto f1d; 10457 if (starting > 0x0003) 10458 goto f10; 10459 if (starting > 0x0002) 10460 goto f3; 10461 if (starting > 0x0001) 10462 goto f2; 10463 if (starting > 0x0000) 10464 goto f1; 10465 10466 /* Profile List */ 10467 scsi_ulto2b(0x0000, feature->feature_code); 10468 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; 10469 feature->add_length = 8; 10470 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ 10471 feature->feature_data[2] = 0x00; 10472 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ 10473 feature->feature_data[6] = 0x01; 10474 feature = (struct scsi_get_config_feature *) 10475 &feature->feature_data[feature->add_length]; 10476 10477 f1: /* Core */ 10478 scsi_ulto2b(0x0001, feature->feature_code); 10479 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10480 feature->add_length = 8; 10481 scsi_ulto4b(0x00000000, &feature->feature_data[0]); 10482 feature->feature_data[4] = 0x03; 10483 feature = (struct scsi_get_config_feature *) 10484 &feature->feature_data[feature->add_length]; 10485 10486 f2: /* Morphing */ 10487 scsi_ulto2b(0x0002, feature->feature_code); 10488 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10489 feature->add_length = 4; 10490 feature->feature_data[0] = 0x02; 10491 feature = (struct scsi_get_config_feature *) 10492 &feature->feature_data[feature->add_length]; 10493 10494 f3: /* Removable Medium */ 10495 scsi_ulto2b(0x0003, feature->feature_code); 10496 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; 10497 feature->add_length = 4; 10498 feature->feature_data[0] = 0x39; 10499 feature = (struct scsi_get_config_feature *) 10500 &feature->feature_data[feature->add_length]; 10501 10502 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) 10503 goto done; 10504 10505 f10: /* Random Read */ 10506 scsi_ulto2b(0x0010, feature->feature_code); 10507 feature->flags = 0x00; 10508 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10509 feature->flags |= SGC_F_CURRENT; 10510 feature->add_length = 8; 10511 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); 10512 scsi_ulto2b(1, &feature->feature_data[4]); 10513 feature->feature_data[6] = 0x00; 10514 feature = (struct scsi_get_config_feature *) 10515 &feature->feature_data[feature->add_length]; 10516 10517 f1d: /* Multi-Read */ 10518 scsi_ulto2b(0x001D, feature->feature_code); 10519 feature->flags = 0x00; 10520 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10521 feature->flags |= SGC_F_CURRENT; 10522 feature->add_length = 0; 10523 feature = (struct scsi_get_config_feature *) 10524 &feature->feature_data[feature->add_length]; 10525 10526 f1e: /* CD Read */ 10527 scsi_ulto2b(0x001E, feature->feature_code); 10528 feature->flags = 0x00; 10529 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10530 feature->flags |= SGC_F_CURRENT; 10531 feature->add_length = 
4; 10532 feature->feature_data[0] = 0x00; 10533 feature = (struct scsi_get_config_feature *) 10534 &feature->feature_data[feature->add_length]; 10535 10536 f1f: /* DVD Read */ 10537 scsi_ulto2b(0x001F, feature->feature_code); 10538 feature->flags = 0x08; 10539 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10540 feature->flags |= SGC_F_CURRENT; 10541 feature->add_length = 4; 10542 feature->feature_data[0] = 0x01; 10543 feature->feature_data[2] = 0x03; 10544 feature = (struct scsi_get_config_feature *) 10545 &feature->feature_data[feature->add_length]; 10546 10547 f2a: /* DVD+RW */ 10548 scsi_ulto2b(0x002A, feature->feature_code); 10549 feature->flags = 0x04; 10550 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10551 feature->flags |= SGC_F_CURRENT; 10552 feature->add_length = 4; 10553 feature->feature_data[0] = 0x00; 10554 feature->feature_data[1] = 0x00; 10555 feature = (struct scsi_get_config_feature *) 10556 &feature->feature_data[feature->add_length]; 10557 10558 f2b: /* DVD+R */ 10559 scsi_ulto2b(0x002B, feature->feature_code); 10560 feature->flags = 0x00; 10561 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10562 feature->flags |= SGC_F_CURRENT; 10563 feature->add_length = 4; 10564 feature->feature_data[0] = 0x00; 10565 feature = (struct scsi_get_config_feature *) 10566 &feature->feature_data[feature->add_length]; 10567 10568 f3a: /* DVD+RW Dual Layer */ 10569 scsi_ulto2b(0x003A, feature->feature_code); 10570 feature->flags = 0x00; 10571 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10572 feature->flags |= SGC_F_CURRENT; 10573 feature->add_length = 4; 10574 feature->feature_data[0] = 0x00; 10575 feature->feature_data[1] = 0x00; 10576 feature = (struct scsi_get_config_feature *) 10577 &feature->feature_data[feature->add_length]; 10578 10579 f3b: /* DVD+R Dual Layer */ 10580 scsi_ulto2b(0x003B, feature->feature_code); 10581 feature->flags = 0x00; 10582 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 10583 feature->flags |= SGC_F_CURRENT; 10584 feature->add_length = 4; 10585 feature->feature_data[0] = 0x00; 10586 feature = (struct scsi_get_config_feature *) 10587 &feature->feature_data[feature->add_length]; 10588 10589 done: 10590 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10591 if (rt == SGC_RT_SPECIFIC && data_len > 4) { 10592 feature = (struct scsi_get_config_feature *)(hdr + 1); 10593 if (scsi_2btoul(feature->feature_code) == starting) 10594 feature = (struct scsi_get_config_feature *) 10595 &feature->feature_data[feature->add_length]; 10596 data_len = (uint8_t *)feature - (uint8_t *)hdr; 10597 } 10598 scsi_ulto4b(data_len - 4, hdr->data_length); 10599 ctsio->kern_data_len = min(data_len, alloc_len); 10600 ctsio->kern_total_len = ctsio->kern_data_len; 10601 10602 ctl_set_success(ctsio); 10603 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10604 ctsio->be_move_done = ctl_config_move_done; 10605 ctl_datamove((union ctl_io *)ctsio); 10606 return (CTL_RETVAL_COMPLETE); 10607 } 10608 10609 int 10610 ctl_get_event_status(struct ctl_scsiio *ctsio) 10611 { 10612 struct scsi_get_event_status_header *hdr; 10613 struct scsi_get_event_status *cdb; 10614 uint32_t alloc_len, data_len; 10615 10616 cdb = (struct scsi_get_event_status *)ctsio->cdb; 10617 if ((cdb->byte2 & SGESN_POLLED) == 0) { 10618 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 10619 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 10620 ctl_done((union ctl_io *)ctsio); 10621 return (CTL_RETVAL_COMPLETE); 10622 } 10623 alloc_len = scsi_2btoul(cdb->length); 10624 10625 data_len = sizeof(struct scsi_get_event_status_header); 10626 
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10627 ctsio->kern_sg_entries = 0; 10628 ctsio->kern_rel_offset = 0; 10629 ctsio->kern_data_len = min(data_len, alloc_len); 10630 ctsio->kern_total_len = ctsio->kern_data_len; 10631 10632 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; 10633 scsi_ulto2b(0, hdr->descr_length); 10634 hdr->nea_class = SGESN_NEA; 10635 hdr->supported_class = 0; 10636 10637 ctl_set_success(ctsio); 10638 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10639 ctsio->be_move_done = ctl_config_move_done; 10640 ctl_datamove((union ctl_io *)ctsio); 10641 return (CTL_RETVAL_COMPLETE); 10642 } 10643 10644 int 10645 ctl_mechanism_status(struct ctl_scsiio *ctsio) 10646 { 10647 struct scsi_mechanism_status_header *hdr; 10648 struct scsi_mechanism_status *cdb; 10649 uint32_t alloc_len, data_len; 10650 10651 cdb = (struct scsi_mechanism_status *)ctsio->cdb; 10652 alloc_len = scsi_2btoul(cdb->length); 10653 10654 data_len = sizeof(struct scsi_mechanism_status_header); 10655 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10656 ctsio->kern_sg_entries = 0; 10657 ctsio->kern_rel_offset = 0; 10658 ctsio->kern_data_len = min(data_len, alloc_len); 10659 ctsio->kern_total_len = ctsio->kern_data_len; 10660 10661 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; 10662 hdr->state1 = 0x00; 10663 hdr->state2 = 0xe0; 10664 scsi_ulto3b(0, hdr->lba); 10665 hdr->slots_num = 0; 10666 scsi_ulto2b(0, hdr->slots_length); 10667 10668 ctl_set_success(ctsio); 10669 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10670 ctsio->be_move_done = ctl_config_move_done; 10671 ctl_datamove((union ctl_io *)ctsio); 10672 return (CTL_RETVAL_COMPLETE); 10673 } 10674 10675 static void 10676 ctl_ultomsf(uint32_t lba, uint8_t *buf) 10677 { 10678 10679 lba += 150; 10680 buf[0] = 0; 10681 buf[1] = bin2bcd((lba / 75) / 60); 10682 buf[2] = bin2bcd((lba / 75) % 60); 10683 buf[3] = bin2bcd(lba % 75); 10684 } 10685 10686 int 10687 ctl_read_toc(struct ctl_scsiio *ctsio) 10688 { 10689 struct ctl_lun *lun = CTL_LUN(ctsio); 10690 struct scsi_read_toc_hdr *hdr; 10691 struct scsi_read_toc_type01_descr *descr; 10692 struct scsi_read_toc *cdb; 10693 uint32_t alloc_len, data_len; 10694 int format, msf; 10695 10696 cdb = (struct scsi_read_toc *)ctsio->cdb; 10697 msf = (cdb->byte2 & CD_MSF) != 0; 10698 format = cdb->format; 10699 alloc_len = scsi_2btoul(cdb->data_len); 10700 10701 data_len = sizeof(struct scsi_read_toc_hdr); 10702 if (format == 0) 10703 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); 10704 else 10705 data_len += sizeof(struct scsi_read_toc_type01_descr); 10706 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10707 ctsio->kern_sg_entries = 0; 10708 ctsio->kern_rel_offset = 0; 10709 ctsio->kern_data_len = min(data_len, alloc_len); 10710 ctsio->kern_total_len = ctsio->kern_data_len; 10711 10712 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; 10713 if (format == 0) { 10714 scsi_ulto2b(0x12, hdr->data_length); 10715 hdr->first = 1; 10716 hdr->last = 1; 10717 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10718 descr->addr_ctl = 0x14; 10719 descr->track_number = 1; 10720 if (msf) 10721 ctl_ultomsf(0, descr->track_start); 10722 else 10723 scsi_ulto4b(0, descr->track_start); 10724 descr++; 10725 descr->addr_ctl = 0x14; 10726 descr->track_number = 0xaa; 10727 if (msf) 10728 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); 10729 else 10730 scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); 10731 } else { 10732 
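		/*
		 * All formats other than 0 are answered the same way here:
		 * a single descriptor describing the lone session, starting
		 * at LBA 0.
		 */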
scsi_ulto2b(0x0a, hdr->data_length); 10733 hdr->first = 1; 10734 hdr->last = 1; 10735 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); 10736 descr->addr_ctl = 0x14; 10737 descr->track_number = 1; 10738 if (msf) 10739 ctl_ultomsf(0, descr->track_start); 10740 else 10741 scsi_ulto4b(0, descr->track_start); 10742 } 10743 10744 ctl_set_success(ctsio); 10745 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10746 ctsio->be_move_done = ctl_config_move_done; 10747 ctl_datamove((union ctl_io *)ctsio); 10748 return (CTL_RETVAL_COMPLETE); 10749 } 10750 10751 /* 10752 * For known CDB types, parse the LBA and length. 10753 */ 10754 static int 10755 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10756 { 10757 10758 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 10759 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 10760 10761 switch (io->scsiio.cdb[0]) { 10762 case COMPARE_AND_WRITE: { 10763 struct scsi_compare_and_write *cdb; 10764 10765 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10766 10767 *lba = scsi_8btou64(cdb->addr); 10768 *len = cdb->length; 10769 break; 10770 } 10771 case READ_6: 10772 case WRITE_6: { 10773 struct scsi_rw_6 *cdb; 10774 10775 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10776 10777 *lba = scsi_3btoul(cdb->addr); 10778 /* only 5 bits are valid in the most significant address byte */ 10779 *lba &= 0x1fffff; 10780 *len = cdb->length; 10781 break; 10782 } 10783 case READ_10: 10784 case WRITE_10: { 10785 struct scsi_rw_10 *cdb; 10786 10787 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10788 10789 *lba = scsi_4btoul(cdb->addr); 10790 *len = scsi_2btoul(cdb->length); 10791 break; 10792 } 10793 case WRITE_VERIFY_10: { 10794 struct scsi_write_verify_10 *cdb; 10795 10796 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10797 10798 *lba = scsi_4btoul(cdb->addr); 10799 *len = scsi_2btoul(cdb->length); 10800 break; 10801 } 10802 case READ_12: 10803 case WRITE_12: { 10804 struct scsi_rw_12 *cdb; 10805 10806 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10807 10808 *lba = scsi_4btoul(cdb->addr); 10809 *len = scsi_4btoul(cdb->length); 10810 break; 10811 } 10812 case WRITE_VERIFY_12: { 10813 struct scsi_write_verify_12 *cdb; 10814 10815 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10816 10817 *lba = scsi_4btoul(cdb->addr); 10818 *len = scsi_4btoul(cdb->length); 10819 break; 10820 } 10821 case READ_16: 10822 case WRITE_16: { 10823 struct scsi_rw_16 *cdb; 10824 10825 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10826 10827 *lba = scsi_8btou64(cdb->addr); 10828 *len = scsi_4btoul(cdb->length); 10829 break; 10830 } 10831 case WRITE_ATOMIC_16: { 10832 struct scsi_write_atomic_16 *cdb; 10833 10834 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; 10835 10836 *lba = scsi_8btou64(cdb->addr); 10837 *len = scsi_2btoul(cdb->length); 10838 break; 10839 } 10840 case WRITE_VERIFY_16: { 10841 struct scsi_write_verify_16 *cdb; 10842 10843 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10844 10845 *lba = scsi_8btou64(cdb->addr); 10846 *len = scsi_4btoul(cdb->length); 10847 break; 10848 } 10849 case WRITE_SAME_10: { 10850 struct scsi_write_same_10 *cdb; 10851 10852 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10853 10854 *lba = scsi_4btoul(cdb->addr); 10855 *len = scsi_2btoul(cdb->length); 10856 break; 10857 } 10858 case WRITE_SAME_16: { 10859 struct scsi_write_same_16 *cdb; 10860 10861 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10862 10863 *lba = scsi_8btou64(cdb->addr); 10864 *len = scsi_4btoul(cdb->length); 10865 break; 10866 } 10867 case 
VERIFY_10: { 10868 struct scsi_verify_10 *cdb; 10869 10870 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10871 10872 *lba = scsi_4btoul(cdb->addr); 10873 *len = scsi_2btoul(cdb->length); 10874 break; 10875 } 10876 case VERIFY_12: { 10877 struct scsi_verify_12 *cdb; 10878 10879 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10880 10881 *lba = scsi_4btoul(cdb->addr); 10882 *len = scsi_4btoul(cdb->length); 10883 break; 10884 } 10885 case VERIFY_16: { 10886 struct scsi_verify_16 *cdb; 10887 10888 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10889 10890 *lba = scsi_8btou64(cdb->addr); 10891 *len = scsi_4btoul(cdb->length); 10892 break; 10893 } 10894 case UNMAP: { 10895 *lba = 0; 10896 *len = UINT64_MAX; 10897 break; 10898 } 10899 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10900 struct scsi_get_lba_status *cdb; 10901 10902 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10903 *lba = scsi_8btou64(cdb->addr); 10904 *len = UINT32_MAX; 10905 break; 10906 } 10907 default: 10908 *lba = 0; 10909 *len = UINT64_MAX; 10910 return (1); 10911 } 10912 10913 return (0); 10914 } 10915 10916 static ctl_action 10917 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10918 bool seq) 10919 { 10920 uint64_t endlba1, endlba2; 10921 10922 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10923 endlba2 = lba2 + len2 - 1; 10924 10925 if ((endlba1 < lba2) || (endlba2 < lba1)) 10926 return (CTL_ACTION_PASS); 10927 else 10928 return (CTL_ACTION_BLOCK); 10929 } 10930 10931 static int 10932 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10933 { 10934 struct ctl_ptr_len_flags *ptrlen; 10935 struct scsi_unmap_desc *buf, *end, *range; 10936 uint64_t lba; 10937 uint32_t len; 10938 10939 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 10940 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 10941 10942 /* If not UNMAP -- go other way. */ 10943 if (io->scsiio.cdb[0] != UNMAP) 10944 return (CTL_ACTION_SKIP); 10945 10946 /* If UNMAP without data -- block and wait for data. */ 10947 ptrlen = (struct ctl_ptr_len_flags *) 10948 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10949 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10950 ptrlen->ptr == NULL) 10951 return (CTL_ACTION_BLOCK); 10952 10953 /* UNMAP with data -- check for collision. 
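	 * Two ranges [lba, lba+len) and [lba2, lba2+len2) overlap exactly
	 * when lba < lba2 + len2 and lba + len > lba2; that test is applied
	 * to every descriptor in the UNMAP parameter list below.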
*/ 10954 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10955 end = buf + ptrlen->len / sizeof(*buf); 10956 for (range = buf; range < end; range++) { 10957 lba = scsi_8btou64(range->lba); 10958 len = scsi_4btoul(range->length); 10959 if ((lba < lba2 + len2) && (lba + len > lba2)) 10960 return (CTL_ACTION_BLOCK); 10961 } 10962 return (CTL_ACTION_PASS); 10963 } 10964 10965 static ctl_action 10966 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10967 { 10968 uint64_t lba1, lba2; 10969 uint64_t len1, len2; 10970 int retval; 10971 10972 retval = ctl_get_lba_len(io2, &lba2, &len2); 10973 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10974 10975 retval = ctl_extent_check_unmap(io1, lba2, len2); 10976 if (retval != CTL_ACTION_SKIP) 10977 return (retval); 10978 10979 retval = ctl_get_lba_len(io1, &lba1, &len1); 10980 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10981 10982 if (seq && (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)) 10983 seq = FALSE; 10984 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10985 } 10986 10987 static ctl_action 10988 ctl_seq_check(union ctl_io *io1, union ctl_io *io2) 10989 { 10990 uint64_t lba1, lba2; 10991 uint64_t len1, len2; 10992 int retval __diagused; 10993 10994 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) 10995 return (CTL_ACTION_PASS); 10996 retval = ctl_get_lba_len(io1, &lba1, &len1); 10997 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 10998 retval = ctl_get_lba_len(io2, &lba2, &len2); 10999 KASSERT(retval == 0, ("ctl_get_lba_len() error")); 11000 11001 if (lba1 + len1 == lba2) 11002 return (CTL_ACTION_BLOCK); 11003 return (CTL_ACTION_PASS); 11004 } 11005 11006 static ctl_action 11007 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 11008 const uint8_t *serialize_row, union ctl_io *ooa_io) 11009 { 11010 11011 /* 11012 * The initiator attempted multiple untagged commands at the same 11013 * time. Can't do that. 11014 */ 11015 if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11016 && __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11017 && ((pending_io->io_hdr.nexus.targ_port == 11018 ooa_io->io_hdr.nexus.targ_port) 11019 && (pending_io->io_hdr.nexus.initid == 11020 ooa_io->io_hdr.nexus.initid)) 11021 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 11022 CTL_FLAG_STATUS_SENT)) == 0)) 11023 return (CTL_ACTION_OVERLAP); 11024 11025 /* 11026 * The initiator attempted to send multiple tagged commands with 11027 * the same ID. (It's fine if different initiators have the same 11028 * tag ID.) 11029 * 11030 * Even if all of those conditions are true, we don't kill the I/O 11031 * if the command ahead of us has been aborted. We won't end up 11032 * sending it to the FETD, and it's perfectly legal to resend a 11033 * command with the same tag number as long as the previous 11034 * instance of this tag number has been aborted somehow. 11035 */ 11036 if (__predict_true(pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 11037 && __predict_true(ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 11038 && __predict_false(pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 11039 && ((pending_io->io_hdr.nexus.targ_port == 11040 ooa_io->io_hdr.nexus.targ_port) 11041 && (pending_io->io_hdr.nexus.initid == 11042 ooa_io->io_hdr.nexus.initid)) 11043 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 11044 CTL_FLAG_STATUS_SENT)) == 0)) 11045 return (CTL_ACTION_OVERLAP_TAG); 11046 11047 /* 11048 * If we get a head of queue tag, SAM-3 says that we should 11049 * immediately execute it. 
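	 * ("Immediately" here means ahead of any SIMPLE or ORDERED tasks
	 * already waiting in the task set.)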
11050 * 11051 * What happens if this command would normally block for some other 11052 * reason? e.g. a request sense with a head of queue tag 11053 * immediately after a write. Normally that would block, but this 11054 * will result in its getting executed immediately... 11055 * 11056 * We currently return "pass" instead of "skip", so we'll end up 11057 * going through the rest of the queue to check for overlapped tags. 11058 * 11059 * XXX KDM check for other types of blockage first?? 11060 */ 11061 if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 11062 return (CTL_ACTION_PASS); 11063 11064 /* 11065 * Simple tags get blocked until all head of queue and ordered tags 11066 * ahead of them have completed. I'm lumping untagged commands in 11067 * with simple tags here. XXX KDM is that the right thing to do? 11068 */ 11069 if (__predict_false(ooa_io->scsiio.tag_type == CTL_TAG_ORDERED) || 11070 __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 11071 return (CTL_ACTION_BLOCK); 11072 11073 /* Unsupported command in OOA queue. */ 11074 if (__predict_false(ooa_io->scsiio.seridx == CTL_SERIDX_INVLD)) 11075 return (CTL_ACTION_PASS); 11076 11077 switch (serialize_row[ooa_io->scsiio.seridx]) { 11078 case CTL_SER_SEQ: 11079 if (lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 11080 return (ctl_seq_check(ooa_io, pending_io)); 11081 /* FALLTHROUGH */ 11082 case CTL_SER_PASS: 11083 return (CTL_ACTION_PASS); 11084 case CTL_SER_EXTENTOPT: 11085 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) == 11086 SCP_QUEUE_ALG_UNRESTRICTED) 11087 return (CTL_ACTION_PASS); 11088 /* FALLTHROUGH */ 11089 case CTL_SER_EXTENT: 11090 return (ctl_extent_check(ooa_io, pending_io, 11091 (lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 11092 case CTL_SER_BLOCKOPT: 11093 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) == 11094 SCP_QUEUE_ALG_UNRESTRICTED) 11095 return (CTL_ACTION_PASS); 11096 /* FALLTHROUGH */ 11097 case CTL_SER_BLOCK: 11098 return (CTL_ACTION_BLOCK); 11099 default: 11100 __assert_unreachable(); 11101 } 11102 } 11103 11104 /* 11105 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 11106 * Assumptions: 11107 * - pending_io is generally either incoming, or on the blocked queue 11108 * - starting I/O is the I/O we want to start the check with. 11109 */ 11110 static ctl_action 11111 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 11112 union ctl_io **starting_io) 11113 { 11114 union ctl_io *ooa_io = *starting_io; 11115 const uint8_t *serialize_row; 11116 ctl_action action; 11117 11118 mtx_assert(&lun->lun_lock, MA_OWNED); 11119 11120 /* 11121 * Aborted commands are not going to be executed and may even 11122 * not report completion, so we don't care about their order. 11123 * Let them complete ASAP to clean the OOA queue. 11124 */ 11125 if (__predict_false(pending_io->io_hdr.flags & CTL_FLAG_ABORT)) 11126 return (CTL_ACTION_SKIP); 11127 11128 /* 11129 * Ordered tags have to block until all items ahead of them have 11130 * completed. If we get called with an ordered tag, we always 11131 * block, if something else is ahead of us in the queue. 11132 */ 11133 if ((pending_io->scsiio.tag_type == CTL_TAG_ORDERED) && 11134 (ooa_io != NULL)) 11135 return (CTL_ACTION_BLOCK); 11136 11137 serialize_row = ctl_serialize_table[pending_io->scsiio.seridx]; 11138 11139 /* 11140 * Run back along the OOA queue, starting with the current 11141 * blocked I/O and going through every I/O before it on the 11142 * queue. 
If starting_io is NULL, we'll just end up returning 11143 * CTL_ACTION_PASS. 11144 */ 11145 for (; ooa_io != NULL; 11146 ooa_io = (union ctl_io *)LIST_NEXT(&ooa_io->io_hdr, ooa_links)) { 11147 action = ctl_check_for_blockage(lun, pending_io, serialize_row, 11148 ooa_io); 11149 if (action != CTL_ACTION_PASS) { 11150 *starting_io = ooa_io; 11151 return (action); 11152 } 11153 } 11154 11155 *starting_io = NULL; 11156 return (CTL_ACTION_PASS); 11157 } 11158 11159 /* 11160 * Try to unblock the specified I/O. 11161 * 11162 * The skip parameter allows explicitly skipping the present blocker of the 11163 * I/O, starting from the previous one on the OOA queue. It can be used when 11164 * we know for sure that the blocker I/O no longer counts. 11165 */ 11166 static void 11167 ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip) 11168 { 11169 struct ctl_softc *softc = lun->ctl_softc; 11170 union ctl_io *bio, *obio; 11171 const struct ctl_cmd_entry *entry; 11172 union ctl_ha_msg msg_info; 11173 ctl_action action; 11174 11175 mtx_assert(&lun->lun_lock, MA_OWNED); 11176 11177 if (io->io_hdr.blocker == NULL) 11178 return; 11179 11180 obio = bio = io->io_hdr.blocker; 11181 if (skip) 11182 bio = (union ctl_io *)LIST_NEXT(&bio->io_hdr, ooa_links); 11183 action = ctl_check_ooa(lun, io, &bio); 11184 if (action == CTL_ACTION_BLOCK) { 11185 /* Still blocked, but may be by different I/O now. */ 11186 if (bio != obio) { 11187 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, 11188 &io->io_hdr, blocked_links); 11189 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, 11190 &io->io_hdr, blocked_links); 11191 io->io_hdr.blocker = bio; 11192 } 11193 return; 11194 } 11195 11196 /* No longer blocked, one way or another. */ 11197 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); 11198 io->io_hdr.blocker = NULL; 11199 11200 switch (action) { 11201 case CTL_ACTION_PASS: 11202 case CTL_ACTION_SKIP: 11203 11204 /* Serializing commands from the other SC retire there. */ 11205 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && 11206 (softc->ha_mode != CTL_HA_MODE_XFER)) { 11207 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11208 msg_info.hdr.original_sc = io->io_hdr.remote_io; 11209 msg_info.hdr.serializing_sc = io; 11210 msg_info.hdr.msg_type = CTL_MSG_R2R; 11211 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11212 sizeof(msg_info.hdr), M_NOWAIT); 11213 break; 11214 } 11215 11216 /* 11217 * Check this I/O for LUN state changes that may have happened 11218 * while this command was blocked. The LUN state may have been 11219 * changed by a command ahead of us in the queue. 11220 */ 11221 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 11222 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 11223 ctl_done(io); 11224 break; 11225 } 11226 11227 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11228 ctl_enqueue_rtr(io); 11229 break; 11230 default: 11231 __assert_unreachable(); 11232 case CTL_ACTION_OVERLAP: 11233 ctl_set_overlapped_cmd(&io->scsiio); 11234 goto error; 11235 case CTL_ACTION_OVERLAP_TAG: 11236 ctl_set_overlapped_tag(&io->scsiio, 11237 io->scsiio.tag_num & 0xff); 11238 error: 11239 /* Serializing commands from the other SC are done here. 
*/ 11240 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && 11241 (softc->ha_mode != CTL_HA_MODE_XFER)) { 11242 ctl_try_unblock_others(lun, io, TRUE); 11243 LIST_REMOVE(&io->io_hdr, ooa_links); 11244 11245 ctl_copy_sense_data_back(io, &msg_info); 11246 msg_info.hdr.original_sc = io->io_hdr.remote_io; 11247 msg_info.hdr.serializing_sc = NULL; 11248 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 11249 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11250 sizeof(msg_info.scsi), M_WAITOK); 11251 ctl_free_io(io); 11252 break; 11253 } 11254 11255 ctl_done(io); 11256 break; 11257 } 11258 } 11259 11260 /* 11261 * Try to unblock I/Os blocked by the specified I/O. 11262 * 11263 * The skip parameter allows explicitly skipping the specified I/O as a 11264 * blocker, starting from the previous one on the OOA queue. It can be used 11265 * when we know for sure that the specified I/O no longer counts (it is done). 11266 * It still has to be on the OOA queue, though, so that we know where to start. 11267 */ 11268 static void 11269 ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip) 11270 { 11271 union ctl_io *io, *next_io; 11272 11273 mtx_assert(&lun->lun_lock, MA_OWNED); 11274 11275 for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue); 11276 io != NULL; io = next_io) { 11277 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links); 11278 11279 KASSERT(io->io_hdr.blocker != NULL, 11280 ("I/O %p on blocked list without blocker", io)); 11281 ctl_try_unblock_io(lun, io, skip); 11282 } 11283 KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue), 11284 ("blocked_queue is not empty after skipping %p", bio)); 11285 } 11286 11287 /* 11288 * This routine (with one exception) checks LUN flags that can be set by 11289 * commands ahead of us in the OOA queue. These flags have to be checked 11290 * when a command initially comes in, and when we pull a command off the 11291 * blocked queue and are preparing to execute it. The reason we have to 11292 * check these flags for commands on the blocked queue is that the LUN 11293 * state may have been changed by a command ahead of us while we're on the 11294 * blocked queue. 11295 * 11296 * Ordering is somewhat important with these checks, so please pay 11297 * careful attention to the placement of any new checks. 11298 */ 11299 static int 11300 ctl_scsiio_lun_check(struct ctl_lun *lun, 11301 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11302 { 11303 struct ctl_softc *softc = lun->ctl_softc; 11304 int retval; 11305 uint32_t residx; 11306 11307 retval = 0; 11308 11309 mtx_assert(&lun->lun_lock, MA_OWNED); 11310 11311 /* 11312 * If this shelf is a secondary shelf controller, we may have to 11313 * reject some commands disallowed by HA mode and link state. 
11314 */ 11315 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11316 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 11317 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11318 ctl_set_lun_unavail(ctsio); 11319 retval = 1; 11320 goto bailout; 11321 } 11322 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 11323 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 11324 ctl_set_lun_transit(ctsio); 11325 retval = 1; 11326 goto bailout; 11327 } 11328 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 11329 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 11330 ctl_set_lun_standby(ctsio); 11331 retval = 1; 11332 goto bailout; 11333 } 11334 11335 /* The rest of checks are only done on executing side */ 11336 if (softc->ha_mode == CTL_HA_MODE_XFER) 11337 goto bailout; 11338 } 11339 11340 if (entry->pattern & CTL_LUN_PAT_WRITE) { 11341 if (lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 11342 ctl_set_hw_write_protected(ctsio); 11343 retval = 1; 11344 goto bailout; 11345 } 11346 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { 11347 ctl_set_sense(ctsio, /*current_error*/ 1, 11348 /*sense_key*/ SSD_KEY_DATA_PROTECT, 11349 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); 11350 retval = 1; 11351 goto bailout; 11352 } 11353 } 11354 11355 /* 11356 * Check for a reservation conflict. If this command isn't allowed 11357 * even on reserved LUNs, and if this initiator isn't the one who 11358 * reserved us, reject the command with a reservation conflict. 11359 */ 11360 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11361 if ((lun->flags & CTL_LUN_RESERVED) 11362 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11363 if (lun->res_idx != residx) { 11364 ctl_set_reservation_conflict(ctsio); 11365 retval = 1; 11366 goto bailout; 11367 } 11368 } 11369 11370 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || 11371 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { 11372 /* No reservation or command is allowed. */; 11373 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && 11374 (lun->pr_res_type == SPR_TYPE_WR_EX || 11375 lun->pr_res_type == SPR_TYPE_WR_EX_RO || 11376 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { 11377 /* The command is allowed for Write Exclusive resv. */; 11378 } else { 11379 /* 11380 * if we aren't registered or it's a res holder type 11381 * reservation and this isn't the res holder then set a 11382 * conflict. 
11383 */ 11384 if (ctl_get_prkey(lun, residx) == 0 || 11385 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { 11386 ctl_set_reservation_conflict(ctsio); 11387 retval = 1; 11388 goto bailout; 11389 } 11390 } 11391 11392 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { 11393 if (lun->flags & CTL_LUN_EJECTED) 11394 ctl_set_lun_ejected(ctsio); 11395 else if (lun->flags & CTL_LUN_NO_MEDIA) { 11396 if (lun->flags & CTL_LUN_REMOVABLE) 11397 ctl_set_lun_no_media(ctsio); 11398 else 11399 ctl_set_lun_int_reqd(ctsio); 11400 } else if (lun->flags & CTL_LUN_STOPPED) 11401 ctl_set_lun_stopped(ctsio); 11402 else 11403 goto bailout; 11404 retval = 1; 11405 goto bailout; 11406 } 11407 11408 bailout: 11409 return (retval); 11410 } 11411 11412 static void 11413 ctl_failover_io(union ctl_io *io, int have_lock) 11414 { 11415 ctl_set_busy(&io->scsiio); 11416 ctl_done(io); 11417 } 11418 11419 static void 11420 ctl_failover_lun(union ctl_io *rio) 11421 { 11422 struct ctl_softc *softc = CTL_SOFTC(rio); 11423 struct ctl_lun *lun; 11424 struct ctl_io_hdr *io, *next_io; 11425 uint32_t targ_lun; 11426 11427 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; 11428 CTL_DEBUG_PRINT(("FAILOVER for lun %u\n", targ_lun)); 11429 11430 /* Find and lock the LUN. */ 11431 mtx_lock(&softc->ctl_lock); 11432 if (targ_lun > ctl_max_luns || 11433 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11434 mtx_unlock(&softc->ctl_lock); 11435 return; 11436 } 11437 mtx_lock(&lun->lun_lock); 11438 mtx_unlock(&softc->ctl_lock); 11439 if (lun->flags & CTL_LUN_DISABLED) { 11440 mtx_unlock(&lun->lun_lock); 11441 return; 11442 } 11443 11444 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11445 LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11446 /* We are master */ 11447 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11448 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11449 io->flags |= CTL_FLAG_ABORT | 11450 CTL_FLAG_FAILOVER; 11451 ctl_try_unblock_io(lun, 11452 (union ctl_io *)io, FALSE); 11453 } else { /* This can be only due to DATAMOVE */ 11454 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11455 io->flags &= ~CTL_FLAG_DMA_INPROG; 11456 io->flags |= CTL_FLAG_IO_ACTIVE; 11457 io->port_status = 31340; 11458 ctl_enqueue_isc((union ctl_io *)io); 11459 } 11460 } else 11461 /* We are slave */ 11462 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11463 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11464 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11465 io->flags |= CTL_FLAG_FAILOVER; 11466 } else { 11467 ctl_set_busy(&((union ctl_io *)io)-> 11468 scsiio); 11469 ctl_done((union ctl_io *)io); 11470 } 11471 } 11472 } 11473 } else { /* SERIALIZE modes */ 11474 LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11475 /* We are master */ 11476 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11477 if (io->blocker != NULL) { 11478 TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue, 11479 io, blocked_links); 11480 io->blocker = NULL; 11481 } 11482 ctl_try_unblock_others(lun, (union ctl_io *)io, 11483 TRUE); 11484 LIST_REMOVE(io, ooa_links); 11485 ctl_free_io((union ctl_io *)io); 11486 } else 11487 /* We are slave */ 11488 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11489 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11490 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 11491 ctl_set_busy(&((union ctl_io *)io)-> 11492 scsiio); 11493 ctl_done((union ctl_io *)io); 11494 } 11495 } 11496 } 11497 } 11498 mtx_unlock(&lun->lun_lock); 11499 } 11500 11501 static void 11502 ctl_scsiio_precheck(struct ctl_scsiio *ctsio) 11503 { 11504 struct ctl_softc *softc = CTL_SOFTC(ctsio); 11505 struct ctl_lun *lun; 11506 
const struct ctl_cmd_entry *entry; 11507 union ctl_io *bio; 11508 uint32_t initidx, targ_lun; 11509 11510 lun = NULL; 11511 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11512 if (targ_lun < ctl_max_luns) 11513 lun = softc->ctl_luns[targ_lun]; 11514 if (lun) { 11515 /* 11516 * If the LUN is invalid, pretend that it doesn't exist. 11517 * It will go away as soon as all pending I/O has been 11518 * completed. 11519 */ 11520 mtx_lock(&lun->lun_lock); 11521 if (lun->flags & CTL_LUN_DISABLED) { 11522 mtx_unlock(&lun->lun_lock); 11523 lun = NULL; 11524 } 11525 } 11526 CTL_LUN(ctsio) = lun; 11527 if (lun) { 11528 CTL_BACKEND_LUN(ctsio) = lun->be_lun; 11529 11530 /* 11531 * Every I/O goes into the OOA queue for a particular LUN, 11532 * and stays there until completion. 11533 */ 11534 #ifdef CTL_TIME_IO 11535 if (LIST_EMPTY(&lun->ooa_queue)) 11536 lun->idle_time += getsbinuptime() - lun->last_busy; 11537 #endif 11538 LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 11539 } 11540 11541 /* Get command entry and return error if it is unsupported. */ 11542 entry = ctl_validate_command(ctsio); 11543 if (entry == NULL) { 11544 if (lun) 11545 mtx_unlock(&lun->lun_lock); 11546 return; 11547 } 11548 11549 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11550 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11551 11552 /* 11553 * Check to see whether we can send this command to LUNs that don't 11554 * exist. This should pretty much only be the case for inquiry 11555 * and request sense. Further checks, below, really require having 11556 * a LUN, so we can't really check the command anymore. Just put 11557 * it on the rtr queue. 11558 */ 11559 if (lun == NULL) { 11560 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { 11561 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11562 ctl_enqueue_rtr((union ctl_io *)ctsio); 11563 return; 11564 } 11565 11566 ctl_set_unsupported_lun(ctsio); 11567 ctl_done((union ctl_io *)ctsio); 11568 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11569 return; 11570 } else { 11571 /* 11572 * Make sure we support this particular command on this LUN. 11573 * e.g., we don't support writes to the control LUN. 11574 */ 11575 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11576 mtx_unlock(&lun->lun_lock); 11577 ctl_set_invalid_opcode(ctsio); 11578 ctl_done((union ctl_io *)ctsio); 11579 return; 11580 } 11581 } 11582 11583 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11584 11585 /* 11586 * If we've got a request sense, it'll clear the contingent 11587 * allegiance condition. Otherwise, if we have a CA condition for 11588 * this initiator, clear it, because it sent down a command other 11589 * than request sense. 11590 */ 11591 if (ctsio->cdb[0] != REQUEST_SENSE) { 11592 struct scsi_sense_data *ps; 11593 11594 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; 11595 if (ps != NULL) 11596 ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0; 11597 } 11598 11599 /* 11600 * If the command has this flag set, it handles its own unit 11601 * attention reporting, so we shouldn't do anything. Otherwise we 11602 * check for any pending unit attentions, and send them back to the 11603 * initiator. We only do this when a command initially comes in, 11604 * not when we pull it off the blocked queue. 
11605 * 11606 * According to SAM-3, section 5.3.2, the order that things get 11607 * presented back to the host is basically unit attentions caused 11608 * by some sort of reset event, busy status, reservation conflicts 11609 * or task set full, and finally any other status. 11610 * 11611 * One issue here is that some of the unit attentions we report 11612 * don't fall into the "reset" category (e.g. "reported luns data 11613 * has changed"). So reporting it here, before the reservation 11614 * check, may be technically wrong. I guess the only thing to do 11615 * would be to check for and report the reset events here, and then 11616 * check for the other unit attention types after we check for a 11617 * reservation conflict. 11618 * 11619 * XXX KDM need to fix this 11620 */ 11621 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11622 ctl_ua_type ua_type; 11623 u_int sense_len = 0; 11624 11625 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11626 &sense_len, SSD_TYPE_NONE); 11627 if (ua_type != CTL_UA_NONE) { 11628 mtx_unlock(&lun->lun_lock); 11629 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11630 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11631 ctsio->sense_len = sense_len; 11632 ctl_done((union ctl_io *)ctsio); 11633 return; 11634 } 11635 } 11636 11637 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11638 mtx_unlock(&lun->lun_lock); 11639 ctl_done((union ctl_io *)ctsio); 11640 return; 11641 } 11642 11643 /* 11644 * XXX CHD this is where we want to send IO to other side if 11645 * this LUN is secondary on this SC. We will need to make a copy 11646 * of the IO and flag the IO on this side as SENT_2OTHER and flag 11647 * the copy we send as FROM_OTHER. 11648 * We also need to stuff the address of the original IO so we can 11649 * find it easily. Something similar will need to be done on the other 11650 * side so when we are done we can find the copy. 
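 *
 * (Clarifying note, not in the original comment: the code below already does
 * part of this -- the CTL_MSG_SERIALIZE message carries the address of the
 * original I/O in msg_info.hdr.original_sc, later HA messages hand it back
 * through original_sc/serializing_sc, and io_hdr.remote_io is used to
 * remember the peer's copy.)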
11651 */ 11652 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11653 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && 11654 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { 11655 union ctl_ha_msg msg_info; 11656 int isc_retval; 11657 11658 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11659 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11660 mtx_unlock(&lun->lun_lock); 11661 11662 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11663 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11664 msg_info.hdr.serializing_sc = NULL; 11665 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11666 msg_info.scsi.tag_num = ctsio->tag_num; 11667 msg_info.scsi.tag_type = ctsio->tag_type; 11668 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11669 msg_info.scsi.cdb_len = ctsio->cdb_len; 11670 msg_info.scsi.priority = ctsio->priority; 11671 11672 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11673 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11674 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11675 ctsio->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11676 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 11677 ctl_set_busy(ctsio); 11678 ctl_done((union ctl_io *)ctsio); 11679 return; 11680 } 11681 return; 11682 } 11683 11684 bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); 11685 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { 11686 case CTL_ACTION_PASS: 11687 case CTL_ACTION_SKIP: 11688 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11689 mtx_unlock(&lun->lun_lock); 11690 ctl_enqueue_rtr((union ctl_io *)ctsio); 11691 break; 11692 case CTL_ACTION_BLOCK: 11693 ctsio->io_hdr.blocker = bio; 11694 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, 11695 blocked_links); 11696 mtx_unlock(&lun->lun_lock); 11697 break; 11698 case CTL_ACTION_OVERLAP: 11699 mtx_unlock(&lun->lun_lock); 11700 ctl_set_overlapped_cmd(ctsio); 11701 ctl_done((union ctl_io *)ctsio); 11702 break; 11703 case CTL_ACTION_OVERLAP_TAG: 11704 mtx_unlock(&lun->lun_lock); 11705 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11706 ctl_done((union ctl_io *)ctsio); 11707 break; 11708 default: 11709 __assert_unreachable(); 11710 } 11711 } 11712 11713 const struct ctl_cmd_entry * 11714 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11715 { 11716 const struct ctl_cmd_entry *entry; 11717 int service_action; 11718 11719 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11720 if (sa) 11721 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11722 if (entry->flags & CTL_CMD_FLAG_SA5) { 11723 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11724 entry = &((const struct ctl_cmd_entry *) 11725 entry->execute)[service_action]; 11726 } 11727 return (entry); 11728 } 11729 11730 const struct ctl_cmd_entry * 11731 ctl_validate_command(struct ctl_scsiio *ctsio) 11732 { 11733 const struct ctl_cmd_entry *entry; 11734 int i, sa; 11735 uint8_t diff; 11736 11737 entry = ctl_get_cmd_entry(ctsio, &sa); 11738 ctsio->seridx = entry->seridx; 11739 if (entry->execute == NULL) { 11740 if (sa) 11741 ctl_set_invalid_field(ctsio, 11742 /*sks_valid*/ 1, 11743 /*command*/ 1, 11744 /*field*/ 1, 11745 /*bit_valid*/ 1, 11746 /*bit*/ 4); 11747 else 11748 ctl_set_invalid_opcode(ctsio); 11749 ctl_done((union ctl_io *)ctsio); 11750 return (NULL); 11751 } 11752 KASSERT(entry->length > 0, 11753 ("Not defined length for command 0x%02x/0x%02x", 11754 ctsio->cdb[0], ctsio->cdb[1])); 11755 for (i = 1; i < entry->length; i++) { 11756 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11757 if (diff == 0) 11758 continue; 11759 ctl_set_invalid_field(ctsio, 11760 
/*sks_valid*/ 1, 11761 /*command*/ 1, 11762 /*field*/ i, 11763 /*bit_valid*/ 1, 11764 /*bit*/ fls(diff) - 1); 11765 ctl_done((union ctl_io *)ctsio); 11766 return (NULL); 11767 } 11768 return (entry); 11769 } 11770 11771 static int 11772 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11773 { 11774 11775 switch (lun_type) { 11776 case T_DIRECT: 11777 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) 11778 return (0); 11779 break; 11780 case T_PROCESSOR: 11781 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11782 return (0); 11783 break; 11784 case T_CDROM: 11785 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) 11786 return (0); 11787 break; 11788 default: 11789 return (0); 11790 } 11791 return (1); 11792 } 11793 11794 static int 11795 ctl_scsiio(struct ctl_scsiio *ctsio) 11796 { 11797 int retval; 11798 const struct ctl_cmd_entry *entry; 11799 11800 retval = CTL_RETVAL_COMPLETE; 11801 11802 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11803 11804 entry = ctl_get_cmd_entry(ctsio, NULL); 11805 11806 /* 11807 * If this I/O has been aborted, just send it straight to 11808 * ctl_done() without executing it. 11809 */ 11810 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11811 ctl_done((union ctl_io *)ctsio); 11812 goto bailout; 11813 } 11814 11815 /* 11816 * All the checks should have been handled by ctl_scsiio_precheck(). 11817 * We should be clear now to just execute the I/O. 11818 */ 11819 retval = entry->execute(ctsio); 11820 11821 bailout: 11822 return (retval); 11823 } 11824 11825 static int 11826 ctl_target_reset(union ctl_io *io) 11827 { 11828 struct ctl_softc *softc = CTL_SOFTC(io); 11829 struct ctl_port *port = CTL_PORT(io); 11830 struct ctl_lun *lun; 11831 uint32_t initidx; 11832 ctl_ua_type ua_type; 11833 11834 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11835 union ctl_ha_msg msg_info; 11836 11837 msg_info.hdr.nexus = io->io_hdr.nexus; 11838 msg_info.task.task_action = io->taskio.task_action; 11839 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11840 msg_info.hdr.original_sc = NULL; 11841 msg_info.hdr.serializing_sc = NULL; 11842 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11843 sizeof(msg_info.task), M_WAITOK); 11844 } 11845 11846 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11847 if (io->taskio.task_action == CTL_TASK_TARGET_RESET) 11848 ua_type = CTL_UA_TARG_RESET; 11849 else 11850 ua_type = CTL_UA_BUS_RESET; 11851 mtx_lock(&softc->ctl_lock); 11852 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11853 if (port != NULL && 11854 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 11855 continue; 11856 ctl_do_lun_reset(lun, initidx, ua_type); 11857 } 11858 mtx_unlock(&softc->ctl_lock); 11859 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11860 return (0); 11861 } 11862 11863 /* 11864 * The LUN should always be set. The I/O is optional, and is used to 11865 * distinguish between I/Os sent by this initiator, and by other 11866 * initiators. We set unit attention for initiators other than this one. 11867 * SAM-3 is vague on this point. It does say that a unit attention should 11868 * be established for other initiators when a LUN is reset (see section 11869 * 5.7.3), but it doesn't specifically say that the unit attention should 11870 * be established for this particular initiator when a LUN is reset. 
Here 11871 * is the relevant text, from SAM-3 rev 8: 11872 * 11873 * 5.7.2 When a SCSI initiator port aborts its own tasks 11874 * 11875 * When a SCSI initiator port causes its own task(s) to be aborted, no 11876 * notification that the task(s) have been aborted shall be returned to 11877 * the SCSI initiator port other than the completion response for the 11878 * command or task management function action that caused the task(s) to 11879 * be aborted and notification(s) associated with related effects of the 11880 * action (e.g., a reset unit attention condition). 11881 * 11882 * XXX KDM for now, we're setting unit attention for all initiators. 11883 */ 11884 static void 11885 ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) 11886 { 11887 struct ctl_io_hdr *xioh; 11888 int i; 11889 11890 mtx_lock(&lun->lun_lock); 11891 /* Abort tasks. */ 11892 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { 11893 xioh->flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11894 ctl_try_unblock_io(lun, (union ctl_io *)xioh, FALSE); 11895 } 11896 /* Clear CA. */ 11897 for (i = 0; i < ctl_max_ports; i++) { 11898 free(lun->pending_sense[i], M_CTL); 11899 lun->pending_sense[i] = NULL; 11900 } 11901 /* Clear reservation. */ 11902 lun->flags &= ~CTL_LUN_RESERVED; 11903 /* Clear prevent media removal. */ 11904 if (lun->prevent) { 11905 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11906 ctl_clear_mask(lun->prevent, i); 11907 lun->prevent_count = 0; 11908 } 11909 /* Clear TPC status */ 11910 ctl_tpc_lun_clear(lun, -1); 11911 /* Establish UA. */ 11912 #if 0 11913 ctl_est_ua_all(lun, initidx, ua_type); 11914 #else 11915 ctl_est_ua_all(lun, -1, ua_type); 11916 #endif 11917 mtx_unlock(&lun->lun_lock); 11918 } 11919 11920 static int 11921 ctl_lun_reset(union ctl_io *io) 11922 { 11923 struct ctl_softc *softc = CTL_SOFTC(io); 11924 struct ctl_lun *lun; 11925 uint32_t targ_lun, initidx; 11926 11927 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11928 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11929 mtx_lock(&softc->ctl_lock); 11930 if (targ_lun >= ctl_max_luns || 11931 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11932 mtx_unlock(&softc->ctl_lock); 11933 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11934 return (1); 11935 } 11936 ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); 11937 mtx_unlock(&softc->ctl_lock); 11938 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11939 11940 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11941 union ctl_ha_msg msg_info; 11942 11943 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11944 msg_info.hdr.nexus = io->io_hdr.nexus; 11945 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11946 msg_info.hdr.original_sc = NULL; 11947 msg_info.hdr.serializing_sc = NULL; 11948 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11949 sizeof(msg_info.task), M_WAITOK); 11950 } 11951 return (0); 11952 } 11953 11954 static void 11955 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11956 int other_sc) 11957 { 11958 struct ctl_io_hdr *xioh; 11959 11960 mtx_assert(&lun->lun_lock, MA_OWNED); 11961 11962 /* 11963 * Run through the OOA queue and attempt to find the given I/O. 11964 * The target port, initiator ID, tag type and tag number have to 11965 * match the values that we got from the initiator. If we have an 11966 * untagged command to abort, simply abort the first untagged command 11967 * we come to. We only allow one untagged command at a time of course. 
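 *
 * (Descriptive note, not in the original comment: callers use UINT32_MAX as a
 * wildcard for targ_port and/or init_id -- e.g. ctl_abort_task_set() passes
 * UINT32_MAX, UINT32_MAX for CLEAR TASK SET, while ctl_i_t_nexus_loss()
 * passes a specific port/initiator pair -- so this one loop serves both
 * targeted and whole-LUN aborts.)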
11968 */ 11969 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { 11970 union ctl_io *xio = (union ctl_io *)xioh; 11971 if ((targ_port == UINT32_MAX || 11972 targ_port == xioh->nexus.targ_port) && 11973 (init_id == UINT32_MAX || 11974 init_id == xioh->nexus.initid)) { 11975 if (targ_port != xioh->nexus.targ_port || 11976 init_id != xioh->nexus.initid) 11977 xioh->flags |= CTL_FLAG_ABORT_STATUS; 11978 xioh->flags |= CTL_FLAG_ABORT; 11979 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11980 union ctl_ha_msg msg_info; 11981 11982 msg_info.hdr.nexus = xioh->nexus; 11983 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11984 msg_info.task.tag_num = xio->scsiio.tag_num; 11985 msg_info.task.tag_type = xio->scsiio.tag_type; 11986 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11987 msg_info.hdr.original_sc = NULL; 11988 msg_info.hdr.serializing_sc = NULL; 11989 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11990 sizeof(msg_info.task), M_NOWAIT); 11991 } 11992 ctl_try_unblock_io(lun, xio, FALSE); 11993 } 11994 } 11995 } 11996 11997 static int 11998 ctl_abort_task_set(union ctl_io *io) 11999 { 12000 struct ctl_softc *softc = CTL_SOFTC(io); 12001 struct ctl_lun *lun; 12002 uint32_t targ_lun; 12003 12004 /* 12005 * Look up the LUN. 12006 */ 12007 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12008 mtx_lock(&softc->ctl_lock); 12009 if (targ_lun >= ctl_max_luns || 12010 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12011 mtx_unlock(&softc->ctl_lock); 12012 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12013 return (1); 12014 } 12015 12016 mtx_lock(&lun->lun_lock); 12017 mtx_unlock(&softc->ctl_lock); 12018 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 12019 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 12020 io->io_hdr.nexus.initid, 12021 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12022 } else { /* CTL_TASK_CLEAR_TASK_SET */ 12023 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 12024 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12025 } 12026 mtx_unlock(&lun->lun_lock); 12027 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12028 return (0); 12029 } 12030 12031 static void 12032 ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, 12033 ctl_ua_type ua_type) 12034 { 12035 struct ctl_lun *lun; 12036 struct scsi_sense_data *ps; 12037 uint32_t p, i; 12038 12039 p = initidx / CTL_MAX_INIT_PER_PORT; 12040 i = initidx % CTL_MAX_INIT_PER_PORT; 12041 mtx_lock(&softc->ctl_lock); 12042 STAILQ_FOREACH(lun, &softc->lun_list, links) { 12043 mtx_lock(&lun->lun_lock); 12044 /* Abort tasks. */ 12045 ctl_abort_tasks_lun(lun, p, i, 1); 12046 /* Clear CA. */ 12047 ps = lun->pending_sense[p]; 12048 if (ps != NULL) 12049 ps[i].error_code = 0; 12050 /* Clear reservation. */ 12051 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 12052 lun->flags &= ~CTL_LUN_RESERVED; 12053 /* Clear prevent media removal. */ 12054 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { 12055 ctl_clear_mask(lun->prevent, initidx); 12056 lun->prevent_count--; 12057 } 12058 /* Clear TPC status */ 12059 ctl_tpc_lun_clear(lun, initidx); 12060 /* Establish UA. 
*/ 12061 ctl_est_ua(lun, initidx, ua_type); 12062 mtx_unlock(&lun->lun_lock); 12063 } 12064 mtx_unlock(&softc->ctl_lock); 12065 } 12066 12067 static int 12068 ctl_i_t_nexus_reset(union ctl_io *io) 12069 { 12070 struct ctl_softc *softc = CTL_SOFTC(io); 12071 uint32_t initidx; 12072 12073 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12074 union ctl_ha_msg msg_info; 12075 12076 msg_info.hdr.nexus = io->io_hdr.nexus; 12077 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 12078 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12079 msg_info.hdr.original_sc = NULL; 12080 msg_info.hdr.serializing_sc = NULL; 12081 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12082 sizeof(msg_info.task), M_WAITOK); 12083 } 12084 12085 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12086 ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); 12087 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12088 return (0); 12089 } 12090 12091 static int 12092 ctl_abort_task(union ctl_io *io) 12093 { 12094 struct ctl_softc *softc = CTL_SOFTC(io); 12095 struct ctl_io_hdr *xioh; 12096 struct ctl_lun *lun; 12097 uint32_t targ_lun; 12098 12099 /* 12100 * Look up the LUN. 12101 */ 12102 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12103 mtx_lock(&softc->ctl_lock); 12104 if (targ_lun >= ctl_max_luns || 12105 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12106 mtx_unlock(&softc->ctl_lock); 12107 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12108 return (1); 12109 } 12110 12111 mtx_lock(&lun->lun_lock); 12112 mtx_unlock(&softc->ctl_lock); 12113 /* 12114 * Run through the OOA queue and attempt to find the given I/O. 12115 * The target port, initiator ID, tag type and tag number have to 12116 * match the values that we got from the initiator. If we have an 12117 * untagged command to abort, simply abort the first untagged command 12118 * we come to. We only allow one untagged command at a time of course. 12119 */ 12120 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { 12121 union ctl_io *xio = (union ctl_io *)xioh; 12122 if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port) 12123 || (xioh->nexus.initid != io->io_hdr.nexus.initid) 12124 || (xioh->flags & CTL_FLAG_ABORT)) 12125 continue; 12126 12127 /* 12128 * If the abort says that the task is untagged, the 12129 * task in the queue must be untagged. Otherwise, 12130 * we just check to see whether the tag numbers 12131 * match. This is because the QLogic firmware 12132 * doesn't pass back the tag type in an abort 12133 * request. 12134 */ 12135 #if 0 12136 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12137 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12138 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 12139 #else 12140 /* 12141 * XXX KDM we've got problems with FC, because it 12142 * doesn't send down a tag type with aborts. So we 12143 * can only really go by the tag number... 12144 * This may cause problems with parallel SCSI. 12145 * Need to figure that out!! 
12146 */ 12147 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12148 #endif 12149 xioh->flags |= CTL_FLAG_ABORT; 12150 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 12151 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12152 union ctl_ha_msg msg_info; 12153 12154 msg_info.hdr.nexus = io->io_hdr.nexus; 12155 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12156 msg_info.task.tag_num = io->taskio.tag_num; 12157 msg_info.task.tag_type = io->taskio.tag_type; 12158 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12159 msg_info.hdr.original_sc = NULL; 12160 msg_info.hdr.serializing_sc = NULL; 12161 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 12162 sizeof(msg_info.task), M_NOWAIT); 12163 } 12164 ctl_try_unblock_io(lun, xio, FALSE); 12165 } 12166 } 12167 mtx_unlock(&lun->lun_lock); 12168 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12169 return (0); 12170 } 12171 12172 static int 12173 ctl_query_task(union ctl_io *io, int task_set) 12174 { 12175 struct ctl_softc *softc = CTL_SOFTC(io); 12176 struct ctl_io_hdr *xioh; 12177 struct ctl_lun *lun; 12178 int found = 0; 12179 uint32_t targ_lun; 12180 12181 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12182 mtx_lock(&softc->ctl_lock); 12183 if (targ_lun >= ctl_max_luns || 12184 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12185 mtx_unlock(&softc->ctl_lock); 12186 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12187 return (1); 12188 } 12189 mtx_lock(&lun->lun_lock); 12190 mtx_unlock(&softc->ctl_lock); 12191 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { 12192 union ctl_io *xio = (union ctl_io *)xioh; 12193 if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port) 12194 || (xioh->nexus.initid != io->io_hdr.nexus.initid) 12195 || (xioh->flags & CTL_FLAG_ABORT)) 12196 continue; 12197 12198 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { 12199 found = 1; 12200 break; 12201 } 12202 } 12203 mtx_unlock(&lun->lun_lock); 12204 if (found) 12205 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12206 else 12207 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12208 return (0); 12209 } 12210 12211 static int 12212 ctl_query_async_event(union ctl_io *io) 12213 { 12214 struct ctl_softc *softc = CTL_SOFTC(io); 12215 struct ctl_lun *lun; 12216 ctl_ua_type ua; 12217 uint32_t targ_lun, initidx; 12218 12219 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12220 mtx_lock(&softc->ctl_lock); 12221 if (targ_lun >= ctl_max_luns || 12222 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12223 mtx_unlock(&softc->ctl_lock); 12224 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 12225 return (1); 12226 } 12227 mtx_lock(&lun->lun_lock); 12228 mtx_unlock(&softc->ctl_lock); 12229 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12230 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 12231 mtx_unlock(&lun->lun_lock); 12232 if (ua != CTL_UA_NONE) 12233 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 12234 else 12235 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 12236 return (0); 12237 } 12238 12239 static void 12240 ctl_run_task(union ctl_io *io) 12241 { 12242 int retval = 1; 12243 12244 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12245 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12246 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); 12247 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 12248 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 12249 switch (io->taskio.task_action) { 12250 case CTL_TASK_ABORT_TASK: 12251 retval = ctl_abort_task(io); 12252 break; 12253 case CTL_TASK_ABORT_TASK_SET: 12254 case 
CTL_TASK_CLEAR_TASK_SET: 12255 retval = ctl_abort_task_set(io); 12256 break; 12257 case CTL_TASK_CLEAR_ACA: 12258 break; 12259 case CTL_TASK_I_T_NEXUS_RESET: 12260 retval = ctl_i_t_nexus_reset(io); 12261 break; 12262 case CTL_TASK_LUN_RESET: 12263 retval = ctl_lun_reset(io); 12264 break; 12265 case CTL_TASK_TARGET_RESET: 12266 case CTL_TASK_BUS_RESET: 12267 retval = ctl_target_reset(io); 12268 break; 12269 case CTL_TASK_PORT_LOGIN: 12270 break; 12271 case CTL_TASK_PORT_LOGOUT: 12272 break; 12273 case CTL_TASK_QUERY_TASK: 12274 retval = ctl_query_task(io, 0); 12275 break; 12276 case CTL_TASK_QUERY_TASK_SET: 12277 retval = ctl_query_task(io, 1); 12278 break; 12279 case CTL_TASK_QUERY_ASYNC_EVENT: 12280 retval = ctl_query_async_event(io); 12281 break; 12282 default: 12283 printf("%s: got unknown task management event %d\n", 12284 __func__, io->taskio.task_action); 12285 break; 12286 } 12287 if (retval == 0) 12288 io->io_hdr.status = CTL_SUCCESS; 12289 else 12290 io->io_hdr.status = CTL_ERROR; 12291 ctl_done(io); 12292 } 12293 12294 /* 12295 * For HA operation. Handle commands that come in from the other 12296 * controller. 12297 */ 12298 static void 12299 ctl_handle_isc(union ctl_io *io) 12300 { 12301 struct ctl_softc *softc = CTL_SOFTC(io); 12302 struct ctl_lun *lun; 12303 const struct ctl_cmd_entry *entry; 12304 uint32_t targ_lun; 12305 12306 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12307 switch (io->io_hdr.msg_type) { 12308 case CTL_MSG_SERIALIZE: 12309 ctl_serialize_other_sc_cmd(&io->scsiio); 12310 break; 12311 case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ 12312 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12313 if (targ_lun >= ctl_max_luns || 12314 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12315 ctl_done(io); 12316 break; 12317 } 12318 mtx_lock(&lun->lun_lock); 12319 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { 12320 mtx_unlock(&lun->lun_lock); 12321 ctl_done(io); 12322 break; 12323 } 12324 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12325 mtx_unlock(&lun->lun_lock); 12326 ctl_enqueue_rtr(io); 12327 break; 12328 case CTL_MSG_FINISH_IO: 12329 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12330 ctl_done(io); 12331 break; 12332 } 12333 if (targ_lun >= ctl_max_luns || 12334 (lun = softc->ctl_luns[targ_lun]) == NULL) { 12335 ctl_free_io(io); 12336 break; 12337 } 12338 mtx_lock(&lun->lun_lock); 12339 ctl_try_unblock_others(lun, io, TRUE); 12340 LIST_REMOVE(&io->io_hdr, ooa_links); 12341 mtx_unlock(&lun->lun_lock); 12342 ctl_free_io(io); 12343 break; 12344 case CTL_MSG_PERS_ACTION: 12345 ctl_hndl_per_res_out_on_other_sc(io); 12346 ctl_free_io(io); 12347 break; 12348 case CTL_MSG_BAD_JUJU: 12349 ctl_done(io); 12350 break; 12351 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ 12352 ctl_datamove_remote(io); 12353 break; 12354 case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ 12355 ctl_datamove_done(io, false); 12356 break; 12357 case CTL_MSG_FAILOVER: 12358 ctl_failover_lun(io); 12359 ctl_free_io(io); 12360 break; 12361 default: 12362 printf("%s: Invalid message type %d\n", 12363 __func__, io->io_hdr.msg_type); 12364 ctl_free_io(io); 12365 break; 12366 } 12367 12368 } 12369 12370 /* 12371 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12372 * there is no match. 
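 *
 * (Illustrative example, not in the original comment: a descriptor whose
 * error_pattern is CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE should match only
 * write-class commands whose LBA range overlaps desc->lba_range; the
 * CTL_LUN_PAT_RANGE check below is what filters out non-overlapping I/Os.)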
12373 */ 12374 static ctl_lun_error_pattern 12375 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12376 { 12377 const struct ctl_cmd_entry *entry; 12378 ctl_lun_error_pattern filtered_pattern, pattern; 12379 12380 pattern = desc->error_pattern; 12381 12382 /* 12383 * XXX KDM we need more data passed into this function to match a 12384 * custom pattern, and we actually need to implement custom pattern 12385 * matching. 12386 */ 12387 if (pattern & CTL_LUN_PAT_CMD) 12388 return (CTL_LUN_PAT_CMD); 12389 12390 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12391 return (CTL_LUN_PAT_ANY); 12392 12393 entry = ctl_get_cmd_entry(ctsio, NULL); 12394 12395 filtered_pattern = entry->pattern & pattern; 12396 12397 /* 12398 * If the user requested specific flags in the pattern (e.g. 12399 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12400 * flags. 12401 * 12402 * If the user did not specify any flags, it doesn't matter whether 12403 * or not the command supports the flags. 12404 */ 12405 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12406 (pattern & ~CTL_LUN_PAT_MASK)) 12407 return (CTL_LUN_PAT_NONE); 12408 12409 /* 12410 * If the user asked for a range check, see if the requested LBA 12411 * range overlaps with this command's LBA range. 12412 */ 12413 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12414 uint64_t lba1; 12415 uint64_t len1; 12416 ctl_action action; 12417 int retval; 12418 12419 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12420 if (retval != 0) 12421 return (CTL_LUN_PAT_NONE); 12422 12423 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12424 desc->lba_range.len, FALSE); 12425 /* 12426 * A "pass" means that the LBA ranges don't overlap, so 12427 * this doesn't match the user's range criteria. 12428 */ 12429 if (action == CTL_ACTION_PASS) 12430 return (CTL_LUN_PAT_NONE); 12431 } 12432 12433 return (filtered_pattern); 12434 } 12435 12436 static void 12437 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12438 { 12439 struct ctl_error_desc *desc, *desc2; 12440 12441 mtx_assert(&lun->lun_lock, MA_OWNED); 12442 12443 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12444 ctl_lun_error_pattern pattern; 12445 /* 12446 * Check to see whether this particular command matches 12447 * the pattern in the descriptor. 12448 */ 12449 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12450 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12451 continue; 12452 12453 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12454 case CTL_LUN_INJ_ABORTED: 12455 ctl_set_aborted(&io->scsiio); 12456 break; 12457 case CTL_LUN_INJ_MEDIUM_ERR: 12458 ctl_set_medium_error(&io->scsiio, 12459 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12460 CTL_FLAG_DATA_OUT); 12461 break; 12462 case CTL_LUN_INJ_UA: 12463 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12464 * OCCURRED */ 12465 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12466 break; 12467 case CTL_LUN_INJ_CUSTOM: 12468 /* 12469 * We're assuming the user knows what he is doing. 12470 * Just copy the sense information without doing 12471 * checks. 
12472 */ 12473 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12474 MIN(sizeof(desc->custom_sense), 12475 sizeof(io->scsiio.sense_data))); 12476 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12477 io->scsiio.sense_len = SSD_FULL_SIZE; 12478 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12479 break; 12480 case CTL_LUN_INJ_NONE: 12481 default: 12482 /* 12483 * If this is an error injection type we don't know 12484 * about, clear the continuous flag (if it is set) 12485 * so it will get deleted below. 12486 */ 12487 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12488 break; 12489 } 12490 /* 12491 * By default, each error injection action is a one-shot 12492 */ 12493 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12494 continue; 12495 12496 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12497 12498 free(desc, M_CTL); 12499 } 12500 } 12501 12502 #ifdef CTL_IO_DELAY 12503 static void 12504 ctl_datamove_timer_wakeup(void *arg) 12505 { 12506 union ctl_io *io; 12507 12508 io = (union ctl_io *)arg; 12509 12510 ctl_datamove(io); 12511 } 12512 #endif /* CTL_IO_DELAY */ 12513 12514 static void 12515 ctl_datamove_done_process(union ctl_io *io) 12516 { 12517 #ifdef CTL_TIME_IO 12518 struct bintime cur_bt; 12519 #endif 12520 12521 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 12522 ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); 12523 12524 #ifdef CTL_TIME_IO 12525 getbinuptime(&cur_bt); 12526 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12527 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12528 #endif 12529 io->io_hdr.num_dmas++; 12530 12531 if ((io->io_hdr.port_status != 0) && 12532 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 12533 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 12534 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 12535 /*retry_count*/ io->io_hdr.port_status); 12536 } else if (io->scsiio.kern_data_resid != 0 && 12537 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 12538 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 12539 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 12540 ctl_set_invalid_field_ciu(&io->scsiio); 12541 } else if (ctl_debug & CTL_DEBUG_CDB_DATA) 12542 ctl_data_print(io); 12543 } 12544 12545 void 12546 ctl_datamove_done(union ctl_io *io, bool samethr) 12547 { 12548 12549 ctl_datamove_done_process(io); 12550 io->scsiio.be_move_done(io, samethr); 12551 } 12552 12553 void 12554 ctl_datamove(union ctl_io *io) 12555 { 12556 void (*fe_datamove)(union ctl_io *io); 12557 12558 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12559 12560 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12561 12562 /* No data transferred yet. Frontend must update this when done. 
*/ 12563 io->scsiio.kern_data_resid = io->scsiio.kern_data_len; 12564 12565 #ifdef CTL_TIME_IO 12566 getbinuptime(&io->io_hdr.dma_start_bt); 12567 #endif /* CTL_TIME_IO */ 12568 12569 #ifdef CTL_IO_DELAY 12570 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12571 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12572 } else { 12573 struct ctl_lun *lun; 12574 12575 lun = CTL_LUN(io); 12576 if ((lun != NULL) 12577 && (lun->delay_info.datamove_delay > 0)) { 12578 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12579 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12580 callout_reset(&io->io_hdr.delay_callout, 12581 lun->delay_info.datamove_delay * hz, 12582 ctl_datamove_timer_wakeup, io); 12583 if (lun->delay_info.datamove_type == 12584 CTL_DELAY_TYPE_ONESHOT) 12585 lun->delay_info.datamove_delay = 0; 12586 return; 12587 } 12588 } 12589 #endif 12590 12591 /* 12592 * This command has been aborted. Set the port status, so we fail 12593 * the data move. 12594 */ 12595 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12596 printf("ctl_datamove: tag 0x%jx on (%u:%u:%u) aborted\n", 12597 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12598 io->io_hdr.nexus.targ_port, 12599 io->io_hdr.nexus.targ_lun); 12600 io->io_hdr.port_status = 31337; 12601 ctl_datamove_done_process(io); 12602 io->scsiio.be_move_done(io, true); 12603 return; 12604 } 12605 12606 /* Don't confuse frontend with zero length data move. */ 12607 if (io->scsiio.kern_data_len == 0) { 12608 ctl_datamove_done_process(io); 12609 io->scsiio.be_move_done(io, true); 12610 return; 12611 } 12612 12613 fe_datamove = CTL_PORT(io)->fe_datamove; 12614 fe_datamove(io); 12615 } 12616 12617 static void 12618 ctl_send_datamove_done(union ctl_io *io, int have_lock) 12619 { 12620 union ctl_ha_msg msg; 12621 #ifdef CTL_TIME_IO 12622 struct bintime cur_bt; 12623 #endif 12624 12625 memset(&msg, 0, sizeof(msg)); 12626 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12627 msg.hdr.original_sc = io; 12628 msg.hdr.serializing_sc = io->io_hdr.remote_io; 12629 msg.hdr.nexus = io->io_hdr.nexus; 12630 msg.hdr.status = io->io_hdr.status; 12631 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; 12632 msg.scsi.tag_num = io->scsiio.tag_num; 12633 msg.scsi.tag_type = io->scsiio.tag_type; 12634 msg.scsi.scsi_status = io->scsiio.scsi_status; 12635 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12636 io->scsiio.sense_len); 12637 msg.scsi.sense_len = io->scsiio.sense_len; 12638 msg.scsi.port_status = io->io_hdr.port_status; 12639 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12640 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12641 ctl_failover_io(io, /*have_lock*/ have_lock); 12642 return; 12643 } 12644 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12645 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12646 msg.scsi.sense_len, M_WAITOK); 12647 12648 #ifdef CTL_TIME_IO 12649 getbinuptime(&cur_bt); 12650 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 12651 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 12652 #endif 12653 io->io_hdr.num_dmas++; 12654 } 12655 12656 /* 12657 * The DMA to the remote side is done, now we need to tell the other side 12658 * we're done so it can continue with its data movement. 
12659 */ 12660 static void 12661 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12662 { 12663 union ctl_io *io; 12664 uint32_t i; 12665 12666 io = rq->context; 12667 12668 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12669 printf("%s: ISC DMA write failed with error %d", __func__, 12670 rq->ret); 12671 ctl_set_internal_failure(&io->scsiio, 12672 /*sks_valid*/ 1, 12673 /*retry_count*/ rq->ret); 12674 } 12675 12676 ctl_dt_req_free(rq); 12677 12678 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12679 free(CTL_LSGLT(io)[i].addr, M_CTL); 12680 free(CTL_RSGL(io), M_CTL); 12681 CTL_RSGL(io) = NULL; 12682 CTL_LSGL(io) = NULL; 12683 12684 /* 12685 * The data is in local and remote memory, so now we need to send 12686 * status (good or back) back to the other side. 12687 */ 12688 ctl_send_datamove_done(io, /*have_lock*/ 0); 12689 } 12690 12691 /* 12692 * We've moved the data from the host/controller into local memory. Now we 12693 * need to push it over to the remote controller's memory. 12694 */ 12695 static int 12696 ctl_datamove_remote_dm_write_cb(union ctl_io *io, bool samethr) 12697 { 12698 int retval; 12699 12700 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12701 ctl_datamove_remote_write_cb); 12702 return (retval); 12703 } 12704 12705 static void 12706 ctl_datamove_remote_write(union ctl_io *io) 12707 { 12708 int retval; 12709 void (*fe_datamove)(union ctl_io *io); 12710 12711 /* 12712 * - Get the data from the host/HBA into local memory. 12713 * - DMA memory from the local controller to the remote controller. 12714 * - Send status back to the remote controller. 12715 */ 12716 12717 retval = ctl_datamove_remote_sgl_setup(io); 12718 if (retval != 0) 12719 return; 12720 12721 /* Switch the pointer over so the FETD knows what to do */ 12722 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); 12723 12724 /* 12725 * Use a custom move done callback, since we need to send completion 12726 * back to the other controller, not to the backend on this side. 12727 */ 12728 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12729 12730 fe_datamove = CTL_PORT(io)->fe_datamove; 12731 fe_datamove(io); 12732 } 12733 12734 static int 12735 ctl_datamove_remote_dm_read_cb(union ctl_io *io, bool samethr) 12736 { 12737 uint32_t i; 12738 12739 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12740 free(CTL_LSGLT(io)[i].addr, M_CTL); 12741 free(CTL_RSGL(io), M_CTL); 12742 CTL_RSGL(io) = NULL; 12743 CTL_LSGL(io) = NULL; 12744 12745 /* 12746 * The read is done, now we need to send status (good or bad) back 12747 * to the other side. 12748 */ 12749 ctl_send_datamove_done(io, /*have_lock*/ 0); 12750 12751 return (0); 12752 } 12753 12754 static void 12755 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12756 { 12757 union ctl_io *io; 12758 void (*fe_datamove)(union ctl_io *io); 12759 12760 io = rq->context; 12761 12762 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12763 printf("%s: ISC DMA read failed with error %d\n", __func__, 12764 rq->ret); 12765 ctl_set_internal_failure(&io->scsiio, 12766 /*sks_valid*/ 1, 12767 /*retry_count*/ rq->ret); 12768 } 12769 12770 ctl_dt_req_free(rq); 12771 12772 /* Switch the pointer over so the FETD knows what to do */ 12773 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); 12774 12775 /* 12776 * Use a custom move done callback, since we need to send completion 12777 * back to the other controller, not to the backend on this side. 
12778 */ 12779 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12780 12781 /* XXX KDM add checks like the ones in ctl_datamove? */ 12782 12783 fe_datamove = CTL_PORT(io)->fe_datamove; 12784 fe_datamove(io); 12785 } 12786 12787 static int 12788 ctl_datamove_remote_sgl_setup(union ctl_io *io) 12789 { 12790 struct ctl_sg_entry *local_sglist; 12791 uint32_t len_to_go; 12792 int retval; 12793 int i; 12794 12795 retval = 0; 12796 local_sglist = CTL_LSGL(io); 12797 len_to_go = io->scsiio.kern_data_len; 12798 12799 /* 12800 * The difficult thing here is that the size of the various 12801 * S/G segments may be different than the size from the 12802 * remote controller. That'll make it harder when DMAing 12803 * the data back to the other side. 12804 */ 12805 for (i = 0; len_to_go > 0; i++) { 12806 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12807 local_sglist[i].addr = 12808 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12809 12810 len_to_go -= local_sglist[i].len; 12811 } 12812 /* 12813 * Reset the number of S/G entries accordingly. The original 12814 * number of S/G entries is available in rem_sg_entries. 12815 */ 12816 io->scsiio.kern_sg_entries = i; 12817 12818 return (retval); 12819 } 12820 12821 static int 12822 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12823 ctl_ha_dt_cb callback) 12824 { 12825 struct ctl_ha_dt_req *rq; 12826 struct ctl_sg_entry *remote_sglist, *local_sglist; 12827 uint32_t local_used, remote_used, total_used; 12828 int i, j, isc_ret; 12829 12830 rq = ctl_dt_req_alloc(); 12831 12832 /* 12833 * If we failed to allocate the request, and if the DMA didn't fail 12834 * anyway, set busy status. This is just a resource allocation 12835 * failure. 12836 */ 12837 if ((rq == NULL) 12838 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12839 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12840 ctl_set_busy(&io->scsiio); 12841 12842 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12843 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12844 if (rq != NULL) 12845 ctl_dt_req_free(rq); 12846 12847 /* 12848 * The data move failed. We need to return status back 12849 * to the other controller. No point in trying to DMA 12850 * data to the remote controller. 12851 */ 12852 12853 ctl_send_datamove_done(io, /*have_lock*/ 0); 12854 12855 return (1); 12856 } 12857 12858 local_sglist = CTL_LSGL(io); 12859 remote_sglist = CTL_RSGL(io); 12860 local_used = 0; 12861 remote_used = 0; 12862 total_used = 0; 12863 12864 /* 12865 * Pull/push the data over the wire from/to the other controller. 12866 * This takes into account the possibility that the local and 12867 * remote sglists may not be identical in terms of the size of 12868 * the elements and the number of elements. 12869 * 12870 * One fundamental assumption here is that the length allocated for 12871 * both the local and remote sglists is identical. Otherwise, we've 12872 * essentially got a coding error of some sort. 12873 */ 12874 isc_ret = CTL_HA_STATUS_SUCCESS; 12875 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12876 uint32_t cur_len; 12877 uint8_t *tmp_ptr; 12878 12879 rq->command = command; 12880 rq->context = io; 12881 12882 /* 12883 * Both pointers should be aligned. But it is possible 12884 * that the allocation length is not. They should both 12885 * also have enough slack left over at the end, though, 12886 * to round up to the next 8 byte boundary. 
12887 */ 12888 cur_len = MIN(local_sglist[i].len - local_used, 12889 remote_sglist[j].len - remote_used); 12890 rq->size = cur_len; 12891 12892 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12893 tmp_ptr += local_used; 12894 12895 #if 0 12896 /* Use physical addresses when talking to ISC hardware */ 12897 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12898 /* XXX KDM use busdma */ 12899 rq->local = vtophys(tmp_ptr); 12900 } else 12901 rq->local = tmp_ptr; 12902 #else 12903 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12904 ("HA does not support BUS_ADDR")); 12905 rq->local = tmp_ptr; 12906 #endif 12907 12908 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12909 tmp_ptr += remote_used; 12910 rq->remote = tmp_ptr; 12911 12912 rq->callback = NULL; 12913 12914 local_used += cur_len; 12915 if (local_used >= local_sglist[i].len) { 12916 i++; 12917 local_used = 0; 12918 } 12919 12920 remote_used += cur_len; 12921 if (remote_used >= remote_sglist[j].len) { 12922 j++; 12923 remote_used = 0; 12924 } 12925 total_used += cur_len; 12926 12927 if (total_used >= io->scsiio.kern_data_len) 12928 rq->callback = callback; 12929 12930 isc_ret = ctl_dt_single(rq); 12931 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12932 break; 12933 } 12934 if (isc_ret != CTL_HA_STATUS_WAIT) { 12935 rq->ret = isc_ret; 12936 callback(rq); 12937 } 12938 12939 return (0); 12940 } 12941 12942 static void 12943 ctl_datamove_remote_read(union ctl_io *io) 12944 { 12945 int retval; 12946 uint32_t i; 12947 12948 /* 12949 * This will send an error to the other controller in the case of a 12950 * failure. 12951 */ 12952 retval = ctl_datamove_remote_sgl_setup(io); 12953 if (retval != 0) 12954 return; 12955 12956 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12957 ctl_datamove_remote_read_cb); 12958 if (retval != 0) { 12959 /* 12960 * Make sure we free memory if there was an error.. The 12961 * ctl_datamove_remote_xfer() function will send the 12962 * datamove done message, or call the callback with an 12963 * error if there is a problem. 12964 */ 12965 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12966 free(CTL_LSGLT(io)[i].addr, M_CTL); 12967 free(CTL_RSGL(io), M_CTL); 12968 CTL_RSGL(io) = NULL; 12969 CTL_LSGL(io) = NULL; 12970 } 12971 } 12972 12973 /* 12974 * Process a datamove request from the other controller. This is used for 12975 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12976 * first. Once that is complete, the data gets DMAed into the remote 12977 * controller's memory. For reads, we DMA from the remote controller's 12978 * memory into our memory first, and then move it out to the FETD. 12979 */ 12980 static void 12981 ctl_datamove_remote(union ctl_io *io) 12982 { 12983 12984 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); 12985 12986 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12987 ctl_failover_io(io, /*have_lock*/ 0); 12988 return; 12989 } 12990 12991 /* 12992 * Note that we look for an aborted I/O here, but don't do some of 12993 * the other checks that ctl_datamove() normally does. 12994 * We don't need to run the datamove delay code, since that should 12995 * have been done if need be on the other controller. 
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		printf("%s: tag 0x%jx on (%u:%u:%u) aborted\n", __func__,
		    io->scsiio.tag_num, io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun);
		io->io_hdr.port_status = 31338;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
		return;
	}

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
		ctl_datamove_remote_write(io);
	else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		ctl_datamove_remote_read(io);
	else {
		io->io_hdr.port_status = 31339;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
	}
}

static void
ctl_process_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun = CTL_LUN(io);
	void (*fe_done)(union ctl_io *io);
	union ctl_ha_msg msg;

	CTL_DEBUG_PRINT(("ctl_process_done\n"));
	fe_done = port->fe_done;

#ifdef CTL_TIME_IO
	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
		char str[256];
		char path_str[64];
		struct sbuf sb;

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

		ctl_io_sbuf(io, &sb);
		sbuf_cat(&sb, path_str);
		sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
		    (intmax_t)time_uptime - io->io_hdr.start_time);
		sbuf_finish(&sb);
		printf("%s", sbuf_data(&sb));
	}
#endif /* CTL_TIME_IO */

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		break;
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_INFO)
			ctl_io_error_print(io, NULL);
		fe_done(io);
		return;
	default:
		panic("%s: Invalid CTL I/O type %d\n",
		    __func__, io->io_hdr.io_type);
	}

	if (lun == NULL) {
		CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
		    io->io_hdr.nexus.targ_mapped_lun));
		goto bailout;
	}

	mtx_lock(&lun->lun_lock);

	/*
	 * Check to see if we have any informational exception to report,
	 * and whether the status of this command can be modified to report
	 * it in the form of either RECOVERED ERROR or NO SENSE, depending
	 * on the MRIE mode page field.
	 */
	if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
	    io->io_hdr.status == CTL_SUCCESS &&
	    (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
		uint8_t mrie = lun->MODE_IE.mrie;
		uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
		    (lun->MODE_VER.byte3 & SMS_VER_PER));
		if (((mrie == SIEP_MRIE_REC_COND && per) ||
		     mrie == SIEP_MRIE_REC_UNCOND ||
		     mrie == SIEP_MRIE_NO_SENSE) &&
		    (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
		     CTL_CMD_FLAG_NO_SENSE) == 0) {
			ctl_set_sense(&io->scsiio,
			    /*current_error*/ 1,
			    /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
			    SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
			    /*asc*/ lun->ie_asc,
			    /*ascq*/ lun->ie_ascq,
			    SSD_ELEM_NONE);
			lun->ie_reported = 1;
		}
	} else if (lun->ie_reported < 0)
		lun->ie_reported = 0;

	/*
	 * Check to see if we have any errors to inject here. We only
	 * inject errors for commands that don't already have errors set.
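	 * The per-LUN error_list is normally populated through CTL's error
	 * injection ioctl interface; when it is empty this check is a cheap
	 * no-op.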
	 */
	if (!STAILQ_EMPTY(&lun->error_list) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
		int type;
#ifdef CTL_TIME_IO
		struct bintime bt;

		getbinuptime(&bt);
		bintime_sub(&bt, &io->io_hdr.start_bt);
#endif
		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.bytes[type] += io->scsiio.kern_total_len;
		lun->stats.operations[type]++;
		lun->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&lun->stats.time[type], &bt);
#endif

		mtx_lock(&port->port_lock);
		port->stats.bytes[type] += io->scsiio.kern_total_len;
		port->stats.operations[type]++;
		port->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&port->stats.time[type], &bt);
#endif
		mtx_unlock(&port->port_lock);
	}

	/*
	 * Run through the blocked queue of this I/O and see if anything
	 * can be unblocked, now that this I/O is done and will be removed.
	 * We need to do this before removal so that we still have this
	 * I/O's OOA position to start from.
	 */
	ctl_try_unblock_others(lun, io, TRUE);

	/*
	 * Remove this from the OOA queue.
	 */
	LIST_REMOVE(&io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (LIST_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && LIST_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		ctl_free_lun(lun);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly. The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command. Note that only SCSI commands get to this point. Task
	 * management commands are completed above.
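	 *
	 * In the non-XFER HA modes, the CTL_MSG_FINISH_IO message tells the
	 * peer controller that it can drop the state it kept for serializing
	 * this I/O (see msg.hdr.serializing_sc below).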
	 */
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.remote_io;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}

	fe_done(io);
}

/*
 * Front end should call this if it doesn't do autosense. When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun;
	struct scsi_sense_data *ps;
	uint32_t initidx, p, targ_lun;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially). That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled. We
	 * can't deal with that right now.
	 * If we don't have a LUN for this, just toss the sense information.
	 */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		goto bailout;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	p = initidx / CTL_MAX_INIT_PER_PORT;
	if (lun->pending_sense[p] == NULL) {
		lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT,
		    M_CTL, M_NOWAIT | M_ZERO);
	}
	if ((ps = lun->pending_sense[p]) != NULL) {
		ps += initidx % CTL_MAX_INIT_PER_PORT;
		memset(ps, 0, sizeof(*ps));
		memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len);
	}
	mtx_unlock(&lun->lun_lock);

bailout:
	ctl_free_io(io);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Primary command inlet from frontend ports. All SCSI and task I/O
 * requests must go through this function.
 */
int
ctl_queue(union ctl_io *io)
{
	struct ctl_port *port = CTL_PORT(io);

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/*
	 * Map FE-specific LUN ID into global one.
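	 * Frontend ports may present their own (per-port) LUN numbering;
	 * targ_mapped_lun holds the CTL-wide LUN number that subsequent
	 * internal lookups use.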
	 */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_enqueue_incoming(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

int
ctl_run(union ctl_io *io)
{
	struct ctl_port *port = CTL_PORT(io);

	CTL_DEBUG_PRINT(("ctl_run cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_scsiio_precheck(&io->scsiio);
		break;
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_run_task(io);
		break;
	default:
		printf("ctl_run: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_IO_DELAY
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

void
ctl_serseq_done(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);

	/* This is racy, but should not be a problem. */
	if (!TAILQ_EMPTY(&io->io_hdr.blocked_queue)) {
		mtx_lock(&lun->lun_lock);
		io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
		ctl_try_unblock_others(lun, io, FALSE);
		mtx_unlock(&lun->lun_lock);
	} else
		io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
}

void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		    "%u:%u:%u tag 0x%04x "
		    "flag %#x status %x\n",
		    __func__,
		    io->io_hdr.io_type,
		    io->io_hdr.msg_type,
		    io->scsiio.cdb[0],
		    io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun,
		    (io->io_hdr.io_type == CTL_IO_TASK) ?
		    io->taskio.tag_num : io->scsiio.tag_num,
		    io->io_hdr.flags,
		    io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun = CTL_LUN(io);

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {
			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
			    lun->delay_info.done_delay * hz,
			    ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
	thread_lock(curthread);
	sched_prio(curthread, PUSER - 1);
	thread_unlock(curthread);

	while (!softc->shutdown) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - incoming queue
		 * - RtR queue
		 *
		 * If all of those queues are empty, we fall through and go
		 * to sleep until new work arrives.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(&io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/*
		 * Sleep until we have something to do.
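		 *
		 * PDROP releases queue_lock when we go to sleep and leaves
		 * it unlocked on wakeup; the top of the loop re-acquires it.
		 * The ctl_enqueue_*() functions wakeup(thr) after inserting
		 * new work.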
		 */
		mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0);
	}
	thr->thread = NULL;
	kthread_exit();
}

static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
	thread_lock(curthread);
	sched_prio(curthread, PUSER - 1);
	thread_unlock(curthread);

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/*
				 * Send msg to other side.
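				 *
				 * ctl_lock is dropped around the send because
				 * ctl_ha_msg_send() with M_WAITOK may sleep.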
				 */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
		    PDROP, "-", CTL_LBP_PERIOD * hz);
	}
	softc->thresh_thread = NULL;
	kthread_exit();
}

static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	    io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */